From b9ddd22e829a83a8e0395c9553c5facb6bfb07b0 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 20 Oct 2016 11:59:26 -0700 Subject: [PATCH 001/255] Added UMOUNT tag to suspend cleanup --- .../src/python/onl/install/ShellApp.py | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index c0ff2fa2..19cfaf6f 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -112,11 +112,16 @@ class Onie(AppBase): PROG = "onie-shell" + UNMOUNT = True + # default, unmount directories once the initrd is extracted + def run(self): self.pm = ProcMountsParser() self.blkid = BlkidParser(log=self.log.getChild("blkid")) self.mtd = ProcMtdParser(log=self.log.getChild("mtd")) + self.dctx = None + self.onieDir = None def _g(d): pat = os.path.join(d, "onie/initrd.img*") @@ -134,22 +139,28 @@ class Onie(AppBase): parts = [p for p in self.pm.mounts if p.device == dev] if parts: - onieDir = parts[0] - self.log.debug("found ONIE boot mounted at %s", onieDir) - initrd = _g(onieDir) + self.log.debug("found ONIE boot mounted at %s", parts[0].dir) + initrd = _g(parts[0].dir) if initrd is None: - self.log.warn("cannot find ONIE initrd on %s", onieDir) + self.log.warn("cannot find ONIE initrd on %s", parts[0].dir) else: + self.onieDir = parts[0].dir self.log.debug("found ONIE initrd at %s", initrd) - return _runInitrdShell(initrd) + return self._runInitrdShell(initrd) - with MountContext(dev, log=self.log) as ctx: - initrd = _g(ctx.dir) + # else, try to mount the directory containing the initrd + with MountContext(dev, log=self.log) as self.dctx: + initrd = _g(self.dctx.dir) if initrd is None: self.log.warn("cannot find ONIE initrd on %s", dev) else: self.log.debug("found ONIE initrd at %s", initrd) - return self._runInitrdShell(initrd) + try: + return self._runInitrdShell(initrd) + finally: + if not self.UMOUNT: + self.onieDir = self.dctx.hostDir + self.dctx.detach() self.log.warn("cannot find an ONIE initrd") return 1 @@ -163,6 +174,7 @@ class Onie(AppBase): self.log.debug("cannot find ONIE initrd on %s (%s)", part.device, part.dir) else: + self.onieDir = part.dir self.log.debug("found ONIE initrd at %s", initrd) return self._runInitrdShell(initrd) From 759b002e4ddf03345cd69a8d1e2f4082d96812c0 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Thu, 20 Oct 2016 11:59:45 -0700 Subject: [PATCH 002/255] Added attach/detach support --- .../src/python/onl/install/InstallUtils.py | 33 ++++++++++++++++--- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index 830e042e..386f318a 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -219,6 +219,9 @@ class MountContext(SubprocessMixin): if not self.device and not self.label: raise ValueError("no device or label specified") + self._detachMounted = False + self._detachHostDir = None + def __enter__(self): dev = self.device if dev is None: @@ -245,7 +248,7 @@ class MountContext(SubprocessMixin): self.mounted = True return self - def __exit__(self, type, value, tb): + def shutdown(self): mounted = False if self.mounted: @@ -263,8 +266,18 @@ class MountContext(SubprocessMixin): if self.hostDir is not None: self.rmdir(self.hostDir) + def __exit__(self, type, value, tb): + self.shutdown() return False + def detach(self): + self.mounted, self._detachMounted = False, self.mounted + self.hostDir, self._detachHostdir = None, self.hostDir + + def attach(self): + self.mounted = self._detachMounted + self.hostDir = self._detachHostdir + class BlkidEntry: def __init__(self, device, **kwargs): @@ -665,6 +678,8 @@ class InitrdContext(SubprocessMixin): self.ilog.setLevel(logging.INFO) self.log = self.hlog + self._detachInitrd = None + def _unpack(self): self.dir = self.mkdtemp(prefix="chroot-", suffix=".d") @@ -783,7 +798,7 @@ class InitrdContext(SubprocessMixin): return self - def __exit__(self, type, value, tb): + def shutdown(self): p = ProcMountsParser() dirs = [e.dir for e in p.mounts if e.dir.startswith(self.dir)] @@ -803,13 +818,21 @@ class InitrdContext(SubprocessMixin): else: self.log.debug("saving chroot in %s", self.dir) + def __exit__(self, type, value, tb): + self.shutdown() return False + def detach(self): + self.initrd, self._detachInitrd = None, self.initrd + + def attach(self): + self.initrd = self._detachInitrd + @classmethod - def mkChroot(self, initrd, log=None): - with InitrdContext(initrd=initrd, log=log) as ctx: + def mkChroot(cls, initrd, log=None): + with cls(initrd=initrd, log=log) as ctx: initrdDir = ctx.dir - ctx.initrd = None + ctx.detach() # save the unpacked directory, do not clean it up # (it's inside this chroot anyway) return initrdDir From d129388145f77045314b238e38ecffd6ba36ba9d Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Thu, 20 Oct 2016 12:00:48 -0700 Subject: [PATCH 003/255] Refactor to use OnieHelper --- .../src/python/onl/install/App.py | 76 ++++++++++++++----- 1 file changed, 59 insertions(+), 17 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py index fec1128e..5ab00ed7 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py @@ -17,9 +17,32 @@ import time from InstallUtils import InitrdContext from InstallUtils import SubprocessMixin from InstallUtils import ProcMountsParser +from ShellApp import Onie import ConfUtils, BaseInstall -class App(SubprocessMixin): +class OnieHelper(Onie): + """Unpack the initrd, but keep it around.""" + + UMOUNT = False + # leave self.onieDir mounted + + ictx = None + + def _runInitrdShell(self, initrd): + with InitrdContext(initrd, log=self.log) as self.ictx: + self.initrdDir = self.ictx.dir + self.ictx.detach() + + def shutdown(self): + + self.ictx.attach() + self.ictx.shutdown() + + if self.dctx is not None: + self.dctx.attach() + self.dctx.shutdown() + +class App(SubprocessMixin, object): def __init__(self, url=None, debug=False, force=False, @@ -43,6 +66,8 @@ class App(SubprocessMixin): self.nextUpdate = None + self.onieHelper = None + def run(self): if self.url is not None: @@ -123,27 +148,27 @@ class App(SubprocessMixin): self.log.info("please reboot this system now.") return 0 - def runLocal(self): + def runLocalOrChroot(self): - self.log.info("getting installer configuration") - if os.path.exists(ConfUtils.MachineConf.PATH): - self.machineConf = ConfUtils.MachineConf() - else: - self.log.warn("missing /etc/machine.conf from ONIE runtime") - self.machineConf = ConfUtils.MachineConf(path='/dev/null') - self.installerConf = ConfUtils.InstallerConf() + if self.machineConf is None: + self.log.error("missing machine.conf") + return 1 + if self.installerConf is None: + self.log.error("missing installer.conf") + return 1 ##self.log.info("using native GRUB") ##self.grubEnv = ConfUtils.GrubEnv(log=self.log.getChild("grub")) - pat = "/mnt/onie-boot/onie/initrd.img*" - l = glob.glob(pat) - if l: - initrd = l[0] - self.log.info("using native ONIE initrd+chroot GRUB (%s)", initrd) - initrdDir = InitrdContext.mkChroot(initrd, log=self.log) - self.grubEnv = ConfUtils.ChrootGrubEnv(initrdDir, - bootDir="/mnt/onie-boot", + self.onieHelper = OnieHelper(log=self.log) + code = self.onieHelper.run() + if code: + self.log.warn("cannot find ONIE initrd") + + if self.onieHelper.onieDir is not None: + self.log.info("using native ONIE initrd+chroot GRUB (%s)", self.onieHelper.onieDir) + self.grubEnv = ConfUtils.ChrootGrubEnv(self.onieHelper.initrdDir, + bootDir=self.onieHelper.onieDir, path="/grub/grubenv", log=self.log.getChild("grub")) # direct access using ONIE initrd as a chroot @@ -216,6 +241,19 @@ class App(SubprocessMixin): self.log.info("Install finished.") return 0 + def runLocal(self): + + self.log.info("getting installer configuration") + if os.path.exists(ConfUtils.MachineConf.PATH): + self.machineConf = ConfUtils.MachineConf() + else: + self.log.warn("missing /etc/machine.conf from ONIE runtime") + self.machineConf = ConfUtils.MachineConf(path='/dev/null') + + self.installerConf = ConfUtils.InstallerConf() + + return self.runLocalOrChroot() + def findPlatform(self): plat = arch = None @@ -302,6 +340,10 @@ class App(SubprocessMixin): if installer is not None: 
installer.shutdown() + h, self.onieHelper = self.onieHelper, None + if h is not None: + h.shutdown() + def post_mortem(self): self.log.info("re-attaching to tty") fdno = os.open("/dev/console", os.O_RDWR) From 08e6bddfbbc4c0674fa82041a7d541fc4227aef3 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 20 Oct 2016 12:03:35 -0700 Subject: [PATCH 004/255] WIP parted API updates -- DO NO MERGE --- .../src/python/onl/install/BaseInstall.py | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index eb25aa9f..cf3f299e 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -21,6 +21,12 @@ from InstallUtils import ProcMountsParser import onl.YamlUtils from onl.sysconfig import sysconfig +try: + PartedException = parted._ped.PartedException +except AttributeError: + import _ped + PartedException = _ped.PartedException + class Base: class installmeta: @@ -382,7 +388,7 @@ class Base: for m in pm.mounts: if m.device.startswith(self.device): if not self.force: - self.log.error("mount %s on %s will be erased by install", + self.log.error("mount %s on %s will be erased by install (try --force)", m.dir, m.device) return 1 else: @@ -668,9 +674,6 @@ class UbootInstaller(SubprocessMixin, Base): self.device = self.im.getDevice() - code = self.assertUnmounted() - if code: return code - self.rawLoaderDevice = None # set to a partition device for raw loader install, # default to None for FS-based install @@ -686,9 +689,12 @@ class UbootInstaller(SubprocessMixin, Base): return 0 self.log.warn("disk %s has wrong label %s", self.device, self.partedDisk.type) - except parted._ped.PartedException as ex: + except PartedException as ex: self.log.error("cannot get partition table from %s: %s", self.device, str(ex)) + except AttributeError as ex: + self.log.error("XXX cannot get partition table from %s: %s", + self.device, str(ex)) self.log.info("creating msdos label on %s") self.partedDisk = parted.freshDisk(self.partedDevice, 'msdos') @@ -727,6 +733,7 @@ class UbootInstaller(SubprocessMixin, Base): break if not loaderBasename: + raise ValueError("platform loader file missing!") self.log.error("The platform loader file is missing.") return 1 @@ -783,6 +790,9 @@ class UbootInstaller(SubprocessMixin, Base): self.log.error("not a block device: %s", self.device) return 1 + code = self.assertUnmounted() + if code: return code + code = self.maybeCreateLabel() if code: return code From c799c398634e83cb7dd4f8e604b006dc558c6e2e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 10 Oct 2016 17:42:38 +0000 Subject: [PATCH 005/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 1fe8a60b..60d7d7fd 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 1fe8a60b83155758a44a62b3e6c89bac2bde7769 +Subproject commit 60d7d7fd9abd05ed5c9969ac48ebf40838c93116 From e3030a6a1b83e8d0820715589442eed66236eeee Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 10 Oct 2016 15:08:19 -0700 Subject: [PATCH 006/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 
60d7d7fd..feff1c37 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 60d7d7fd9abd05ed5c9969ac48ebf40838c93116 +Subproject commit feff1c37eb206ac0fc3b8bcb5ab3514b693ed2df From 43c30cf18741eec4de88f2c5eeeb7edd2682a449 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 11 Oct 2016 20:30:35 +0000 Subject: [PATCH 007/255] The onl-pki script has moved. --- .../all/initrds/loader-initrd-files/src/bin/sysinit | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit b/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit index 81d327e0..983b4799 100755 --- a/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit +++ b/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit @@ -91,20 +91,15 @@ if [ ! -f /etc/onl/abort ]; then # Use default boot-config. cp /etc/onl/boot-config-default /etc/onl/boot-config fi - - # - # Initialize the /mnt/flash/boot area. - # - #mkdir -p /mnt/data/boot - #rm -rf /mnt/flash/boot/* fi # # Initialize PKI # -if [ -f /sbin/onl-pki ]; then - /sbin/onl-pki --init +ONL_PKI=/usr/bin/onl-pki +if [ -f "$ONL_PKI" ]; then + "$ONL_PKI" --init fi From d3e977ac0eee7cf231b3753b50a6840088213858 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 12 Oct 2016 18:41:45 +0000 Subject: [PATCH 008/255] Latest --- sm/bigcode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/bigcode b/sm/bigcode index fb78f3db..19fb14d8 160000 --- a/sm/bigcode +++ b/sm/bigcode @@ -1 +1 @@ -Subproject commit fb78f3dbac61413aec9b88f9dce14369ebaf68cf +Subproject commit 19fb14d8a5424284d2b082b65f4f5a269a587fd5 From 86cedd5babfa28af67ec677b1ea485acc4739ba0 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 13 Oct 2016 18:03:31 +0000 Subject: [PATCH 009/255] - Improve retry and timeout performance for the isch driver - adm1021 device detection option --- .../drivers-hwmon-adm1021-detect.patch | 29 +++++++++++++++ .../drivers-i2c-busses-i2c-isch-timeout.patch | 36 +++++++++++++++++++ .../kernels/3.2.65-1+deb7u2/patches/series | 2 ++ 3 files changed, 67 insertions(+) create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-hwmon-adm1021-detect.patch create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-i2c-busses-i2c-isch-timeout.patch diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-hwmon-adm1021-detect.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-hwmon-adm1021-detect.patch new file mode 100644 index 00000000..079fb085 --- /dev/null +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-hwmon-adm1021-detect.patch @@ -0,0 +1,29 @@ +--- a/drivers/hwmon/adm1021.c 2014-12-14 08:24:02.000000000 -0800 ++++ b/drivers/hwmon/adm1021.c 2016-10-13 10:48:10.045055678 -0700 +@@ -105,6 +105,7 @@ static struct adm1021_data *adm1021_upda + /* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */ + static int read_only; + ++static int detect = 1; + + static const struct i2c_device_id adm1021_id[] = { + { "adm1021", adm1021 }, +@@ -295,6 +296,9 @@ static int adm1021_detect(struct i2c_cli + "smbus byte data not supported!\n"); + return -ENODEV; + } ++ if(detect == 0) { ++ return -ENODEV; ++ } + + status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS); + conv_rate = i2c_smbus_read_byte_data(client, +@@ -510,6 +514,8 @@ MODULE_LICENSE("GPL"); + + module_param(read_only, bool, 0); + MODULE_PARM_DESC(read_only, "Don't set any 
values, read only mode"); ++module_param(detect, bool, 1); ++MODULE_PARM_DESC(detect, "Enable or disable device detection."); + + module_init(sensors_adm1021_init) + module_exit(sensors_adm1021_exit) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-i2c-busses-i2c-isch-timeout.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-i2c-busses-i2c-isch-timeout.patch new file mode 100644 index 00000000..013b3725 --- /dev/null +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-i2c-busses-i2c-isch-timeout.patch @@ -0,0 +1,36 @@ +--- a/drivers/i2c/busses/i2c-isch.c 2014-12-14 08:24:02.000000000 -0800 ++++ b/drivers/i2c/busses/i2c-isch.c 2016-10-13 08:02:44.564840300 -0700 +@@ -47,7 +47,7 @@ + #define SMBBLKDAT (0x20 + sch_smba) + + /* Other settings */ +-#define MAX_TIMEOUT 500 ++#define MAX_RETRIES 5000 + + /* I2C constants */ + #define SCH_QUICK 0x00 +@@ -68,7 +68,7 @@ static int sch_transaction(void) + { + int temp; + int result = 0; +- int timeout = 0; ++ int retries = 0; + + dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, " + "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), +@@ -100,12 +100,12 @@ static int sch_transaction(void) + outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT); + + do { +- msleep(1); ++ usleep_range(100, 200); + temp = inb(SMBHSTSTS) & 0x0f; +- } while ((temp & 0x08) && (timeout++ < MAX_TIMEOUT)); ++ } while ((temp & 0x08) && (retries++ < MAX_RETRIES)); + + /* If the SMBus is still busy, we give up */ +- if (timeout > MAX_TIMEOUT) { ++ if (retries > MAX_RETRIES) { + dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); + result = -ETIMEDOUT; + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index 929c0e53..f93799e2 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -251,3 +251,5 @@ platform-accton-as7716_32x-device-drivers.patch driver-broadcom-tigon3.patch mgmt-port-init-config.patch arch-intel-reboot-cf9-cold.patch +drivers-hwmon-adm1021-detect.patch +drivers-i2c-busses-i2c-isch-timeout.patch From bc4d2e6f97b8908dbd627eb4386a190add6323e5 Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Thu, 13 Oct 2016 17:19:31 -0700 Subject: [PATCH 010/255] Do not show CRC under System Information for "show version" --- .../base/all/vendor-config-onl/src/python/onl/platform/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index 19f6be9d..f9e0e20e 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -45,7 +45,7 @@ class OnlInfoObject(object): @staticmethod def string(d, indent=DEFAULT_INDENT): - return "\n".join( sorted("%s%s: %s" % (indent,k,v) for k,v in d.iteritems() if not k.startswith('_') and d[k] is not None) ) + return "\n".join( sorted("%s%s: %s" % (indent,k,v) for k,v in d.iteritems() if not k.startswith('_') and d[k] is not None and k != 'CRC')) ############################################################ From ac03653c5f94618a11a397fdc6203d75ac53678a Mon Sep 17 00:00:00 2001 From: Steven Noble Date: Thu, 13 Oct 2016 20:44:31 -0700 Subject: [PATCH 011/255] Kernel configuration options and updated at24 patch for kernel 3.16 --- .../configs/x86_64-all/x86_64-all.config | 29 ++++++++++++- 
...-at24-fix-odd-length-two-byte-access.patch | 43 +++++++------------ 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config index 7a66710e..8546a1f2 100644 --- a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config @@ -818,6 +818,7 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=y CONFIG_NETFILTER_XT_TARGET_HL=y CONFIG_NETFILTER_XT_TARGET_HMARK=y CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +# CONFIG_NETFILTER_XT_TARGET_LED is not set CONFIG_NETFILTER_XT_TARGET_LOG=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NETMAP=y @@ -2116,7 +2117,18 @@ CONFIG_SENSORS_LM90=y # CONFIG_SENSORS_NCT6683 is not set # CONFIG_SENSORS_NCT6775 is not set # CONFIG_SENSORS_PCF8591 is not set -# CONFIG_PMBUS is not set +CONFIG_PMBUS=y +CONFIG_SENSORS_PMBUS=y +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LTC2978 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX34440 is not set +CONFIG_SENSORS_DNI_DPS460=y +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_UCD9000 is not set +CONFIG_SENSORS_UCD9200=y +# CONFIG_SENSORS_ZL6100 is not set # CONFIG_SENSORS_SHT15 is not set # CONFIG_SENSORS_SHT21 is not set # CONFIG_SENSORS_SHTC1 is not set @@ -2648,7 +2660,20 @@ CONFIG_LEDS_CLASS=y # # LED Triggers # -# CONFIG_LEDS_TRIGGERS is not set +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +CONFIG_LEDS_TRIGGER_GPIO=y +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set # CONFIG_EDAC is not set diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch index 709220f3..6060b15e 100644 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch +++ b/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch @@ -1,29 +1,18 @@ -driver at24 fix odd length two byte access - -From: Cumulus Networks - -For I2C_SMBUS_WORD_DATA read accesses check if the access length is -one or two bytes. For transactions that have an odd length eventualy -we read 1 byte at the end to complete the request. - -The previous code always used a count of 2, which works fine if the -requested total length is even. If the requested length was odd, -however, the code would cause a kernel OOPS. - -The while (count) loop would go forever as count went from 1 to -1, -never becoming zero. Also the return buffer would overrun. - -This patch allows for reading an odd number of bytes in -I2C_SMBUS_WORD_DATA mode. 
---- - drivers/misc/eeprom/at24.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c -index d87f77f..9e9256a 100644 ---- a/drivers/misc/eeprom/at24.c -+++ b/drivers/misc/eeprom/at24.c -@@ -192,7 +192,8 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, +--- a/drivers/misc/eeprom/at24.c 2016-10-06 12:45:49.290365545 +0000 ++++ b/drivers/misc/eeprom/at24.c 2016-10-06 12:47:08.630368526 +0000 +@@ -84,9 +84,9 @@ + * + * This value is forced to be a power of two so that writes align on pages. + */ +-static unsigned io_limit = 128; ++static unsigned io_limit = 32; + module_param(io_limit, uint, 0); +-MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); ++MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 32)"); + + /* + * Specs often allow 5 msec for a page write, sometimes 20 msec; +@@ -192,7 +192,8 @@ count = I2C_SMBUS_BLOCK_MAX; break; case I2C_SMBUS_WORD_DATA: @@ -33,7 +22,7 @@ index d87f77f..9e9256a 100644 break; case I2C_SMBUS_BYTE_DATA: count = 1; -@@ -237,7 +238,8 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, +@@ -237,7 +238,8 @@ status = i2c_smbus_read_word_data(client, offset); if (status >= 0) { buf[0] = status & 0xff; From 05ef1f19b4392c2bd551c37c7959bfe11a9e6cc9 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 14 Oct 2016 18:07:35 +0000 Subject: [PATCH 012/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index feff1c37..fe6a4b28 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit feff1c37eb206ac0fc3b8bcb5ab3514b693ed2df +Subproject commit fe6a4b286335b1a286e41c562cff4a95a5497c2c From 7c3d3a2c3bb6516d04c7f46a9bded26415d8cd05 Mon Sep 17 00:00:00 2001 From: Sridhar Pitchai Date: Fri, 14 Oct 2016 16:36:45 -0700 Subject: [PATCH 013/255] Platform x86-64-agema-agc7648 without ONLP (#113) --- packages/platforms/agema/Makefile | 1 + .../platforms/agema/vendor-config/Makefile | 1 + .../platforms/agema/vendor-config/PKG.yml | 1 + .../src/python/agema/__init__.py | 7 ++++ packages/platforms/agema/x86-64/Makefile | 1 + .../x86-64/x86-64-agema-agc7648/Makefile | 1 + .../x86-64/x86-64-agema-agc7648/onlp/Makefile | 1 + .../x86-64/x86-64-agema-agc7648/onlp/PKG.yml | 15 ++++++++ .../platform-config/Makefile | 1 + .../platform-config/r0/Makefile | 1 + .../platform-config/r0/PKG.yml | 1 + .../r0/src/lib/x86-64-agc7648-r0.yml | 35 +++++++++++++++++++ .../src/python/x86_64_agc7648_r0/__init__.py | 8 +++++ 13 files changed, 74 insertions(+) create mode 100644 packages/platforms/agema/Makefile create mode 100644 packages/platforms/agema/vendor-config/Makefile create mode 100644 packages/platforms/agema/vendor-config/PKG.yml create mode 100644 packages/platforms/agema/vendor-config/src/python/agema/__init__.py create mode 100644 packages/platforms/agema/x86-64/Makefile create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/Makefile create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/Makefile create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/Makefile create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/Makefile create mode 100644 
packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/PKG.yml create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/lib/x86-64-agc7648-r0.yml create mode 100644 packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/python/x86_64_agc7648_r0/__init__.py diff --git a/packages/platforms/agema/Makefile b/packages/platforms/agema/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/agema/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/agema/vendor-config/Makefile b/packages/platforms/agema/vendor-config/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/agema/vendor-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/agema/vendor-config/PKG.yml b/packages/platforms/agema/vendor-config/PKG.yml new file mode 100644 index 00000000..9cc81777 --- /dev/null +++ b/packages/platforms/agema/vendor-config/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-vendor.yml VENDOR=agema Vendor=Agema diff --git a/packages/platforms/agema/vendor-config/src/python/agema/__init__.py b/packages/platforms/agema/vendor-config/src/python/agema/__init__.py new file mode 100644 index 00000000..5e56e4a9 --- /dev/null +++ b/packages/platforms/agema/vendor-config/src/python/agema/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/python + +from onl.platform.base import * + +class OnlPlatformAgema(OnlPlatformBase): + MANUFACTURER='Agema' + PRIVATE_ENTERPRISE_NUMBER=65530 diff --git a/packages/platforms/agema/x86-64/Makefile b/packages/platforms/agema/x86-64/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/agema/x86-64/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/Makefile b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/Makefile b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml new file mode 100644 index 00000000..46f47033 --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml @@ -0,0 +1,15 @@ +variables: + platform: x86-64-agc7648-r0 + install: /lib/platform-config/${platform}/onl + +common: + version: 1.0.0 + arch: amd64 + copyright: Copyright 2013, 2014, 2015 Big Switch Networks + maintainer: support@bigswitch.com + comment: dummy package for ONLP on Wedge +packages: + - name: onlp-${platform} + summary: ONLP Package for the ${platform} platform. 
+ + changelog: initial version diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/Makefile b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/Makefile b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/PKG.yml b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/PKG.yml new file mode 100644 index 00000000..e1d73496 --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=agema PLATFORM=x86-64-agc7648-r0 diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/lib/x86-64-agc7648-r0.yml b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/lib/x86-64-agc7648-r0.yml new file mode 100644 index 00000000..79300f3f --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/lib/x86-64-agc7648-r0.yml @@ -0,0 +1,35 @@ +--- + +###################################################################### +# +# platform-config for x86_64-agc7648-r0 +# +# +###################################################################### + +x86-64-agc7648-r0: + + grub: + + serial: >- + --port=0x3f8 + --speed=115200 + --word=8 + --parity=0 + --stop=1 + + kernel: + <<: *kernel-3-18 + + args: >- + nopat + console=ttyS0,57600n8 + rd_NO_MD + rd_NO_LUKS + intel_iommu=off + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:14.0 diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/python/x86_64_agc7648_r0/__init__.py b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/python/x86_64_agc7648_r0/__init__.py new file mode 100644 index 00000000..15e7d82b --- /dev/null +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/platform-config/r0/src/python/x86_64_agc7648_r0/__init__.py @@ -0,0 +1,8 @@ +from onl.platform.base import * +from onl.platform.accton import * + +class OnlPlatform_x86_64_agc7648_r0(OnlPlatformAgema, + OnlPlatformPortConfig_48x10_6x40): + MODEL="agc7648" + PLATFORM="x86-64-agc7648-r0" + SYS_OBJECT_ID=".7648.1" From 09a4d38fa8b2a0bc12c68bdd73ec2e1f9b99e48f Mon Sep 17 00:00:00 2001 From: Charlie Lewis Date: Fri, 14 Oct 2016 16:37:51 -0700 Subject: [PATCH 014/255] improve formatting of markdown for readibility (#108) --- docs/Building.md | 49 +++++++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/docs/Building.md b/docs/Building.md index e66ab7ad..693f83b5 100644 --- a/docs/Building.md +++ b/docs/Building.md @@ -41,17 +41,17 @@ If you would like to build by hand you can do the following: #> git clone https://github.com/opencomputeproject/OpenNetworkLinux #> cd OpenNetworkLinux - #> docker/tools/onlbuilder (-8) # enter the docker workspace + #> docker/tools/onlbuilder (-8) # enter the docker workspace 
#> apt-cacher-ng - #> source setup.env # pull in necessary environment variables + #> source setup.env # pull in necessary environment variables #> make amd64 ppc # make onl for $platform (currently amd64 or powerpc) The resulting ONIE installers are in -$ONL/RELEASE/$SUITE/$ARCH/ONL-2.*INSTALLER, i.e. -RELEASE/jessie/amd64/ONL-2.0.0_ONL-OS_2015-12-12.0252-ffce159_AMD64_INSTALLER +`$ONL/RELEASE/$SUITE/$ARCH/ONL-2.*INSTALLER`, i.e. +`RELEASE/jessie/amd64/ONL-2.0.0_ONL-OS_2015-12-12.0252-ffce159_AMD64_INSTALLER` and the SWI files (if you want them) are in -$ONL/RELEASE/$SUITE/$ARCH/ONL*.swi. i.e. -RELEASE/jessie/amd64/ONL-2.0.0_ONL-OS_2015-12-12.0252-ffce159_AMD64.swi +`$ONL/RELEASE/$SUITE/$ARCH/ONL*.swi`. i.e. +`RELEASE/jessie/amd64/ONL-2.0.0_ONL-OS_2015-12-12.0252-ffce159_AMD64.swi` @@ -69,13 +69,13 @@ Common docker related issues: - Beware that `apt-get install docker` installs a dock application not docker :-) You want the lxc-docker package instead. - Some versions of docker are unhappy if you use a local DNS caching resolver: - e.g., you have 127.0.0.1 in your /etc/resolv.conf - - if you have this, specify DNS="--dns 8.8.8.8" when you enter the docker environment + - if you have this, specify `DNS="--dns 8.8.8.8"` when you enter the docker environment - e.g., `make DNS="--dns 8.8.8.8" docker` Consider enabling builds for non-privileged users with: - `sudo usermod -aG docker $USER` -- If you run as non-root without this, you will get errors like "..: dial unix /var/run/docker.sock: permission denied" +- If you run as non-root without this, you will get errors like `..: dial unix /var/run/docker.sock: permission denied` - Building as root is fine as well (it immediately jumps into a root build shell), so this optional #Additional Build Details @@ -134,43 +134,47 @@ Example setup on new Debian 8.2 installation Install sudo and add yourself to the sudoers: As root: - +``` apt-get install sudo - vi /etc/sudoers.d/username +``` Add the line: - +``` username ALL=(ALL:ALL) ALL +``` Add the docker key: - +``` sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D gpg: key 2C52609D: public key "Docker Release Tool (releasedocker) " imported gpg: Total number processed: 1 gpg: imported: 1 (RSA: 1) +``` Install necessary items, make, binfmt-support and apt-transport-https (for docker): - +``` sudo apt-get install apt-transport-https make binfmt-support +``` Add the docker repository to your system: - +``` sudo vi /etc/apt/sources.list.d/docker.list - +``` Add the following line to the file: - +``` deb https://apt.dockerproject.org/repo debian-jessie main +``` Install Docker: - +``` sudo apt-get update - sudo apt-get install docker-engine +``` Test Docker: - +``` sudo docker run hello-world Unable to find image 'hello-world:latest' locally @@ -182,21 +186,24 @@ Status: Downloaded newer image for hello-world:latest Hello from Docker. This message shows that your installation appears to be working correctly. +``` Add yourself to the docker group: - +``` sudo gpasswd -a snoble docker Adding user snoble to group docker +``` logout and log back in for the group to take effect: Clone the OpenNetworkLinux repository: - +``` git clone https://github.com/opencomputeproject/OpenNetworkLinux.git Cloning into 'OpenNetworkLinux'... Checking connectivity... done. 
+``` Build OpenNetworkLinux: From fad2b5cd8260895005573183150e1cc9b2ebbc72 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 18 Oct 2016 11:30:47 -0700 Subject: [PATCH 015/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index fe6a4b28..5ae11894 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit fe6a4b286335b1a286e41c562cff4a95a5497c2c +Subproject commit 5ae11894fd5667434ad8ce5e7ae4b17eaf06567b From c87583271817f1b867709a1ec788a186f32cf23e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 19 Oct 2016 14:54:11 +0000 Subject: [PATCH 016/255] Generalize to all exceptions. --- .../vendor-config-onl/src/python/onl/install/BaseInstall.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index cf3f299e..27286acc 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -692,9 +692,9 @@ class UbootInstaller(SubprocessMixin, Base): except PartedException as ex: self.log.error("cannot get partition table from %s: %s", self.device, str(ex)) - except AttributeError as ex: - self.log.error("XXX cannot get partition table from %s: %s", - self.device, str(ex)) + except Error as ex: + self.log.exception("cannot get partition table from %s" + self.device) self.log.info("creating msdos label on %s") self.partedDisk = parted.freshDisk(self.partedDevice, 'msdos') From 31738e8855f58552c884914df6719ae843a1f3e1 Mon Sep 17 00:00:00 2001 From: Steven Noble Date: Wed, 19 Oct 2016 10:14:43 -0700 Subject: [PATCH 017/255] Updates docs for Wedge to include Wedge 100 (#115) --- docs/Building.md | 8 ++++---- docs/GettingStartedWedge.md | 31 ++++++++++++++++++++++--------- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/docs/Building.md b/docs/Building.md index 693f83b5..f4c02f7a 100644 --- a/docs/Building.md +++ b/docs/Building.md @@ -105,8 +105,8 @@ Adding/Removing packages from a SWI: The list of packages for a given SWI are in - $ONL/packages/base/any/rootfs/common/$ARCH-packages.yml # for $ARCH specific packages - $ONL/packages/base/any/rootfs/common/common-packages.yml # for $ARCH-independent packages + $ONL/packages/base/any/rootfs/$suite/common/$ARCH-packages.yml # for $ARCH specific packages + $ONL/packages/base/any/rootfs/$suite/common/common-packages.yml # for $ARCH-independent packages Build a software image (SWI) for all powerpc platforms: ------------------------------------------------------------ @@ -190,9 +190,9 @@ This message shows that your installation appears to be working correctly. Add yourself to the docker group: ``` -sudo gpasswd -a snoble docker +sudo gpasswd -a user1 docker -Adding user snoble to group docker +Adding user user1 to group docker ``` logout and log back in for the group to take effect: diff --git a/docs/GettingStartedWedge.md b/docs/GettingStartedWedge.md index 42fe39c9..2bfcd926 100644 --- a/docs/GettingStartedWedge.md +++ b/docs/GettingStartedWedge.md @@ -9,17 +9,20 @@ Once installed, ONL has a default account ("root") with a default password the root password before the system comes up. You will need to enable the network interface before you can run the FBOSS agent. 
-FBOSS is installed and set to run the configuration created for a OCP ONL +On the Wedge 40 FBOSS is installed and set to run the configuration created for a OCP ONL on Wedge Demo. This configuration sets up the first physical QSFP port of the wedge as 4 10G ports (via a break out cable) and configures vlans and ip addresses on them. +On the Wedge 100 FBOSS is installed and set to run a generic configuration that breaks all +ports into 25G (each 100G port is broken into 4 ports for a total of 128 ports) + ONIE Manual Install ------------------------------------------------ If your Accton Wedge does not have ONIE installed, you will need to install it before you can proceed. -1) Download the ONIE rescue image from http://opennetlinux.org/binaries/accton-wedge/onie-recovery-x86_64-accton_wedge_16x-r0.iso +1) Download the ONIE rescue image for the Wedge 40 from http://opennetlinux.org/binaries/accton-wedge/onie-recovery-x86_64-accton_wedge_16x-r0.iso for the Wedge 100 it is http://opennetlinux.org/binaries/accton-wedge/onie-recovery-x86-64-facebook-wedge100-r0.iso 2) Burn the image onto a USB (a USB with a minimum size of 256M is necessary) @@ -40,8 +43,8 @@ configuration menu linux installation, you will need to either reboot (if possible) or hit ctrl-x, exiting to the BMC and issue the "wedge_power reset" command to power-cycle the microserver, run sol.sh again and hit F2 when the BIOS status screen appears -10) One you are in the BIOS configuration, move to the boot screen and change -the boot mode from UEFI to Legacy +10) For the Wedge 40 Once you are in the BIOS configuration, move to the boot screen and change +the boot mode from UEFI to Legacy. For the Wedge 100 you will choose the non UEFI version of your USB disk. 11) In the boot device list, make sure that the USB is set to #1 @@ -56,13 +59,15 @@ reboot 16) *IMPORTANT* Remove the USB from the system before proceeding to the ONL install +17) On the Wedge 100 go back into the BIOS and set device P0 to be the main boot drive. + ONL Manual Install ------------------------------------------------ 1) Attach a serial terminal to the wedge 2) Boot switch and choose "ONIE: Rescue" to go to ONIE''s interactive mode -3) From the ONIE# prompt run "install_url http://opennetlinux.org/binaries/latest-wedge-2.0.installer" +3) From the ONIE# prompt run "install_url http://opennetlinux.org/binaries/latest-DEB8-AMD64-installed.installer" 4) Wait for the install to finish and the system to reboot @@ -72,18 +77,23 @@ password "onl" 6) Configure the ma1 interface either via dhcp (dhclient ma1) or manually 7) Install fboss using the commands - + + For the Wedge 40 #> apt-get update #> apt-get install fboss + Or for the Wedge 100 + #> apt-get install fboss-w100 + 8) From the command prompt you can start fboss by using the command "service fboss_wedge_agent start" -9) The first time you start the fboss_wedge_agent service it will download -the OpenNSL library from the Broadcom github account. +9) On the Wedge 40 The first time you start the fboss_wedge_agent service it will download +the OpenNSL library from the Broadcom github account. On the Wedge 100, it OpenNSL is included +as a package. 
10) Once the library is installed, fboss_wedge_agent will start, using the -default configuration located at /etc/fboss/ocp-demo.json +default configuration located at /etc/fboss/ocp-demo.json for the Wedge 40 or /etc/fboss/sample_config.json for the Wedge 100 11) You can confirm that the fboss_wedge_agent is running by issuing the command "service fboss_wedge_agent status" @@ -94,6 +104,9 @@ command "service fboss_wedge_agent status" Modifying The fboss_wedge_agent configuration ------------------------------------------------ + +The rest of this document is based on the Wedge 40 but the Wedge 100 is similar. + In the /etc/init.d/fboss_wedge_agent script, you will locate a section where the configuration file "FBOSS_DAEMON_OPTIONS" is set: From db0790fac81674b5807b6c505ab37549285a8bbe Mon Sep 17 00:00:00 2001 From: Sridhar Pitchai Date: Wed, 19 Oct 2016 10:15:03 -0700 Subject: [PATCH 018/255] Platform x86-64-alphanetworks-snx60a0-486f-r0 (#114) Fixing the console port --- .../r0/src/lib/x86-64-alphanetworks-snx60a0-486f-r0.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/platforms/alphanetworks/x86-64-alphanetworks-snx60a0-486f/platform-config/r0/src/lib/x86-64-alphanetworks-snx60a0-486f-r0.yml b/packages/platforms/alphanetworks/x86-64-alphanetworks-snx60a0-486f/platform-config/r0/src/lib/x86-64-alphanetworks-snx60a0-486f-r0.yml index d55b1cc0..abd98158 100644 --- a/packages/platforms/alphanetworks/x86-64-alphanetworks-snx60a0-486f/platform-config/r0/src/lib/x86-64-alphanetworks-snx60a0-486f-r0.yml +++ b/packages/platforms/alphanetworks/x86-64-alphanetworks-snx60a0-486f/platform-config/r0/src/lib/x86-64-alphanetworks-snx60a0-486f-r0.yml @@ -10,7 +10,7 @@ x86-64-alphanetworks-snx60a0-486f-r0: grub: serial: >- - --port=0x3f8 + --port=0x2f8 --speed=115200 --word=8 --parity=no @@ -21,5 +21,4 @@ x86-64-alphanetworks-snx60a0-486f-r0: args: >- nopat - console=ttyS0,115200n8 - + console=ttyS1,115200n8 From 079765bf8593d2f0ce91aee90b5e71b4635156ce Mon Sep 17 00:00:00 2001 From: Steven Noble Date: Wed, 19 Oct 2016 21:51:01 -0700 Subject: [PATCH 019/255] Updates the Wedge 40 and 100 systems to use the 3.16 kernel (#117) --- .../platform-config/r0/src/lib/x86-64-accton-wedge-16x-r0.yml | 2 +- .../platform-config/r0/src/lib/x86-64-facebook-wedge100-r0.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/platform-config/r0/src/lib/x86-64-accton-wedge-16x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/platform-config/r0/src/lib/x86-64-accton-wedge-16x-r0.yml index 133874d1..17765a92 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/platform-config/r0/src/lib/x86-64-accton-wedge-16x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/platform-config/r0/src/lib/x86-64-accton-wedge-16x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-wedge-16x-r0: --stop=1 kernel: - <<: *kernel-3-18 + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/platform-config/r0/src/lib/x86-64-facebook-wedge100-r0.yml b/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/platform-config/r0/src/lib/x86-64-facebook-wedge100-r0.yml index fdcbd263..c80d99e7 100644 --- a/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/platform-config/r0/src/lib/x86-64-facebook-wedge100-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/platform-config/r0/src/lib/x86-64-facebook-wedge100-r0.yml @@ -18,7 +18,7 @@ 
x86-64-facebook-wedge100-r0: --stop=1 kernel: - <<: *kernel-3-18 + <<: *kernel-3-16 args: >- nopat From 1c12aa5189615854d6b318b2d4943cb9e57c7d95 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 24 Oct 2016 13:11:02 -0700 Subject: [PATCH 020/255] Added serialization function --- .../src/python/onl/install/ConfUtils.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ConfUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ConfUtils.py index b81fd347..4f2a831c 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ConfUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ConfUtils.py @@ -7,6 +7,7 @@ import os import logging import subprocess from InstallUtils import SubprocessMixin, ChrootSubprocessMixin, MountContext +from cStringIO import StringIO class ConfBase: @@ -45,6 +46,14 @@ class ConfBase: def __setattr__(self, attr, val): self.__dict__['_data'][attr] = val + def dumps(self): + """Generate a serialized representation.""" + buf = StringIO() + data = self.__dict__.get('_data', {}) + for key, val in data.iteritems(): + buf.write("%s=\"%s\"\n" % (key, val,)) + return buf.getvalue() + class ConfFileBase(ConfBase): PATH = None From ead42bc86044d92b8261fda8719a5bff28d0db2f Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 24 Oct 2016 13:11:21 -0700 Subject: [PATCH 021/255] Added upgrade-shell --- .../vendor-config-onl/src/bin/upgrade-shell | 7 +++ .../src/python/onl/install/ShellApp.py | 48 ++++++++++++++++++- 2 files changed, 54 insertions(+), 1 deletion(-) create mode 100755 packages/base/all/vendor-config-onl/src/bin/upgrade-shell diff --git a/packages/base/all/vendor-config-onl/src/bin/upgrade-shell b/packages/base/all/vendor-config-onl/src/bin/upgrade-shell new file mode 100755 index 00000000..7e488d28 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/upgrade-shell @@ -0,0 +1,7 @@ +#!/usr/bin/python + +"""Run the upgrade image +""" + +import onl.install.ShellApp +onl.install.ShellApp.Upgrader.main() diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index 19cfaf6f..79c40e59 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -16,8 +16,9 @@ from InstallUtils import BlkidParser import Fit import onl.platform.current +from onl.sysconfig import sysconfig -class AppBase(SubprocessMixin): +class AppBase(SubprocessMixin, object): @property def PROG(self): @@ -109,6 +110,7 @@ class AppBase(SubprocessMixin): sys.exit(code) class Onie(AppBase): + """Application shell that uses the ONIE runtime.""" PROG = "onie-shell" @@ -193,6 +195,7 @@ class Onie(AppBase): return 1 class Loader(AppBase): + """Application shell that uses the (installed) loader runtime.""" PROG = "loader-shell" @@ -309,6 +312,49 @@ class Loader(AppBase): self.log.error("invalid platform-config") return 1 +class Upgrader(AppBase): + """Application shell that uses on-disk upgrade loader runtime.""" + + PROG = "upgrade-shell" + + def runGrub(self): + + d = sysconfig.upgrade.loader.package.dir + for b in sysconfig.upgrade.loader.package.grub: + p = os.path.join(d, b) + if os.path.exists(p): + self.log.debug("found upgrade initrd at %s", p) + return self._runInitrdShell(p) + + self.log.error("cannot find upgrade initrd") + return 1 + + def runUboot(self): + + d = 
sysconfig.upgrade.loader.package.dir + for b in sysconfig.upgrade.loader.package.fit: + p = os.path.join(d, b) + if os.path.exists(p): + self.log.debug("found upgrade FIT image %s", p) + return self._runFitShell(p) + + self.log.error("cannot find FIT image") + return 1 + + def run(self): + + self.platform = onl.platform.current.OnlPlatform() + self.pc = self.platform.platform_config + + if 'grub' in self.pc: + return self.runGrub() + + if 'flat_image_tree' in self.pc: + return self.runUboot() + + self.log.error("invalid platform-config") + return 1 + main = Onie.main if __name__ == "__main__": From 8d9bedd92e82659837705e099ca45d2e7251ab85 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 24 Oct 2016 13:11:42 -0700 Subject: [PATCH 022/255] Added cp-R, added support for devtmpfs --- .../src/python/onl/install/InstallUtils.py | 78 ++++++++++++++----- 1 file changed, 58 insertions(+), 20 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index 386f318a..9ce429fc 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -177,6 +177,27 @@ class SubprocessMixin: # don't believe it self.check_call(cmd, vmode=self.V1) + def cpR(self, srcRoot, dstRoot): + srcRoot = os.path.abspath(srcRoot) + dstRoot = os.path.abspath(dstRoot) + dstRoot = os.path.join(dstRoot, os.path.split(srcRoot)[1]) + for r, dl, fl in os.walk(srcRoot): + + for de in dl: + src = os.path.join(r, de) + subdir = src[len(srcRoot)+1:] + dst = os.path.join(dstRoot, subdir) + if not os.path.exists(dst): + self.log.debug("+ /bin/mkdir -p %s", dst) + os.mkdirs(dst) + + for fe in fl: + src = os.path.join(r, de) + subdir = src[len(srcRoot)+1:] + dst = os.path.join(dstRoot, subdir) + self.log.debug("+ /bin/cp -a %s %s", src, dst) + shutil.copy2(src, dst) + class TempdirContext(SubprocessMixin): def __init__(self, prefix=None, suffix=None, chroot=None, log=None): @@ -678,6 +699,7 @@ class InitrdContext(SubprocessMixin): self.ilog.setLevel(logging.INFO) self.log = self.hlog + self._hasDevTmpfs = False self._detachInitrd = None def _unpack(self): @@ -739,27 +761,28 @@ class InitrdContext(SubprocessMixin): else: self.unlink(dst) - for e in os.listdir("/dev"): - src = os.path.join("/dev", e) - dst = os.path.join(dev2, e) - if os.path.islink(src): - self.symlink(os.readlink(src), dst) - elif os.path.isdir(src): - self.mkdir(dst) - elif os.path.isfile(src): - self.copy2(src, dst) - else: - st = os.stat(src) - if stat.S_ISBLK(st.st_mode): - maj, min = os.major(st.st_rdev), os.minor(st.st_rdev) - self.log.debug("+ mknod %s b %d %d", dst, maj, min) - os.mknod(dst, st.st_mode, st.st_rdev) - elif stat.S_ISCHR(st.st_mode): - maj, min = os.major(st.st_rdev), os.minor(st.st_rdev) - self.log.debug("+ mknod %s c %d %d", dst, maj, min) - os.mknod(dst, st.st_mode, st.st_rdev) + if not self._hasDevTmpfs: + for e in os.listdir("/dev"): + src = os.path.join("/dev", e) + dst = os.path.join(dev2, e) + if os.path.islink(src): + self.symlink(os.readlink(src), dst) + elif os.path.isdir(src): + self.mkdir(dst) + elif os.path.isfile(src): + self.copy2(src, dst) else: - self.log.debug("skipping device %s", src) + st = os.stat(src) + if stat.S_ISBLK(st.st_mode): + maj, min = os.major(st.st_rdev), os.minor(st.st_rdev) + self.log.debug("+ mknod %s b %d %d", dst, maj, min) + os.mknod(dst, st.st_mode, st.st_rdev) + elif 
stat.S_ISCHR(st.st_mode): + maj, min = os.major(st.st_rdev), os.minor(st.st_rdev) + self.log.debug("+ mknod %s c %d %d", dst, maj, min) + os.mknod(dst, st.st_mode, st.st_rdev) + else: + self.log.debug("skipping device %s", src) dst = os.path.join(self.dir, "dev/pts") if not os.path.exists(dst): @@ -772,6 +795,11 @@ class InitrdContext(SubprocessMixin): def __enter__(self): + with open("/proc/filesystems") as fd: + buf = fd.read() + if "devtmpfs" in buf: + self._hasDevTmpfs = True + if self.initrd is not None: self.log.debug("extracting initrd %s", self.initrd) @@ -792,6 +820,16 @@ class InitrdContext(SubprocessMixin): cmd = ('mount', '-t', 'sysfs', 'sysfs', dst,) self.check_call(cmd, vmode=self.V1) + # maybe mount devtmpfs + if self._hasDevTmpfs: + dst = os.path.join(self.dir, "dev") + cmd = ('mount', '-t', 'devtmpfs', 'devtmpfs', dst,) + self.check_call(cmd, vmode=self.V1) + + dst = os.path.join(self.dir, "dev/pts") + if not os.path.exists(dst): + self.mkdir(dst) + dst = os.path.join(self.dir, "dev/pts") cmd = ('mount', '-t', 'devpts', 'devpts', dst,) self.check_call(cmd, vmode=self.V1) From 5e0ea4516e5a21ea220ec8818fa8d4c35753d77e Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 24 Oct 2016 13:11:54 -0700 Subject: [PATCH 023/255] Update comment --- packages/base/all/vendor-config-onl/src/bin/loader-shell | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/bin/loader-shell b/packages/base/all/vendor-config-onl/src/bin/loader-shell index e006aef4..9e758889 100755 --- a/packages/base/all/vendor-config-onl/src/bin/loader-shell +++ b/packages/base/all/vendor-config-onl/src/bin/loader-shell @@ -1,6 +1,6 @@ #!/usr/bin/python -"""Run native ONIE tools +"""Run native (on-disk) loader tools """ import onl.install.ShellApp From b6ce6eb566431b90a9acd80a0b9bbb8c4c78fb99 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 24 Oct 2016 13:12:03 -0700 Subject: [PATCH 024/255] Added devtmpfs workaround --- .../vendor-config-onl/src/lib/install/lib.sh | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/lib/install/lib.sh b/packages/base/all/vendor-config-onl/src/lib/install/lib.sh index 388ab892..74dd273f 100644 --- a/packages/base/all/vendor-config-onl/src/lib/install/lib.sh +++ b/packages/base/all/vendor-config-onl/src/lib/install/lib.sh @@ -62,17 +62,26 @@ installer_mkchroot() { local rootdir rootdir=$1 + local hasDevTmpfs + if grep -q devtmpfs /proc/filesystems; then + hasDevTmpfs=1 + fi + # special handling for /dev, which usually already has nested mounts installer_say "Setting up /dev" rm -fr "${rootdir}/dev"/* - for dev in /dev/*; do - if test -d "$dev"; then - mkdir "${rootdir}${dev}" - else - cp -a "$dev" "${rootdir}${dev}" - fi - done - mkdir -p "${rootdir}/dev/pts" + if test "$hasDevTmpfs"; then + : + else + for dev in /dev/*; do + if test -d "$dev"; then + mkdir "${rootdir}${dev}" + else + cp -a "$dev" "${rootdir}${dev}" + fi + done + mkdir -p "${rootdir}/dev/pts" + fi installer_say "Setting up /run" rm -fr "${rootdir}/run"/* @@ -99,6 +108,10 @@ installer_mkchroot() { installer_say "Setting up mounts" mount -t proc proc "${rootdir}/proc" mount -t sysfs sysfs "${rootdir}/sys" + if test "$hasDevTmpfs"; then + mount -t devtmpfs devtmpfs "${rootdir}/dev" + mkdir -p ${rootdir}/dev/pts + fi mount -t devpts devpts "${rootdir}/dev/pts" if test ${TMPDIR+set}; then From c10374ddc6c5077472bddfa3658294f3c509791c Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Mon, 24 Oct 2016 16:51:52 -0700 Subject: [PATCH 025/255] Fixed cpR implementation --- .../src/python/onl/install/InstallUtils.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index 9ce429fc..3f1ead96 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -188,15 +188,13 @@ class SubprocessMixin: subdir = src[len(srcRoot)+1:] dst = os.path.join(dstRoot, subdir) if not os.path.exists(dst): - self.log.debug("+ /bin/mkdir -p %s", dst) - os.mkdirs(dst) + self.makedirs(dst) for fe in fl: - src = os.path.join(r, de) + src = os.path.join(r, fe) subdir = src[len(srcRoot)+1:] dst = os.path.join(dstRoot, subdir) - self.log.debug("+ /bin/cp -a %s %s", src, dst) - shutil.copy2(src, dst) + self.copy2(src, dst) class TempdirContext(SubprocessMixin): From f8f297512159cfd9276e26b1fb60a0ede86897e2 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 25 Oct 2016 18:07:26 +0000 Subject: [PATCH 026/255] New SYSTEM_COMPATIBILITY_VERSION (SCV) key This key is designed to indicate the overall system compatibility between different installations. For example, the system configuration in ONL2.0 is different from ONL 1.0 (partition scheme, loader/version management, etc) and attempting to boot a 1.0 switch image on a 2.0 system will not work properly. This key exists to facilitate operational compatibility and upgrade steps. When a switch image is booted against a loader with an SCV different than it expects then a full re-install will need to be performed in order to upgrade the system before that image can run. If the SCV is different then an in-place loader-upgrade cannot be performed either. The upgrade process will be updated to include an SCV check and perform a re-install. The loader upgrade sequence will be predicated on SCV equality. If the SCV is not equal after all upgrade operations then switch has the option of rebooting instead of continuing the init process. This will be configured through the sysconfig interface. --- tools/onlvi.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/onlvi.py b/tools/onlvi.py index d71be362..6bee3bbe 100644 --- a/tools/onlvi.py +++ b/tools/onlvi.py @@ -52,3 +52,6 @@ class OnlVersionImplementation(object): def V_FNAME_RELEASE_ID(self, data): return "%s-%s" % (self.V_VERSION_ID(data), self.V_FNAME_BUILD_ID(data)) + + def V_SYSTEM_COMPATIBILITY_VERSION(self, data): + return "2" From 06ad896fd4f9b11233f8b31ffdfee85c0805f435 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Tue, 25 Oct 2016 14:36:22 -0700 Subject: [PATCH 027/255] Added system upgrade key --- .../all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml index 6527b70d..10278f7e 100644 --- a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml +++ b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml @@ -19,6 +19,8 @@ installer: upgrade: onie: auto: advisory + system: + auto: advisory loader: auto: advisory versions: /etc/onl/loader/versions.json From 6f9d993696d281a477cbff1750d08a191fcc6526 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Tue, 25 Oct 2016 14:37:16 -0700 Subject: [PATCH 028/255] the --extract option is unimplemented --- tools/onlpm.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/onlpm.py b/tools/onlpm.py index e41e639d..58e991eb 100755 --- a/tools/onlpm.py +++ b/tools/onlpm.py @@ -1043,7 +1043,6 @@ if __name__ == '__main__': ap.add_argument("--repo-package-dir", default=os.environ.get('ONLPM_OPTION_REPO_PACKAGE_DIR', 'packages')) ap.add_argument("--packagedirs", nargs='+', metavar='PACKAGEDIR') ap.add_argument("--subdir", default=os.getcwd()) - ap.add_argument("--extract", metavar='PACKAGE') ap.add_argument("--extract-dir", nargs=2, metavar=('PACKAGE', 'DIR'), action='append') ap.add_argument("--force", action='store_true') ap.add_argument("--list", action='store_true'); From df146fd1226087c29e0cd3fe1ff36ebd84255995 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 19 Oct 2016 21:34:54 +0000 Subject: [PATCH 029/255] Onie and Loader infrastructure improvements - Loader and ONIE moved to importable classes for tool usage - OnieUpgradeBase updated. --- .../src/python/onl/upgrade/loader.py | 121 ++++++++++++++++++ .../src/python/onl/upgrade/onie.py | 59 +++++++++ .../src/python/onl/upgrade/ubase.py | 47 +++---- 3 files changed, 204 insertions(+), 23 deletions(-) create mode 100755 packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py create mode 100755 packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py new file mode 100755 index 00000000..10df045e --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +############################################################ +# +# ONL Loader Upgrade +# +############################################################ +import os +import sys +import fnmatch + +from onl.upgrade import ubase +from onl.sysconfig import sysconfig +from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite + +class LoaderUpgradeBase(ubase.BaseUpgrade): + name="loader" + Name="Loader" + title="Loader Upgrade Check" + atype="A Loader" + + current_version_key="Current Loader Version" + next_version_key="Next Loader Version" + + def auto_upgrade_default(self): + return sysconfig.upgrade.loader.auto + + def init_versions(self): + + # + # Current Loader version file. + # If this file doesn't exist then in-place upgrade is not supported. + # + ETC_LOADER_VERSIONS_JSON = sysconfig.upgrade.loader.versions + + # Upgrade Loader Version file. + NEXT_LOADER_VERSIONS_JSON = os.path.join(sysconfig.upgrade.loader.package.dir, "manifest.json") + + + self.current_version = self.load_json(ETC_LOADER_VERSIONS_JSON, + "RELEASE_ID", + None) + + self.next_version = self.load_json(NEXT_LOADER_VERSIONS_JSON, + "version", {}).get('RELEASE_ID', None) + + def summarize(self): + self.logger.info("Current Loader Version: %s" % self.current_version) + self.logger.info(" Next Loader Version: %s" % self.next_version) + self.logger.info("") + + + def upgrade_notes(self): + return """ + * A single reboot will be required to complete this upgrade. 
+""" + + +class LoaderUpgrade_Fit(LoaderUpgradeBase): + + def do_upgrade(self, forced=False): + + fit_image = None + for f in sysconfig.upgrade.loader.package.fit: + fp = os.path.join(sysconfig.upgrade.loader.package.dir, f) + if os.path.exists(fp): + fit_image = fp; + break + + if fit_image is None: + self.abort("The FIT upgrade image is missing. Upgrade cannot continue.") + + with OnlMountContextReadWrite("ONL-BOOT", self.logger) as d: + self.copyfile(fit_image, os.path.join(d.directory, "%s.itb" % (self.platform.platform()))) + + self.reboot() + + +class LoaderUpgrade_x86_64(LoaderUpgradeBase): + + def do_upgrade(self, forced=False): + + X86_64_UPGRADE_DIR=sysconfig.upgrade.loader.package.dir + X86_64_UPGRADE_KERNEL_PATTERNS = [ "kernel-*" ] + + with OnlMountContextReadWrite("ONL-BOOT", self.logger) as d: + for f in os.listdir(X86_64_UPGRADE_DIR): + for pattern in X86_64_UPGRADE_KERNEL_PATTERNS: + if fnmatch.fnmatch(f, pattern): + self.copyfile(os.path.join(X86_64_UPGRADE_DIR, f), os.path.join(d.directory, f)) + + initrd = None + for c in sysconfig.upgrade.loader.package.grub: + initrd = os.path.join(X86_64_UPGRADE_DIR, c) + if os.path.exists(initrd): + break + else: + initrd = None + + if initrd: + self.copyfile(initrd, os.path.join(d.directory, "%s.cpio.gz" % self.platform.platform())) + else: + self.abort("Initrd is missing. Upgrade cannot continue.") + + # Disabled until it can be resolved with the new installer. + #src = "/lib/platform-config/current/onl/boot/grub.cfg" + #dst = os.path.join(d.directory, "grub/grub.cfg") + #if os.path.exists(src): + # self.copyfile(src, dst) + + self.reboot() + + +import platform +arch = platform.machine() +LoaderUpgrade = None + +if arch in [ 'ppc', 'armv7l', 'aarch64', 'arm64' ]: + LoaderUpgrade = LoaderUpgrade_Fit +elif arch == 'x86_64': + LoaderUpgrade = LoaderUpgrade_x86_64 + diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py new file mode 100755 index 00000000..a4f6beb2 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py @@ -0,0 +1,59 @@ +#!/usr/bin/python -u + +import os +import sys + +from onl.upgrade import ubase +from onl.sysconfig import sysconfig +from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite + +class OnieUpgrade(ubase.BaseOnieUpgrade): + + name="onie" + Name="ONIE" + title="ONIE Upgrade Check" + atype="An ONIE" + + current_version_key="Current ONIE Version" + next_version_key="Next ONIE Version" + + def init_versions(self): + + # Get the current platform ONIE version + self.current_version = self.platform.onie_version() + self.next_version = None + self.updater = None + + self.manifest = self.load_json(os.path.join(sysconfig.upgrade.onie.package.dir, "manifest.json")) + + if self.manifest is None: + self.finish("No ONIE updater available for the current platform.") + + if 'onie-version' not in self.manifest: + self.finish("No ONIE version in the upgrade manifest.") + else: + self.next_version = self.manifest['onie-version'] + + if 'onie-updater' not in self.manifest: + self.finish("No ONIE updater in the upgrade manifest.") + + + def summarize(self): + self.logger.info("Current ONIE Version: %s" % self.current_version) + self.logger.info(" Next ONIE Version: %s" % self.manifest.get('onie-version')) + self.logger.info(" Updater: %s" % self.manifest.get('onie-updater')) + self.logger.info("") + + def upgrade_notes(self): + return """ + * The system will reboot into 
ONIE to complete the update, and then reboot to return to Switch Light +""" + + def do_upgrade(self, forced=False): + self.install_onie_updater(sysconfig.upgrade.onie.package.dir, + self.manifest['onie-updater']) + self.initiate_onie_update() + + def do_no_upgrade(self): + self.clean_onie_updater() + diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py index 68f3828f..af2b677d 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py @@ -15,7 +15,9 @@ import string import argparse import yaml from time import sleep + from onl.platform.current import OnlPlatform +from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite class BaseUpgrade(object): @@ -365,19 +367,19 @@ If you choose not to perform this upgrade booting cannot continue.""" % self.aty class BaseOnieUpgrade(BaseUpgrade): - ONIE_UPDATER_PATH = "/mnt/flash2/onie-updater" + ONIE_UPDATER_CONTEXT = "ONL-IMAGES" + ONIE_UPDATER_PATH = "/mnt/onl/images" def install_onie_updater(self, src_dir, updater): - if type(updater) is list: - # Copy all files in the list to /mnt/flash2 + with OnlMountContextReadWrite(self.ONIE_UPDATER_CONTEXT, logger=None): + if type(updater) is not list: + updater = [ updater ] + + # Copy all files in the list to ONIE_UPDATER_PATH for f in updater: src = os.path.join(src_dir, f) - dst = os.path.join("/mnt/flash2", f) + dst = os.path.join(self.ONIE_UPDATER_PATH, f) self.copyfile(src, dst) - else: - # Copy single updater to /mnt/flash2/onie-updater - src = os.path.join(src_dir, updater) - self.copyfile(src, self.ONIE_UPDATER_PATH) def initiate_onie_update(self): @@ -394,27 +396,26 @@ class BaseOnieUpgrade(BaseUpgrade): self.abort("Could not set ONIE Boot Mode to Update. Upgrade cannot continue.") self.umount(OB) - SL = "/mnt/sl-boot" - self.mount(SL, label="SL-BOOT") - with open("/mnt/sl-boot/grub/grub.cfg", "a") as f: - f.write("set default=ONIE\n") - self.umount(SL) + with OnlMountContextReadWrite("ONL-BOOT", logger=None): + with open("/mnt/onl/boot/grub/grub.cfg", "a") as f: + f.write("set default=ONIE\n") self.reboot() else: self.abort("Architecture %s unhandled." % self.arch) def clean_onie_updater(self): - if os.path.exists(self.ONIE_UPDATER_PATH): - self.logger.info("Removing previous onie-updater.") - os.remove(self.ONIE_UPDATER_PATH) + with OnlMountContextReadWrite(self.ONIE_UPDATER_CONTEXT, logger=None): + updater = os.path.join(self.ONIE_UPDATER_PATH, "onie-updater") + if os.path.exists(updater): + self.logger.info("Removing previous onie-updater.") + os.remove(updater) - -def upgrade_status(): - data = {} - if os.path.exists(BaseUpgrade.UPGRADE_STATUS_JSON): - with open(BaseUpgrade.UPGRADE_STATUS_JSON) as f: - data = json.load(f) - return data +# def upgrade_status(): +# data = {} +# if os.path.exists(BaseUpgrade.UPGRADE_STATUS_JSON): +# with open(BaseUpgrade.UPGRADE_STATUS_JSON) as f: +# data = json.load(f) +# return data From 03bb6b718d43fe8444373ff6ea2568ddd5bbebce Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 19 Oct 2016 21:36:00 +0000 Subject: [PATCH 030/255] Add default ONIE upgrade package settings. 
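A minimal sketch of how this new default is consumed at boot time (the module paths and the sysconfig accessor style follow the onie.py and 00-defaults.yml hunks elsewhere in this series; the log message is illustrative):

    import os
    from onl.sysconfig import sysconfig

    # upgrade.onie.package.dir now defaults to the per-platform updater
    # payload at /lib/platform-config/current/onl/upgrade/onie.
    pkg_dir = sysconfig.upgrade.onie.package.dir
    manifest = os.path.join(pkg_dir, "manifest.json")

    # The ONIE upgrade check loads manifest.json from this directory; if the
    # file is absent, no updater is packaged for the platform and the check
    # finishes without scheduling an update.
    if not os.path.exists(manifest):
        print("No ONIE updater available for the current platform.")
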
--- .../src/boot.d/60.upgrade-onie | 3 + .../src/boot.d/61.upgrade-onie | 55 -------- .../src/boot.d/62.upgrade-loader | 129 +----------------- .../src/etc/onl/sysconfig/00-defaults.yml | 4 + 4 files changed, 9 insertions(+), 182 deletions(-) create mode 100755 packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie delete mode 100755 packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-onie diff --git a/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie b/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie new file mode 100755 index 00000000..a6daef06 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.upgrade.onie import OnieUpgrade +OnieUpgrade().main() diff --git a/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-onie b/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-onie deleted file mode 100755 index 5f0e8883..00000000 --- a/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-onie +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/python -u -from onl.upgrade import ubase - -class ONIE_Upgrade(ubase.BaseOnieUpgrade): - name="onie" - Name="ONIE" - title="ONIE Upgrade Check" - atype="An ONIE" - - current_version_key="Current ONIE Version" - next_version_key="Next ONIE Version" - - def init_versions(self): - - # Get the current platform ONIE version - self.current_version = self.platform.onie_version() - self.next_version = None - self.updater = None - - (udir, um, data) = self.platform.upgrade_manifest("onie") - self.udir = udir - self.data = data - - if data: - self.next_version = data.get('onie-version', None) - - if data: - self.updater = data.get('onie-updater', None) - - if self.updater is None: - self.finish("No ONIE updater available for the current platform.") - - def summarize(self): - self.logger.info("Current ONIE Version: %s" % self.current_version) - self.logger.info(" Next ONIE Version: %s" % self.next_version) - self.logger.info(" Force-Update: %s" % self.data['force-update']) - self.logger.info(" Updater: %s" % self.updater) - self.logger.info("") - - def upgrade_notes(self): - return """ - * The system will reboot into ONIE to complete the update, and then reboot to return to Switch Light -""" - - def do_upgrade(self, forced=False): - self.install_onie_updater(self.udir, self.updater) - self.initiate_onie_update() - - def do_no_upgrade(self): - self.clean_onie_updater() - - -if __name__ == '__main__': - ONIE_Upgrade().main() - diff --git a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader b/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader index afe514df..02aa6d92 100755 --- a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader +++ b/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader @@ -1,128 +1,3 @@ #!/usr/bin/python -############################################################ -# -# ONL Loader Upgrade -# -############################################################ -import os -import sys -import fnmatch -from onl.upgrade import ubase -from onl.sysconfig import sysconfig -from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite - -class Loader_Upgrade(ubase.BaseUpgrade): - name="loader" - Name="Loader" - title="Loader Upgrade Check" - atype="A Loader" - - current_version_key="Current Loader Version" - next_version_key="Next Loader Version" - - def auto_upgrade_default(self): - return sysconfig.upgrade.loader.auto - - def init_versions(self): - - # - # 
Current Loader version file. - # If this file doesn't exist then in-place upgrade is not supported. - # - ETC_LOADER_VERSIONS_JSON = sysconfig.upgrade.loader.versions - - # Upgrade Loader Version file. - NEXT_LOADER_VERSIONS_JSON = os.path.join(sysconfig.upgrade.loader.package.dir, "manifest.json") - - - self.current_version = self.load_json(ETC_LOADER_VERSIONS_JSON, - "RELEASE_ID", - None) - - self.next_version = self.load_json(NEXT_LOADER_VERSIONS_JSON, - "version", {}).get('RELEASE_ID', None) - - def summarize(self): - self.logger.info("Current Loader Version: %s" % self.current_version) - self.logger.info(" Next Loader Version: %s" % self.next_version) - self.logger.info("") - - - def upgrade_notes(self): - return """ - * A single reboot will be required to complete this upgrade. -""" - - -class Loader_Upgrade_FIT(Loader_Upgrade): - - def do_upgrade(self, forced=False): - - fit_image = None - for f in sysconfig.upgrade.loader.package.fit: - fp = os.path.join(sysconfig.upgrade.loader.package.dir, f) - if os.path.exists(fp): - fit_image = fp; - break - - if fit_image is None: - self.abort("The FIT upgrade image is missing. Upgrade cannot continue.") - - with OnlMountContextReadWrite("ONL-BOOT", self.logger) as d: - self.copyfile(fit_image, os.path.join(d.directory, "%s.itb" % (self.platform.platform()))) - - self.reboot() - - -class Loader_Upgrade_x86_64(Loader_Upgrade): - - def do_upgrade(self, forced=False): - - X86_64_UPGRADE_DIR=sysconfig.upgrade.loader.package.dir - X86_64_UPGRADE_KERNEL_PATTERNS = [ "kernel-*" ] - - with OnlMountContextReadWrite("ONL-BOOT", self.logger) as d: - for f in os.listdir(X86_64_UPGRADE_DIR): - for pattern in X86_64_UPGRADE_KERNEL_PATTERNS: - if fnmatch.fnmatch(f, pattern): - self.copyfile(os.path.join(X86_64_UPGRADE_DIR, f), os.path.join(d.directory, f)) - - initrd = None - for c in sysconfig.upgrade.loader.package.grub: - initrd = os.path.join(X86_64_UPGRADE_DIR, c) - if os.path.exists(initrd): - break - else: - initrd = None - - if initrd: - self.copyfile(initrd, os.path.join(d.directory, "%s.cpio.gz" % self.platform.platform())) - else: - self.abort("Initrd is missing. Upgrade cannot continue.") - - # Disabled until it can be resolved with the new installer. 
- #src = "/lib/platform-config/current/onl/boot/grub.cfg" - #dst = os.path.join(d.directory, "grub/grub.cfg") - #if os.path.exists(src): - # self.copyfile(src, dst) - - self.reboot() - - - - -if __name__ == '__main__': - import platform - - arch = platform.machine() - klass = None - - if arch in [ 'ppc', 'armv7l', 'aarch64', 'arm64' ]: - klass = Loader_Upgrade_FIT - elif arch == 'x86_64': - klass = Loader_Upgrade_x86_64 - else: - sys.stderr.write("Loader Upgrade: The current architecture (%s) is not supported for upgrade.\n" % arch) - - if klass: - klass().main() +from onl.upgrade.loader import LoaderUpgrade +LoaderUpgrade().main() diff --git a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml index 10278f7e..de61b257 100644 --- a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml +++ b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml @@ -19,8 +19,12 @@ installer: upgrade: onie: auto: advisory + package: + dir: /lib/platform-config/current/onl/upgrade/onie + system: auto: advisory + loader: auto: advisory versions: /etc/onl/loader/versions.json From b7879951f0cb4076e274298ff1071419061983da Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 20 Oct 2016 21:15:05 +0000 Subject: [PATCH 031/255] Commit the sysctl settings prior to first mount. --- packages/base/all/boot.d/src/50.initmounts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/base/all/boot.d/src/50.initmounts b/packages/base/all/boot.d/src/50.initmounts index 71f7410f..48717381 100755 --- a/packages/base/all/boot.d/src/50.initmounts +++ b/packages/base/all/boot.d/src/50.initmounts @@ -1,6 +1,10 @@ #!/bin/sh . /lib/lsb/init-functions + +# Apply the existing sysctl settings prior to first mount. +sysctl --quiet --system + log_action_begin_msg "Mounting filesystems..." onl-mounts -q mount all log_action_end_msg 0 From 4a5732241cf5248d575db10cf551d56c15129a47 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 20 Oct 2016 14:16:01 -0700 Subject: [PATCH 032/255] Add platform and firmware keys. 
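A short sketch of how the new firmware key is expected to be read (only the 'CPLD Versions' key name and the firmware_version() accessor come from this change; everything else is illustrative):

    from onl.platform.current import OnlPlatform

    platform = OnlPlatform()

    # platform_info is loaded from the optional platform-info.json in the
    # platform's ONL base directory; firmware_version() returns the value
    # stored under its 'CPLD Versions' key, which the firmware upgrade
    # check added later in this series compares against the packaged
    # updater's manifest.
    print(platform.firmware_version())
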
--- .../vendor-config-onl/src/python/onl/platform/base.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index f9e0e20e..23163d29 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -90,6 +90,10 @@ class OnieInfo(object): } +class PlatformInfo(object): + CPLD_VERSIONS='CPLD Versions' + + ############################################################ # # ONL Platform Base @@ -107,7 +111,7 @@ class OnlPlatformBase(object): def __init__(self): self.add_info_json("onie_info", "%s/onie-info.json" % self.basedir_onl(), OnieInfo, required=False) - self.add_info_json("platform_info", "%s/platform-info.json" % self.basedir_onl(), + self.add_info_json("platform_info", "%s/platform-info.json" % self.basedir_onl(), PlatformInfo, required=False) # Find the base platform config @@ -217,6 +221,9 @@ class OnlPlatformBase(object): def onie_version(self): return self.onie_info.ONIE_VERSION + def firmware_version(self): + return self.platform_info.CPLD_VERSIONS + def upgrade_manifest(self, type_, override_dir=None): if override_dir: m = os.path.join(override_dir, "manifest.json") From b077ed028bcb8b3c08447484a3870e2863be3b21 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 20 Oct 2016 21:18:24 +0000 Subject: [PATCH 033/255] Enable ONIE and Firmware Upgrade Package Support --- .../src/boot.d/61.upgrade-firmware | 3 ++ .../src/etc/onl/sysconfig/00-defaults.yml | 6 +++ .../src/python/onl/upgrade/firmware.py | 52 +++++++++++++++++++ .../src/python/onl/upgrade/onie.py | 33 +++--------- .../src/python/onl/upgrade/ubase.py | 22 +++++--- 5 files changed, 84 insertions(+), 32 deletions(-) create mode 100755 packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware create mode 100755 packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py diff --git a/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware b/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware new file mode 100755 index 00000000..aa23c80c --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.upgrade.firmware import FirmwareUpgrade +FirmwareUpgrade().main() diff --git a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml index de61b257..a9519949 100644 --- a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml +++ b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml @@ -25,6 +25,11 @@ upgrade: system: auto: advisory + firmware: + auto: advisory + package: + dir: /lib/platform-config/current/onl/upgrade/firmware + loader: auto: advisory versions: /etc/onl/loader/versions.json @@ -39,6 +44,7 @@ upgrade: swi: auto: disabled + pki: key: name: key.pem diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py new file mode 100755 index 00000000..e9a08088 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py @@ -0,0 +1,52 @@ +#!/usr/bin/python -u + +import os +import sys + +from onl.upgrade import ubase +from onl.sysconfig import sysconfig + +class FirmwareUpgrade(ubase.BaseOnieUpgrade): 
+ + name="firmware" + Name="Firmware" + title="Firmware Upgrade Check" + atype="A firmware" + + current_version_key="Current Firmware Version" + next_version_key="Next Firmware Version" + + def init_versions(self): + + # Get the current platform firmware version + self.current_version = self.platform.firmware_version() + self.next_version = None + self.updater = None + self.load_manifest(os.path.join(sysconfig.upgrade.firmware.package.dir, "manifest.json")) + + def do_upgrade(self, forced=False): + self.install_onie_updater(sysconfig.upgrade.firmware.package.dir, + self.manifest['updater']) + self.initiate_onie_update() + + + def upgrade_notes(self): + notes = """ + * Two reboots will be required to complete this upgrade. + + * Do not turn the power off on this device until the upgrade is complete. + Disrupting power during the firmware upgrade may result in an unrecoverable system.""" + + duration = self.manifest.get("duration", None) + if duration: + notes = notes + """ + + * THIS UPGRADE WILL REQUIRE APPROXIMATELY %s MINUTES. + The system will reboot when completed.""" % duration + + return notes + + + def do_no_upgrade(self): + self.clean_onie_updater() + diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py index a4f6beb2..fab08c0d 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py @@ -5,7 +5,6 @@ import sys from onl.upgrade import ubase from onl.sysconfig import sysconfig -from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite class OnieUpgrade(ubase.BaseOnieUpgrade): @@ -23,37 +22,19 @@ class OnieUpgrade(ubase.BaseOnieUpgrade): self.current_version = self.platform.onie_version() self.next_version = None self.updater = None + self.load_manifest(os.path.join(sysconfig.upgrade.onie.package.dir, "manifest.json")) - self.manifest = self.load_json(os.path.join(sysconfig.upgrade.onie.package.dir, "manifest.json")) + def do_upgrade(self, forced=False): + self.install_onie_updater(sysconfig.upgrade.onie.package.dir, + self.manifest['updater']) + self.initiate_onie_update() - if self.manifest is None: - self.finish("No ONIE updater available for the current platform.") - - if 'onie-version' not in self.manifest: - self.finish("No ONIE version in the upgrade manifest.") - else: - self.next_version = self.manifest['onie-version'] - - if 'onie-updater' not in self.manifest: - self.finish("No ONIE updater in the upgrade manifest.") - - - def summarize(self): - self.logger.info("Current ONIE Version: %s" % self.current_version) - self.logger.info(" Next ONIE Version: %s" % self.manifest.get('onie-version')) - self.logger.info(" Updater: %s" % self.manifest.get('onie-updater')) - self.logger.info("") + def do_no_upgrade(self): + self.clean_onie_updater() def upgrade_notes(self): return """ * The system will reboot into ONIE to complete the update, and then reboot to return to Switch Light """ - def do_upgrade(self, forced=False): - self.install_onie_updater(sysconfig.upgrade.onie.package.dir, - self.manifest['onie-updater']) - self.initiate_onie_update() - - def do_no_upgrade(self): - self.clean_onie_updater() diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py index af2b677d..2b5f6c22 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py +++ 
b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py @@ -411,11 +411,21 @@ class BaseOnieUpgrade(BaseUpgrade): self.logger.info("Removing previous onie-updater.") os.remove(updater) + def load_manifest(self, path): + self.manifest = self.load_json(path) -# def upgrade_status(): -# data = {} -# if os.path.exists(BaseUpgrade.UPGRADE_STATUS_JSON): -# with open(BaseUpgrade.UPGRADE_STATUS_JSON) as f: -# data = json.load(f) -# return data + if self.manifest is None: + self.finish("No %s updater available for the current platform." % self.Name) + if 'version' not in self.manifest: + self.finish("No %s version in the upgrade manifest." % self.Name) + else: + self.next_version = self.manifest['version'] + + if 'updater' not in self.manifest: + self.finish("No %s updater in the upgrade manifest." % self.Name) + + def summarize(self): + self.logger.info("Current %s Version: %s" % (self.Name, self.current_version)) + self.logger.info(" Next %s Version: %s" % (self.Name, self.manifest.get('version'))) + self.logger.info("") From 306842e7cc6bffb9b8e5a97ab718696317b989d3 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 21 Oct 2016 06:56:09 -0700 Subject: [PATCH 034/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 5ae11894..14f30a9e 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 5ae11894fd5667434ad8ce5e7ae4b17eaf06567b +Subproject commit 14f30a9e614fb075ae591838f9859eb1e3f188be From 222ffd25047d12699d01e68c1008b787cec5b273 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 21 Oct 2016 16:02:32 +0000 Subject: [PATCH 035/255] Fix uninstallation. --- packages/base/all/vendor-config-onl/PKG.yml | 2 +- .../all/vendor-config-onl/src/sbin/uninstall | 59 +++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) create mode 100755 packages/base/all/vendor-config-onl/src/sbin/uninstall diff --git a/packages/base/all/vendor-config-onl/PKG.yml b/packages/base/all/vendor-config-onl/PKG.yml index 3dc90ee8..0c0727bb 100644 --- a/packages/base/all/vendor-config-onl/PKG.yml +++ b/packages/base/all/vendor-config-onl/PKG.yml @@ -17,6 +17,6 @@ packages: - src/boot.d : /etc/boot.d - src/bin : /usr/bin - src/lib : /lib/vendor-config/onl - + - src/sbin : /sbin changelog: Changes diff --git a/packages/base/all/vendor-config-onl/src/sbin/uninstall b/packages/base/all/vendor-config-onl/src/sbin/uninstall new file mode 100755 index 00000000..8fe4742e --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/sbin/uninstall @@ -0,0 +1,59 @@ +#!/bin/sh +set -e + +uninstall_x86_64() +{ + # + # Set ONIE boot selection to uninstall + # + mkdir -p /mnt/onie-boot + mount -L ONIE-BOOT /mnt/onie-boot > /dev/null 2>&1 + + if [ "$1" = "factory" ]; then + /mnt/onie-boot/onie/tools/bin/onie-boot-mode -o uninstall + else + /mnt/onie-boot/onie/tools/bin/onie-boot-mode -o install + fi + + umount /mnt/onie-boot + + # + # Select ONIE as the boot default + # + onl-mounts mount boot --rw + echo "set default=ONIE" >> /mnt/onl/boot/grub/grub.cfg + onl-mounts mount boot +} + +uninstall_uboot() +{ + if [ "$1" = "factory" ]; then + fw_setenv onie_boot_reason uninstall + else + fw_setenv nos_bootcmd echo + fi +} + +uninstall() +{ + case `uname -m` in + x86_64) + uninstall_x86_64 $1 + ;; + ppc|armv7l) + uninstall_uboot $1 + ;; + *) + echo "Uninstall for the current architecture is not implemented. This is a bug." 
+ exit 1 + ;; + esac +} + +############################################################ + +uninstall $1 +echo "The NOS will be removed at the next reboot." +exit 0 + + From 1d52f43a0b4ee0fab6fc5bb401822babd2cd5839 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 21 Oct 2016 18:12:33 +0000 Subject: [PATCH 036/255] Upgrade separation and cleanup. --- .../src/boot.d/60.upgrade-onie | 4 +-- .../src/boot.d/61.upgrade-firmware | 5 ++- .../src/boot.d/62.upgrade-loader | 4 +-- .../src/python/onl/upgrade/loader.py | 3 ++ .../src/python/onl/upgrade/onie.py | 1 - .../src/python/onl/upgrade/ubase.py | 35 ++++++++++++------- .../src/sbin/onl-upgrade-firmware | 3 ++ .../src/sbin/onl-upgrade-loader | 3 ++ .../src/sbin/onl-upgrade-onie | 3 ++ 9 files changed, 39 insertions(+), 22 deletions(-) create mode 100755 packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-firmware create mode 100755 packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-loader create mode 100755 packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-onie diff --git a/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie b/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie index a6daef06..540e24d3 100755 --- a/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie +++ b/packages/base/all/vendor-config-onl/src/boot.d/60.upgrade-onie @@ -1,3 +1 @@ -#!/usr/bin/python -from onl.upgrade.onie import OnieUpgrade -OnieUpgrade().main() +/sbin/onl-upgrade-onie diff --git a/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware b/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware index aa23c80c..41880e16 100755 --- a/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware +++ b/packages/base/all/vendor-config-onl/src/boot.d/61.upgrade-firmware @@ -1,3 +1,2 @@ -#!/usr/bin/python -from onl.upgrade.firmware import FirmwareUpgrade -FirmwareUpgrade().main() +/sbin/onl-upgrade-firmware + diff --git a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader b/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader index 02aa6d92..f802031a 100755 --- a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader +++ b/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader @@ -1,3 +1 @@ -#!/usr/bin/python -from onl.upgrade.loader import LoaderUpgrade -LoaderUpgrade().main() +/sbin/onl-upgrade-loader diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py index 10df045e..6637c9a7 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py @@ -43,6 +43,9 @@ class LoaderUpgradeBase(ubase.BaseUpgrade): self.next_version = self.load_json(NEXT_LOADER_VERSIONS_JSON, "version", {}).get('RELEASE_ID', None) + def prepare_upgrade(self): + pass + def summarize(self): self.logger.info("Current Loader Version: %s" % self.current_version) self.logger.info(" Next Loader Version: %s" % self.next_version) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py index fab08c0d..4cf81016 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py @@ -17,7 +17,6 @@ class OnieUpgrade(ubase.BaseOnieUpgrade): next_version_key="Next ONIE Version" def init_versions(self): - # Get the 
current platform ONIE version self.current_version = self.platform.onie_version() self.next_version = None diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py index 2b5f6c22..86562b61 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py @@ -41,6 +41,10 @@ class BaseUpgrade(object): self.platform = OnlPlatform() self.init() + self.current_version = None + self.next_version = None + self.init_versions() + def init(self): pass @@ -161,11 +165,16 @@ class BaseUpgrade(object): UPGRADE_STATUS_JSON = "/lib/platform-config/current/onl/upgrade.json" - def update_upgrade_status(self, key, value): + @staticmethod + def upgrade_status_get(): data = {} - if os.path.exists(self.UPGRADE_STATUS_JSON): - with open(self.UPGRADE_STATUS_JSON) as f: + if os.path.exists(BaseUpgrade.UPGRADE_STATUS_JSON): + with open(BaseUpgrade.UPGRADE_STATUS_JSON) as f: data = json.load(f) + return data + + def update_upgrade_status(self, key, value): + data = self.upgrade_status_get() data[key] = value with open(self.UPGRADE_STATUS_JSON, "w") as f: json.dump(data, f) @@ -177,6 +186,8 @@ class BaseUpgrade(object): def init_versions(self): raise Exception("init_versions() must be provided by the deriving class.") + def prepare_upgrade(self): + raise Exception("prepare_versions() must be provided by the deriving class.") # # Perform actual upgrade. Provided by derived class. @@ -192,9 +203,7 @@ class BaseUpgrade(object): def init_upgrade(self): - self.current_version = None - self.next_version = None - self.init_versions() + self.prepare_upgrade() self.update_upgrade_status(self.current_version_key, self.current_version) self.update_upgrade_status(self.next_version_key, self.next_version) @@ -411,20 +420,22 @@ class BaseOnieUpgrade(BaseUpgrade): self.logger.info("Removing previous onie-updater.") os.remove(updater) - def load_manifest(self, path): - self.manifest = self.load_json(path) - + def prepare_upgrade(self): if self.manifest is None: self.finish("No %s updater available for the current platform." % self.Name) - if 'version' not in self.manifest: + if self.next_version is None: self.finish("No %s version in the upgrade manifest." % self.Name) - else: - self.next_version = self.manifest['version'] if 'updater' not in self.manifest: self.finish("No %s updater in the upgrade manifest." 
% self.Name) + def load_manifest(self, path, required=True): + self.manifest = self.load_json(path) + self.next_version = None + if self.manifest: + self.next_version = self.manifest.get('version', None) + def summarize(self): self.logger.info("Current %s Version: %s" % (self.Name, self.current_version)) self.logger.info(" Next %s Version: %s" % (self.Name, self.manifest.get('version'))) diff --git a/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-firmware b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-firmware new file mode 100755 index 00000000..aa23c80c --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-firmware @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.upgrade.firmware import FirmwareUpgrade +FirmwareUpgrade().main() diff --git a/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-loader b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-loader new file mode 100755 index 00000000..02aa6d92 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-loader @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.upgrade.loader import LoaderUpgrade +LoaderUpgrade().main() diff --git a/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-onie b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-onie new file mode 100755 index 00000000..a6daef06 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-onie @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.upgrade.onie import OnieUpgrade +OnieUpgrade().main() From 820cc3d9737997e09051d746411335bbd6de46de Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 25 Oct 2016 18:02:00 +0000 Subject: [PATCH 037/255] - DHCP Timeout support in the Loader environment When $NETAUTO=dhcp the transaction will now timeout after $NETRETRIES attempts. If the system has a local install or SWI then booting can continue. For systems which require DHCP to be functional before booting can continue (for example SWIs downloaded via URL rather than locally) then NETRETRIES=infinite should be set in the boot-config. NETRETRIES can be set to any number of attempts, or infinite. The default value is 5 attempts. - Wait for ma1 linkup in the loader A short wait has been introduced to allow ma1 to linkup prior to continuing. --- .../initrds/loader-initrd-files/src/bin/ifup | 56 ++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/ifup b/packages/base/all/initrds/loader-initrd-files/src/bin/ifup index b9edbe67..eeef3eff 100644 --- a/packages/base/all/initrds/loader-initrd-files/src/bin/ifup +++ b/packages/base/all/initrds/loader-initrd-files/src/bin/ifup @@ -25,6 +25,7 @@ # Configure a network interface from settings in /etc/onl/NET # NETDEV: device name # NETAUTO: autoconfiguration method ("dhcp" or empty) +# NETRETRIES: autoconfiguration timeout # NETIP: IP address (/prefix optional for v4) # NETMASK: netmask (if NETIP has no prefix) # NETGW: default gateway IP address (optional) @@ -43,10 +44,39 @@ if [ "${NETHW}" ]; then ip link set dev ${NETDEV} addr ${NETHW} fi +# Default DHCP timeout is 10 requests in 10 seconds. +NETRETRIES_DEFAULT=10 +NETRETRIES=${NETRETRIES:-$NETRETRIES_DEFAULT} +if [ "$NETRETRIES" = "infinite" ]; then + NETRETRIES= +elif [ $(echo "$NETRETRIES" | tr -d '[:digit:]') ] || [ "$NETRETRIES" -lt 0 ]; then + echo "Warning: the NETRETRIES setting is currently '$NETRETRIES'. This is invalid and the default value of $NETRETRIES_DEFAULT will be used instead." 
+ NETRETRIES=$NETRETRIES_DEFAULT +fi case "${NETAUTO}" in dhcp|auto) echo 1 >/proc/sys/net/ipv6/conf/${NETDEV}/autoconf - udhcpc -i ${NETDEV} + if [ -n "${NETRETRIES}" ]; then + if ! udhcpc --retries $NETRETRIES --now -i ${NETDEV}; then + echo "**********************************************************************" + echo "DHCP failed after $NETRETRIES attempts." + echo "**********************************************************************" + fi + else + while true; do + if udhcpc --retries $NETRETRIES_DEFAULT --now -i ${NETDEV}; then + break + fi + echo + echo "**********************************************************************" + echo "DHCP failed after $NETRETRIES_DEFAULT attempts." + echo "" + echo "No timeout is configured so DHCP requests will continue until successful." + echo " Press Ctrl-C to terminate and configure manually." + echo "" + echo "**********************************************************************" + done + fi ;; up) ifconfig "${NETDEV}" up @@ -87,3 +117,27 @@ for i in $(seq 30); do fi sleep 1 done +wait_link_up() +{ + local intf=$1 + local count=$2 + + local operstate="/sys/class/net/${intf}/operstate" + + echo "Waiting for link on ${intf}..." + local i=0 + [ -r $operstate ] && while [ $i -lt $count ] ; do + intf_operstate="$(cat $operstate)" + if [ "$intf_operstate" = "up" -o "$intf_operstate" = "unknown" ] ; then + echo "${intf}: up" + return 0 + fi + usleep 100000 + i=$(( $i + 1 )) + done + + echo "${intf}: down." + return 1 +} + +wait_link_up $NETDEV 100 From bff5e68d73511ef52ce67f0031d82852443742b5 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 25 Oct 2016 18:07:26 +0000 Subject: [PATCH 038/255] New SYSTEM_COMPATIBILITY_VERSION (SCV) key This key is designed to indicate the overall system compatibility between different installations. For example, the system configuration in ONL2.0 is different from ONL 1.0 (partition scheme, loader/version management, etc) and attempting to boot a 1.0 switch image on a 2.0 system will not work properly. This key exists to facilitate operational compatibility and upgrade steps. When a switch image is booted against a loader with an SCV different than it expects then a full re-install will need to be performed in order to upgrade the system before that image can run. If the SCV is different then an in-place loader-upgrade cannot be performed either. The upgrade process will be updated to include an SCV check and perform a re-install. The loader upgrade sequence will be predicated on SCV equality. If the SCV is not equal after all upgrade operations then switch has the option of rebooting instead of continuing the init process. This will be configured through the sysconfig interface. From ac3475f215f12b30af3ec7665331b9e45ccb8467 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Wed, 26 Oct 2016 12:23:28 -0700 Subject: [PATCH 039/255] Added upgrade-system step --- .../src/boot.d/62.upgrade-system | 1 + .../src/python/onl/install/SystemInstall.py | 307 ++++++++++++++++++ .../src/python/onl/upgrade/system.py | 78 +++++ .../src/sbin/onl-install-system | 7 + .../src/sbin/onl-upgrade-system | 3 + 5 files changed, 396 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-system create mode 100644 packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py create mode 100644 packages/base/all/vendor-config-onl/src/python/onl/upgrade/system.py create mode 100755 packages/base/all/vendor-config-onl/src/sbin/onl-install-system create mode 100755 packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-system diff --git a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-system b/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-system new file mode 100755 index 00000000..4fb8aadb --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-system @@ -0,0 +1 @@ +/sbin/onl-upgrade-system diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py new file mode 100644 index 00000000..d96557ac --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py @@ -0,0 +1,307 @@ +"""App.py + +Application code for swl-install. +""" + +import logging +import os, sys +import json +import tempfile +import zipfile +import shutil +import argparse +import fnmatch +import subprocess + +from onl.install.InstallUtils import InitrdContext, MountContext +from onl.install.InstallUtils import BlkidParser, ProcMountsParser +from onl.install.ConfUtils import MachineConf, InstallerConf +from onl.install.ShellApp import Onie, Upgrader +import onl.install.App +from onl.sysconfig import sysconfig +from onl.platform.current import OnlPlatform + +from onl.install.InstallUtils import SubprocessMixin + +class UpgradeHelper(Upgrader): + + def __init__(self, callback=None, log=None): + super(UpgradeHelper, self).__init__(log=log) + self.callback = callback + + def _runInitrdShell(self, p): + if self.callback is not None: + self.callback(self, p) + +class OnlBootContext(object): + + def __init__(self, log=None): + self.log = log or logging.getLogger(self.__class__.__name__) + self.dctx = None + self.dir = None + + def __enter__(self): + pm = ProcMountsParser() + logger = self.log.getChild("blkid") + blkid = BlkidParser(log=logger) + + try: + dev = blkid['ONL-BOOT'].device + except IndexError: + dev = None + if dev is None: + raise ValueError("cannot find ONL-BOOT") + + parts = [p for p in pm.mounts if p.device == dev] + if parts: + self.log.debug("found ONL-BOOT at %s", parts[0].dir) + self.dir = parts[0].dir + return self + + # else, mount this: + with MountContext(dev, log=self.log) as self.dctx: + self.log.debug("mounted ONL-BOOT %s --> %s", + dev, self.dctx.dir) + self.dir = self.dctx.dir + self.dctx.detach() + + return self + + def __exit__(self, type, value, tb): + dctx, self.dctx = self.dctx, None + if dctx is not None: + dctx.attach() + dctx.shutdown() + return False + +class App(SubprocessMixin): + + def __init__(self, force=False, log=None): + + if log is not None: + self.log = log + else: + self.log = logging.getLogger(self.__class__.__name__) + + self.force = force + + self.onieHelper = None + + def _runInitrd(self, helper, path): + with InitrdContext(initrd=path, 
log=self.log) as ctx: + + tdir = os.path.join(ctx.dir, "tmp") + abs_idir = tempfile.mkdtemp(dir=tdir, + prefix="installer-", suffix=".d") + chroot_idir = abs_idir[len(ctx.dir):] + + self.onieHelper = onl.install.App.OnieHelper(log=self.log) + code = self.onieHelper.run() + if code: + self.log.error("cannot find/unpack ONIE initrd") + return code + self.log.info("onie directory is %s", self.onieHelper.onieDir) + self.log.info("initrd directory is %s", self.onieHelper.initrdDir) + + src = os.path.join(self.onieHelper.initrdDir, "etc/machine.conf") + dst = os.path.join(ctx.dir, "etc/machine.conf") + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) + + h, self.onieHelper = self.onieHelper, None + if h is not None: + h.shutdown() + + src = "/etc/fw_env.config" + if os.path.exists(src): + dst = os.path.join(ctx.dir, "etc/fw_env.config") + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) + + srcRoot = "/etc/onl" + dstRoot = os.path.join(ctx.dir, "etc") + self.cpR(srcRoot, dstRoot) + + # constitute an /etc/onl/installer.conf in place + installerConf = InstallerConf(path="/dev/null") + + with open("/etc/onl/loader/versions.json") as fd: + data = json.load(fd) + installerConf.onl_version = data['VERSION_ID'] + + installerConf.installer_dir = chroot_idir + + abs_postinst = tempfile.mktemp(dir=abs_idir, + prefix="postinst-", suffix=".sh") + chroot_postinst = abs_postinst[len(ctx.dir):] + installerConf.installer_postinst = chroot_postinst + + # make an empty(ish) zip file (local path in installer_dir) for collateral + zipPath = tempfile.mktemp(dir=abs_idir, + prefix="install-", suffix=".zip") + with zipfile.ZipFile(zipPath, "w") as zf: + pass + installerConf.installer_zip = os.path.split(zipPath)[1] + + # finalize the local installer.conf + dst = os.path.join(ctx.dir, "etc/onl/installer.conf") + with open(dst, "w") as fd: + fd.write(installerConf.dumps()) + + # populate installer_dir with the contents of the loader upgrade + # See also Loader_Upgrade_x86_64.do_upgrade + # Here the initrd filename is as per the installer.zip; + # it is renamed on install to the grub directory + sdir = sysconfig.upgrade.loader.package.dir + + # get kernels for grub installs: + pats = ["kernel-*",] + for f in os.listdir(sdir): + for pat in pats: + if fnmatch.fnmatch(f, pat): + src = os.path.join(sdir, f) + dst = os.path.join(abs_idir, f) + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) + try: + l = sysconfig.upgrade.loader.package.grub + except AttributeError: + l = [] + for f in l: + src = os.path.join(sdir, f) + if os.path.exists(src): + dst = os.path.join(abs_idir, f) + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) + + # get FIT files from powerpc installs: + try: + l = sysconfig.upgrade.loader.package.fit + except AttributeError: + l = [] + for f in l: + src = os.path.join(sdir, f) + if os.path.exists(src): + dst = os.path.join(abs_idir, f) + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) + + with OnlBootContext(log=self.log) as octx: + src = os.path.join(octx.dir, "boot-config") + dst = os.path.join(abs_idir, "boot-config") + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) + + # chroot to the onl-install script + ##cmd = ('chroot', ctx.dir, + ## '/bin/sh', '-i') + if self.log.level < logging.INFO: + cmd = ('chroot', ctx.dir, "/usr/bin/onl-install", "--verbose", "--force",) + else: + cmd = ('chroot', ctx.dir, "/usr/bin/onl-install", "--force",) + try: + self.check_call(cmd) + except 
subprocess.CalledProcessError, what: + pass + + def run(self): + """XXX roth -- migrate this to onl.install.App.App + + XXX roth -- assume TMPDIR=/tmp. + """ + + pm = ProcMountsParser() + + # resize /tmp to be large enough for the initrd, see tmpfs + # nonsense in installer.sh.in + tflags = None + tdev = os.stat('/tmp').st_dev + pdir = None + for m in pm.mounts: + if m.fsType in ('ramfs', 'tmpfs',): + dev = os.stat(m.dir).st_dev + if dev == tdev: + self.log.info("found tmpfs/ramfs %s (%s)", dev, m.flags) + pdir = m.dir + tflags = m.flags + + # XXX glean this from install.sh.in (installer_tmpfs_kmin) + if pdir is None: + self.check_call(('mount', + '-o', 'size=1048576k', + '-t', 'tmpfs', + 'tmpfs', '/tmp',)) + else: + self.check_call(('mount', + '-o', 'remount,size=1048576k', + pdir,)) + + for m in pm.mounts: + if m.dir.startswith('/mnt/onl'): + if not self.force: + self.log.error("directory %s is still mounted (try --force)", m.dir) + return 1 + self.log.warn("unmounting %s (--force)", m.dir) + self.check_call(('umount', m.dir,)) + + upgrader = UpgradeHelper(callback=self._runInitrd, log=self.log) + try: + code = upgrader.run() + except: + self.log.exception("upgrader failed") + code = 1 + upgrader.shutdown() + return code + + def shutdown(self): + + h, self.onieHelper = self.onieHelper, None + if h is not None: + h.shutdown() + + @classmethod + def main(cls): + + logging.basicConfig() + logger = logging.getLogger("swl-install") + logger.setLevel(logging.DEBUG) + + # send to ONIE log + hnd = logging.FileHandler("/dev/console") + logger.addHandler(hnd) + logger.propagate = False + + onie_verbose = 'onie_verbose' in os.environ + installer_debug = 'installer_debug' in os.environ + + ap = argparse.ArgumentParser() + ap.add_argument('-v', '--verbose', action='store_true', + default=onie_verbose, + help="Enable verbose logging") + ap.add_argument('-D', '--debug', action='store_true', + default=installer_debug, + help="Enable python debugging") + ap.add_argument('-F', '--force', action='store_true', + help="Unmount filesystems before install") + ops = ap.parse_args() + + if ops.verbose: + logger.setLevel(logging.DEBUG) + + app = cls(force=ops.force, + log=logger) + try: + code = app.run() + except: + logger.exception("runner failed") + code = 1 + if ops.debug: + app.post_mortem() + + app.shutdown() + sys.exit(code) + +main = App.main + +if __name__ == "__main__": + main() diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/system.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/system.py new file mode 100644 index 00000000..9bcbf3cd --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/system.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +############################################################ +# +# ONL System Upgrade +# +############################################################ +import os +import sys +import fnmatch +from onl.upgrade import ubase +from onl.sysconfig import sysconfig +from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite + +from onl.install.SystemInstall import App + +class SystemUpgrade(ubase.BaseUpgrade): + name="system" + Name="System" + title="System Compatibility Version Check" + atype="A Compatible System" + + current_version_key="Current System Compatibility Version" + next_version_key="Next System Compatibility Version" + + def auto_upgrade_default(self): + return sysconfig.upgrade.system.auto + + def init_versions(self): + + # + # Current loader version file. 
+ # If this file doesn't exist then in-place upgrade is not supported. + # + ETC_LOADER_VERSIONS_JSON = sysconfig.upgrade.loader.versions + + # Upgrade Loader Version file. + NEXT_LOADER_VERSIONS_JSON = os.path.join(sysconfig.upgrade.loader.package.dir, "manifest.json") + + VKEY = "SYSTEM_COMPATIBILITY_VERSION" + + self.current_version = self.load_json(ETC_LOADER_VERSIONS_JSON, VKEY, None) + + self.next_version = self.load_json(NEXT_LOADER_VERSIONS_JSON, + "version", {}).get(VKEY, None) + + def prepare_upgrade(self): + pass + + def summarize(self): + self.logger.info("Current System Compatibility Version: %s", + self.current_version) + self.logger.info(" Next System Compatibility Version: %s", + self.next_version) + self.logger.info("") + + + def upgrade_notes(self): + return """ + * One or more reboots will be required to complete this upgrade. +""" + + def do_upgrade(self, forced=False): + app = App(force=True, log=self.logger) + try: + code = app.run() + except: + self.logger.exception("upgrade failed") + code = 1 + app.shutdown() + if code: + self.abort("System upgrade failed.") + else: + self.logger.info("Upgrade succeeded, rebooting") + self.reboot() + +if __name__ == '__main__': + klass = SystemUpgrade + klass().main() diff --git a/packages/base/all/vendor-config-onl/src/sbin/onl-install-system b/packages/base/all/vendor-config-onl/src/sbin/onl-install-system new file mode 100755 index 00000000..9ae0fea3 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/sbin/onl-install-system @@ -0,0 +1,7 @@ +#!/usr/bin/python + +"""Re-install ONL using the ONL installer infrastructure. +""" + +import onl.install.SystemInstall +onl.install.SystemInstall.main() diff --git a/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-system b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-system new file mode 100755 index 00000000..efc4bdc7 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/sbin/onl-upgrade-system @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.upgrade.system import SystemUpgrade +SystemUpgrade().main() From 12b7eb52dfa6bfb41ac840d9773fcaba9636ba54 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 26 Oct 2016 12:23:55 -0700 Subject: [PATCH 040/255] Move upgrade scripts to make room --- .../src/boot.d/{62.upgrade-loader => 63.upgrade-loader} | 0 .../src/boot.d/{63.upgrade-swi => 64.upgrade-swi} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename packages/base/all/vendor-config-onl/src/boot.d/{62.upgrade-loader => 63.upgrade-loader} (100%) rename packages/base/all/vendor-config-onl/src/boot.d/{63.upgrade-swi => 64.upgrade-swi} (100%) diff --git a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader b/packages/base/all/vendor-config-onl/src/boot.d/63.upgrade-loader similarity index 100% rename from packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-loader rename to packages/base/all/vendor-config-onl/src/boot.d/63.upgrade-loader diff --git a/packages/base/all/vendor-config-onl/src/boot.d/63.upgrade-swi b/packages/base/all/vendor-config-onl/src/boot.d/64.upgrade-swi similarity index 100% rename from packages/base/all/vendor-config-onl/src/boot.d/63.upgrade-swi rename to packages/base/all/vendor-config-onl/src/boot.d/64.upgrade-swi From 515a881bb5777e640c45846385aa0b62775b5fe0 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Wed, 26 Oct 2016 17:33:29 -0700 Subject: [PATCH 041/255] Oops, botched merge --- .../all/vendor-config-onl/src/python/onl/install/BaseInstall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index dbdb0a5a..fffab336 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -693,7 +693,7 @@ class UbootInstaller(SubprocessMixin, Base): self.log.error("cannot get partition table from %s: %s", self.device, str(ex)) except Exception: - self.log.exception("cannot get partition table from %s" + self.log.exception("cannot get partition table from %s", self.device) self.log.info("creating msdos label on %s") From b8669dc2090b79ee5627fb0ea656715f3dd413d3 Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Tue, 1 Nov 2016 15:22:20 -0700 Subject: [PATCH 042/255] support SMBus block read --- .../base/any/onlp/src/onlplib/module/inc/onlplib/i2c.h | 5 +++++ packages/base/any/onlp/src/onlplib/module/src/i2c.c | 10 ++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/i2c.h b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/i2c.h index 39741014..e41ae0c0 100644 --- a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/i2c.h +++ b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/i2c.h @@ -66,6 +66,11 @@ */ #define ONLP_I2C_F_USE_BLOCK_READ 0x20 +/** + * Use SMBUS block reads if possible. + */ +#define ONLP_I2C_F_USE_SMBUS_BLOCK_READ 0x40 + /** * @brief Open and prepare for reading or writing. * @param bus The i2c bus number. diff --git a/packages/base/any/onlp/src/onlplib/module/src/i2c.c b/packages/base/any/onlp/src/onlplib/module/src/i2c.c index 480f7631..6c8ee372 100644 --- a/packages/base/any/onlp/src/onlplib/module/src/i2c.c +++ b/packages/base/any/onlp/src/onlplib/module/src/i2c.c @@ -105,11 +105,17 @@ onlp_i2c_block_read(int bus, uint8_t addr, uint8_t offset, int size, int count = size; uint8_t* p = rdata; while(count > 0) { + int rv; int rsize = (count >= ONLPLIB_CONFIG_I2C_BLOCK_SIZE) ? ONLPLIB_CONFIG_I2C_BLOCK_SIZE : count; - int rv = i2c_smbus_read_i2c_block_data(fd, - p - rdata, + if(flags & ONLP_I2C_F_USE_SMBUS_BLOCK_READ) { + rv = i2c_smbus_read_block_data(fd, offset, p); + } else { + rv = i2c_smbus_read_i2c_block_data(fd, + offset, rsize, p); + offset += rsize; + } if(rv != rsize) { AIM_LOG_ERROR("i2c-%d: reading address 0x%x, offset %d, size=%d failed: %{errno}", From abf0e19cc661215dbc2fe429cb02143420420196 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 2 Nov 2016 20:16:36 +0000 Subject: [PATCH 043/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index cb5096ee..fb7ec7a2 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit cb5096eeaeb19846ef2d5367facf349083f9f933 +Subproject commit fb7ec7a2f20f6cffd5dbd1f0a28a59407eb638ed From ac87756cda7406faa8be973fa20b180a76ce0bfc Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 3 Nov 2016 19:59:56 +0000 Subject: [PATCH 044/255] Patch CVE-2016-5195 (Dirty Cow). 
--- .../patches/CVE-2016-5195.patch | 75 +++++++++++++++++++ .../kernels/3.2.65-1+deb7u2/patches/series | 1 + packages/base/any/kernels/legacy/linux-3.8.13 | 2 +- packages/base/any/kernels/legacy/linux-3.9.6 | 2 +- 4 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/CVE-2016-5195.patch diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/CVE-2016-5195.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/CVE-2016-5195.patch new file mode 100644 index 00000000..199eb2ce --- /dev/null +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/CVE-2016-5195.patch @@ -0,0 +1,75 @@ +diff -urpN a/include/linux/mm.h b/include/linux/mm.h +--- a/include/linux/mm.h 2016-11-02 14:46:33.278862661 -0700 ++++ b/include/linux/mm.h 2016-11-02 14:47:01.338863270 -0700 +@@ -1526,6 +1526,7 @@ struct page *follow_page(struct vm_area_ + #define FOLL_MLOCK 0x40 /* mark page as mlocked */ + #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ + #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ ++#define FOLL_COW 0x4000 /* internal GUP flag */ + + typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); +diff -urpN a/mm/memory.c b/mm/memory.c +--- a/mm/memory.c 2016-11-02 14:46:33.938862676 -0700 ++++ b/mm/memory.c 2016-11-02 14:50:52.086868277 -0700 +@@ -1427,6 +1427,23 @@ int zap_vma_ptes(struct vm_area_struct * + } + EXPORT_SYMBOL_GPL(zap_vma_ptes); + ++static inline bool can_follow_write_pte(pte_t pte, struct page *page, ++ unsigned int flags) ++{ ++ if (pte_write(pte)) ++ return true; ++ ++ /* ++ * Make sure that we are really following CoWed page. We do not really ++ * have to care about exclusiveness of the page because we only want ++ * to ensure that once COWed page hasn't disappeared in the meantime ++ * or it hasn't been merged to a KSM page. ++ */ ++ if ((flags & FOLL_FORCE) && (flags & FOLL_COW)) ++ return page && PageAnon(page) && !PageKsm(page); ++ ++ return false; ++} + /** + * follow_page - look up a page descriptor from a user-virtual address + * @vma: vm_area_struct mapping @address +@@ -1509,10 +1526,12 @@ split_fallthrough: + pte = *ptep; + if (!pte_present(pte)) + goto no_page; +- if ((flags & FOLL_WRITE) && !pte_write(pte)) +- goto unlock; + + page = vm_normal_page(vma, address, pte); ++ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) { ++ pte_unmap_unlock(ptep, ptl); ++ return NULL; ++ } + if (unlikely(!page)) { + if ((flags & FOLL_DUMP) || + !is_zero_pfn(pte_pfn(pte))) +@@ -1789,17 +1808,13 @@ int __get_user_pages(struct task_struct + * The VM_FAULT_WRITE bit tells us that + * do_wp_page has broken COW when necessary, + * even if maybe_mkwrite decided not to set +- * pte_write. We can thus safely do subsequent +- * page lookups as if they were reads. But only +- * do so when looping for pte_write is futile: +- * in some cases userspace may also be wanting +- * to write to the gotten user page, which a +- * read fault here might prevent (a readonly +- * page might get reCOWed by userspace write). ++ * pte_write. We cannot simply drop FOLL_WRITE ++ * here because the COWed page might be gone by ++ * the time we do the subsequent page lookups. 
+ */ + if ((ret & VM_FAULT_WRITE) && + !(vma->vm_flags & VM_WRITE)) +- foll_flags &= ~FOLL_WRITE; ++ foll_flags |= FOLL_COW; + + cond_resched(); + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index f93799e2..e3e983f4 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -253,3 +253,4 @@ mgmt-port-init-config.patch arch-intel-reboot-cf9-cold.patch drivers-hwmon-adm1021-detect.patch drivers-i2c-busses-i2c-isch-timeout.patch +CVE-2016-5195.patch diff --git a/packages/base/any/kernels/legacy/linux-3.8.13 b/packages/base/any/kernels/legacy/linux-3.8.13 index 7cdec99d..6c803ff8 160000 --- a/packages/base/any/kernels/legacy/linux-3.8.13 +++ b/packages/base/any/kernels/legacy/linux-3.8.13 @@ -1 +1 @@ -Subproject commit 7cdec99d7aea20e8afa83771350bdae699d79ffc +Subproject commit 6c803ff857ba52549de43c52f47b714e623ed9d4 diff --git a/packages/base/any/kernels/legacy/linux-3.9.6 b/packages/base/any/kernels/legacy/linux-3.9.6 index 0106373d..34603c6e 160000 --- a/packages/base/any/kernels/legacy/linux-3.9.6 +++ b/packages/base/any/kernels/legacy/linux-3.9.6 @@ -1 +1 @@ -Subproject commit 0106373d79ecf4df3f8867c214ce180f4993c442 +Subproject commit 34603c6ec26840ad19991e62fcc617eee2ffec27 From b390c54b6cc1cdbd7cd950a614767ba004132baa Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 3 Nov 2016 20:01:09 +0000 Subject: [PATCH 045/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index fb7ec7a2..a7f832c1 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit fb7ec7a2f20f6cffd5dbd1f0a28a59407eb638ed +Subproject commit a7f832c14dcc7a146245c3a4a32448324f90171f From 6d1aaa134cfe9f075afa0816048e29984bf92b69 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Thu, 3 Nov 2016 13:58:40 -0700 Subject: [PATCH 046/255] Handle unformatted/corrupted partition tables, see SWL-3246 - handle more pyparted exceptions - properly clobber the disk signature --- .../src/python/onl/install/BaseInstall.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index bb60b0b7..45da8857 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -21,6 +21,14 @@ from InstallUtils import ProcMountsParser import onl.YamlUtils from onl.sysconfig import sysconfig +try: + PartedException = parted._ped.PartedException + DiskException = parted._ped.DiskException +except AttributeError: + import _ped + PartedException = _ped.PartedException + DiskException = _ped.DiskException + class Base: class installmeta: @@ -686,11 +694,17 @@ class UbootInstaller(SubprocessMixin, Base): return 0 self.log.warn("disk %s has wrong label %s", self.device, self.partedDisk.type) - except Exception as ex: + except (DiskException, PartedException) as ex: self.log.error("cannot get partition table from %s: %s", self.device, str(ex)) + except Exception as ex: + self.log.exception("cannot get partition table from %s: %s", + self.device) - self.log.info("creating msdos label on %s") + self.log.info("clobbering disk label on %s", self.device) + self.partedDevice.clobber() + + self.log.info("creating msdos label on %s", self.device) self.partedDisk = parted.freshDisk(self.partedDevice, 'msdos') return 0 From cbd57c100522ec764817d312d66de60af372c7cd Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Fri, 4 Nov 2016 12:35:50 -0700 Subject: [PATCH 047/255] Refactor boot loader config upgrade code --- .../src/python/onl/install/BaseInstall.py | 77 +++++++++++++++---- 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index bb60b0b7..5c7c25f7 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -84,6 +84,10 @@ class Base: self.log.error("not implemented") return 1 + def upgradeBootLoader(self): + self.log.error("not implemented") + return 1 + def shutdown(self): zf, self.zf = self.zf, None if zf: zf.close() @@ -511,19 +515,7 @@ class GrubInstaller(SubprocessMixin, Base): def installLoader(self): - ctx = {} - - kernel = self.im.platformConf['grub']['kernel'] - ctx['kernel'] = kernel['='] if type(kernel) == dict else kernel - ctx['args'] = self.im.platformConf['grub']['args'] - ctx['platform'] = self.im.installerConf.installer_platform - ctx['serial'] = self.im.platformConf['grub']['serial'] - - ctx['boot_menu_entry'] = sysconfig.installer.menu_name - ctx['boot_loading_name'] = sysconfig.installer.os_name - kernels = [] - for f in set(os.listdir(self.im.installerConf.installer_dir) + self.zf.namelist()): if 'kernel' in f: kernels.append(f) @@ -535,11 +527,10 @@ class GrubInstaller(SubprocessMixin, Base): initrd = i break - cf = GRUB_TPL % ctx - - self.log.info("Installing kernel") dev = self.blkidParts['ONL-BOOT'] + self.log.info("Installing kernel to %s", dev.device) + with MountContext(dev.device, log=self.log) as ctx: def _cp(b, dstname=None): if dstname is None: @@ -548,6 +539,29 @@ class GrubInstaller(SubprocessMixin, Base): self.installerCopy(b, dst, optional=True) [_cp(e) for e in kernels] _cp(initrd, "%s.cpio.gz" % self.im.installerConf.installer_platform) + + return 0 + + def installGrubCfg(self): + + dev = self.blkidParts['ONL-BOOT'] + + self.log.info("Installing grub.cfg to %s", dev.device) + + ctx = {} + + kernel = self.im.platformConf['grub']['kernel'] + ctx['kernel'] = kernel['='] if type(kernel) == dict else kernel + ctx['args'] = self.im.platformConf['grub']['args'] + ctx['platform'] = self.im.installerConf.installer_platform + ctx['serial'] = self.im.platformConf['grub']['serial'] + + ctx['boot_menu_entry'] = sysconfig.installer.menu_name + ctx['boot_loading_name'] = sysconfig.installer.os_name + + cf = GRUB_TPL % ctx + + with MountContext(dev.device, log=self.log) as ctx: d = os.path.join(ctx.dir, "grub") self.makedirs(d) dst = os.path.join(ctx.dir, 'grub/grub.cfg') @@ -611,6 +625,9 @@ class GrubInstaller(SubprocessMixin, Base): code = self.installLoader() if code: return code + code = self.installGrubCfg() + if code: return code + code = self.installBootConfig() if code: return code @@ -635,6 +652,17 @@ class GrubInstaller(SubprocessMixin, Base): return 1 return self.installGpt() + def upgradeBootLoader(self): + """Upgrade the boot loader settings.""" + + code = self.findGpt() + if code: return code + + code = self.installGrubCfg() + if code: return code + + return 0 + def shutdown(self): Base.shutdown(self) @@ -864,5 +892,24 @@ class UbootInstaller(SubprocessMixin, Base): return self.installUboot() + def upgradeBootLoader(self): + """Upgrade the boot loader settings as part of a loader upgrade.""" + + self.partedDevice = parted.getDevice(self.device) + self.partedDisk = 
parted.newDisk(self.partedDevice) + if self.partedDisk.type != 'msdos': + self.log.error("disk %s has wrong label %s", + self.device, self.partedDisk.type) + return 1 + + self.blkidParts = BlkidParser(log=self.log.getChild("blkid")) + + # XXX boot-config (and saved boot-config) should be unchanged during loader upgrade + + code = self.installUbootEnv() + if code: return code + + return 0 + def shutdown(self): Base.shutdown(self) From 3f0299027961a4d0d48c1eadb54f00acf4045ffe Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Fri, 4 Nov 2016 12:36:18 -0700 Subject: [PATCH 048/255] Added fit extraction context object, add attach/detach support --- .../src/python/onl/install/InstallUtils.py | 84 +++++++++++++++++-- 1 file changed, 75 insertions(+), 9 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index 830e042e..c30e307a 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -10,6 +10,8 @@ import tempfile import string import shutil +import Fit + class SubprocessMixin: V1 = "V1" @@ -210,8 +212,8 @@ class MountContext(SubprocessMixin): self.label = label self.fsType = fsType self.dir = None - self.hostDir = None - self.mounted = False + self.hostDir = self.__hostDir = None + self.mounted = self.__mounted = False self.log = log or logging.getLogger("mount") if self.device and self.label: @@ -245,7 +247,7 @@ class MountContext(SubprocessMixin): self.mounted = True return self - def __exit__(self, type, value, tb): + def shutdown(self): mounted = False if self.mounted: @@ -263,8 +265,18 @@ class MountContext(SubprocessMixin): if self.hostDir is not None: self.rmdir(self.hostDir) + def __exit__(self, type, value, tb): + self.shutdown() return False + def detach(self): + self.__mounted, self.mounted = self.mounted, False + self.__hostDir, self.hostDir = self.hostDir, None + + def attach(self): + self.mounted = self.__mounted + self.hostDir = self.__hostDir + class BlkidEntry: def __init__(self, device, **kwargs): @@ -665,6 +677,9 @@ class InitrdContext(SubprocessMixin): self.ilog.setLevel(logging.INFO) self.log = self.hlog + self.__initrd = None + self.__dir = None + def _unpack(self): self.dir = self.mkdtemp(prefix="chroot-", suffix=".d") @@ -783,7 +798,7 @@ class InitrdContext(SubprocessMixin): return self - def __exit__(self, type, value, tb): + def shutdown(self): p = ProcMountsParser() dirs = [e.dir for e in p.mounts if e.dir.startswith(self.dir)] @@ -797,23 +812,74 @@ class InitrdContext(SubprocessMixin): cmd = ('umount', p,) self.check_call(cmd, vmode=self.V1) - if self.initrd is not None: + if self.initrd and self.dir: self.log.debug("cleaning up chroot in %s", self.dir) self.rmtree(self.dir) - else: + elif self.dir: self.log.debug("saving chroot in %s", self.dir) + def __exit__(self, type, value, tb): + self.shutdown() return False + def detach(self): + self.__initrd, self.initrd = self.initrd, None + self.__dir, self.dir = self.dir, None + @classmethod - def mkChroot(self, initrd, log=None): - with InitrdContext(initrd=initrd, log=log) as ctx: + def mkChroot(cls, initrd, log=None): + with cls(initrd=initrd, log=log) as ctx: initrdDir = ctx.dir - ctx.initrd = None + ctx.detach() # save the unpacked directory, do not clean it up # (it's inside this chroot anyway) return initrdDir +class FitInitrdContext(SubprocessMixin): + + def __init__(self, 
path, log=None): + self.fitPath = path + self.log = log or logging.getLogger(self.__class__.__name__) + self.initrd = self.__initrd = None + + def __enter__(self): + self.log.debug("parsing FIT image in %s", self.fitPath) + p = Fit.Parser(path=self.fitPath, log=self.log) + node = p.getInitrdNode() + if node is None: + raise ValueError("cannot find initrd node in FDT") + prop = node.properties.get('data', None) + if prop is None: + raise ValueError("cannot find initrd data property in FDT") + + with open(device) as fd: + self.log.debug("reading initrd at [%x:%x]", + prop.offset, prop.offset+prop.sz) + fd.seek(prop.offset, 0) + buf = fd.read(prop.sz) + + fno, self.initrd = tempfile.mkstemp(prefix="initrd-", + suffix=".img") + self.log.debug("+ cat > %s", self.initrd) + with os.fdopen(fno, "w") as fd: + fd.write(buf) + return self + + def shutdown(self): + initrd, self.initrd = self.initrd, None + if initrd and os.path.exists(initrd): + self.unlink(initrd) + + def __exit__(self, eType, eValue, eTrace): + self.shutdown() + return False + + def detach(self): + self.__initrd, self.initrd = self.initrd, None + + def attach(self): + self.initrd = self.__initrd + class ChrootSubprocessMixin: chrootDir = None From cf1bfca4c05204d246eb192099638cbe090aed64 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Fri, 4 Nov 2016 12:36:55 -0700 Subject: [PATCH 049/255] Refactor to use fit extraction context object --- .../src/python/onl/install/ShellApp.py | 113 +++++++++++------- 1 file changed, 67 insertions(+), 46 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index c0ff2fa2..0d85d7f3 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -13,7 +13,7 @@ from InstallUtils import InitrdContext, MountContext from InstallUtils import SubprocessMixin from InstallUtils import ProcMountsParser, ProcMtdParser from InstallUtils import BlkidParser -import Fit +from InstallUtils import FitInitrdContext import onl.platform.current @@ -46,30 +46,8 @@ class AppBase(SubprocessMixin): return 0 def _runFitShell(self, device): - self.log.debug("parsing FIT image in %s", device) - p = Fit.Parser(path=device, log=self.log) - node = p.getInitrdNode() - if node is None: - self.log.error("cannot find initrd node in FDT") - return 1 - prop = node.properties.get('data', None) - if prop is None: - self.log.error("cannot find initrd data property in FDT") - return 1 - with open(device) as fd: - self.log.debug("reading initrd at [%x:%x]", - prop.offset, prop.offset+prop.sz) - fd.seek(prop.offset, 0) - buf = fd.read(prop.sz) - try: - fno, initrd = tempfile.mkstemp(prefix="initrd-", - suffix=".img") - self.log.debug("+ cat > %s", initrd) - with os.fdopen(fno, "w") as fd: - fd.write(buf) - return self._runInitrdShell(initrd) - finally: - self.unlink(initrd) + with FitInitrdContext(path=device, log=self.log) as ctx: + return self._runInitrdShell(ctx.initrd) def shutdown(self): pass @@ -108,11 +86,22 @@ class AppBase(SubprocessMixin): app.shutdown() sys.exit(code) -class Onie(AppBase): +class OnieBootContext: + """XXX roth -- overlap with onl.install.ShellApp.Onie, + also with onl.install.App.OnieHelper from system-upgrade branch... 
- PROG = "onie-shell" + XXX roth -- refactor all three bits of code here + """ - def run(self): + def __init__(self, log=None): + self.log = log or logging.getLogger(self.__class__.__name__) + + self.initrd = None + + self.pm = self.blkid = self.mtd = None + self.ictx = self.mctx = self.fctx = None + + def __enter__(self): self.pm = ProcMountsParser() self.blkid = BlkidParser(log=self.log.getChild("blkid")) @@ -138,21 +127,25 @@ class Onie(AppBase): self.log.debug("found ONIE boot mounted at %s", onieDir) initrd = _g(onieDir) if initrd is None: - self.log.warn("cannot find ONIE initrd on %s", onieDir) - else: - self.log.debug("found ONIE initrd at %s", initrd) - return _runInitrdShell(initrd) + raise ValueError("cannot find ONIE initrd on %s" % onieDir) + self.log.debug("found ONIE initrd at %s", initrd) + with InitrdContext(initrd=initrd, log=self.log) as self.ictx: + self.initrd = initrd + self.ictx.detach() + return self - with MountContext(dev, log=self.log) as ctx: + with MountContext(dev, log=self.log) as self.mctx: initrd = _g(ctx.dir) if initrd is None: - self.log.warn("cannot find ONIE initrd on %s", dev) - else: - self.log.debug("found ONIE initrd at %s", initrd) - return self._runInitrdShell(initrd) + raise ValueError("cannot find ONIE initrd on %s" % dev) + self.log.debug("found ONIE initrd at %s", initrd) + with InitrdContext(initrd=initrd, log=self.log) as self.ictx: + self.initrd = initrd + self.mctx.detach() + self.ictx.detach() + return self - self.log.warn("cannot find an ONIE initrd") - return 1 + raise ValueError("cannot find an ONIE initrd") # try to find onie initrd on a mounted fs (GRUB); # for ONIE images this is usually /mnt/onie-boot @@ -164,7 +157,10 @@ class Onie(AppBase): part.device, part.dir) else: self.log.debug("found ONIE initrd at %s", initrd) - return self._runInitrdShell(initrd) + with InitrdContext(initrd=initrd, log=self.log) as self.ictx: + self.initrd = initrd + self.ictx.detach() + return self # grovel through MTD devices (u-boot) parts = [p for p in self.mtd.parts if p.label == "onie"] @@ -172,13 +168,38 @@ class Onie(AppBase): part = parts[0] self.log.debug("found ONIE MTD device %s", part.charDevice or part.blockDevice) - return self._runFitShell(part.blockDevice) - elif self.mtd.mounts: - self.log.error("cannot find ONIE MTD device") - return 1 + with FitInitrdContext(part.blockDevice, log=self.log) as self.fctx: + with InitrdContext(initrd=self.fctx.initrd, log=self.log) as self.ictx: + self.initrd = self.fctx.initrd + self.fctx.detach() + self.ictx.detach() + return self - self.log.error("cannot find ONIE initrd") - return 1 + if self.mtd.mounts: + raise ValueError("cannot find ONIE MTD device") + + raise ValueError("cannot find ONIE initrd") + + def shutdown(self): + ctx, self.fctx = self.fctx, None: + if ctx is not None: ctx.shutdown() + ctx, self.ictx = self.ictx, None: + if ctx is not None: ctx.shutdown() + ctx, self.mctx = self.mctx, None: + if ctx is not None: ctx.shutdown() + + def __exit__(self, eType, eValue, eTrace): + self.shutdown() + return False + +class Onie(AppBase): + """XXX roth -- refactor in from loader.py code.""" + + PROG = "onie-shell" + + def run(self): + with OnieBootContext(log=self.log) as ctx: + return self._runInitrdShell(ctx.initrd) class Loader(AppBase): From 8979e5ebbab05f9ced5dbf98eecb06d2937bc291 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Fri, 4 Nov 2016 12:37:05 -0700 Subject: [PATCH 050/255] Added fixmes --- .../all/vendor-config-onl/src/python/onl/upgrade/loader.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py index 6637c9a7..b7ac149d 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py @@ -75,6 +75,8 @@ class LoaderUpgrade_Fit(LoaderUpgradeBase): with OnlMountContextReadWrite("ONL-BOOT", self.logger) as d: self.copyfile(fit_image, os.path.join(d.directory, "%s.itb" % (self.platform.platform()))) + # XXX re-install the firmware environment + self.reboot() @@ -110,6 +112,8 @@ class LoaderUpgrade_x86_64(LoaderUpgradeBase): #if os.path.exists(src): # self.copyfile(src, dst) + # XXX re-install the grub config + self.reboot() From a676d8ed2cbc450dadadf62d0696d03c66163868 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Fri, 4 Nov 2016 12:53:48 -0700 Subject: [PATCH 051/255] Fixed exception handler and log message --- .../vendor-config-onl/src/python/onl/install/BaseInstall.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index 45da8857..ec65e5e2 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -697,8 +697,8 @@ class UbootInstaller(SubprocessMixin, Base): except (DiskException, PartedException) as ex: self.log.error("cannot get partition table from %s: %s", self.device, str(ex)) - except Exception as ex: - self.log.exception("cannot get partition table from %s: %s", + except Exception: + self.log.exception("cannot get partition table from %s", self.device) self.log.info("clobbering disk label on %s", self.device) From 54e292f0ba81fdb2cc534cea366645e068ba3a32 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 4 Nov 2016 21:16:03 +0000 Subject: [PATCH 052/255] Factor out file delivery so it can be shared between packaging and release. --- tools/onlpm.py | 81 ++++++++++++++++++++++++-------------------------- 1 file changed, 39 insertions(+), 42 deletions(-) diff --git a/tools/onlpm.py b/tools/onlpm.py index 7a5bfc7c..d4d9464f 100755 --- a/tools/onlpm.py +++ b/tools/onlpm.py @@ -272,6 +272,36 @@ class OnlPackage(object): return True + @staticmethod + def copyf(src, dst, root): + if dst.startswith('/'): + dst = dst[1:] + + if os.path.isdir(src): + # + # Copy entire src directory to target directory + # + dstpath = os.path.join(root, dst) + logger.debug("Copytree %s -> %s" % (src, dstpath)) + shutil.copytree(src, dstpath) + else: + # + # If the destination ends in a '/' it means copy the filename + # as-is to that directory. + # + # If not, its a full rename to the destination. 
+ # + if dst.endswith('/'): + dstpath = os.path.join(root, dst) + if not os.path.exists(dstpath): + os.makedirs(dstpath) + shutil.copy(src, dstpath) + else: + dstpath = os.path.join(root, os.path.dirname(dst)) + if not os.path.exists(dstpath): + os.makedirs(dstpath) + shutil.copyfile(src, os.path.join(root, dst)) + shutil.copymode(src, os.path.join(root, dst)) def build(self, dir_=None): @@ -312,36 +342,7 @@ class OnlPackage(object): self.pkg['__workdir'] = workdir for (src,dst) in self.pkg.get('files', {}): - - if dst.startswith('/'): - dst = dst[1:] - - if os.path.isdir(src): - # - # Copy entire src directory to target directory - # - dstpath = os.path.join(root, dst) - logger.debug("Copytree %s -> %s" % (src, dstpath)) - shutil.copytree(src, dstpath) - else: - # - # If the destination ends in a '/' it means copy the filename - # as-is to that directory. - # - # If not, its a full rename to the destination. - # - if dst.endswith('/'): - dstpath = os.path.join(root, dst) - if not os.path.exists(dstpath): - os.makedirs(dstpath) - shutil.copy(src, dstpath) - else: - dstpath = os.path.join(root, os.path.dirname(dst)) - if not os.path.exists(dstpath): - os.makedirs(dstpath) - shutil.copyfile(src, os.path.join(root, dst)) - shutil.copymode(src, os.path.join(root, dst)) - + OnlPackage.copyf(src, dst, root) for (link,src) in self.pkg.get('links', {}).iteritems(): logger.info("Linking %s -> %s..." % (link, src)) @@ -602,18 +603,14 @@ class OnlPackageGroup(object): if 'release' in self._pkgs: - release_list = onlu.validate_src_dst_file_tuples(self._pkgs['__directory'], - self._pkgs['release'], - dict(), - OnlPackageError) - for f in release_list: - release_dir = os.environ.get('ONLPM_OPTION_RELEASE_DIR', - os.path.join(os.environ.get('ONL', 'RELEASE'))) - dst = os.path.join(release_dir, g_dist_codename, f[1]) - if not os.path.exists(dst): - os.makedirs(dst) - logger.info("Releasing %s -> %s" % (os.path.basename(f[0]), dst)) - shutil.copy(f[0], dst) + for (src, dst) in onlu.validate_src_dst_file_tuples(self._pkgs['__directory'], + self._pkgs['release'], + dict(), + OnlPackageError): + root = os.path.join(os.environ.get('ONLPM_OPTION_RELEASE_DIR', + os.path.join(os.environ.get('ONL', 'RELEASE'))), + g_dist_codename) + OnlPackage.copyf(src, dst, root) return products From d3c65d04ddda5dd915a1613380b7819083409f78 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 4 Nov 2016 21:32:08 +0000 Subject: [PATCH 053/255] Remove output. --- tools/onlpm.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/onlpm.py b/tools/onlpm.py index d4d9464f..68e301e6 100755 --- a/tools/onlpm.py +++ b/tools/onlpm.py @@ -439,7 +439,6 @@ class OnlPackage(object): onlu.execute(command) # Grab the package from the workdir. There can be only one. - sys.stdout.write(workdir) files = glob.glob(os.path.join(workdir, '*.deb')) if len(files) == 0: raise OnlPackageError("No debian package.") From c85967e4dbf8473198e9743c997053f2a257649d Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Mon, 7 Nov 2016 10:50:17 -0800 Subject: [PATCH 054/255] Handle unmount when detached --- .../src/python/onl/install/InstallUtils.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index c30e307a..830abf00 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -801,16 +801,20 @@ class InitrdContext(SubprocessMixin): def shutdown(self): p = ProcMountsParser() - dirs = [e.dir for e in p.mounts if e.dir.startswith(self.dir)] + if self.dir is not None: + dirs = [e.dir for e in p.mounts if e.dir.startswith(self.dir)] + else: + dirs = [] # XXX probabaly also kill files here # umount any nested mounts - self.log.debug("un-mounting mounts points in chroot %s", self.dir) - dirs.sort(reverse=True) - for p in dirs: - cmd = ('umount', p,) - self.check_call(cmd, vmode=self.V1) + if dirs: + self.log.debug("un-mounting mounts points in chroot %s", self.dir) + dirs.sort(reverse=True) + for p in dirs: + cmd = ('umount', p,) + self.check_call(cmd, vmode=self.V1) if self.initrd and self.dir: self.log.debug("cleaning up chroot in %s", self.dir) From 3dd88819212fab8f0ad53099fa4066bafc83ef2a Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 7 Nov 2016 13:12:42 -0800 Subject: [PATCH 055/255] Updates for review feedback - use built-in mount context for ONL-BOOT - fix logger handle - minor nit for swiget if the SWI is missing --- .../src/python/onl/install/SystemInstall.py | 62 ++++--------------- 1 file changed, 11 insertions(+), 51 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py index d96557ac..b3339550 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py @@ -1,6 +1,6 @@ """App.py -Application code for swl-install. +Application code for onl-install. 
""" import logging @@ -13,15 +13,16 @@ import argparse import fnmatch import subprocess -from onl.install.InstallUtils import InitrdContext, MountContext -from onl.install.InstallUtils import BlkidParser, ProcMountsParser +from onl.install.InstallUtils import InitrdContext +from onl.install.InstallUtils import ProcMountsParser from onl.install.ConfUtils import MachineConf, InstallerConf from onl.install.ShellApp import Onie, Upgrader -import onl.install.App -from onl.sysconfig import sysconfig -from onl.platform.current import OnlPlatform - from onl.install.InstallUtils import SubprocessMixin +import onl.install.App + +from onl.sysconfig import sysconfig + +from onl.mounts import OnlMountContextReadWrite class UpgradeHelper(Upgrader): @@ -33,47 +34,6 @@ class UpgradeHelper(Upgrader): if self.callback is not None: self.callback(self, p) -class OnlBootContext(object): - - def __init__(self, log=None): - self.log = log or logging.getLogger(self.__class__.__name__) - self.dctx = None - self.dir = None - - def __enter__(self): - pm = ProcMountsParser() - logger = self.log.getChild("blkid") - blkid = BlkidParser(log=logger) - - try: - dev = blkid['ONL-BOOT'].device - except IndexError: - dev = None - if dev is None: - raise ValueError("cannot find ONL-BOOT") - - parts = [p for p in pm.mounts if p.device == dev] - if parts: - self.log.debug("found ONL-BOOT at %s", parts[0].dir) - self.dir = parts[0].dir - return self - - # else, mount this: - with MountContext(dev, log=self.log) as self.dctx: - self.log.debug("mounted ONL-BOOT %s --> %s", - dev, self.dctx.dir) - self.dir = self.dctx.dir - self.dctx.detach() - - return self - - def __exit__(self, type, value, tb): - dctx, self.dctx = self.dctx, None - if dctx is not None: - dctx.attach() - dctx.shutdown() - return False - class App(SubprocessMixin): def __init__(self, force=False, log=None): @@ -186,8 +146,8 @@ class App(SubprocessMixin): self.log.debug("+ /bin/cp %s %s", src, dst) shutil.copy2(src, dst) - with OnlBootContext(log=self.log) as octx: - src = os.path.join(octx.dir, "boot-config") + with OnlMountContextReadWrite('ONL-BOOT', logger=self.log) as octx: + src = os.path.join(octx.directory, "boot-config") dst = os.path.join(abs_idir, "boot-config") self.log.debug("+ /bin/cp %s %s", src, dst) shutil.copy2(src, dst) @@ -263,7 +223,7 @@ class App(SubprocessMixin): def main(cls): logging.basicConfig() - logger = logging.getLogger("swl-install") + logger = logging.getLogger("onl-install") logger.setLevel(logging.DEBUG) # send to ONIE log From 19d19ef51ca1e68f7393d5aa0fd6915a705456e9 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Mon, 7 Nov 2016 13:12:54 -0800 Subject: [PATCH 056/255] Fix swiget for if SWI is missing --- packages/base/all/initrds/loader-initrd-files/src/bin/swiget | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/swiget b/packages/base/all/initrds/loader-initrd-files/src/bin/swiget index 34615678..333674a9 100755 --- a/packages/base/all/initrds/loader-initrd-files/src/bin/swiget +++ b/packages/base/all/initrds/loader-initrd-files/src/bin/swiget @@ -284,7 +284,7 @@ class Runner(onl.install.InstallUtils.SubprocessMixin): l = [x for x in os.listdir(d) if x.endswith('.swi')] l = [os.path.join(d, x) for x in l] l.sort(key=swiSortKey) - return l[-1] + return l[-1] if l else None pm = ProcMountsParser() parts = [x for x in pm.mounts if x.device == dev] @@ -294,6 +294,9 @@ class Runner(onl.install.InstallUtils.SubprocessMixin): self.log.info("found 'latest' swi %s", dst) else: dst = os.path.join(parts[0].dir, src) + if dst is None: + self.log.error("missing SWI") + return None if not os.path.exists(dst): self.log.error("missing SWI: %s", dst) return None From ad913e98c5c02965d166388c5d24636b8ab6ce52 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 7 Nov 2016 17:53:49 -0800 Subject: [PATCH 057/255] Minor fixes after fallout from git pull --- .../vendor-config-onl/src/python/onl/install/InstallUtils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index a071d3b2..a831d20b 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -698,6 +698,7 @@ class InitrdContext(SubprocessMixin): self.__initrd = None self.__dir = None + self._hasDevTmpfs = False def _unpack(self): self.dir = self.mkdtemp(prefix="chroot-", @@ -895,7 +896,7 @@ class FitInitrdContext(SubprocessMixin): if prop is None: raise ValueError("cannot find initrd data property in FDT") - with open(device) as fd: + with open(self.fitPath) as fd: self.log.debug("reading initrd at [%x:%x]", prop.offset, prop.offset+prop.sz) fd.seek(prop.offset, 0) From c730004e2bc087175346e79913b3ba573e463dab Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 7 Nov 2016 17:54:36 -0800 Subject: [PATCH 058/255] Initial working version of OnieBootContext --- .../src/python/onl/install/ShellApp.py | 39 ++++++++++++------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index 711c6d78..fe534512 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -88,11 +88,7 @@ class AppBase(SubprocessMixin, object): sys.exit(code) class OnieBootContext: - """XXX roth -- overlap with onl.install.ShellApp.Onie, - also with onl.install.App.OnieHelper from system-upgrade branch... 
- - XXX roth -- refactor all three bits of code here - """ + """Find the ONIE initrd and umpack/mount it.""" def __init__(self, log=None): self.log = log or logging.getLogger(self.__class__.__name__) @@ -100,15 +96,17 @@ class OnieBootContext: self.initrd = None self.pm = self.blkid = self.mtd = None - self.ictx = self.mctx = self.fctx = None + self.ictx = self.dctx = self.fctx = None + self.onieDir = None + self.initrdDir = None + + self.__ictx = self.__dctx = self.__fctx = None def __enter__(self): self.pm = ProcMountsParser() self.blkid = BlkidParser(log=self.log.getChild("blkid")) self.mtd = ProcMtdParser(log=self.log.getChild("mtd")) - self.dctx = None - self.onieDir = None def _g(d): pat = os.path.join(d, "onie/initrd.img*") @@ -133,18 +131,21 @@ class OnieBootContext: self.log.debug("found ONIE initrd at %s", initrd) with InitrdContext(initrd=initrd, log=self.log) as self.ictx: self.initrd = initrd + self.initrdDir = self.ictx.dir self.ictx.detach() return self # else, try to mount the directory containing the initrd - with MountContext(dev, log=self.log) as self.mctx: + with MountContext(dev, log=self.log) as self.dctx: initrd = _g(self.dctx.dir) if initrd is None: raise ValueError("cannot find ONIE initrd on %s" % dev) + self.onieDir = self.dctx.dir + self.dctx.detach() self.log.debug("found ONIE initrd at %s", initrd) with InitrdContext(initrd=initrd, log=self.log) as self.ictx: self.initrd = initrd - self.mctx.detach() + self.initrdDir = self.ictx.dir self.ictx.detach() return self @@ -163,6 +164,7 @@ class OnieBootContext: self.log.debug("found ONIE initrd at %s", initrd) with InitrdContext(initrd=initrd, log=self.log) as self.ictx: self.initrd = initrd + self.initrdDir = self.ictx.dir self.ictx.detach() return self @@ -176,6 +178,7 @@ class OnieBootContext: with InitrdContext(initrd=self.fctx.initrd, log=self.log) as self.ictx: self.initrd = self.fctx.initrd self.fctx.detach() + self.initrdDir = self.ictx.dir self.ictx.detach() return self @@ -185,17 +188,27 @@ class OnieBootContext: raise ValueError("cannot find ONIE initrd") def shutdown(self): - ctx, self.fctx = self.fctx, None: + ctx, self.fctx = self.fctx, None if ctx is not None: ctx.shutdown() - ctx, self.ictx = self.ictx, None: + ctx, self.ictx = self.ictx, None if ctx is not None: ctx.shutdown() - ctx, self.mctx = self.mctx, None: + ctx, self.dctx = self.dctx, None if ctx is not None: ctx.shutdown() def __exit__(self, eType, eValue, eTrace): self.shutdown() return False + def detach(self): + self.__fctx, self.fctx = self.fctx, None + self.__ictx, self.ictx = self.ictx, None + self.__dctx, self.dctx = self.dctx, None + + def attach(self): + self.fctx = self.__fctx + self.ictx = self.__ictx + self.dctx = self.__dctx + class Onie(AppBase): """XXX roth -- refactor in from loader.py code.""" From 9e1547906006a81963a70b819a567e102db7754d Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Mon, 7 Nov 2016 17:54:50 -0800 Subject: [PATCH 059/255] Refactor to use OnieBootContext --- .../src/python/onl/install/App.py | 47 +++++-------------- 1 file changed, 12 insertions(+), 35 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py index 5ab00ed7..cd18cb97 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py @@ -17,31 +17,9 @@ import time from InstallUtils import InitrdContext from InstallUtils import SubprocessMixin from InstallUtils import ProcMountsParser -from ShellApp import Onie +from ShellApp import OnieBootContext import ConfUtils, BaseInstall -class OnieHelper(Onie): - """Unpack the initrd, but keep it around.""" - - UMOUNT = False - # leave self.onieDir mounted - - ictx = None - - def _runInitrdShell(self, initrd): - with InitrdContext(initrd, log=self.log) as self.ictx: - self.initrdDir = self.ictx.dir - self.ictx.detach() - - def shutdown(self): - - self.ictx.attach() - self.ictx.shutdown() - - if self.dctx is not None: - self.dctx.attach() - self.dctx.shutdown() - class App(SubprocessMixin, object): def __init__(self, url=None, @@ -66,7 +44,7 @@ class App(SubprocessMixin, object): self.nextUpdate = None - self.onieHelper = None + self.octx = None def run(self): @@ -160,15 +138,13 @@ class App(SubprocessMixin, object): ##self.log.info("using native GRUB") ##self.grubEnv = ConfUtils.GrubEnv(log=self.log.getChild("grub")) - self.onieHelper = OnieHelper(log=self.log) - code = self.onieHelper.run() - if code: - self.log.warn("cannot find ONIE initrd") + with OnieBootContext(log=self.log) as self.octx: + self.octx.detach() - if self.onieHelper.onieDir is not None: - self.log.info("using native ONIE initrd+chroot GRUB (%s)", self.onieHelper.onieDir) - self.grubEnv = ConfUtils.ChrootGrubEnv(self.onieHelper.initrdDir, - bootDir=self.onieHelper.onieDir, + if self.octx.onieDir is not None: + self.log.info("using native ONIE initrd+chroot GRUB (%s)", self.octx.onieDir) + self.grubEnv = ConfUtils.ChrootGrubEnv(self.octx.initrdDir, + bootDir=self.octx.onieDir, path="/grub/grubenv", log=self.log.getChild("grub")) # direct access using ONIE initrd as a chroot @@ -340,9 +316,10 @@ class App(SubprocessMixin, object): if installer is not None: installer.shutdown() - h, self.onieHelper = self.onieHelper, None - if h is not None: - h.shutdown() + ctx, self.octx = self.octx, None + if ctx: + ctx.attach() + ctx.shutdown() def post_mortem(self): self.log.info("re-attaching to tty") From 1eaf262f71a05363c4ce5213c02340cfb8a17b94 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Mon, 7 Nov 2016 17:55:06 -0800 Subject: [PATCH 060/255] Refactor to use OnieBootContext --- .../src/python/onl/install/SystemInstall.py | 31 ++++++------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py index b3339550..59d9ad31 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py @@ -16,7 +16,7 @@ import subprocess from onl.install.InstallUtils import InitrdContext from onl.install.InstallUtils import ProcMountsParser from onl.install.ConfUtils import MachineConf, InstallerConf -from onl.install.ShellApp import Onie, Upgrader +from onl.install.ShellApp import OnieBootContext, Upgrader from onl.install.InstallUtils import SubprocessMixin import onl.install.App @@ -45,8 +45,6 @@ class App(SubprocessMixin): self.force = force - self.onieHelper = None - def _runInitrd(self, helper, path): with InitrdContext(initrd=path, log=self.log) as ctx: @@ -55,22 +53,14 @@ class App(SubprocessMixin): prefix="installer-", suffix=".d") chroot_idir = abs_idir[len(ctx.dir):] - self.onieHelper = onl.install.App.OnieHelper(log=self.log) - code = self.onieHelper.run() - if code: - self.log.error("cannot find/unpack ONIE initrd") - return code - self.log.info("onie directory is %s", self.onieHelper.onieDir) - self.log.info("initrd directory is %s", self.onieHelper.initrdDir) + with OnieBootContext(log=self.log) as octx: + self.log.info("onie directory is %s", octx.onieDir) + self.log.info("initrd directory is %s", octx.initrdDir) - src = os.path.join(self.onieHelper.initrdDir, "etc/machine.conf") - dst = os.path.join(ctx.dir, "etc/machine.conf") - self.log.debug("+ /bin/cp %s %s", src, dst) - shutil.copy2(src, dst) - - h, self.onieHelper = self.onieHelper, None - if h is not None: - h.shutdown() + src = os.path.join(octx.initrdDir, "etc/machine.conf") + dst = os.path.join(ctx.dir, "etc/machine.conf") + self.log.debug("+ /bin/cp %s %s", src, dst) + shutil.copy2(src, dst) src = "/etc/fw_env.config" if os.path.exists(src): @@ -214,10 +204,7 @@ class App(SubprocessMixin): return code def shutdown(self): - - h, self.onieHelper = self.onieHelper, None - if h is not None: - h.shutdown() + pass @classmethod def main(cls): From 0508e7149c99ec998b9598305bc0925d78005417 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 8 Nov 2016 14:28:57 +0000 Subject: [PATCH 061/255] Latest --- packages/base/any/kernels/legacy/linux-3.8.13 | 2 +- packages/base/any/kernels/legacy/linux-3.9.6 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/base/any/kernels/legacy/linux-3.8.13 b/packages/base/any/kernels/legacy/linux-3.8.13 index 6c803ff8..94221f79 160000 --- a/packages/base/any/kernels/legacy/linux-3.8.13 +++ b/packages/base/any/kernels/legacy/linux-3.8.13 @@ -1 +1 @@ -Subproject commit 6c803ff857ba52549de43c52f47b714e623ed9d4 +Subproject commit 94221f79abb63e7113581a077eebc5dbcf89fa33 diff --git a/packages/base/any/kernels/legacy/linux-3.9.6 b/packages/base/any/kernels/legacy/linux-3.9.6 index 34603c6e..9b8f43a0 160000 --- a/packages/base/any/kernels/legacy/linux-3.9.6 +++ b/packages/base/any/kernels/legacy/linux-3.9.6 @@ -1 +1 @@ -Subproject commit 34603c6ec26840ad19991e62fcc617eee2ffec27 +Subproject commit 9b8f43a032e20ea640e8e749e4949d829aa28c54 From e2f9ea1fbaf4e7ed1142df8961f8cbf84abb4e53 Mon Sep 
17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 8 Nov 2016 14:29:07 +0000 Subject: [PATCH 062/255] Add Device Mapper/Crypt support. --- .../configs/x86_64-all/x86_64-all.config | 30 ++++++++++++++++++- .../powerpc-e500v-all.config | 16 +++++++++- .../configs/x86_64-all/x86_64-all.config | 25 ++++++++++++---- 3 files changed, 64 insertions(+), 7 deletions(-) diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config index 8546a1f2..94506877 100644 --- a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config @@ -1462,7 +1462,34 @@ CONFIG_PATA_RZ1000=y # CONFIG_PATA_ACPI is not set CONFIG_ATA_GENERIC=y # CONFIG_PATA_LEGACY is not set -# CONFIG_MD is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +# CONFIG_MD_AUTODETECT is not set +# CONFIG_MD_LINEAR is not set +# CONFIG_MD_RAID0 is not set +# CONFIG_MD_RAID1 is not set +# CONFIG_MD_RAID10 is not set +# CONFIG_MD_RAID456 is not set +# CONFIG_MD_MULTIPATH is not set +# CONFIG_MD_FAULTY is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_CACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set # CONFIG_TARGET_CORE is not set # CONFIG_FUSION is not set @@ -3033,6 +3060,7 @@ CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y # CONFIG_NFS_V4_2 is not set CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=y CONFIG_PNFS_OBJLAYOUT=y CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" # CONFIG_NFS_V4_1_MIGRATION is not set diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/powerpc-e500v-all/powerpc-e500v-all.config b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/powerpc-e500v-all/powerpc-e500v-all.config index 51c3243d..80590231 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/powerpc-e500v-all/powerpc-e500v-all.config +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/powerpc-e500v-all/powerpc-e500v-all.config @@ -1021,7 +1021,21 @@ CONFIG_PATA_QUANTA_LB=y # Generic fallback / legacy drivers # # CONFIG_PATA_LEGACY is not set -# CONFIG_MD is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set # CONFIG_TARGET_CORE is not set # CONFIG_FUSION is not set diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config index d3b59170..42e2b67b 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config @@ -1047,7 +1047,6 @@ CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y # CONFIG_EEPROM_LEGACY is not set # 
CONFIG_EEPROM_MAX6875 is not set -CONFIG_EEPROM_ACCTON_AS5512_54X_SFP=y CONFIG_EEPROM_ACCTON_AS5712_54x_SFP=y CONFIG_EEPROM_ACCTON_AS6712_32x_SFP=y CONFIG_EEPROM_ACCTON_AS7512_32x_SFP=y @@ -1055,6 +1054,7 @@ CONFIG_EEPROM_ACCTON_AS7712_32x_SFP=y CONFIG_EEPROM_ACCTON_AS5812_54x_SFP=y CONFIG_EEPROM_ACCTON_AS6812_32x_SFP=y CONFIG_EEPROM_ACCTON_AS5812_54t_SFP=y +CONFIG_EEPROM_ACCTON_AS5512_54X_SFP=y CONFIG_EEPROM_ACCTON_AS7716_32x_SFP=y CONFIG_EEPROM_93CX6=y # CONFIG_EEPROM_93XX46 is not set @@ -1309,7 +1309,21 @@ CONFIG_PATA_RZ1000=y # CONFIG_PATA_ACPI is not set CONFIG_ATA_GENERIC=y # CONFIG_PATA_LEGACY is not set -# CONFIG_MD is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set # CONFIG_TARGET_CORE is not set # CONFIG_FUSION is not set @@ -1924,8 +1938,6 @@ CONFIG_SENSORS_W83781D=y # CONFIG_SENSORS_APPLESMC is not set # CONFIG_SENSORS_QUANTA_LY_HWMON is not set CONFIG_SENSORS_CPR_4011_4MXX=y -CONFIG_SENSORS_ACCTON_AS5512_54X_PSU=y -CONFIG_SENSORS_ACCTON_AS5512_54X_FAN=y CONFIG_SENSORS_ACCTON_AS5712_54x_FAN=y CONFIG_SENSORS_ACCTON_AS5712_54x_PSU=y CONFIG_SENSORS_ACCTON_AS6712_32x_FAN=y @@ -1942,6 +1954,8 @@ CONFIG_SENSORS_ACCTON_AS6812_32x_FAN=y CONFIG_SENSORS_ACCTON_AS6812_32x_PSU=y CONFIG_SENSORS_ACCTON_AS5812_54t_FAN=y CONFIG_SENSORS_ACCTON_AS5812_54t_PSU=y +CONFIG_SENSORS_ACCTON_AS5512_54X_PSU=y +CONFIG_SENSORS_ACCTON_AS5512_54X_FAN=y CONFIG_SENSORS_ACCTON_AS7716_32x_FAN=y CONFIG_SENSORS_ACCTON_AS7716_32x_PSU=y @@ -2316,7 +2330,6 @@ CONFIG_LEDS_CLASS=y # # LED drivers # -CONFIG_LEDS_ACCTON_AS5512_54X=y CONFIG_LEDS_ACCTON_AS5712_54x=y CONFIG_LEDS_ACCTON_AS6712_32x=y CONFIG_LEDS_ACCTON_AS7512_32x=y @@ -2324,6 +2337,7 @@ CONFIG_LEDS_ACCTON_AS7712_32x=y CONFIG_LEDS_ACCTON_AS5812_54x=y CONFIG_LEDS_ACCTON_AS6812_32x=y CONFIG_LEDS_ACCTON_AS5812_54t=y +CONFIG_LEDS_ACCTON_AS5512_54X=y CONFIG_LEDS_ACCTON_AS7716_32x=y # CONFIG_LEDS_LM3530 is not set # CONFIG_LEDS_PCA9532 is not set @@ -2636,6 +2650,7 @@ CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=m CONFIG_PNFS_OBJLAYOUT=m # CONFIG_NFS_FSCACHE is not set # CONFIG_NFS_USE_LEGACY_DNS is not set From 82ba5faebda4ea53294adef23d802c963c2749dd Mon Sep 17 00:00:00 2001 From: Wilson Ng Date: Tue, 8 Nov 2016 09:27:57 -0800 Subject: [PATCH 063/255] Latest bigcode. 
--- sm/bigcode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/bigcode b/sm/bigcode index 19fb14d8..8065e129 160000 --- a/sm/bigcode +++ b/sm/bigcode @@ -1 +1 @@ -Subproject commit 19fb14d8a5424284d2b082b65f4f5a269a587fd5 +Subproject commit 8065e129d36a58cae90f52e6ae5542fa885eb42d From 38297a63cf4cde384ebe39dd6a7a2f23d1852aea Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 8 Nov 2016 19:57:17 +0000 Subject: [PATCH 064/255] Latest --- packages/base/any/kernels/legacy/linux-3.9.6 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/any/kernels/legacy/linux-3.9.6 b/packages/base/any/kernels/legacy/linux-3.9.6 index 9b8f43a0..f8e07fad 160000 --- a/packages/base/any/kernels/legacy/linux-3.9.6 +++ b/packages/base/any/kernels/legacy/linux-3.9.6 @@ -1 +1 @@ -Subproject commit 9b8f43a032e20ea640e8e749e4949d829aa28c54 +Subproject commit f8e07fad377b0a15bf3d0fd60858d78b5587f541 From 4c9c1fa7451ff70e887a9627f60d939064b0d051 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Tue, 8 Nov 2016 16:21:58 -0800 Subject: [PATCH 065/255] Works for ppc, x86 probably needs a chroot --- .../src/python/onl/upgrade/loader.py | 88 ++++++++++++++++++- 1 file changed, 85 insertions(+), 3 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py index b7ac149d..101e8f39 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py @@ -11,6 +11,10 @@ import fnmatch from onl.upgrade import ubase from onl.sysconfig import sysconfig from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite +from onl.install import BaseInstall, ConfUtils +from onl.install.ShellApp import OnieBootContext +import onl.platform.current +import onl.versions class LoaderUpgradeBase(ubase.BaseUpgrade): name="loader" @@ -57,9 +61,10 @@ class LoaderUpgradeBase(ubase.BaseUpgrade): * A single reboot will be required to complete this upgrade. 
""" - class LoaderUpgrade_Fit(LoaderUpgradeBase): + installer_klass = BaseInstall.UbootInstaller + def do_upgrade(self, forced=False): fit_image = None @@ -75,13 +80,50 @@ class LoaderUpgrade_Fit(LoaderUpgradeBase): with OnlMountContextReadWrite("ONL-BOOT", self.logger) as d: self.copyfile(fit_image, os.path.join(d.directory, "%s.itb" % (self.platform.platform()))) - # XXX re-install the firmware environment + onlPlatform = onl.platform.current.OnlPlatform() + + with OnieBootContext(log=self.logger) as octx: + path = os.path.join(octx.initrdDir, "etc/machine.conf") + machineConf = ConfUtils.MachineConf(path=path) + + installerConf = ConfUtils.InstallerConf(path="/dev/null") + # start with an empty installerConf, fill it in piece by piece + + installerConf.installer_platform = onlPlatform.platform() + installerConf.installer_arch = machineConf.onie_arch + installerConf.installer_platform_dir = os.path.join("/lib/platform-config", + onlPlatform.platform()) + + mfPath = os.path.join(sysconfig.upgrade.loader.package.dir, "manifest.json") + mf = onl.versions.OnlVersionManifest(mfPath) + installerConf.onl_version = mf.RELEASE_ID + + grubEnv = ConfUtils.ProxyGrubEnv(installerConf, + bootDir="/mnt/onie-boot", + path="/grub/grubenv", + chroot=False, + log=self.logger.getChild("grub")) + + ubootEnv = ConfUtils.UbootEnv(log=self.logger.getChild("u-boot")) + + installer = self.installer_klass(machineConf=machineConf, + installerConf=installerConf, + platformConf=onlPlatform.platform_config, + grubEnv=grubEnv, + ubootEnv=ubootEnv, + force=True, + log=self.logger) + + installer.upgradeBootLoader() + installer.shutdown() self.reboot() class LoaderUpgrade_x86_64(LoaderUpgradeBase): + installer_klass = BaseInstall.GrubInstaller + def do_upgrade(self, forced=False): X86_64_UPGRADE_DIR=sysconfig.upgrade.loader.package.dir @@ -112,7 +154,47 @@ class LoaderUpgrade_x86_64(LoaderUpgradeBase): #if os.path.exists(src): # self.copyfile(src, dst) - # XXX re-install the grub config + # XXX re-install the firmware environment + import pdb + pdb.set_trace() + + onlPlatform = onl.platform.current.OnlPlatform() + + with OnieBootContext(log=self.logger) as octx: + path = os.path.join(octx.initrdDir, "etc/machine.conf") + machineConf = ConfUtils.MachineConf(path=path) + + # hold on to the ONIE boot context for grub access + + installerConf = ConfUtils.InstallerConf(path="/dev/null") + + # XXX fill in installerConf fields + installerConf.installer_platform = onlPlatform.platform() + installerConf.installer_arch = machineConf.onie_arch + installerConf.installer_platform_dir = os.path.join("/lib/platform-config", + onlPlatform.platform()) + + mfPath = os.path.join(sysconfig.upgrade.loader.package.dir, "manifest.json") + mf = onl.versions.OnlVersionManifest(mfPath) + installerConf.onl_version = mf.RELEASE_ID + + grubEnv = ConfUtils.ChrootGrubEnv(octx.initrdDir, + bootDir=octx.onieDir, + path="/grub/grubenv", + log=self.logger.getChild("grub")) + + ubootEnv = None + + installer = self.installer_klass(machineConf=machineConf, + installerConf=installerConf, + platformConf=onlPlatform.platform_config, + grubEnv=grubEnv, + ubootEnv=ubootEnv, + force=True, + log=self.logger) + + installer.upgradeBootLoader() + installer.shutdown() self.reboot() From 7ea57d959b1d509d1040c50774aaa8ca2862181b Mon Sep 17 00:00:00 2001 From: Steven Noble Date: Wed, 9 Nov 2016 02:18:31 +0000 Subject: [PATCH 066/255] adds base-files dependency to faultd --- packages/base/any/faultd/APKG.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/packages/base/any/faultd/APKG.yml b/packages/base/any/faultd/APKG.yml index fbcd0bef..8a3aecd3 100644 --- a/packages/base/any/faultd/APKG.yml +++ b/packages/base/any/faultd/APKG.yml @@ -3,13 +3,12 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com - + depends: base-files packages: - name: onl-faultd version: 1.0.0 summary: Fault Reporting Daemon provides: [ faultd ] - files: builds/$BUILD_DIR/${TOOLCHAIN}/bin/faultd.bin : /usr/bin/faultd From 46c8c0a0be39d2b9e9a8d00e0b98de7ceddd3a70 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 9 Nov 2016 12:38:18 -0800 Subject: [PATCH 067/255] Working for x86 --- .../src/python/onl/install/BaseInstall.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index 196d6b52..922f781c 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -571,7 +571,8 @@ class GrubInstaller(SubprocessMixin, Base): with MountContext(dev.device, log=self.log) as ctx: d = os.path.join(ctx.dir, "grub") - self.makedirs(d) + if not os.path.exists(d): + self.makedirs(d) dst = os.path.join(ctx.dir, 'grub/grub.cfg') with open(dst, "w") as fd: fd.write(cf) @@ -663,8 +664,7 @@ class GrubInstaller(SubprocessMixin, Base): def upgradeBootLoader(self): """Upgrade the boot loader settings.""" - code = self.findGpt() - if code: return code + self.blkidParts = BlkidParser(log=self.log.getChild("blkid")) code = self.installGrubCfg() if code: return code @@ -909,13 +909,6 @@ class UbootInstaller(SubprocessMixin, Base): def upgradeBootLoader(self): """Upgrade the boot loader settings as part of a loader upgrade.""" - self.partedDevice = parted.getDevice(self.device) - self.partedDisk = parted.newDisk(self.partedDevice) - if self.partedDisk.type != 'msdos': - self.log.error("disk %s has wrong label %s", - self.device, self.partedDisk.type) - return 1 - self.blkidParts = BlkidParser(log=self.log.getChild("blkid")) # XXX boot-config (and saved boot-config) should be unchanged during loader upgrade From a394f2b84c504d37698c020d1e7385c0dafdef0a Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Wed, 9 Nov 2016 12:38:48 -0800 Subject: [PATCH 068/255] Unmount /mnt/onl/boot prior to invoking the installer --- .../src/python/onl/upgrade/loader.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py index 101e8f39..29128db9 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/loader.py @@ -11,7 +11,7 @@ import fnmatch from onl.upgrade import ubase from onl.sysconfig import sysconfig from onl.mounts import OnlMountManager, OnlMountContextReadOnly, OnlMountContextReadWrite -from onl.install import BaseInstall, ConfUtils +from onl.install import BaseInstall, ConfUtils, InstallUtils from onl.install.ShellApp import OnieBootContext import onl.platform.current import onl.versions @@ -120,7 +120,7 @@ class LoaderUpgrade_Fit(LoaderUpgradeBase): self.reboot() -class LoaderUpgrade_x86_64(LoaderUpgradeBase): +class LoaderUpgrade_x86_64(LoaderUpgradeBase, InstallUtils.SubprocessMixin): installer_klass = BaseInstall.GrubInstaller @@ -154,9 +154,16 @@ class LoaderUpgrade_x86_64(LoaderUpgradeBase): #if os.path.exists(src): # self.copyfile(src, dst) - # XXX re-install the firmware environment - import pdb - pdb.set_trace() + # installer assumes that partitions are unmounted + + self.log = self.logger + # ha ha, SubprocessMixin api is different + + pm = InstallUtils.ProcMountsParser() + for m in pm.mounts: + if m.dir.startswith('/mnt/onl'): + self.logger.warn("unmounting %s (--force)", m.dir) + self.check_call(('umount', m.dir,)) onlPlatform = onl.platform.current.OnlPlatform() From 2ad51228d95f728e78fed01f904dca6c62cb24f2 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 9 Nov 2016 21:17:10 +0000 Subject: [PATCH 069/255] The system upgrade step must occur prior to mount and platform setup. --- .../src/boot.d/{62.upgrade-system => 10.upgrade-system} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packages/base/all/vendor-config-onl/src/boot.d/{62.upgrade-system => 10.upgrade-system} (100%) diff --git a/packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-system b/packages/base/all/vendor-config-onl/src/boot.d/10.upgrade-system similarity index 100% rename from packages/base/all/vendor-config-onl/src/boot.d/62.upgrade-system rename to packages/base/all/vendor-config-onl/src/boot.d/10.upgrade-system From 48689e3d98ac758b100cac1d1c3782e1ac4ceba0 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 9 Nov 2016 21:18:17 +0000 Subject: [PATCH 070/255] Boot-config installation is optional. 
--- .../src/python/onl/install/BaseInstall.py | 11 ++++++--- .../src/python/onl/install/SystemInstall.py | 23 ++++++++++++------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index 36cab127..e7319b25 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -102,7 +102,7 @@ class Base: src = os.path.join(self.im.installerConf.installer_dir, basename) if os.path.exists(src): self.copy2(src, dst) - return + return True if basename in self.zf.namelist(): self.log.debug("+ unzip -p %s %s > %s", @@ -110,11 +110,13 @@ class Base: with self.zf.open(basename, "r") as rfd: with open(dst, "wb") as wfd: shutil.copyfileobj(rfd, wfd) - return + return True if not optional: raise ValueError("missing installer file %s" % basename) + return False + def installerDd(self, basename, device): p = os.path.join(self.im.installerConf.installer_dir, basename) @@ -354,7 +356,10 @@ class Base: basename = 'boot-config' with MountContext(dev.device, log=self.log) as ctx: dst = os.path.join(ctx.dir, basename) - self.installerCopy(basename, dst) + + if not self.installerCopy(basename, dst, True): + return + with open(dst) as fd: buf = fd.read() diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py index b3339550..e6e8662d 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py @@ -85,9 +85,13 @@ class App(SubprocessMixin): # constitute an /etc/onl/installer.conf in place installerConf = InstallerConf(path="/dev/null") - with open("/etc/onl/loader/versions.json") as fd: - data = json.load(fd) - installerConf.onl_version = data['VERSION_ID'] + vj = "/etc/onl/loader/versions.json" + if os.path.exists(vj): + with open(vj) as fd: + data = json.load(fd) + installerConf.onl_version = data['VERSION_ID'] + else: + installerConf.onl_version = "unknown" installerConf.installer_dir = chroot_idir @@ -146,11 +150,14 @@ class App(SubprocessMixin): self.log.debug("+ /bin/cp %s %s", src, dst) shutil.copy2(src, dst) - with OnlMountContextReadWrite('ONL-BOOT', logger=self.log) as octx: - src = os.path.join(octx.directory, "boot-config") - dst = os.path.join(abs_idir, "boot-config") - self.log.debug("+ /bin/cp %s %s", src, dst) - shutil.copy2(src, dst) + # + # Disable until a system for boot-config upgrade is implemented. + # with OnlMountContextReadWrite('ONL-BOOT', logger=self.log) as octx: + # src = os.path.join(octx.directory, "boot-config") + # dst = os.path.join(abs_idir, "boot-config") + # self.log.debug("+ /bin/cp %s %s", src, dst) + # shutil.copy2(src, dst) + # # chroot to the onl-install script ##cmd = ('chroot', ctx.dir, From 04658945c571a9c26be136d7c7f61b73cec209b8 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 9 Nov 2016 21:18:47 +0000 Subject: [PATCH 071/255] The upgrade status manifest may not be available during system upgrade. 
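
During a system upgrade the directory holding the status file may not exist yet,
so the write is now best-effort. A minimal standalone sketch of the same guard
(the function name is illustrative; the real change is in ubase.py below):

    import json, os

    def write_status(path, data):
        # skip silently when the enclosing directory has not been created yet
        if not os.path.exists(os.path.dirname(path)):
            return
        with open(path, "w") as f:
            json.dump(data, f)
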
--- .../all/vendor-config-onl/src/python/onl/upgrade/ubase.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py index 86562b61..c5978629 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py @@ -176,8 +176,9 @@ class BaseUpgrade(object): def update_upgrade_status(self, key, value): data = self.upgrade_status_get() data[key] = value - with open(self.UPGRADE_STATUS_JSON, "w") as f: - json.dump(data, f) + if os.path.exists(os.path.dirname(BaseUpgrade.UPGRADE_STATUS_JSON)): + with open(self.UPGRADE_STATUS_JSON, "w") as f: + json.dump(data, f) # # Initialize self.current_version, self.next_Version From eca7a60349d15ed00498aaf032dd78513d0e4c4d Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 9 Nov 2016 21:45:37 +0000 Subject: [PATCH 072/255] Recover default boot-config into /mnt/onl/boot. --- packages/base/all/initrds/loader-initrd-files/src/bin/sysinit | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit b/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit index 983b4799..b80f85fb 100755 --- a/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit +++ b/packages/base/all/initrds/loader-initrd-files/src/bin/sysinit @@ -90,6 +90,7 @@ if [ ! -f /etc/onl/abort ]; then elif [ -f /etc/onl/boot-config-default ]; then # Use default boot-config. cp /etc/onl/boot-config-default /etc/onl/boot-config + cp /etc/onl/boot-config-default /mnt/onl/boot/boot-config fi fi From 0353c52c9dd05e0dcd2b092320b3bd90287325d1 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 9 Nov 2016 22:23:16 +0000 Subject: [PATCH 073/255] Make boot-config files optional. 
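
--boot-config is no longer required; when it is omitted the installer simply does
not ship a boot-config and the loader recovers its built-in default (see PATCH 072).
Usage sketch only -- the output name is arbitrary and the other arguments mirror the
existing GRUB installer build:

    tools/mkinstaller.py --arch amd64 \
        --initrd onl-loader-initrd:amd64 onl-loader-initrd-amd64.cpio.gz \
        --swi onl-swi:amd64 \
        --out my-onl-installer
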
--- tools/mkinstaller.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/mkinstaller.py b/tools/mkinstaller.py index 36d7b2ae..e3945bd2 100755 --- a/tools/mkinstaller.py +++ b/tools/mkinstaller.py @@ -166,7 +166,7 @@ if __name__ == '__main__': choices = ['amd64', 'powerpc', 'armel', 'arm64']) ap.add_argument("--initrd", nargs=2, help="The system initrd.") ap.add_argument("--fit", nargs=2, help="The system FIT image.") - ap.add_argument("--boot-config", help="The boot-config source.", required=True) + ap.add_argument("--boot-config", help="The boot-config source.") ap.add_argument("--add-file", help="Add the given file to the installer package.", nargs='+', default=[]) ap.add_argument("--add-dir", help="Optional directory to include in the installer.", nargs='+', default=[]) ap.add_argument("--swi", help="Include the given SWI in the installer.") @@ -197,7 +197,8 @@ if __name__ == '__main__': if ops.fit: installer.add_fit(*ops.fit) - installer.add_file(ops.boot_config) + if ops.boot_config: + installer.add_file(ops.boot_config) for f in ops.add_file: installer.add_file(f) From 33b8abb64666957295110cfc67e2f2eb81e7eaa3 Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Wed, 9 Nov 2016 22:14:54 -0800 Subject: [PATCH 074/255] fix PSU fan and temperature --- .../onlp/builds/src/module/src/fani.c | 2 +- .../onlp/builds/src/module/src/platform_lib.h | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/fani.c b/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/fani.c index 44d7258c..464e1657 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/fani.c @@ -76,8 +76,8 @@ static last_path_T last_path[] = /* must map with onlp_fan_id */ MAKE_FAN_LAST_PATH_ON_MAIN_BROAD(PROJECT_NAME, FAN_3_ON_MAIN_BROAD), MAKE_FAN_LAST_PATH_ON_MAIN_BROAD(PROJECT_NAME, FAN_4_ON_MAIN_BROAD), MAKE_FAN_LAST_PATH_ON_MAIN_BROAD(PROJECT_NAME, FAN_5_ON_MAIN_BROAD), - MAKE_FAN_LAST_PATH_ON_PSU(6-003d), MAKE_FAN_LAST_PATH_ON_PSU(6-003e), + MAKE_FAN_LAST_PATH_ON_PSU(6-003d), }; #define MAKE_FAN_INFO_NODE_ON_MAIN_BROAD(id) \ diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/platform_lib.h index f355ab66..64841a8b 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/platform_lib.h +++ b/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/builds/src/module/src/platform_lib.h @@ -35,13 +35,13 @@ #define CHASSIS_THERMAL_COUNT 8 #define CHASSIS_SFP_COUNT 32 -#define PSU2_AC_PMBUS_PREFIX "/sys/bus/i2c/devices/6-003e/" -#define PSU1_AC_PMBUS_PREFIX "/sys/bus/i2c/devices/6-003d/" +#define PSU1_AC_PMBUS_PREFIX "/sys/bus/i2c/devices/6-003e/" +#define PSU2_AC_PMBUS_PREFIX "/sys/bus/i2c/devices/6-003d/" -#define PSU1_AC_HWMON_PREFIX "/sys/bus/i2c/devices/6-003a/" -#define PSU1_DC_HWMON_PREFIX "/sys/bus/i2c/devices/6-0052/" -#define PSU2_AC_HWMON_PREFIX "/sys/bus/i2c/devices/6-0039/" +#define PSU1_AC_HWMON_PREFIX "/sys/bus/i2c/devices/6-0039/" #define PSU2_DC_HWMON_PREFIX "/sys/bus/i2c/devices/6-0051/" +#define PSU2_AC_HWMON_PREFIX "/sys/bus/i2c/devices/6-003a/" +#define PSU1_DC_HWMON_PREFIX "/sys/bus/i2c/devices/6-0052/" 
#define PSU1_AC_HWMON_NODE(node) PSU1_AC_HWMON_PREFIX#node #define PSU1_DC_HWMON_NODE(node) PSU1_DC_HWMON_PREFIX#node From c686cdff2c4c4f266218d358bc1eaec4bd806d46 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 10 Nov 2016 17:58:17 +0000 Subject: [PATCH 075/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index a7f832c1..9e81401a 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit a7f832c14dcc7a146245c3a4a32448324f90171f +Subproject commit 9e81401aa70e79347d79054ed9496d4a9e8584d9 From 640295d3b32a930fb1f2a9fb3e59428d635ec57c Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 10 Nov 2016 14:39:25 -0800 Subject: [PATCH 076/255] Added legacy u-boot parser --- .../all/vendor-config-onl/src/bin/pylegacy | 7 + .../src/python/onl/install/Legacy.py | 383 ++++++++++++++++++ 2 files changed, 390 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/bin/pylegacy create mode 100644 packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py diff --git a/packages/base/all/vendor-config-onl/src/bin/pylegacy b/packages/base/all/vendor-config-onl/src/bin/pylegacy new file mode 100755 index 00000000..d25b64db --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/pylegacy @@ -0,0 +1,7 @@ +#!/usr/bin/python + +"""Swiss-army-knife legacy U-Boot image decoder +""" + +import onl.install.Legacy +onl.install.Legacy.App.main() diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py b/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py new file mode 100644 index 00000000..72ce2c38 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py @@ -0,0 +1,383 @@ +"""Legacy.py + +See +http://www.isysop.com/unpacking-and-repacking-u-boot-uimage-files/ +https://github.com/lentinj/u-boot/blob/master/include/image.h + +""" + +import os, sys +import logging +import struct +import argparse +import time + +class Parser: + + MAGIC = 0x27051956 + + # codes for ih_os + IH_OS_INVALID = 0 + IH_OS_OPENBSD = 1 + IH_OS_NETBSD = 2 + IH_OS_FREEBSD = 3 + IH_OS_4_4BSD = 4 + IH_OS_LINUX = 5 + IH_OS_SVR4 = 6 + IH_OS_ESIX = 7 + IH_OS_SOLARIS = 8 + IH_OS_IRIX = 9 + IH_OS_SCO = 10 + IH_OS_DELL = 11 + IH_OS_NCR = 12 + IH_OS_LYNXOS = 13 + IH_OS_VXWORKS = 14 + IH_OS_PSOS = 15 + IH_OS_QNX = 16 + IH_OS_U_BOOT = 17 + IH_OS_RTEMS = 18 + IH_OS_ARTOS = 19 + IH_OS_UNITY = 20 + IH_OS_INTEGRITY = 21 + IH_OS_OSE = 22 + _IH_OS_END = 23 + + # codes for ih_arch + IH_ARCH_INVALID = 0 + IH_ARCH_ALPHA = 1 + IH_ARCH_ARM = 2 + IH_ARCH_I386 = 3 + IH_ARCH_IA64 = 4 + IH_ARCH_MIPS = 5 + IH_ARCH_MIPS64 = 6 + IH_ARCH_PPC = 7 + IH_ARCH_S390 = 8 + IH_ARCH_SH = 9 + IH_ARCH_SPARC = 10 + IH_ARCH_SPARC64 = 11 + IH_ARCH_M68K = 12 + IH_ARCH_MICROBLAZE = 14 + IH_ARCH_NIOS2 = 15 + IH_ARCH_BLACKFIN = 16 + IH_ARCH_AVR32 = 17 + IH_ARCH_ST200 = 18 + IH_ARCH_SANDBOX = 19 + IH_ARCH_NDS32 = 20 + IH_ARCH_OPENRISC = 21 + _IH_ARCH_END = 22 + + # codes for ih_type + IH_TYPE_INVALID = 0 + IH_TYPE_STANDALONE = 1 + IH_TYPE_KERNEL = 2 + IH_TYPE_RAMDISK = 3 + IH_TYPE_MULTI = 4 + IH_TYPE_FIRMWARE = 5 + IH_TYPE_SCRIPT = 6 + IH_TYPE_FILESYSTEM = 7 + IH_TYPE_FLATDT = 8 + IH_TYPE_KWBIMAGE = 9 + IH_TYPE_IMXIMAGE = 10 + IH_TYPE_UBLIMAGE = 11 + IH_TYPE_OMAPIMAGE = 12 + IH_TYPE_AISIMAGE =13 + IH_TYPE_KERNEL_NOLOAD = 14 + _IH_TYPE_END = 15 + + # codes for ih_comp + IH_COMP_NONE = 0 + IH_COMP_GZIP = 1 + IH_COMP_BZIP2 = 2 
+ IH_COMP_LZMA = 3 + IH_COMP_LZO = 4 + _IH_COMP_END = 5 + + @classmethod + def registerConstants(cls): + + cls.IH_OS = [None] * cls._IH_OS_END + for k, v in cls.__dict__.iteritems(): + if k.startswith('IH_OS_'): + cls.IH_OS[v] = k[6:] + + cls.IH_ARCH = [None] * cls._IH_ARCH_END + for k, v in cls.__dict__.iteritems(): + if k.startswith('IH_ARCH_'): + cls.IH_ARCH[v] = k[8:] + + cls.IH_TYPE = [None] * cls._IH_TYPE_END + for k, v in cls.__dict__.iteritems(): + if k.startswith('IH_TYPE_'): + cls.IH_TYPE[v] = k[8:] + + cls.IH_COMP = [None] * cls._IH_COMP_END + for k, v in cls.__dict__.iteritems(): + if k.startswith('IH_COMP_'): + cls.IH_COMP[v] = k[8:] + + def __init__(self, path=None, stream=None, log=None): + + self.log = log or logging.getLogger(self.__class__.__name__) + self.path = path + self.stream = stream + + self.images = [] + self._parse() + + @classmethod + def isLegacy(cls, path=None, stream=None): + if stream is not None: + try: + pos = stream.tell() + buf = stream.read(4) + finally: + stream.seek(pos, 0) + else: + with open(path) as fd: + buf = fd.read(4) + if len(buf) != 4: return False + magic = struct.unpack(">I", buf)[0] + return magic == cls.MAGIC + + def _parse(self): + if self.stream is not None: + try: + pos = self.stream.tell() + self._parseStream(self.stream) + finally: + self.stream.seek(pos, 0) + elif self.path is not None: + with open(self.path) as fd: + self._parseStream(fd) + else: + raise ValueError("missing file or stream") + + def _parseStream(self, fd): + + buf = fd.read(64) + hdr = list(struct.unpack(">7IBBBB32s", buf)) + + self.ih_magic = hdr.pop(0) + if self.ih_magic != self.MAGIC: + raise ValueError("missing or invalid magic") + + self.ih_hcrc = hdr.pop(0) + + self.ih_time = hdr.pop(0) + self.log.debug("image created %s", + time.ctime(self.ih_time)) + + self.ih_size = hdr.pop(0) + self.log.debug("total image size %s bytes", + self.ih_size) + + self.ih_load = hdr.pop(0) + self.ih_ep = hdr.pop(0) + self.ih_dcrc = hdr.pop(0) + + self.ih_os = hdr.pop(0) + if self.ih_os != self.IH_OS_LINUX: + raise ValueError("invalid OS code") + + self.ih_arch = hdr.pop(0) + + self.ih_type = hdr.pop(0) + if self.ih_type != self.IH_TYPE_MULTI: + raise ValueError("invalid image type") + + self.ih_comp = hdr.pop(0) + # compression is ignored here, since it applies to the first + # image (the kernel) + + self.ih_name = hdr.pop(0).rstrip('\0') + + if self.ih_type == self.IH_TYPE_MULTI: + self.images = [] + while True: + buf = fd.read(4) + sz = struct.unpack(">I", buf)[0] + if sz == 0: break + self.log.debug("found image header %d bytes", sz) + self.images.append([sz, None]) + + # compute absolute image offsets + pos = fd.tell() + for idx, rec in enumerate(self.images): + rec[1] = pos + pos += rec[0] + + # images are aligned at 4-byte boundaries + pos += 3 + pos &= ~0x3 + + return + +Parser.registerConstants() + +class DumpRunner: + + def __init__(self, stream, log=None): + self.log = log or logging.getLogger(self.__class__.__name__) + self.stream = stream + + def run(self): + p = Parser(stream=self.stream, log=self.log) + + sys.stdout.write("Legacy U-Boot Image \"%s\":\n" % p.ih_name) + sys.stdout.write("created %s, %d bytes\n" + % (time.ctime(p.ih_time), p.ih_size,)) + sys.stdout.write("load @0x%04x, execute @0x%04x\n" + % (p.ih_load, p.ih_ep,)) + sys.stdout.write("OS is %s\n" % p.IH_OS[p.ih_os]) + sys.stdout.write("architecture is %s\n" % p.IH_ARCH[p.ih_arch]) + sys.stdout.write("image type is %s\n" % p.IH_TYPE[p.ih_type]) + sys.stdout.write("compression type is %s\n" % 
p.IH_COMP[p.ih_comp]) + + if p.ih_type == p.IH_TYPE_MULTI: + sys.stdout.write("%d images total:\n" % len(p.images)) + for i, rec in enumerate(p.images): + sys.stdout.write("image %d, %d bytes (offset %d)\n" + % (i, rec[0], rec[1],)) + + return 0 + + def shutdown(self): + strm, self.stream = self.stream, None + if strm is not None: strm.close() + +class ExtractRunner: + """Extract a specific image. + + NOTE that image zero may be compressed. + """ + + def __init__(self, stream, index=None, outStream=None, log=None): + self.log = log or logging.getLogger(self.__class__.__name__) + self.stream = stream + self.index = index + self.outStream = outStream + + def run(self): + p = Parser(stream=self.stream, log=self.log) + + if p.ih_type != p.IH_TYPE_MULTI: + if self.index is not None: + self.log.error("not a multi-file image, image index not allowed") + return 1 + self.stream.seek(64, 0) + buf = self.stream.read(p.ih_size) + else: + if self.index is None: + self.log.error("multi-file image, image index required") + return 1 + sz, off = p.images[self.index] + self.stream.seek(off, 0) + buf = self.stream.read(sz) + + strm = self.outStream or sys.stdout + strm.write(buf) + + return 0 + + def shutdown(self): + strm, self.stream = self.stream, None + if strm is not None: strm.close() + strm, self.outStream = self.outStream, None + if strm is not None: strm.close() + +USAGE = """\ +pylegacy [OPTIONS] ... +""" + +DUMP_USAGE = """\ +pylegacy [OPTIONS] dump IMAGE-FILE +""" + +EXTRACT_USAGE = """\ +pylegacy [OPTIONS] extract [OPTIONS] IMAGE-FILE [IMAGE-INDEX] +""" + +class App: + + def __init__(self, log=None): + self.log = log or logging.getLogger("pyfit") + + def run(self): + + ap = argparse.ArgumentParser(usage=USAGE) + ap.add_argument('-q', '--quiet', action='store_true', + help="Suppress log messages") + ap.add_argument('-v', '--verbose', action='store_true', + help="Add more logging") + + sp = ap.add_subparsers() + + apd = sp.add_parser('dump', + help="Dump image structure", + usage=DUMP_USAGE) + apd.set_defaults(mode='dump') + apd.add_argument('image-file', type=open, + help="U-Boot Legacy Image") + + apx = sp.add_parser('extract', + help="Extract items", + usage=EXTRACT_USAGE) + apx.set_defaults(mode='extract') + apx.add_argument('image-file', type=open, + help="U-Boot Legacy Image") + + apx.add_argument('-o', '--output', + type=argparse.FileType('wb', 0), + help="File destination") + apx.add_argument('index', type=int, nargs='?', + help="Image index (zero-based)") + + try: + args = ap.parse_args() + except SystemExit, what: + return what.code + + if args.quiet: + self.log.setLevel(logging.ERROR) + if args.verbose: + self.log.setLevel(logging.DEBUG) + + if args.mode == 'dump': + strm = getattr(args, 'image-file') + r = DumpRunner(strm, log=self.log) + elif args.mode == 'extract': + strm = getattr(args, 'image-file') + r = ExtractRunner(strm, + index=args.index, + outStream=args.output, + log=self.log) + + try: + code = r.run() + except: + self.log.exception("runner failed") + code = 1 + r.shutdown() + return code + + def shutdown(self): + pass + + @classmethod + def main(cls): + logging.basicConfig() + logger = logging.getLogger("pylegacy") + logger.setLevel(logging.INFO) + app = cls(log=logger) + try: + code = app.run() + except: + logger.exception("app failed") + code = 1 + app.shutdown() + sys.exit(code) + +main = App.main + +if __name__ == "__main__": + main() From c65c0ff001b1a95028bbe5a7f91754e3eee9323b Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Thu, 10 Nov 2016 14:39:33 -0800 Subject: [PATCH 077/255] Added magic-number tester --- .../src/python/onl/install/Fit.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/Fit.py b/packages/base/all/vendor-config-onl/src/python/onl/install/Fit.py index b3ca037f..43532b7e 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/Fit.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/Fit.py @@ -38,6 +38,21 @@ class Parser: self.rootNodes = {} self._parse() + @classmethod + def isFit(cls, path=None, stream=None): + if stream is not None: + try: + pos = stream.tell() + buf = stream.read(4) + finally: + stream.seek(pos, 0) + else: + with open(path) as fd: + buf = fd.read(4) + if len(buf) != 4: return False + magic = struct.unpack(">I", buf)[0] + return magic == cls.FDT_MAGIC + def _parse(self): if self.stream is not None: try: @@ -58,7 +73,7 @@ class Parser: hdr = list(struct.unpack(">10I", buf)) magic = hdr.pop(0) if magic != self.FDT_MAGIC: - raise ValueError("missing magic") + raise ValueError("missing or invalid magic") self.fdtSize = hdr.pop(0) self.structPos = hdr.pop(0) self.stringPos = hdr.pop(0) From 1e262f8b6f0107e3978eeb965a49eb7c273f1c1b Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 10 Nov 2016 14:39:55 -0800 Subject: [PATCH 078/255] Do not parse the ONIE initrd for u-boot systems --- .../src/python/onl/install/App.py | 62 ++++++++++--------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py index cd18cb97..3cc220f8 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py @@ -135,35 +135,6 @@ class App(SubprocessMixin, object): self.log.error("missing installer.conf") return 1 - ##self.log.info("using native GRUB") - ##self.grubEnv = ConfUtils.GrubEnv(log=self.log.getChild("grub")) - - with OnieBootContext(log=self.log) as self.octx: - self.octx.detach() - - if self.octx.onieDir is not None: - self.log.info("using native ONIE initrd+chroot GRUB (%s)", self.octx.onieDir) - self.grubEnv = ConfUtils.ChrootGrubEnv(self.octx.initrdDir, - bootDir=self.octx.onieDir, - path="/grub/grubenv", - log=self.log.getChild("grub")) - # direct access using ONIE initrd as a chroot - # (will need to fix up bootDir and bootPart later) - else: - self.log.info("using proxy GRUB") - self.grubEnv = ConfUtils.ProxyGrubEnv(self.installerConf, - bootDir="/mnt/onie-boot", - path="/grub/grubenv", - chroot=False, - log=self.log.getChild("grub")) - # indirect access through chroot host - # (will need to fix up bootDir and bootPart later) - - if os.path.exists(ConfUtils.UbootEnv.SETENV): - self.ubootEnv = ConfUtils.UbootEnv(log=self.log.getChild("u-boot")) - else: - self.ubootEnv = None - self.log.info("ONL Installer %s", self.installerConf.onl_version) code = self.findPlatform() @@ -190,6 +161,39 @@ class App(SubprocessMixin, object): self.log.error("cannot detect installer type") return 1 + self.grubEnv = None + + if 'grub' in self.onlPlatform.platform_config: + ##self.log.info("using native GRUB") + ##self.grubEnv = ConfUtils.GrubEnv(log=self.log.getChild("grub")) + + with OnieBootContext(log=self.log) as self.octx: + self.octx.detach() + + if self.octx.onieDir is not None: + self.log.info("using native ONIE initrd+chroot GRUB 
(%s)", self.octx.onieDir) + self.grubEnv = ConfUtils.ChrootGrubEnv(self.octx.initrdDir, + bootDir=self.octx.onieDir, + path="/grub/grubenv", + log=self.log.getChild("grub")) + # direct access using ONIE initrd as a chroot + # (will need to fix up bootDir and bootPart later) + + if self.grubEnv is None: + self.log.info("using proxy GRUB") + self.grubEnv = ConfUtils.ProxyGrubEnv(self.installerConf, + bootDir="/mnt/onie-boot", + path="/grub/grubenv", + chroot=False, + log=self.log.getChild("grub")) + # indirect access through chroot host + # (will need to fix up bootDir and bootPart later) + + if os.path.exists(ConfUtils.UbootEnv.SETENV): + self.ubootEnv = ConfUtils.UbootEnv(log=self.log.getChild("u-boot")) + else: + self.ubootEnv = None + # run the platform-specific installer self.installer = iklass(machineConf=self.machineConf, installerConf=self.installerConf, From 707279da6f4e677ba68f047033dd5855f674ca4d Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 10 Nov 2016 14:49:51 -0800 Subject: [PATCH 079/255] Relax OS and TYPE requirements --- .../all/vendor-config-onl/src/python/onl/install/Legacy.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py b/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py index 72ce2c38..699f5285 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/Legacy.py @@ -176,14 +176,9 @@ class Parser: self.ih_dcrc = hdr.pop(0) self.ih_os = hdr.pop(0) - if self.ih_os != self.IH_OS_LINUX: - raise ValueError("invalid OS code") - self.ih_arch = hdr.pop(0) self.ih_type = hdr.pop(0) - if self.ih_type != self.IH_TYPE_MULTI: - raise ValueError("invalid image type") self.ih_comp = hdr.pop(0) # compression is ignored here, since it applies to the first From 52c96b0bea4fb8663cd41654c071050a208b4f84 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Thu, 10 Nov 2016 14:50:26 -0800 Subject: [PATCH 080/255] Extend U-Boot initrd support to include legacy images --- .../src/python/onl/install/InstallUtils.py | 56 ++++++++++++++++--- .../src/python/onl/install/ShellApp.py | 6 +- 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index a831d20b..ddf82f2a 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -10,7 +10,7 @@ import tempfile import string import shutil -import Fit +import Fit, Legacy class SubprocessMixin: @@ -879,16 +879,16 @@ class InitrdContext(SubprocessMixin): # (it's inside this chroot anyway) return initrdDir -class FitInitrdContext(SubprocessMixin): +class UbootInitrdContext(SubprocessMixin): def __init__(self, path, log=None): - self.fitPath = path + self.path = path self.log = log or logging.getLogger(self.__class__.__name__) self.initrd = self.__initrd = None - def __enter__(self): - self.log.debug("parsing FIT image in %s", self.fitPath) - p = Fit.Parser(path=self.fitPath, log=self.log) + def _extractFit(self): + self.log.debug("parsing FIT image in %s", self.path) + p = Fit.Parser(path=self.path, log=self.log) node = p.getInitrdNode() if node is None: raise ValueError("cannot find initrd node in FDT") @@ -896,7 +896,7 @@ class FitInitrdContext(SubprocessMixin): if prop is None: raise ValueError("cannot find initrd data property in FDT") - with open(self.fitPath) as fd: + with open(self.path) as fd: self.log.debug("reading initrd at [%x:%x]", prop.offset, prop.offset+prop.sz) fd.seek(prop.offset, 0) @@ -907,7 +907,47 @@ class FitInitrdContext(SubprocessMixin): self.log.debug("+ cat > %s", self.initrd) with os.fdopen(fno, "w") as fd: fd.write(buf) - return self + + def _extractLegacy(self): + self.log.debug("parsing legacy U-Boot image in %s", self.path) + p = Legacy.Parser(path=self.path, log=self.log) + + if p.ih_type != Legacy.Parser.IH_TYPE_MULTI: + raise ValueError("not a multi-file image") + + if p.ih_os != Legacy.Parser.IH_OS_LINUX: + raise ValueError("invalid OS code") + + sz, off = p.images[1] + # assume the initrd is the second of three images + + with open(self.path) as fd: + self.log.debug("reading initrd at [%x:%x]", + off, off+sz) + fd.seek(off, 0) + buf = fd.read(sz) + + fno, self.initrd = tempfile.mkstemp(prefix="initrd-", + suffix=".img") + self.log.debug("+ cat > %s", self.initrd) + with os.fdopen(fno, "w") as fd: + fd.write(buf) + + def __enter__(self): + + with open(path=self.path) as fd: + isFit = Fit.Parser.isFit(fd) + isLegacy = Legacy.Parser.isLegacy(fd) + + if isFit: + self._extractFit() + return self + + if isLegacy: + self._extractLegacy() + return self + + raise ValueError("invalid U-Boot image %s" % self.path) def shutdown(self): initrd, self.initrd = self.initrd, None diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index fe534512..8490325d 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -13,7 +13,7 @@ from InstallUtils import InitrdContext, MountContext from InstallUtils import SubprocessMixin from InstallUtils import ProcMountsParser, ProcMtdParser from InstallUtils import 
BlkidParser -from InstallUtils import FitInitrdContext +from InstallUtils import UbootInitrdContext import onl.platform.current from onl.sysconfig import sysconfig @@ -47,7 +47,7 @@ class AppBase(SubprocessMixin, object): return 0 def _runFitShell(self, device): - with FitInitrdContext(path=device, log=self.log) as ctx: + with UbootInitrdContext(path=device, log=self.log) as ctx: return self._runInitrdShell(ctx.initrd) def shutdown(self): @@ -174,7 +174,7 @@ class OnieBootContext: part = parts[0] self.log.debug("found ONIE MTD device %s", part.charDevice or part.blockDevice) - with FitInitrdContext(part.blockDevice, log=self.log) as self.fctx: + with UbootInitrdContext(part.blockDevice, log=self.log) as self.fctx: with InitrdContext(initrd=self.fctx.initrd, log=self.log) as self.ictx: self.initrd = self.fctx.initrd self.fctx.detach() From 551435819d9d1f9c67599df8ecdea6e03f4b0062 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 10 Nov 2016 15:26:27 -0800 Subject: [PATCH 081/255] Fixed format test api --- .../src/python/onl/install/InstallUtils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index ddf82f2a..685f703d 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -935,9 +935,9 @@ class UbootInitrdContext(SubprocessMixin): def __enter__(self): - with open(path=self.path) as fd: - isFit = Fit.Parser.isFit(fd) - isLegacy = Legacy.Parser.isLegacy(fd) + with open(self.path) as fd: + isFit = Fit.Parser.isFit(stream=fd) + isLegacy = Legacy.Parser.isLegacy(stream=fd) if isFit: self._extractFit() From 5773f5e759de1e0f2892f802c447ff37e9e1e272 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 10 Nov 2016 17:42:55 -0800 Subject: [PATCH 082/255] Minor loader-shell breakage on ppc --- .../all/vendor-config-onl/src/python/onl/install/ShellApp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index 8490325d..934d40a3 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -302,7 +302,7 @@ class Loader(AppBase): # run from a file in a mounted filesystem parts = [p for p in self.pm.mounts if p.device == bootDevice] if parts: - loaderDir = parts[0] + loaderDir = parts[0].dir self.log.debug("found loader device mounted at %s", loaderDir) for e in l: p = os.path.join(loaderDir, e) From 69c3ca634d4bfa41b972e9b97b7c44d23aa0b0ff Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 11 Nov 2016 07:57:38 -0800 Subject: [PATCH 083/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 9e81401a..70859b05 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 9e81401aa70e79347d79054ed9496d4a9e8584d9 +Subproject commit 70859b05b6aaed7660b6c5b5f84bd7bbd8bfd5d0 From 82eafedd6b173fadcb52b72450133526290f44a8 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 13 Nov 2016 07:37:26 -0800 Subject: [PATCH 084/255] CJSON Fix. 
--- sm/bigcode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/bigcode b/sm/bigcode index 8065e129..b5c5ef26 160000 --- a/sm/bigcode +++ b/sm/bigcode @@ -1 +1 @@ -Subproject commit 8065e129d36a58cae90f52e6ae5542fa885eb42d +Subproject commit b5c5ef26e618de765ecaf114984807172f3a500b From a2b8c1caebaab1aeaf2e63ee0284c5bff627a2a6 Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Tue, 15 Nov 2016 16:04:57 -0800 Subject: [PATCH 085/255] add onlp_sfpi_dev_read[write] api support --- .../onlp/builds/src/module/src/platform_lib.h | 1 + .../onlp/builds/src/module/src/sfpi.c | 71 ++++++++++++++++++- 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h index 9a1257d6..9aa30654 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h @@ -52,6 +52,7 @@ #define SFP_HWMON_NODE(node) SFP_HWMON_PREFIX#node #define SFP_HWMON_DOM_PREFIX "/sys/bus/i2c/devices/3-0051/" #define SFP_HWMON_DOM_NODE(node) SFP_HWMON_DOM_PREFIX#node +#define SFP_BUS 3 int deviceNodeWriteInt(char *filename, int value, int data_len); int deviceNodeReadBinary(char *filename, char *buffer, int buf_size, int data_len); diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c index d4c084d9..aeb52151 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c @@ -30,7 +30,7 @@ #include #include #include - +#include #include "platform_lib.h" static int @@ -333,6 +333,75 @@ onlp_sfpi_dom_read(int port, uint8_t data[256]) return ONLP_STATUS_OK; } +int +onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + int rc; + + if (set_active_port(port+1) != 0) { + AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + rc= onlp_i2c_readb(SFP_BUS, devaddr, addr, ONLP_I2C_F_FORCE); + + set_active_port(0); + + return rc; +} + +int +onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + int rc; + + if (set_active_port(port+1) != 0) { + AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + rc = onlp_i2c_writeb(SFP_BUS, devaddr, addr, value, ONLP_I2C_F_FORCE); + + set_active_port(0); + + return rc; +} + +int +onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + int rc; + + if (set_active_port(port+1) != 0) { + AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + rc= onlp_i2c_readw(SFP_BUS, devaddr, addr, ONLP_I2C_F_FORCE); + + set_active_port(0); + + return rc; +} + +int +onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + int rc; + + if (set_active_port(port+1) != 0) { + AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + rc = onlp_i2c_writew(SFP_BUS, devaddr, addr, value, ONLP_I2C_F_FORCE); + + set_active_port(0); + + return rc; + +} + int onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) { From 
28facefe44c8bdde7fdc34e9fc595e4341476952 Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Wed, 16 Nov 2016 10:48:59 -0800 Subject: [PATCH 086/255] minor fix --- .../onlp/builds/src/module/src/sfpi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c index aeb52151..0d320d4e 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/sfpi.c @@ -339,7 +339,7 @@ onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) int rc; if (set_active_port(port+1) != 0) { - AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + AIM_LOG_ERROR("Unable to set active port(%d)\r\n", port); return ONLP_STATUS_E_INTERNAL; } @@ -356,7 +356,7 @@ onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) int rc; if (set_active_port(port+1) != 0) { - AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + AIM_LOG_ERROR("Unable to set active port(%d)\r\n", port); return ONLP_STATUS_E_INTERNAL; } @@ -373,7 +373,7 @@ onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) int rc; if (set_active_port(port+1) != 0) { - AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + AIM_LOG_ERROR("Unable to set active port(%d)\r\n", port); return ONLP_STATUS_E_INTERNAL; } @@ -390,7 +390,7 @@ onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) int rc; if (set_active_port(port+1) != 0) { - AIM_LOG_INFO("Unable to set active port(%d)\r\n", port); + AIM_LOG_ERROR("Unable to set active port(%d)\r\n", port); return ONLP_STATUS_E_INTERNAL; } From ab24f9914ea252bc9b09cc70906ee1b992fe6887 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 16 Nov 2016 20:21:04 +0000 Subject: [PATCH 087/255] Use the new module database infrastructure. --- make/.gitignore | 3 ++- make/config.mk | 7 +++++-- tools/newmodule.py | 2 +- tools/scripts/post-merge.hook | 11 ++++------- tools/scripts/submodule-updated.sh | 2 +- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/make/.gitignore b/make/.gitignore index 4bf19b41..f6b5b210 100644 --- a/make/.gitignore +++ b/make/.gitignore @@ -1,3 +1,4 @@ versions/ -module-manifest.mk +modules/module* + diff --git a/make/config.mk b/make/config.mk index 0e8c23aa..2e1a69aa 100644 --- a/make/config.mk +++ b/make/config.mk @@ -26,8 +26,11 @@ export ONL_DEBIAN_SUITE_$(ONL_DEBIAN_SUITE)=1 export BUILD_DIR_BASE=BUILD/$(ONL_DEBIAN_SUITE) -# Generate manifest if necessary -export MODULEMANIFEST := $(shell $(BUILDER)/tools/mmg.py $(ONL)/make/mmg.yml $(ONL) --only-if-missing) + +# Use the new module database tool to resolve dependencies dynamically. +export BUILDER_MODULE_DATABASE := $(ONL)/make/modules/modules.json +# Regenerate the module manifest if necessary. +export MODULEMANIFEST := $(shell $(BUILDER)/tools/modtool.py --db $(BUILDER_MODULE_DATABASE) --dbroot $(ONL) --make-manifest $(ONL)/make/modules/modules.mk) # Generate versions if necessary. 
$(shell $(ONL)/tools/make-versions.py --import-file=$(ONL)/tools/onlvi --class-name=OnlVersionImplementation --output-dir $(ONL)/make/versions) diff --git a/tools/newmodule.py b/tools/newmodule.py index 0ac2d599..41d13573 100755 --- a/tools/newmodule.py +++ b/tools/newmodule.py @@ -58,7 +58,7 @@ if __name__ == "__main__": ModuleGenerator.main(globals().copy()) # Make sure the manifest gets regenerated. - os.system("rm -rf %s/make/module-manifest.mk" % ROOT) + os.system("rm -rf %s/make/modules/modules*" % ROOT) diff --git a/tools/scripts/post-merge.hook b/tools/scripts/post-merge.hook index e4bad577..e827fa32 100755 --- a/tools/scripts/post-merge.hook +++ b/tools/scripts/post-merge.hook @@ -2,14 +2,11 @@ ############################################################ # # Every time a merge is performed we should invalidate -# the module manifest. +# the module data. # ############################################################ -mm="$GIT_DIR/../make/module-manifest.mk" - -if [ -f "$mm" ]; then - echo "Removing module manifest after merge..." - rm "$mm" -fi +echo "Removing module data after merge..." +rm -rf "$GIT_DIR/../make/modules/modules*" + diff --git a/tools/scripts/submodule-updated.sh b/tools/scripts/submodule-updated.sh index c3ff4608..979bb64c 100755 --- a/tools/scripts/submodule-updated.sh +++ b/tools/scripts/submodule-updated.sh @@ -13,7 +13,7 @@ ############################################################ # Removing the manifest causes it to be regenerated. -rm -rf $ONL/make/module-manifest.mk +rm -rf $ONL/make/modules/module* # Rebuild pkg cache onlpm.py --rebuild-pkg-cache From e51148bf556f6510047fb1ac59443f042945cd83 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 16 Nov 2016 20:21:18 +0000 Subject: [PATCH 088/255] Use the new module database infrastructure. --- make/mmg.yml | 9 --------- make/modules/README | 1 + 2 files changed, 1 insertion(+), 9 deletions(-) delete mode 100644 make/mmg.yml create mode 100644 make/modules/README diff --git a/make/mmg.yml b/make/mmg.yml deleted file mode 100644 index e6c9626e..00000000 --- a/make/mmg.yml +++ /dev/null @@ -1,9 +0,0 @@ -directories: - - . - -manifest: make/module-manifest.mk - - - - - diff --git a/make/modules/README b/make/modules/README new file mode 100644 index 00000000..44601e1a --- /dev/null +++ b/make/modules/README @@ -0,0 +1 @@ +This directory contains the module database files generated at build time. \ No newline at end of file From 218811da1b417c8ac1640a6bc0dcbe38da2e1e0a Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 16 Nov 2016 20:22:21 +0000 Subject: [PATCH 089/255] Use the CJSON module to generate JSON formatting instead of hand coding it. 
--- packages/base/any/onlp/src/onlplib/.module | 1 + .../any/onlp/src/onlplib/module/src/onie.c | 67 ++++++++++++------- 2 files changed, 42 insertions(+), 26 deletions(-) diff --git a/packages/base/any/onlp/src/onlplib/.module b/packages/base/any/onlp/src/onlplib/.module index 215a822f..7cf3968c 100644 --- a/packages/base/any/onlp/src/onlplib/.module +++ b/packages/base/any/onlp/src/onlplib/.module @@ -1 +1,2 @@ name: onlplib +depends: cjson diff --git a/packages/base/any/onlp/src/onlplib/module/src/onie.c b/packages/base/any/onlp/src/onlplib/module/src/onie.c index 2ce139f2..eecd5feb 100644 --- a/packages/base/any/onlp/src/onlplib/module/src/onie.c +++ b/packages/base/any/onlp/src/onlplib/module/src/onie.c @@ -395,38 +395,53 @@ onlp_onie_show(onlp_onie_info_t* info, aim_pvs_t* pvs) } } +#include + void onlp_onie_show_json(onlp_onie_info_t* info, aim_pvs_t* pvs) { - aim_printf(pvs, "{\n"); + cJSON* cj = cJSON_CreateObject(); -#define STROUT(_name, _member) \ - do { \ - aim_printf(pvs, " \"%s\" : ", #_name); \ - if(info-> _member) { \ - aim_printf(pvs, "\"%s\",\n", info->_member); \ - } \ - else { \ - aim_printf(pvs, "null,\n"); \ - } \ +#define _S(_name, _member) \ + do { \ + if(info-> _member) { \ + cJSON_AddStringToObject(cj, #_name, info-> _member); \ + } else { \ + cJSON_AddNullToObject(cj, #_name); \ + } \ } while(0) - STROUT(Product Name, product_name); - STROUT(Part Number, part_number); - STROUT(Serial Number, serial_number); - aim_printf(pvs, " \"MAC\": \"%{mac}\", ", info->mac); - aim_printf(pvs, " \"MAC Range\": %d,\n", info->mac_range); - STROUT(Manufacture Date,manufacture_date); - STROUT(Vendor,vendor); - STROUT(Platform Name,platform_name); - aim_printf(pvs, " \"Device Version\": %u,\n", info->device_version); - STROUT(Label Revision,label_revision); - STROUT(Country Code,country_code); - STROUT(Diag Version,diag_version); - STROUT(Service Tag,service_tag); - STROUT(ONIE Version,onie_version); - aim_printf(pvs, " \"CRC\": \"0x%x\"\n", info->crc); - aim_printf(pvs, "}\n"); +#define _N(_name, _member) \ + do { \ + cJSON_AddNumberToObject(cj, #_name, info-> _member); \ + } while(0) + + _S(Product Name, product_name); + _S(Part Number, part_number); + _S(Serial Number, serial_number); + { + char* mac = aim_dfstrdup("%{mac}", info->mac); + cJSON_AddStringToObject(cj, "MAC", mac); + aim_free(mac); + } + _S(Manufacture Date,manufacture_date); + _S(Vendor,vendor); + _S(Platform Name,platform_name); + _S(Label Revision,label_revision); + _S(Country Code,country_code); + _S(Diag Version,diag_version); + _S(Service Tag,service_tag); + _S(ONIE Version,onie_version); + _N(Device Version, device_version); + { + char* crc = aim_fstrdup("0x%x", info->crc); + cJSON_AddStringToObject(cj, "CRC", crc); + aim_free(crc); + } + char* out = cJSON_Print(cj); + aim_printf(pvs, "%s\n", out); + free(out); + cJSON_Delete(cj); } From cc022b27a1d8e0db4249c1ff8a2bcd295f4e153b Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 16 Nov 2016 20:23:19 +0000 Subject: [PATCH 090/255] Latest --- sm/infra | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/infra b/sm/infra index b0e02165..9c4115f9 160000 --- a/sm/infra +++ b/sm/infra @@ -1 +1 @@ -Subproject commit b0e02165a733c268c4891e51da6edc3d5ae9ffbb +Subproject commit 9c4115f96c22f611a4ff3c7bf92d88ceec390379 From 66334bf1c1ee3081571d5b8321c2cc172f877f18 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 16 Nov 2016 20:58:00 +0000 Subject: [PATCH 091/255] Latest --- sm/infra | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/sm/infra b/sm/infra index 9c4115f9..f69bb394 160000 --- a/sm/infra +++ b/sm/infra @@ -1 +1 @@ -Subproject commit 9c4115f96c22f611a4ff3c7bf92d88ceec390379 +Subproject commit f69bb394e461c26f1c8f459abc47580a3d2bb625 From 2a37f606224521eb581ee8e345907a1a44d85658 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 16 Nov 2016 22:54:27 +0000 Subject: [PATCH 092/255] Latest --- sm/infra | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/infra b/sm/infra index f69bb394..806c0828 160000 --- a/sm/infra +++ b/sm/infra @@ -1 +1 @@ -Subproject commit f69bb394e461c26f1c8f459abc47580a3d2bb625 +Subproject commit 806c082806e5f9991b7afdb43199e51d5118cff7 From 31496274597b878a576d79fb9687ca91b5ad8057 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 16 Nov 2016 18:54:34 -0800 Subject: [PATCH 093/255] Initial plugin api, needs docs --- .../src/python/onl/install/Plugin.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py b/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py new file mode 100644 index 00000000..f1e97713 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py @@ -0,0 +1,17 @@ +"""Plugin.py + +Base class for installer plugins. +""" + +class Plugin(object): + + def __init__(self, installer): + self.installer = installer + self.log = self.installer.log.getChild("plugin") + + def run(self): + self.log.warn("not implemented") + return 0 + + def shutdown(self): + pass From 5aa3212f64136ee9a2731205eb780e3cf90ef61f Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 16 Nov 2016 18:54:54 -0800 Subject: [PATCH 094/255] Sample pre/post scripts and plugins, needs docs --- builds/any/installer/sample-postinstall.py | 11 +++++++++++ builds/any/installer/sample-postinstall.sh | 8 ++++++++ builds/any/installer/sample-preinstall.py | 11 +++++++++++ builds/any/installer/sample-preinstall.sh | 8 ++++++++ 4 files changed, 38 insertions(+) create mode 100644 builds/any/installer/sample-postinstall.py create mode 100644 builds/any/installer/sample-postinstall.sh create mode 100644 builds/any/installer/sample-preinstall.py create mode 100644 builds/any/installer/sample-preinstall.sh diff --git a/builds/any/installer/sample-postinstall.py b/builds/any/installer/sample-postinstall.py new file mode 100644 index 00000000..230013c9 --- /dev/null +++ b/builds/any/installer/sample-postinstall.py @@ -0,0 +1,11 @@ +"""sample-postinstall.py + +""" + +import onl.install.Plugin + +class Plugin(onl.install.Plugin.Plugin): + + def run(self): + self.log.info("hello from postinstall plugin") + return 0 diff --git a/builds/any/installer/sample-postinstall.sh b/builds/any/installer/sample-postinstall.sh new file mode 100644 index 00000000..0bc0938c --- /dev/null +++ b/builds/any/installer/sample-postinstall.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +rootdir=$1; shift + +echo "Hello from postinstall" +echo "Chroot is $rootdir" + +exit 0 diff --git a/builds/any/installer/sample-preinstall.py b/builds/any/installer/sample-preinstall.py new file mode 100644 index 00000000..77148225 --- /dev/null +++ b/builds/any/installer/sample-preinstall.py @@ -0,0 +1,11 @@ +"""sample-preinstall.py + +""" + +import onl.install.Plugin + +class Plugin(onl.install.Plugin.Plugin): + + def run(self): + self.log.info("hello from preinstall plugin") + return 0 diff --git 
a/builds/any/installer/sample-preinstall.sh b/builds/any/installer/sample-preinstall.sh new file mode 100644 index 00000000..daf128a6 --- /dev/null +++ b/builds/any/installer/sample-preinstall.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +rootdir=$1; shift + +echo "Hello from preinstall" +echo "Chroot is $rootdir" + +exit 0 From f088db5372e2eaa6e09d4f29be8accf7b575104d Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 16 Nov 2016 18:55:53 -0800 Subject: [PATCH 095/255] Support pre/post install scripts - refactor unzip handling a bit - better support for file excludes using a tempdir - run pre/post hooks using well-known script names --- builds/any/installer/installer.sh.in | 59 +++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 9 deletions(-) diff --git a/builds/any/installer/installer.sh.in b/builds/any/installer/installer.sh.in index 8a988924..da7a24cb 100644 --- a/builds/any/installer/installer.sh.in +++ b/builds/any/installer/installer.sh.in @@ -361,16 +361,44 @@ else installer_list=$initrd_archive fi +installer_unzip() { + local zip tmp dummy + zip=$1; shift + + installer_say "Extracting from $zip: $@ ..." + + tmp=$(mktemp -d -t "unzip-XXXXXX") + if test "$SFX_PAD"; then + # ha ha, busybox cannot exclude multiple files + unzip -o $zip "$@" -x $SFX_PAD -d $tmp + elif test "$SFX_UNZIP"; then + unzip -o $zip "$@" -x $installer_script -d $tmp + else + dd if=$zip bs=$SFX_BLOCKSIZE skip=$SFX_BLOCKS \ + | unzip -o - "$@" -x $installer_script -d $tmp + fi + + rm -f $tmp/$installer_script + if test "$SFX_PAD"; then + rm -f $tmp/$SFX_PAD + fi + + set dummy $tmp/* + if test -e "$2"; then + shift + while test $# -gt 0; do + mv "$1" . + shift + done + else + installer_say "Extracting from $zip: no files extracted" + fi + + return 0 +} + installer_say "Unpacking ONL installer files..." -if test "$SFX_PAD"; then - # ha ha, busybox cannot exclude multiple files - unzip -o $installer_zip $installer_list -x $SFX_PAD -elif test "$SFX_UNZIP"; then - unzip -o $installer_zip $installer_list -x $installer_script -else - dd if=$installer_zip bs=$SFX_BLOCKSIZE skip=$SFX_BLOCKS \ - | unzip -o - $installer_list -x $installer_script -fi +installer_unzip $installer_zip $installer_list # Developer debugging if has_boot_env onl_installer_unpack_only; then installer_unpack_only=1; fi @@ -513,6 +541,13 @@ else installer_say "*** watch out for lingering mount-points" fi +installer_unzip $installer_zip preinstall.sh || : +if test -f preinstall.sh; then + installer_say "Invoking pre-install actions" + chmod +x preinstall.sh + ./preinstall.sh $rootdir +fi + chroot "${rootdir}" $installer_shell if test -f "$postinst"; then @@ -522,6 +557,12 @@ if test -f "$postinst"; then set +x fi +installer_unzip $installer_zip postinstall.sh || : +if test -f preinstall.sh; then + chmod +x postinstall.sh + ./postinstall.sh $rootdir +fi + trap - 0 1 installer_umount From 2dfb813f8207b7e264edab870da1938cfa8abe75 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Wed, 16 Nov 2016 18:56:09 -0800 Subject: [PATCH 096/255] Add sample scripts to the ONL installer --- builds/any/installer/grub/builds/Makefile | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/builds/any/installer/grub/builds/Makefile b/builds/any/installer/grub/builds/Makefile index a89a27cd..933f727f 100644 --- a/builds/any/installer/grub/builds/Makefile +++ b/builds/any/installer/grub/builds/Makefile @@ -10,8 +10,20 @@ endif include $(ONL)/make/versions/version-onl.mk INSTALLER_NAME=$(FNAME_PRODUCT_VERSION)_ONL-OS_$(FNAME_BUILD_ID)_$(UARCH)_$(BOOTMODE)_INSTALLER +MKINSTALLER_OPTS = \ + --arch $(ARCH) \ + --boot-config boot-config \ + --add-dir config \ + --initrd onl-loader-initrd:$(ARCH) onl-loader-initrd-$(ARCH).cpio.gz \ + --swi onl-swi:$(ARCH) \ + --preinstall-script $(ONL)/builds/any/installer/sample-preinstall.sh \ + --postinstall-script $(ONL)/builds/any/installer/sample-postinstall.sh \ + --preinstall-plugin $(ONL)/builds/any/installer/sample-preinstall.py \ + --postinstall-plugin $(ONL)/builds/any/installer/sample-postinstall.py \ + # THIS LINE INTENTIONALLY LEFT BLANK + __installer: - $(ONL)/tools/mkinstaller.py --arch $(ARCH) --boot-config boot-config --add-dir config --initrd onl-loader-initrd:$(ARCH) onl-loader-initrd-$(ARCH).cpio.gz --swi onl-swi:$(ARCH) --out $(INSTALLER_NAME) + $(ONL)/tools/mkinstaller.py $(MKINSTALLER_OPTS) --out $(INSTALLER_NAME) md5sum "$(INSTALLER_NAME)" | awk '{ print $$1 }' > "$(INSTALLER_NAME).md5sum" From f1d0f414ef883fde2b01a379e04a1a517a01ad32 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 16 Nov 2016 18:57:18 -0800 Subject: [PATCH 097/255] Added pre/post plugins - move zip handler earlier in the process - use onl.install.Plugin.Plugin api and introspection --- .../src/python/onl/install/BaseInstall.py | 80 ++++++++++++++++--- 1 file changed, 69 insertions(+), 11 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index c5a8b971..8cc3c676 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -3,7 +3,7 @@ Base classes for installers. 
""" -import os, stat +import os, sys, stat import subprocess import re import tempfile @@ -13,10 +13,12 @@ import parted import yaml import zipfile import shutil +import imp from InstallUtils import SubprocessMixin from InstallUtils import MountContext, BlkidParser, PartedParser from InstallUtils import ProcMountsParser +from Plugin import Plugin import onl.YamlUtils from onl.sysconfig import sysconfig @@ -413,6 +415,50 @@ class Base: return 0 + def preinstall(self): + return self.runPlugin("preinstall.py") + + def postinstall(self): + return self.runPlugin("postinstall.py") + + def runPluginFile(self, pyPath): + with open(pyPath) as fd: + sfx = ('.py', 'U', imp.PY_SOURCE,) + mod = imp.load_module("plugin", fd, pyPath, sfx) + for attr in dir(mod): + klass = getattr(mod, attr) + if isinstance(klass, type) and issubclass(klass, Plugin): + self.log.info("%s: running plugin %s", pyPath, attr) + plugin = klass(self) + try: + code = plugin.run() + except: + self.log.exception("plugin failed") + code = 1 + plugin.shutdown() + if code: return code + + return 0 + + def runPlugin(self, basename): + + src = os.path.join(self.im.installerConf.installer_dir, basename) + if os.path.exists(src): + return self.runPluginFile(src) + + if basename in self.zf.namelist(): + try: + src = None + with self.zf.open(basename, "r") as rfd: + wfno, src = tempfile.mkstemp(prefix="plugin-", + suffix=".py") + with os.fdopen(wfno, "w") as wfd: + shutil.copyfileobj(rfd, wfd) + return self.runPluginFile(src) + finally: + if src and os.path.exists(src): + os.unlink(src) + GRUB_TPL = """\ serial %(serial)s terminal_input serial @@ -591,6 +637,14 @@ class GrubInstaller(SubprocessMixin, Base): def installGpt(self): + # get a handle to the installer zip + p = os.path.join(self.im.installerConf.installer_dir, + self.im.installerConf.installer_zip) + self.zf = zipfile.ZipFile(p) + + code = self.preinstall() + if code: return code + code = self.findGpt() if code: return code @@ -628,11 +682,6 @@ class GrubInstaller(SubprocessMixin, Base): self.im.grubEnv.__dict__['bootPart'] = dev.device self.im.grubEnv.__dict__['bootDir'] = None - # get a handle to the installer zip - p = os.path.join(self.im.installerConf.installer_dir, - self.im.installerConf.installer_zip) - self.zf = zipfile.ZipFile(p) - code = self.installSwi() if code: return code @@ -651,6 +700,9 @@ class GrubInstaller(SubprocessMixin, Base): code = self.installGrub() if code: return code + code = self.postinstall() + if code: return code + self.log.info("ONL loader install successful.") self.log.info("GRUB installation is required next.") @@ -827,6 +879,14 @@ class UbootInstaller(SubprocessMixin, Base): self.log.error("not a block device: %s", self.device) return 1 + # get a handle to the installer zip + p = os.path.join(self.im.installerConf.installer_dir, + self.im.installerConf.installer_zip) + self.zf = zipfile.ZipFile(p) + + code = self.preinstall() + if code: return code + code = self.assertUnmounted() if code: return code @@ -872,11 +932,6 @@ class UbootInstaller(SubprocessMixin, Base): self.rawLoaderDevice = self.device + str(partIdx+1) break - # get a handle to the installer zip - p = os.path.join(self.im.installerConf.installer_dir, - self.im.installerConf.installer_zip) - self.zf = zipfile.ZipFile(p) - code = self.installSwi() if code: return code @@ -901,6 +956,9 @@ class UbootInstaller(SubprocessMixin, Base): code = self.installUbootEnv() if code: return code + code = self.postinstall() + if code: return code + return 0 def run(self): From 
486f375b4a3d1b787768a4b23661d9a354418c5f Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 16 Nov 2016 18:57:34 -0800 Subject: [PATCH 098/255] mkinstaller.py now handles pre/post hooks --- tools/mkinstaller.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tools/mkinstaller.py b/tools/mkinstaller.py index e3945bd2..6348a252 100755 --- a/tools/mkinstaller.py +++ b/tools/mkinstaller.py @@ -106,6 +106,18 @@ class InstallerShar(object): self.files.append(filename) self.files = list(set(self.files)) + def add_file_as(self, source, basename): + if not os.path.exists(source): + self.abort("File %s does not exist." % source) + + tmpdir = os.path.join(self.work_dir, "tmp") + if not os.path.exists(tmpdir): + os.mkdir(tmpdir) + + dst = os.path.join(tmpdir, basename) + shutil.copy(source, dst) + self.add_file(dst) + def add_dir(self, dir_): if not os.path.isdir(dir_): self.abort("Directory %s does not exist." % dir_) @@ -174,6 +186,16 @@ if __name__ == '__main__': ap.add_argument("--verbose", '-v', help="Verbose output.", action='store_true') ap.add_argument("--out", help="Destination Filename") + ap.add_argument("--preinstall-script", + help="Specify a preinstall script (runs before installer)") + ap.add_argument("--postinstall-script", + help="Specify a preinstall script (runs after installer)") + + ap.add_argument("--preinstall-plugin", + help="Specify a preinstall plugin (runs from within the installer chroot)") + ap.add_argument("--postinstall-plugin", + help="Specify a postinstall plugin (runs from within the installer chroot)") + ops = ap.parse_args() installer = InstallerShar(ops.arch, ops.work_dir) @@ -209,6 +231,20 @@ if __name__ == '__main__': if ops.swi: installer.add_swi(ops.swi) + hookdir = os.path.join(installer.work_dir, "tmp") + if not os.path.exists(hookdir): + os.makedirs(hookdir) + + if ops.preinstall_script: + installer.add_file_as(ops.preinstall_script, "preinstall.sh") + if ops.postinstall_script: + installer.add_file_as(ops.postinstall_script, "postinstall.sh") + + if ops.preinstall_plugin: + installer.add_file_as(ops.preinstall_plugin, "preinstall.py") + if ops.postinstall_plugin: + installer.add_file_as(ops.postinstall_plugin, "postinstall.py") + iname = os.path.abspath(ops.out) installer.build(iname) logger.info("installer: %s" % iname) From f6dc473cf84fa31321c03d721e4ef1f9b3713ef8 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:25:25 +0000 Subject: [PATCH 099/255] - Always set the default boot selection to $saved_entry - Always reset $saved_entry to the NOS boot selection. The purpose of this change is to make transitions to and from ONIE for update purposes more robust. Every boot selection to ONIE is automatically a boot-once choice. --- .../src/python/onl/install/BaseInstall.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index c5a8b971..5c46f0b9 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -419,8 +419,17 @@ terminal_input serial terminal_output serial set timeout=5 +# Always boot the saved_entry value +load_env +if [ "${saved_entry}" ] ; then + set default="${saved_entry}" +fi + menuentry %(boot_menu_entry)s { search --no-floppy --label --set=root ONL-BOOT + # Always return to this entry by default. 
+ set saved_entry="0" + save_env saved_entry echo 'Loading %(boot_loading_name)s ...' insmod gzio insmod part_msdos @@ -431,6 +440,9 @@ menuentry %(boot_menu_entry)s { # Menu entry to chainload ONIE menuentry ONIE { search --no-floppy --label --set=root ONIE-BOOT + # Always return to entry 0 by default. + set saved_entry="0" + save_env saved_entry echo 'Loading ONIE ...' chainloader +1 } From 8937a27afb319daf3454f4c35ccbe75dd750f015 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:28:30 +0000 Subject: [PATCH 100/255] Add OnlOnieBootContext This context can be used to execute commands from /mnt/onie-boot. --- .../src/python/onl/mounts/__init__.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py index 8a71d02b..3d0a2a6d 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py @@ -355,3 +355,15 @@ class OnlMountContextReadWrite(OnlMountContext): def __init__(self, label, logger): OnlMountContext.__init__(self, label, "rw", logger) + +class OnlOnieBootContext(MountContext): + def __init__(self, mdir="/mnt/onie-boot", mode="rw", label="ONIE-BOOT", logger=None): + try: + device = subprocess.check_output("blkid -L %s" % label, shell=True).strip() + except subprocess.CalledProcessError: + self.logger.debug("Block label %s does not yet exist..." % label) + raise + if not os.path.exists(mdir): + os.makedirs(mdir) + MountContext.__init__(self, device, mdir, mode, logger) + From d6d89cc3cb27b8fa7e530c2012189e3345afd2b8 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:29:31 +0000 Subject: [PATCH 101/255] - onie boot tool access - boot selection --- .../src/python/onl/grub/__init__.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py diff --git a/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py new file mode 100644 index 00000000..2ef7373c --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py @@ -0,0 +1,35 @@ +from onl.mounts import OnlOnieBootContext, OnlMountContextReadWrite +import subprocess + +ONIE_BOOT_MODES = [ 'install', + 'rescue', + 'uninstall', + 'update', + 'embed', + 'diag', + 'none' + ] + +def onie_boot_mode_set(mode): + if mode not in ONIE_BOOT_MODES: + raise ValueError("%s is not a valid onie boot mode." % mode) + + with OnlOnieBootContext() as ob: + subprocess.check_call("%s/onie/tools/bin/onie-boot-mode -o %s" % (ob.directory, mode), shell=True) + +def onie_fwpkg(arguments): + with OnlOnieBootContext() as ob: + subprocess.check_call("%s/onie/tools/bin/onie-fwpkg %s" % (ob.directory, arguments), shell=True) + +def boot_entry_set(index): + with OnlMountContextReadWrite("ONL-BOOT", logger=None) as ob: + subprocess.check_call("/usr/sbin/grub-set-default --boot-directory=%s %d" % (ob.directory, index), shell=True) + +def boot_onie(): + return boot_entry_set(1) + + + + + + From 877fff69e0e95856bed9ddc6bcf9bceb9ff4a2b1 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:30:39 +0000 Subject: [PATCH 102/255] Use the new onl-onie-boot-mode tool. 
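This relies on the onl.grub helpers introduced in the previous patch. A minimal sketch of the same flow from Python, assuming onl.grub is importable in the ONL runtime (the mode string is only an example):

    import onl.grub

    # Stage ONIE to run its uninstall workflow on the next boot...
    onl.grub.onie_boot_mode_set("uninstall")
    # ...and point the GRUB default at the ONIE chainload entry (index 1).
    onl.grub.boot_onie()

The onl-onie-boot-mode tool wraps this same pair of calls, adding an architecture check and an --onie-only option.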
--- .../all/vendor-config-onl/src/sbin/uninstall | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/sbin/uninstall b/packages/base/all/vendor-config-onl/src/sbin/uninstall index 8fe4742e..c76464d5 100755 --- a/packages/base/all/vendor-config-onl/src/sbin/uninstall +++ b/packages/base/all/vendor-config-onl/src/sbin/uninstall @@ -3,26 +3,13 @@ set -e uninstall_x86_64() { - # - # Set ONIE boot selection to uninstall - # - mkdir -p /mnt/onie-boot - mount -L ONIE-BOOT /mnt/onie-boot > /dev/null 2>&1 if [ "$1" = "factory" ]; then - /mnt/onie-boot/onie/tools/bin/onie-boot-mode -o uninstall + mode=uninstall else - /mnt/onie-boot/onie/tools/bin/onie-boot-mode -o install + mode=install fi - - umount /mnt/onie-boot - - # - # Select ONIE as the boot default - # - onl-mounts mount boot --rw - echo "set default=ONIE" >> /mnt/onl/boot/grub/grub.cfg - onl-mounts mount boot + onl-onie-boot-mode $mode } uninstall_uboot() From bca8be69c81c5c873f3abadc997ec99fcb4092ad Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:31:01 +0000 Subject: [PATCH 103/255] Convenience tool to set the onie-boot-mode and reboot once into onie. --- .../src/bin/onl-onie-boot-mode | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/bin/onl-onie-boot-mode diff --git a/packages/base/all/vendor-config-onl/src/bin/onl-onie-boot-mode b/packages/base/all/vendor-config-onl/src/bin/onl-onie-boot-mode new file mode 100755 index 00000000..a8283a2d --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/onl-onie-boot-mode @@ -0,0 +1,34 @@ +#!/usr/bin/python +############################################################ +import sys +import platform +import argparse +import onl.grub + +if platform.machine() != 'x86_64': + sys.stderr.write("This command can only be used on GRUB-based X86_64 architectures.") + sys.exit(1) + +ap = argparse.ArgumentParser("onl-onie-boot-mode") + +ap.add_argument("mode", choices=onl.grub.ONIE_BOOT_MODES) +ap.add_argument("--onie-only", action='store_true', help="Do not set ONIE boot menu option.") + +ops = ap.parse_args() + +onl.grub.onie_boot_mode_set(ops.mode) + +if not ops.onie_only: + onl.grub.boot_onie() + print "The system will boot into ONIE %s mode at the next restart." % ops.mode +else: + print "Mode %s will be selected the next time the system boots into ONIE." % ops.mode + + + + + + + + + From ce2d20f93b4e5b9b70011a9e9a3972842541c7fb Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:31:31 +0000 Subject: [PATCH 104/255] This is really what onl-mounts should be called. --- packages/base/all/vendor-config-onl/src/bin/onlfs | 3 +++ 1 file changed, 3 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/bin/onlfs diff --git a/packages/base/all/vendor-config-onl/src/bin/onlfs b/packages/base/all/vendor-config-onl/src/bin/onlfs new file mode 100755 index 00000000..7b05623f --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/onlfs @@ -0,0 +1,3 @@ +#!/usr/bin/python +from onl.mounts import OnlMountManager +OnlMountManager.main('onlfs') From 31055e67cea4a36185066367f2b4fcda33c8301d Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 19:31:56 +0000 Subject: [PATCH 105/255] - Support ONIE firmware upgrade packages. 
- Use the new onl.grub interface --- .../src/python/onl/upgrade/firmware.py | 8 ++++++-- .../src/python/onl/upgrade/ubase.py | 17 ++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py index e9a08088..90635fc6 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py @@ -25,8 +25,12 @@ class FirmwareUpgrade(ubase.BaseOnieUpgrade): self.load_manifest(os.path.join(sysconfig.upgrade.firmware.package.dir, "manifest.json")) def do_upgrade(self, forced=False): - self.install_onie_updater(sysconfig.upgrade.firmware.package.dir, - self.manifest['updater']) + if self.manifest.get('fwpkg', False): + self.onie_fwpkg_add(os.path.join(sysconfig.upgrade.firmware.package.dir, + self.manifest['updater'])) + else: + self.install_onie_updater(sysconfig.upgrade.firmware.package.dir, + self.manifest['updater']) self.initiate_onie_update() diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py index c5978629..90831700 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/ubase.py @@ -391,24 +391,23 @@ class BaseOnieUpgrade(BaseUpgrade): dst = os.path.join(self.ONIE_UPDATER_PATH, f) self.copyfile(src, dst) + def onie_fwpkg_add(self, pkg): + import onl.grub + onl.grub.onie_fwpkg("add %s" % pkg) + onl.grub.onie_fwpkg("show") def initiate_onie_update(self): self.logger.info("Initiating %s Update." % self.Name) + if self.arch == 'ppc': # Initiate update self.fw_setenv('onie_boot_reason', 'update') self.reboot() elif self.arch == 'x86_64': - OB = "/mnt/onie-boot" - self.mount(OB, label="ONIE-BOOT") - if os.system("/mnt/onie-boot/onie/tools/bin/onie-boot-mode -o update") != 0: - self.abort("Could not set ONIE Boot Mode to Update. Upgrade cannot continue.") - self.umount(OB) - - with OnlMountContextReadWrite("ONL-BOOT", logger=None): - with open("/mnt/onl/boot/grub/grub.cfg", "a") as f: - f.write("set default=ONIE\n") + import onl.grub + onl.grub.onie_boot_mode_set("update") + onl.grub.boot_onie() self.reboot() else: From aea86e872629811d9e7865b1ad708c99a5ca2460 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Thu, 17 Nov 2016 12:20:51 -0800 Subject: [PATCH 106/255] Added sample hook scripts to u-boot installer --- builds/any/installer/uboot/builds/Makefile | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/builds/any/installer/uboot/builds/Makefile b/builds/any/installer/uboot/builds/Makefile index b6073d5c..800463e4 100644 --- a/builds/any/installer/uboot/builds/Makefile +++ b/builds/any/installer/uboot/builds/Makefile @@ -10,8 +10,20 @@ endif include $(ONL)/make/versions/version-onl.mk INSTALLER_NAME=$(FNAME_PRODUCT_VERSION)_ONL-OS_$(FNAME_BUILD_ID)_$(UARCH)_$(BOOTMODE)_INSTALLER +MKINSTALLER_OPTS = \ + --arch $(ARCH) \ + --boot-config boot-config \ + --add-dir config \ + --fit onl-loader-fit:$(ARCH) onl-loader-fit.itb \ + --swi onl-swi:$(ARCH) \ + --preinstall-script $(ONL)/builds/any/installer/sample-preinstall.sh \ + --postinstall-script $(ONL)/builds/any/installer/sample-postinstall.sh \ + --preinstall-plugin $(ONL)/builds/any/installer/sample-preinstall.py \ + --postinstall-plugin $(ONL)/builds/any/installer/sample-postinstall.py \ + # THIS LINE INTENTIONALLY LEFT BLANK + __installer: - $(ONL)/tools/mkinstaller.py --arch $(ARCH) --boot-config boot-config --add-dir config --fit onl-loader-fit:$(ARCH) onl-loader-fit.itb --swi onl-swi:$(ARCH) --out $(INSTALLER_NAME) + $(ONL)/tools/mkinstaller.py $(MKINSTALLER_OPTS) --out $(INSTALLER_NAME) md5sum "$(INSTALLER_NAME)" | awk '{ print $$1 }' > "$(INSTALLER_NAME).md5sum" From 9a34feb98e67f5826439ae3d38a7b9a5b67690e2 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 17 Nov 2016 20:44:27 +0000 Subject: [PATCH 107/255] Latest --- sm/infra | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/infra b/sm/infra index 806c0828..b7974c19 160000 --- a/sm/infra +++ b/sm/infra @@ -1 +1 @@ -Subproject commit 806c082806e5f9991b7afdb43199e51d5118cff7 +Subproject commit b7974c19ed40c484f75974f4ba5975ba1ba9e1a7 From df84b39c276df5012efb44b46bbc7687d7805b2d Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Thu, 17 Nov 2016 13:16:16 -0800 Subject: [PATCH 108/255] Added docs --- builds/any/installer/sample-postinstall.py | 48 ++++++++++++++++++++++ builds/any/installer/sample-postinstall.sh | 34 +++++++++++++++ builds/any/installer/sample-preinstall.py | 38 +++++++++++++++++ builds/any/installer/sample-preinstall.sh | 30 ++++++++++++++ 4 files changed, 150 insertions(+) diff --git a/builds/any/installer/sample-postinstall.py b/builds/any/installer/sample-postinstall.py index 230013c9..1f5605d2 100644 --- a/builds/any/installer/sample-postinstall.py +++ b/builds/any/installer/sample-postinstall.py @@ -1,5 +1,53 @@ """sample-postinstall.py +Example Python script for post-install hooks. + +Add this as a postinstall hook to your installer via +the 'mkinstaller.py' command line: + +$ mkinstaller.py ... --postinstall-plugin sample-postinstall.py ... + +At install time, this script will + +1. be extracted into a temporary working directory +2. be imported as a module, in the same process as the installer + script + +Importing the module should not trigger any side-effects. + +At the appropriate time during the install (a chrooted invocation +of the installer Python script) will + +1. scrape the top-level plugin's namespace for subclasses of + onl.install.Plugin.Plugin. + Implementors should declare classes here + (inheriting from onl.install.Plugin.Plugin) to embed the plugin + functionality. +2. 
instantiate an instance of each class, with the installer + object initialized as the 'installer' attribute +3. invoke the 'run' method (which must be overridden by implementors) +4. invoke the 'shutdown' method (by default, a no-op) + +The 'run' method should return zero on success. In any other case, the +installer terminates. + +The post-install plugins are invoked after the installer is complete +and after the boot loader is updated. + +An exception to this is for proxy GRUB configurations. In that case, the +post-install plugins are invoked after the install is finished, but before +the boot loader has been updated. + +At the time the post-install plugin is invoked, none of the +filesystems are mounted. If the implementor needs to manipulate the +disk, the filesystems should be re-mounted temporarily with +e.g. MountContext. The OnlMountContextReadWrite object and their +siblings won't work here because the mtab.yml file is not populated +within the loader environment. + +When using MountContxt, the system state in the installer object can help +(self.installer.blkidParts in particular). + """ import onl.install.Plugin diff --git a/builds/any/installer/sample-postinstall.sh b/builds/any/installer/sample-postinstall.sh index 0bc0938c..36a0da25 100644 --- a/builds/any/installer/sample-postinstall.sh +++ b/builds/any/installer/sample-postinstall.sh @@ -1,4 +1,38 @@ #!/bin/sh +# +###################################################################### +# +# sample-postinstall.sh +# +# Example script for post-install hooks. +# +# Add this as a postinstall hook to your installer via +# the 'mkinstaller.py' command line: +# +# $ mkinstaller.py ... --postinstall-script sample-postinstall.sh ... +# +# At install time, this script will +# +# 1. be extracted into the working directory with the other installer +# collateral +# 2. have the execute bit set +# 3. run in-place with the installer chroot directory passed +# as the first command line parameter +# +# If the script fails (returns a non-zero exit code) then +# the install is aborted. +# +# This script is executed using the ONIE runtime (outside the chroot), +# after the actual installer (chrooted Python script) has finished. +# +# This script is run after the postinstall actions (e.g. proxy GRUB +# commands) +# +# At the time the script is run, the installer environment (chroot) +# is fully prepared, including filesystem mount-points. +# That is, the chroot mount points have not been unmounted yet. +# +###################################################################### rootdir=$1; shift diff --git a/builds/any/installer/sample-preinstall.py b/builds/any/installer/sample-preinstall.py index 77148225..09b2b524 100644 --- a/builds/any/installer/sample-preinstall.py +++ b/builds/any/installer/sample-preinstall.py @@ -1,5 +1,43 @@ """sample-preinstall.py +Example Python script for pre-install hooks. + +Add this as a preinstall hook to your installer via +the 'mkinstaller.py' command line: + +$ mkinstaller.py ... --preinstall-plugin sample-preinstall.py ... + +At install time, this script will + +1. be extracted into a temporary working directory +2. be imported as a module, in the same process as the installer + script + +Importing the module should not trigger any side-effects. + +At the appropriate time during the install (a chrooted invocation +of the installer Python script) will + +1. scrape the top-level plugin's namespace for subclasses of + onl.install.Plugin.Plugin. 
+ Implementors should declare classes here + (inheriting from onl.install.Plugin.Plugin) to embed the plugin + functionality. +2. instantiate an instance of each class, with the installer + object initialized as the 'installer' attribute +3. invoke the 'run' method (which must be overridden by implementors) +4. invoke the 'shutdown' method (by default, a no-op) + +The 'run' method should return zero on success. In any other case, the +installer terminates. + +The 'installer' object has a handle onto the installer ZIP archive +(self.installer.zf) but otherwise the install has not been +started. That is, the install disk has not been +prepped/initialized/scanned yet. As per the ONL installer API, the +installer starts with *no* filesystems mounted, not even the ones from +a prior install. + """ import onl.install.Plugin diff --git a/builds/any/installer/sample-preinstall.sh b/builds/any/installer/sample-preinstall.sh index daf128a6..13532aae 100644 --- a/builds/any/installer/sample-preinstall.sh +++ b/builds/any/installer/sample-preinstall.sh @@ -1,4 +1,34 @@ #!/bin/sh +# +###################################################################### +# +# sample-preinstall.sh +# +# Example script for pre-install hooks. +# +# Add this as a preinstall hook to your installer via +# the 'mkinstaller.py' command line: +# +# $ mkinstaller.py ... --preinstall-script sample-preinstall.sh ... +# +# At install time, this script will +# +# 1. be extracted into the working directory with the other installer +# collateral +# 2. have the execute bit set +# 3. run in-place with the installer chroot directory passed +# as the first command line parameter +# +# If the script fails (returns a non-zero exit code) then +# the install is aborted. +# +# This script is executed using the ONIE runtime (outside the chroot), +# before the actual installer (chrooted Python script) +# +# At the time the script is run, the installer environment (chroot) +# has been fully prepared, including filesystem mount-points. 
+# +###################################################################### rootdir=$1; shift From b2fa0f4199cd9830850cffedb6ed1d49e3c5e5aa Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Thu, 17 Nov 2016 14:32:02 -0800 Subject: [PATCH 109/255] support onlp_sfpi_dev_read[write] api in as5712, as5812-54x --- .../onlp/builds/src/module/src/sfpi.c | 30 ++++++++++++++++++- .../onlp/builds/src/module/src/sfpi.c | 30 ++++++++++++++++++- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/sfpi.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/sfpi.c index 45d3ae04..560f5762 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/sfpi.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/sfpi.c @@ -30,7 +30,7 @@ #include #include #include - +#include #include "platform_lib.h" #define MAX_SFP_PATH 64 @@ -347,6 +347,34 @@ onlp_sfpi_dom_read(int port, uint8_t data[256]) return ONLP_STATUS_OK; } +int +onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_readb(bus, devaddr, addr, ONLP_I2C_F_FORCE); +} + +int +onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_writeb(bus, devaddr, addr, value, ONLP_I2C_F_FORCE); +} + +int +onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_readw(bus, devaddr, addr, ONLP_I2C_F_FORCE); +} + +int +onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_writew(bus, devaddr, addr, value, ONLP_I2C_F_FORCE); +} + int onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) { diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/sfpi.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/sfpi.c index fc1000b2..6cfa29a9 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/sfpi.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/sfpi.c @@ -30,7 +30,7 @@ #include #include #include - +#include #include "platform_lib.h" #define MAX_SFP_PATH 64 @@ -347,6 +347,34 @@ onlp_sfpi_dom_read(int port, uint8_t data[256]) return ONLP_STATUS_OK; } +int +onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_readb(bus, devaddr, addr, ONLP_I2C_F_FORCE); +} + +int +onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_writeb(bus, devaddr, addr, value, ONLP_I2C_F_FORCE); +} + +int +onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_readw(bus, devaddr, addr, ONLP_I2C_F_FORCE); +} + +int +onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + int bus = front_port_to_cpld_mux_index(port); + return onlp_i2c_writew(bus, devaddr, addr, value, ONLP_I2C_F_FORCE); +} + int onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) { From b39ac643551ba099fe1f7e7cecd78471f70b61dc Mon Sep 17 00:00:00 2001 From: 
Jeffrey Townsend Date: Fri, 18 Nov 2016 17:27:22 +0000 Subject: [PATCH 110/255] Utility for resetting the boot swi. --- packages/base/all/vendor-config-onl/src/bin/onlswi | 5 +++++ 1 file changed, 5 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/bin/onlswi diff --git a/packages/base/all/vendor-config-onl/src/bin/onlswi b/packages/base/all/vendor-config-onl/src/bin/onlswi new file mode 100755 index 00000000..fb965581 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/onlswi @@ -0,0 +1,5 @@ +#!/bin/bash +############################################################ +onlfs mount images --rw +(cd /mnt/onl/images && rm *.swi && wget $1) +onlfs mount images From 5c344950969f3b19f42396bd1684fcd08e3f6b23 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 21 Nov 2016 21:21:41 +0000 Subject: [PATCH 111/255] [SWL-3475] Fix Firmware upgrade after initial ONIE upgrade. The ONIE firware upgrade staging area is not initialized properly if we don't first boot into ONIE after the initial upgrade. This causes our workflow of ONIE then CPLD upgrade to fail the first time. The onie-fwpkg script does not report an error about the staging area which is why our code is not detecting the failure to install the firmware package. --- .../all/vendor-config-onl/src/python/onl/grub/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py index 2ef7373c..8b2501ea 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/grub/__init__.py @@ -1,5 +1,6 @@ from onl.mounts import OnlOnieBootContext, OnlMountContextReadWrite import subprocess +import os ONIE_BOOT_MODES = [ 'install', 'rescue', @@ -17,8 +18,15 @@ def onie_boot_mode_set(mode): with OnlOnieBootContext() as ob: subprocess.check_call("%s/onie/tools/bin/onie-boot-mode -o %s" % (ob.directory, mode), shell=True) +def _makedirs(d): + if not os.path.exists(d): + os.makedirs(d) + def onie_fwpkg(arguments): with OnlOnieBootContext() as ob: + # This is necessary if we've upgraded ONIE but haven't booted into it yet... + _makedirs("%s/onie/update/pending" % ob.directory) + _makedirs("%s/onie/update/attempts" % ob.directory) subprocess.check_call("%s/onie/tools/bin/onie-fwpkg %s" % (ob.directory, arguments), shell=True) def boot_entry_set(index): From 711b822dff6fe43bf078633ad68883ced8f8cd73 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 22 Nov 2016 07:29:04 -0800 Subject: [PATCH 112/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 70859b05..70842173 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 70859b05b6aaed7660b6c5b5f84bd7bbd8bfd5d0 +Subproject commit 7084217321c753815743fb46904b598ec65670bb From c825d51bc8e6c8dbb24dcc35b755e20409b80487 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 22 Nov 2016 22:21:41 +0000 Subject: [PATCH 113/255] Common architecture build makefiles. 
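With this change each per-architecture Makefile reduces to a one-line include of make/arch-build.mk, which keeps the rootfs/swi/installer subdirectory list in a single place and adds a convenience 'swi' target that rebuilds only the rootfs and SWI (e.g. "make -C builds/amd64 swi" in the usual ONL build environment) without re-running the installer build.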
--- builds/amd64/Makefile | 3 +-- builds/arm64/Makefile | 3 +-- builds/armel/Makefile | 3 +-- builds/powerpc/Makefile | 3 +-- make/arch-build.mk | 9 +++++++++ 5 files changed, 13 insertions(+), 8 deletions(-) create mode 100644 make/arch-build.mk diff --git a/builds/amd64/Makefile b/builds/amd64/Makefile index 92917844..435a9dac 100644 --- a/builds/amd64/Makefile +++ b/builds/amd64/Makefile @@ -1,2 +1 @@ -DIRECTORIES := rootfs swi installer -include $(ONL)/make/subdirs.mk +include $(ONL)/make/arch-build.mk diff --git a/builds/arm64/Makefile b/builds/arm64/Makefile index 92917844..435a9dac 100644 --- a/builds/arm64/Makefile +++ b/builds/arm64/Makefile @@ -1,2 +1 @@ -DIRECTORIES := rootfs swi installer -include $(ONL)/make/subdirs.mk +include $(ONL)/make/arch-build.mk diff --git a/builds/armel/Makefile b/builds/armel/Makefile index 92917844..435a9dac 100644 --- a/builds/armel/Makefile +++ b/builds/armel/Makefile @@ -1,2 +1 @@ -DIRECTORIES := rootfs swi installer -include $(ONL)/make/subdirs.mk +include $(ONL)/make/arch-build.mk diff --git a/builds/powerpc/Makefile b/builds/powerpc/Makefile index 92917844..435a9dac 100644 --- a/builds/powerpc/Makefile +++ b/builds/powerpc/Makefile @@ -1,2 +1 @@ -DIRECTORIES := rootfs swi installer -include $(ONL)/make/subdirs.mk +include $(ONL)/make/arch-build.mk diff --git a/make/arch-build.mk b/make/arch-build.mk new file mode 100644 index 00000000..5dc3d5cc --- /dev/null +++ b/make/arch-build.mk @@ -0,0 +1,9 @@ +DIRECTORIES := rootfs swi installer +include $(ONL)/make/subdirs.mk + +.PHONY: swi + +swi: + $(MAKE) -C rootfs + $(MAKE) -C swi + From a752807be4d0c7487d61205d6199d8294901bbae Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 27 Nov 2016 16:17:55 +0000 Subject: [PATCH 114/255] Common Loader boot-config management and manipulation. 
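The same keys can also be managed from Python. A minimal sketch, assuming the ONL-BOOT partition is present and using placeholder addresses:

    from onl.bootconfig import OnlBootConfigNet

    bc = OnlBootConfigNet()
    bc.read()                        # default: /mnt/onl/boot/boot-config
    bc.netip_set("192.168.1.10")     # static settings clear any NETAUTO=dhcp key
    bc.netmask_set("255.255.255.0")
    bc.netgw_set("192.168.1.1")
    bc.write()                       # validates, then rewrites boot-config in place

This is equivalent to "onl-boot-config --ip 192.168.1.10 --netmask 255.255.255.0 --gateway 192.168.1.1".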
--- .../vendor-config-onl/src/bin/onl-boot-config | 4 + .../src/python/onl/bootconfig/__init__.py | 215 ++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/bin/onl-boot-config create mode 100755 packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py diff --git a/packages/base/all/vendor-config-onl/src/bin/onl-boot-config b/packages/base/all/vendor-config-onl/src/bin/onl-boot-config new file mode 100755 index 00000000..63eed39c --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/onl-boot-config @@ -0,0 +1,4 @@ +#!/usr/bin/python +############################################################ +from onl.bootconfig import OnlBootConfigNet +OnlBootConfigNet().main("onl-boot-config") diff --git a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py new file mode 100755 index 00000000..f43a1610 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +############################################################ +import os +import sys +import netaddr + +class OnlBootConfig(object): + BOOT_CONFIG_DEFAULT='/mnt/onl/boot/boot-config' + + def __init__(self): + self.keys = {} + self.__classmethod("init") + + def _readf(self, fname): + with open(fname) as f: + for line in f.readlines(): + (k,d,v) = line.partition('=') + if d == '=': + self.keys[k] = v.strip() + self._original = self.keys.copy() + + def read(self, bc=None): + if bc is None: + bc = self.BOOT_CONFIG_DEFAULT + self._readf(bc) + + def set(self, k, v): + self.keys[k] = v + + def get(self, k, d=None): + return self.keys.get(k, d) + + def delete(self, k): + self.keys.pop(k, None) + + def _writeh(self, handle): + for (k, v) in self.keys.iteritems(): + handle.write("%s=%s\n" % (k, v)) + + def _writef(self, f): + with open(f, "w") as f: + self._writeh(f) + + def write(self, dst=None): + self.validate() + if dst: + self._writef(dst) + else: + from onl.mounts import OnlMountContextReadWrite + with OnlMountContextReadWrite("ONL-BOOT", logger=None): + self._writef(self.BOOT_CONFIG_DEFAULT) + + + def __classmethod(self, name, *args): + for attr in dir(self): + if attr.endswith("__%s" % name): + getattr(self, attr)(*args) + + def validate(self): + return self.__classmethod("validate") + + def argparse_init(self, ap): + ap.add_argument("--read", help="Read the given file instead of the default [ %s ]." % OnlBootConfig.BOOT_CONFIG_DEFAULT) + ap.add_argument("--write", help="Write the given file instead of the default [ %s ]." % OnlBootConfig.BOOT_CONFIG_DEFAULT) + ap.add_argument("--show", help="Show the configuration.", action='store_true') + self.__classmethod("argparse_init", ap) + ap.add_argument("--dry", help='Show changes but do not update.', action='store_true') + + def argparse_process(self, ops): + self.read(ops.read) + if(ops.show): + self._writeh(sys.stdout) + return self.__classmethod("argparse_process", ops) + + def argparse_write(self, ops): + try: + if ops.dry: + print self.keys + self.validate() + else: + self.write(ops.write) + if not ops.write and self.keys != self._original: + print "You must reboot the switch before these changes will take affect." + + except Exception, e: + print e + print "The boot configuration has not been changed." 
+ + + def main(self, name): + import argparse + ap = argparse.ArgumentParser("name") + self.argparse_init(ap) + ops = ap.parse_args() + self.argparse_process(ops) + self.argparse_write(ops) + + +class OnlBootConfigNet(OnlBootConfig): + def __init(self): + self.netrequired = False + + def netauto_set(self): + self.delete('NETIP') + self.delete('NETMASK') + self.delete('NETGW') + self.set('NETAUTO', 'dhcp') + + def netip_set(self, addr): + self.delete('NETAUTO') + self.keys['NETIP'] = addr + + def netmask_set(self, mask): + self.delete('NETAUTO') + self.keys['NETMASK'] = mask + + def netgw_set(self, gw): + self.delete('NETAUTO') + self.keys['NETGW'] = gw + + def __validate(self): + if 'NETAUTO' not in self.keys: + + netip = self.keys.get('NETIP', None) + if netip: + if not self.is_ip_address(netip): + raise ValueError("NETIP=%s is not a valid ip-address" % (netup)) + elif self.netrequired: + raise ValueError("No IP configuration set for the management interface.") + + netmask = self.keys.get('NETMASK', None) + if netmask: + if not self.is_netmask(netmask): + raise ValueError("NETMASK=%s is not a valid netmask." % (netmask)) + elif self.netrequired: + raise ValueError("No Netmask configured for the management interface.") + + netgw = self.keys.get('NETGW', None) + if netgw: + if not self.is_ip_address(netgw): + raise ValueError("NETGW=%s is not a valid ip-address." % (netgw)) + elif self.netrequired: + raise ValueError("No gateway configured for the management interface.") + + if netip and netmask and netgw: + net = netaddr.IPNetwork("%s/%s" % (netip, netmask)) + if netaddr.IPAddress(netgw) not in net: + raise ValueError("Gateway provided is not within the management network %s" % net) + elif netip or netmask or netgw: + raise ValueError("Incomplete static network configuration. NETIP, NETMASK, and NETGW must all be set.") + + elif self.keys['NETAUTO'] != 'dhcp': + raise ValueError("The NETAUTO value '%s' is invalid." % self.keys['NETAUTO']) + + if 'NETDEV' not in self.keys: + self.keys['NETDEV'] = 'ma1' + + return True + + @staticmethod + def is_ip_address(value): + try: + netaddr.IPAddress(value) + return value + except (netaddr.core.AddrFormatError, ValueError): + return None + + @staticmethod + def is_netmask(value): + try: + if not netaddr.IPAddress(value).is_netmask(): + return False + return value + except (netaddr.core.AddrFormatError, ValueError): + return False + + @staticmethod + def argparse_type_is_ip_address(value): + if not OnlBootConfigNet.is_ip_address(value): + import argparse + raise argparse.ArgumentTypeError("%s is not a valid address." % value) + return value + + @staticmethod + def argparse_type_is_netmask(value): + if not OnlBootConfigNet.is_netmask(value): + import argparse + raise argparse.ArgumentTypeError("%s is not a valid netmask." 
% value) + return value + + def __argparse_init(self, ap): + ap.add_argument("--dhcp", action='store_true', help="Use DHCP on the management interface.") + ap.add_argument("--ip", help='Set static IP address for the management interface.', type=OnlBootConfigNet.argparse_type_is_ip_address) + ap.add_argument("--netmask", help='Set the static netmask for the management interface.', type=OnlBootConfigNet.argparse_type_is_netmask) + ap.add_argument("--gateway", help='Set the gateway address.', type=OnlBootConfigNet.argparse_type_is_ip_address) + + + def __argparse_process(self, ops): + if ops.dhcp: + self.netauto_set() + + if ops.ip: + self.netip_set(ops.ip) + + if ops.netmask: + self.netmask_set(ops.netmask) + + if ops.gateway: + self.netgw_set(ops.gateway) + + +if __name__ == '__main__': + bc = OnlBootConfigNet() + bc.main("onl-boot-config") + From 05f4490b67bada7d91d64a861357f592294b6c75 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 28 Nov 2016 08:34:59 -0800 Subject: [PATCH 115/255] - Set NETREQUIRED as a class variable for easier overrides. - Allow NETAUTO=up --- .../src/python/onl/bootconfig/__init__.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py index f43a1610..61a00774 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py @@ -97,8 +97,8 @@ class OnlBootConfig(object): class OnlBootConfigNet(OnlBootConfig): - def __init(self): - self.netrequired = False + + NET_REQUIRED = False def netauto_set(self): self.delete('NETIP') @@ -125,21 +125,21 @@ class OnlBootConfigNet(OnlBootConfig): if netip: if not self.is_ip_address(netip): raise ValueError("NETIP=%s is not a valid ip-address" % (netup)) - elif self.netrequired: + elif self.NET_REQUIRED: raise ValueError("No IP configuration set for the management interface.") netmask = self.keys.get('NETMASK', None) if netmask: if not self.is_netmask(netmask): raise ValueError("NETMASK=%s is not a valid netmask." % (netmask)) - elif self.netrequired: + elif self.NET_REQUIRED: raise ValueError("No Netmask configured for the management interface.") netgw = self.keys.get('NETGW', None) if netgw: if not self.is_ip_address(netgw): raise ValueError("NETGW=%s is not a valid ip-address." % (netgw)) - elif self.netrequired: + elif self.NET_REQUIRED: raise ValueError("No gateway configured for the management interface.") if netip and netmask and netgw: @@ -149,8 +149,10 @@ class OnlBootConfigNet(OnlBootConfig): elif netip or netmask or netgw: raise ValueError("Incomplete static network configuration. NETIP, NETMASK, and NETGW must all be set.") - elif self.keys['NETAUTO'] != 'dhcp': + elif self.keys['NETAUTO'] not in ['dhcp', 'up']: raise ValueError("The NETAUTO value '%s' is invalid." % self.keys['NETAUTO']) + elif self.keys['NETAUTO'] == 'up' && self.NET_REQUIRED: + raise ValueError("NETAUTO is 'up' but non-local networking is required.") if 'NETDEV' not in self.keys: self.keys['NETDEV'] = 'ma1' From d7958005f3938a9d8b1742379e2405955d561272 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Mon, 28 Nov 2016 12:06:22 -0800 Subject: [PATCH 116/255] Updated plugin api - single run() method is called with a mode argument - plugin objects are persistent (re-entrant) for the lifetime of the install - installer grovels the filesystem and/or zip file for plugin files --- builds/any/installer/grub/builds/Makefile | 4 +- builds/any/installer/sample-postinstall.py | 8 ++- builds/any/installer/sample-preinstall.py | 8 ++- builds/any/installer/uboot/builds/Makefile | 4 +- .../src/python/onl/install/BaseInstall.py | 71 ++++++++++++------- .../src/python/onl/install/Plugin.py | 18 ++++- tools/mkinstaller.py | 25 ++++--- 7 files changed, 92 insertions(+), 46 deletions(-) diff --git a/builds/any/installer/grub/builds/Makefile b/builds/any/installer/grub/builds/Makefile index 933f727f..cd392e1a 100644 --- a/builds/any/installer/grub/builds/Makefile +++ b/builds/any/installer/grub/builds/Makefile @@ -18,8 +18,8 @@ MKINSTALLER_OPTS = \ --swi onl-swi:$(ARCH) \ --preinstall-script $(ONL)/builds/any/installer/sample-preinstall.sh \ --postinstall-script $(ONL)/builds/any/installer/sample-postinstall.sh \ - --preinstall-plugin $(ONL)/builds/any/installer/sample-preinstall.py \ - --postinstall-plugin $(ONL)/builds/any/installer/sample-postinstall.py \ + --plugin $(ONL)/builds/any/installer/sample-preinstall.py \ + --plugin $(ONL)/builds/any/installer/sample-postinstall.py \ # THIS LINE INTENTIONALLY LEFT BLANK __installer: diff --git a/builds/any/installer/sample-postinstall.py b/builds/any/installer/sample-postinstall.py index 1f5605d2..b7049e6b 100644 --- a/builds/any/installer/sample-postinstall.py +++ b/builds/any/installer/sample-postinstall.py @@ -54,6 +54,10 @@ import onl.install.Plugin class Plugin(onl.install.Plugin.Plugin): - def run(self): - self.log.info("hello from postinstall plugin") + def run(self, mode): + + if mode == self.PLUGIN_POSTINSTALL: + self.log.info("hello from postinstall plugin") + return 0 + return 0 diff --git a/builds/any/installer/sample-preinstall.py b/builds/any/installer/sample-preinstall.py index 09b2b524..704d9a6c 100644 --- a/builds/any/installer/sample-preinstall.py +++ b/builds/any/installer/sample-preinstall.py @@ -44,6 +44,10 @@ import onl.install.Plugin class Plugin(onl.install.Plugin.Plugin): - def run(self): - self.log.info("hello from preinstall plugin") + def run(self, mode): + + if mode == self.PLUGIN_PREINSTALL: + self.log.info("hello from preinstall plugin") + return 0 + return 0 diff --git a/builds/any/installer/uboot/builds/Makefile b/builds/any/installer/uboot/builds/Makefile index 800463e4..dc3bc4b8 100644 --- a/builds/any/installer/uboot/builds/Makefile +++ b/builds/any/installer/uboot/builds/Makefile @@ -18,8 +18,8 @@ MKINSTALLER_OPTS = \ --swi onl-swi:$(ARCH) \ --preinstall-script $(ONL)/builds/any/installer/sample-preinstall.sh \ --postinstall-script $(ONL)/builds/any/installer/sample-postinstall.sh \ - --preinstall-plugin $(ONL)/builds/any/installer/sample-preinstall.py \ - --postinstall-plugin $(ONL)/builds/any/installer/sample-postinstall.py \ + --plugin $(ONL)/builds/any/installer/sample-preinstall.py \ + --plugin $(ONL)/builds/any/installer/sample-postinstall.py \ # THIS LINE INTENTIONALLY LEFT BLANK __installer: diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index b9528b4d..77df42fa 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ 
b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -14,6 +14,7 @@ import yaml import zipfile import shutil import imp +import fnmatch, glob from InstallUtils import SubprocessMixin from InstallUtils import MountContext, BlkidParser, PartedParser @@ -90,6 +91,9 @@ class Base: self.zf = None # zipfile handle to installer archive + self.plugins = [] + # dynamically-detected plugins + def run(self): self.log.error("not implemented") return 1 @@ -99,6 +103,11 @@ class Base: return 1 def shutdown(self): + + plugins, self.plugins = self.plugins, [] + for plugin in plugins: + plugin.shutdown() + zf, self.zf = self.zf, None if zf: zf.close() @@ -415,38 +424,27 @@ class Base: return 0 - def preinstall(self): - return self.runPlugin("preinstall.py") - - def postinstall(self): - return self.runPlugin("postinstall.py") - - def runPluginFile(self, pyPath): + def loadPluginsFromFile(self, pyPath): + self.log.info("loading plugins from %s", pyPath) with open(pyPath) as fd: sfx = ('.py', 'U', imp.PY_SOURCE,) mod = imp.load_module("plugin", fd, pyPath, sfx) for attr in dir(mod): klass = getattr(mod, attr) if isinstance(klass, type) and issubclass(klass, Plugin): - self.log.info("%s: running plugin %s", pyPath, attr) + self.log.info("%s: found plugin %s", pyPath, attr) plugin = klass(self) - try: - code = plugin.run() - except: - self.log.exception("plugin failed") - code = 1 - plugin.shutdown() - if code: return code + self.plugins.append(plugin) - return 0 + def loadPlugins(self): - def runPlugin(self, basename): + pat = os.path.join(self.im.installerConf.installer_dir, "plugins", "*.py") + for src in glob.glob(pat): + self.loadPluginsFromFile(src) - src = os.path.join(self.im.installerConf.installer_dir, basename) - if os.path.exists(src): - return self.runPluginFile(src) - - if basename in self.zf.namelist(): + pat = "plugins/*.py" + for basename in self.zf.namelist(): + if not fnmatch.fnmatch(basename, pat): continue try: src = None with self.zf.open(basename, "r") as rfd: @@ -454,11 +452,24 @@ class Base: suffix=".py") with os.fdopen(wfno, "w") as wfd: shutil.copyfileobj(rfd, wfd) - return self.runPluginFile(src) + self.loadPluginsFromFile(src) finally: if src and os.path.exists(src): os.unlink(src) + return 0 + + def runPlugins(self, mode): + self.log.info("running plugins: %s", mode) + for plugin in self.plugins: + try: + code = plugin.run(mode) + except: + self.log.exception("plugin failed") + code = 1 + if code: return code + return 0 + GRUB_TPL = """\ serial %(serial)s terminal_input serial @@ -654,7 +665,10 @@ class GrubInstaller(SubprocessMixin, Base): self.im.installerConf.installer_zip) self.zf = zipfile.ZipFile(p) - code = self.preinstall() + code = self.loadPlugins() + if code: return code + + code = self.runPlugins(Plugin.PLUGIN_PREINSTALL) if code: return code code = self.findGpt() @@ -712,7 +726,7 @@ class GrubInstaller(SubprocessMixin, Base): code = self.installGrub() if code: return code - code = self.postinstall() + code = self.runPlugins(Plugin.PLUGIN_POSTINSTALL) if code: return code self.log.info("ONL loader install successful.") @@ -896,7 +910,10 @@ class UbootInstaller(SubprocessMixin, Base): self.im.installerConf.installer_zip) self.zf = zipfile.ZipFile(p) - code = self.preinstall() + code = self.loadPlugins() + if code: return code + + code = self.runPlugins(Plugin.PLUGIN_PREINSTALL) if code: return code code = self.assertUnmounted() @@ -968,7 +985,7 @@ class UbootInstaller(SubprocessMixin, Base): code = self.installUbootEnv() if code: return code - 
code = self.postinstall() + code = self.runPlugins(Plugin.PLUGIN_POSTINSTALL) if code: return code return 0 diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py b/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py index f1e97713..e854cece 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py @@ -5,13 +5,25 @@ Base class for installer plugins. class Plugin(object): + PLUGIN_PREINSTALL = "preinstall" + PLUGIN_POSTINSTALL = "postinstall" + def __init__(self, installer): self.installer = installer self.log = self.installer.log.getChild("plugin") - def run(self): - self.log.warn("not implemented") - return 0 + def run(self, mode): + + if mode == self.PLUGIN_PREINSTALL: + self.log.warn("pre-install plugin not implemented") + return 0 + + if mode == self.PLUGIN_POSTINSTALL: + self.log.warn("post-install plugin not implemented") + return 0 + + self.log.warn("invalid plugin mode %s", repr(mode)) + return 1 def shutdown(self): pass diff --git a/tools/mkinstaller.py b/tools/mkinstaller.py index 6348a252..1d1375c2 100755 --- a/tools/mkinstaller.py +++ b/tools/mkinstaller.py @@ -191,10 +191,8 @@ if __name__ == '__main__': ap.add_argument("--postinstall-script", help="Specify a preinstall script (runs after installer)") - ap.add_argument("--preinstall-plugin", - help="Specify a preinstall plugin (runs from within the installer chroot)") - ap.add_argument("--postinstall-plugin", - help="Specify a postinstall plugin (runs from within the installer chroot)") + ap.add_argument("--plugin", action='append', + help="Specify a Python plugin (runs from within the installer chroot)") ops = ap.parse_args() installer = InstallerShar(ops.arch, ops.work_dir) @@ -235,15 +233,26 @@ if __name__ == '__main__': if not os.path.exists(hookdir): os.makedirs(hookdir) + plugindir = os.path.join(installer.work_dir, "tmp/plugins") + if not os.path.exists(plugindir): + os.makedirs(plugindir) + if ops.preinstall_script: installer.add_file_as(ops.preinstall_script, "preinstall.sh") if ops.postinstall_script: installer.add_file_as(ops.postinstall_script, "postinstall.sh") - if ops.preinstall_plugin: - installer.add_file_as(ops.preinstall_plugin, "preinstall.py") - if ops.postinstall_plugin: - installer.add_file_as(ops.postinstall_plugin, "postinstall.py") + for plugin in ops.plugin: + basename = os.path.split(plugin)[1] + basename = os.path.splitext(basename)[0] + dst = tempfile.mktemp(dir=plugindir, + prefix=basename+'-', + suffix='.py') + shutil.copy(plugin, dst) + + l = os.listdir(plugindir) + if l: + installer.add_dir(plugindir) iname = os.path.abspath(ops.out) installer.build(iname) From 6448819434e2e1acf3597aeeaca5736d7fec5e34 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 28 Nov 2016 13:31:34 -0800 Subject: [PATCH 117/255] Updated plugin api docs --- builds/any/installer/sample-postinstall.py | 8 ++++++++ builds/any/installer/sample-preinstall.py | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/builds/any/installer/sample-postinstall.py b/builds/any/installer/sample-postinstall.py index b7049e6b..cf1db0b4 100644 --- a/builds/any/installer/sample-postinstall.py +++ b/builds/any/installer/sample-postinstall.py @@ -26,6 +26,8 @@ of the installer Python script) will 2. instantiate an instance of each class, with the installer object initialized as the 'installer' attribute 3. 
invoke the 'run' method (which must be overridden by implementors) + For a post-install plugin, the 'mode' argument is set to + PLUGIN_POSTINSTALL. 4. invoke the 'shutdown' method (by default, a no-op) The 'run' method should return zero on success. In any other case, the @@ -45,6 +47,12 @@ e.g. MountContext. The OnlMountContextReadWrite object and their siblings won't work here because the mtab.yml file is not populated within the loader environment. +A post-install plugin should execute any post-install actions when +'mode' is set to PLUGIN_POSTINSTALL. If 'mode' is set to any other +value, the plugin should ignore it and return zero. The plugin run() +method is invoked multiple times during the installer with different +values of 'mode'. The 'shutdown()' method is called only once. + When using MountContxt, the system state in the installer object can help (self.installer.blkidParts in particular). diff --git a/builds/any/installer/sample-preinstall.py b/builds/any/installer/sample-preinstall.py index 704d9a6c..cd29c2dc 100644 --- a/builds/any/installer/sample-preinstall.py +++ b/builds/any/installer/sample-preinstall.py @@ -26,6 +26,8 @@ of the installer Python script) will 2. instantiate an instance of each class, with the installer object initialized as the 'installer' attribute 3. invoke the 'run' method (which must be overridden by implementors) + For a pre-install plugin, the 'mode' argument is set to + PLUGIN_PREINSTALL. 4. invoke the 'shutdown' method (by default, a no-op) The 'run' method should return zero on success. In any other case, the @@ -38,6 +40,12 @@ prepped/initialized/scanned yet. As per the ONL installer API, the installer starts with *no* filesystems mounted, not even the ones from a prior install. +A pre-install plugin should execute any pre-install actions when +'mode' is set to PLUGIN_PREINSTALL. If 'mode' is set to any other +value, the plugin should ignore it and return zero. The plugin run() +method is invoked multiple times during the installer with different +values of 'mode'. The 'shutdown()' method is called only once. + """ import onl.install.Plugin From ee551d596835ca1439b23aaade35f2037d23109b Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Mon, 28 Nov 2016 13:34:08 -0800 Subject: [PATCH 118/255] Updated plugin api docs --- builds/any/installer/sample-postinstall.py | 2 +- builds/any/installer/sample-preinstall.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builds/any/installer/sample-postinstall.py b/builds/any/installer/sample-postinstall.py index cf1db0b4..1efd4aa3 100644 --- a/builds/any/installer/sample-postinstall.py +++ b/builds/any/installer/sample-postinstall.py @@ -5,7 +5,7 @@ Example Python script for post-install hooks. Add this as a postinstall hook to your installer via the 'mkinstaller.py' command line: -$ mkinstaller.py ... --postinstall-plugin sample-postinstall.py ... +$ mkinstaller.py ... --plugin sample-postinstall.py ... At install time, this script will diff --git a/builds/any/installer/sample-preinstall.py b/builds/any/installer/sample-preinstall.py index cd29c2dc..b0e1c114 100644 --- a/builds/any/installer/sample-preinstall.py +++ b/builds/any/installer/sample-preinstall.py @@ -5,7 +5,7 @@ Example Python script for pre-install hooks. Add this as a preinstall hook to your installer via the 'mkinstaller.py' command line: -$ mkinstaller.py ... --preinstall-plugin sample-preinstall.py ... +$ mkinstaller.py ... --plugin sample-preinstall.py ... 
At install time, this script will From f64d0ff258fbe09cdee18609d49d6465306c27a0 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 29 Nov 2016 17:39:13 +0000 Subject: [PATCH 119/255] Allow subclasses to implement mode-methods. --- .../all/vendor-config-onl/src/python/onl/install/Plugin.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py b/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py index e854cece..70e8a870 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/Plugin.py @@ -14,6 +14,9 @@ class Plugin(object): def run(self, mode): + if hasattr(self, mode): + return getattr(self, mode)() + if mode == self.PLUGIN_PREINSTALL: self.log.warn("pre-install plugin not implemented") return 0 @@ -27,3 +30,4 @@ class Plugin(object): def shutdown(self): pass + From f10add085ebde4ac91059831733c6809769bae22 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 29 Nov 2016 17:39:36 +0000 Subject: [PATCH 120/255] Plugins are optional. --- tools/mkinstaller.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/mkinstaller.py b/tools/mkinstaller.py index 1d1375c2..ac4dc432 100755 --- a/tools/mkinstaller.py +++ b/tools/mkinstaller.py @@ -242,13 +242,14 @@ if __name__ == '__main__': if ops.postinstall_script: installer.add_file_as(ops.postinstall_script, "postinstall.sh") - for plugin in ops.plugin: - basename = os.path.split(plugin)[1] - basename = os.path.splitext(basename)[0] - dst = tempfile.mktemp(dir=plugindir, - prefix=basename+'-', - suffix='.py') - shutil.copy(plugin, dst) + if ops.plugin: + for plugin in ops.plugin: + basename = os.path.split(plugin)[1] + basename = os.path.splitext(basename)[0] + dst = tempfile.mktemp(dir=plugindir, + prefix=basename+'-', + suffix='.py') + shutil.copy(plugin, dst) l = os.listdir(plugindir) if l: From e21d14549ba59d1e557b84b7917309c961d85030 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 29 Nov 2016 17:40:05 +0000 Subject: [PATCH 121/255] - Mount the ONL-BOOT context before accessing the default boot-config. --- .../src/python/onl/bootconfig/__init__.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py index 61a00774..a5dde0f0 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py @@ -20,9 +20,12 @@ class OnlBootConfig(object): self._original = self.keys.copy() def read(self, bc=None): - if bc is None: - bc = self.BOOT_CONFIG_DEFAULT - self._readf(bc) + if bc: + self._readf(bc) + else: + from onl.mounts import OnlMountContextReadOnly + with OnlMountContextReadOnly("ONL-BOOT", logger=None): + self._readf(self.BOOT_CONFIG_DEFAULT) def set(self, k, v): self.keys[k] = v @@ -151,7 +154,7 @@ class OnlBootConfigNet(OnlBootConfig): elif self.keys['NETAUTO'] not in ['dhcp', 'up']: raise ValueError("The NETAUTO value '%s' is invalid." 
% self.keys['NETAUTO']) - elif self.keys['NETAUTO'] == 'up' && self.NET_REQUIRED: + elif self.keys['NETAUTO'] == 'up' and self.NET_REQUIRED: raise ValueError("NETAUTO is 'up' but non-local networking is required.") if 'NETDEV' not in self.keys: From 1d8d72ce7f484638cf9d7f76c0a5c817ef7e7ec6 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Tue, 29 Nov 2016 14:21:51 -0800 Subject: [PATCH 122/255] Import plugins from the loader runtime --- .../src/python/onl/install/BaseInstall.py | 14 ++++++++++++++ .../src/python/onl/install/plugins/__init__.py | 7 +++++++ 2 files changed, 21 insertions(+) create mode 100644 packages/base/all/vendor-config-onl/src/python/onl/install/plugins/__init__.py diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index 77df42fa..f0a4ae6b 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -438,10 +438,12 @@ class Base: def loadPlugins(self): + # scrape any plugins from the installer working directory pat = os.path.join(self.im.installerConf.installer_dir, "plugins", "*.py") for src in glob.glob(pat): self.loadPluginsFromFile(src) + # scrape any plugins from the installer archive pat = "plugins/*.py" for basename in self.zf.namelist(): if not fnmatch.fnmatch(basename, pat): continue @@ -457,6 +459,18 @@ class Base: if src and os.path.exists(src): os.unlink(src) + # scrape plugins from the loader runtime + # (any plugins dropped into $pydir/onl/install/plugins/*.py) + try: + import onl.install.plugins + plugindir = os.path.dirname(onl.install.plugins.__file__) + except ImportError: + plugindir = None + if plugindir: + pat = os.path.join(plugindir, "*.py") + for src in glob.glob(pat): + self.loadPluginsFromFile(src) + return 0 def runPlugins(self, mode): diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/plugins/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/install/plugins/__init__.py new file mode 100644 index 00000000..c8f033e7 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/plugins/__init__.py @@ -0,0 +1,7 @@ +"""__init__.py + +Module init for installer plugins. + +DO NOT auto-load modules from here. +Rather, drop files here so they will be processed by the installer. +""" From c0ca9ede52248b330fcbefecceea7686c59078d3 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 30 Nov 2016 19:24:16 +0000 Subject: [PATCH 123/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 70842173..d9d7d797 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 7084217321c753815743fb46904b598ec65670bb +Subproject commit d9d7d797063780e13c3345b4c488a16bd894b449 From 56b2ca49f95ebcc95ca4e22d095c15e948e7a3a6 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 30 Nov 2016 22:10:29 +0000 Subject: [PATCH 124/255] Don't take an exception if the manifest doesn't exist. - This is useful particularly for imports from the loader. 
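With this change the module-level 'rootfs' and 'loader' objects may be None when the corresponding manifest is missing, so callers should guard for that. A minimal sketch of the intended usage (BUILD_TIMESTAMP is the attribute already used as an example in this module's comments; the rest is illustrative):

    import onl.versions

    # In the loader environment the rootfs manifest is typically absent; in
    # that case onl.versions.rootfs is simply None instead of the import
    # raising an exception.
    if onl.versions.rootfs is not None:
        print onl.versions.rootfs.BUILD_TIMESTAMP
    else:
        print "no rootfs manifest available (e.g. running from the loader)"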
--- .../vendor-config-onl/src/python/onl/versions/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/versions/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/versions/__init__.py index 08faa806..f462c457 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/versions/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/versions/__init__.py @@ -1,3 +1,4 @@ +import os import json class OnlVersionManifest(object): @@ -30,8 +31,8 @@ class OnlLoaderVersion(OnlVersionBase): # # print onl.versions.rootfs.BUILD_TIMESTAMP # - -rootfs = OnlRootfsVersion() -loader = OnlLoaderVersion() +rootfs = OnlRootfsVersion() if os.path.exists(OnlRootfsVersion.MANIFEST) else None +loader = OnlLoaderVersion() if os.path.exists(OnlLoaderVersion.MANIFEST) else None + From 176d9f6449f0ed13c57ae7cbdd69941ec7584d44 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 30 Nov 2016 22:12:27 +0000 Subject: [PATCH 125/255] The imported module name must be unique. Otherwise the existing import will just be reused. --- .../vendor-config-onl/src/python/onl/install/BaseInstall.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py index f0a4ae6b..00b081f1 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/BaseInstall.py @@ -428,7 +428,8 @@ class Base: self.log.info("loading plugins from %s", pyPath) with open(pyPath) as fd: sfx = ('.py', 'U', imp.PY_SOURCE,) - mod = imp.load_module("plugin", fd, pyPath, sfx) + moduleName = os.path.splitext(os.path.basename(pyPath))[0] + mod = imp.load_module("onl_install_plugin_%s" % moduleName, fd, pyPath, sfx) for attr in dir(mod): klass = getattr(mod, attr) if isinstance(klass, type) and issubclass(klass, Plugin): From b5a3ff7e1ffd11c972191208deef557d24f5a53d Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 30 Nov 2016 22:56:18 +0000 Subject: [PATCH 126/255] Add DMI information for x86_64 platforms. 
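For reference, the new dmi_versions() helper pulls two version fields out of the python-dmidecode module. A condensed sketch of the lookup it performs, under the same API assumptions as the patch (dmidecode.bios() and dmidecode.system() return dicts whose values carry 'dmi_type' and 'data'; the helper function name here is illustrative):

    import dmidecode  # provided by the new python-dmidecode dependency

    def dmi_version(subsystem, dmi_type):
        # e.g. dmi_version(dmidecode.bios, 0) or dmi_version(dmidecode.system, 1)
        for v in subsystem().values():
            if type(v) is dict and v['dmi_type'] == dmi_type:
                return v['data']['Version']

The two resulting entries, 'DMI BIOS Version' and 'DMI System Version', are merged into platform_info via the new OnlInfoObject.update() method so they appear alongside the existing platform information.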
--- packages/base/all/vendor-config-onl/PKG.yml | 2 +- .../src/python/onl/platform/base.py | 35 +++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/PKG.yml b/packages/base/all/vendor-config-onl/PKG.yml index 0c0727bb..80116b56 100644 --- a/packages/base/all/vendor-config-onl/PKG.yml +++ b/packages/base/all/vendor-config-onl/PKG.yml @@ -3,7 +3,7 @@ prerequisites: packages: - name: onl-vendor-config-onl - depends: [ python-yaml, onl-bootd ] + depends: [ python-yaml, onl-bootd, python-dmidecode ] version: 1.0.0 arch: all copyright: Copyright 2013, 2014, 2015 Big Switch Networks diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index 23163d29..3abda83a 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -16,6 +16,7 @@ import re import yaml import onl.YamlUtils import subprocess +import platform class OnlInfoObject(object): DEFAULT_INDENT=" " @@ -43,6 +44,9 @@ class OnlInfoObject(object): """String representation of the information container.""" return OnlInfoObject.string(self._data, indent) + def update(self, d): + self._data.update(d) + @staticmethod def string(d, indent=DEFAULT_INDENT): return "\n".join( sorted("%s%s: %s" % (indent,k,v) for k,v in d.iteritems() if not k.startswith('_') and d[k] is not None and k != 'CRC')) @@ -114,6 +118,8 @@ class OnlPlatformBase(object): self.add_info_json("platform_info", "%s/platform-info.json" % self.basedir_onl(), PlatformInfo, required=False) + self.platform_info.update(self.dmi_versions()) + # Find the base platform config if self.platform().startswith('x86-64'): y1 = self.CONFIG_DEFAULT_GRUB @@ -224,6 +230,35 @@ class OnlPlatformBase(object): def firmware_version(self): return self.platform_info.CPLD_VERSIONS + def dmi_versions(self): + # Note - the dmidecode module returns empty lists for powerpc systems. + if platform.machine() != "x86_64": + return {} + + import dmidecode + fields = [ + { + 'name': 'DMI BIOS Version', + 'subsystem': dmidecode.bios, + 'dmi_type' : 0, + 'key' : 'Version', + }, + + { + 'name': 'DMI System Version', + 'subsystem': dmidecode.system, + 'dmi_type' : 1, + 'key' : 'Version', + }, + ] + rv = {} + for field in fields: + for v in field['subsystem']().values(): + if type(v) is dict and v['dmi_type'] == field['dmi_type']: + rv[field['name']] = v['data'][field['key']] + + return rv + def upgrade_manifest(self, type_, override_dir=None): if override_dir: m = os.path.join(override_dir, "manifest.json") From 15cbc922087f60a6e8a5e1df5491b021c2bbc4f7 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Wed, 30 Nov 2016 18:23:17 -0800 Subject: [PATCH 127/255] Handle failed installs --- .../vendor-config-onl/src/python/onl/install/SystemInstall.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py index b8c353a7..25d51928 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/SystemInstall.py @@ -32,7 +32,8 @@ class UpgradeHelper(Upgrader): def _runInitrdShell(self, p): if self.callback is not None: - self.callback(self, p) + return self.callback(self, p) + return 0 class App(SubprocessMixin): From 8bee5e779695c51169251e95a78726b4d4ceee8a Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 30 Nov 2016 18:23:34 -0800 Subject: [PATCH 128/255] Refactor the unmount part of the initrd context shutdown --- .../src/python/onl/install/InstallUtils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py index 685f703d..c7d3ee14 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/InstallUtils.py @@ -834,7 +834,7 @@ class InitrdContext(SubprocessMixin): return self - def shutdown(self): + def unmount(self): p = ProcMountsParser() if self.dir is not None: @@ -852,6 +852,10 @@ class InitrdContext(SubprocessMixin): cmd = ('umount', p,) self.check_call(cmd, vmode=self.V1) + def shutdown(self): + + self.unmount() + if self.initrd and self.dir: self.log.debug("cleaning up chroot in %s", self.dir) self.rmtree(self.dir) From 271434a71cbd6746f72cb622f3442eb82a7c7030 Mon Sep 17 00:00:00 2001 From: "Carl D. Roth" Date: Wed, 30 Nov 2016 18:23:58 -0800 Subject: [PATCH 129/255] Oops, unmount the contents of this (temporary) onie boot context --- .../all/vendor-config-onl/src/python/onl/install/App.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py index 3cc220f8..69b83f8f 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/App.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/App.py @@ -168,6 +168,12 @@ class App(SubprocessMixin, object): ##self.grubEnv = ConfUtils.GrubEnv(log=self.log.getChild("grub")) with OnieBootContext(log=self.log) as self.octx: + + self.octx.ictx.attach() + self.octx.ictx.unmount() + self.octx.ictx.detach() + # XXX roth -- here, detach the initrd mounts + self.octx.detach() if self.octx.onieDir is not None: From 0f2b922be8e5c30576fb972960572ebf3281b212 Mon Sep 17 00:00:00 2001 From: "Carl D. 
Roth" Date: Wed, 30 Nov 2016 18:24:32 -0800 Subject: [PATCH 130/255] attach, then shutdown, the internal components --- .../src/python/onl/install/ShellApp.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py index 934d40a3..e3a5e505 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/install/ShellApp.py @@ -189,11 +189,20 @@ class OnieBootContext: def shutdown(self): ctx, self.fctx = self.fctx, None - if ctx is not None: ctx.shutdown() + if ctx is not None: + ctx.shutdown() + ctx.attach() + ctx.shutdown() ctx, self.ictx = self.ictx, None - if ctx is not None: ctx.shutdown() + if ctx is not None: + ctx.shutdown() + ctx.attach() + ctx.shutdown() ctx, self.dctx = self.dctx, None - if ctx is not None: ctx.shutdown() + if ctx is not None: + ctx.shutdown() + ctx.attach() + ctx.shutdown() def __exit__(self, eType, eValue, eTrace): self.shutdown() From cbec27d710925a1074592d92b19d21bbd3642d0e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 1 Dec 2016 17:13:32 +0000 Subject: [PATCH 131/255] Handle empty images. --- packages/base/all/vendor-config-onl/src/bin/onlswi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/bin/onlswi b/packages/base/all/vendor-config-onl/src/bin/onlswi index fb965581..e228dc40 100755 --- a/packages/base/all/vendor-config-onl/src/bin/onlswi +++ b/packages/base/all/vendor-config-onl/src/bin/onlswi @@ -1,5 +1,5 @@ #!/bin/bash ############################################################ onlfs mount images --rw -(cd /mnt/onl/images && rm *.swi && wget $1) +(cd /mnt/onl/images && rm -f *.swi && wget $1) onlfs mount images From f8df569c2d0463e2a47728b27cc71d5f240001ee Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 1 Dec 2016 17:14:04 +0000 Subject: [PATCH 132/255] Tolerate missing dmidecode module amd missing platform info object. --- .../vendor-config-onl/src/python/onl/platform/base.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index 3abda83a..0b8ffbf2 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -118,7 +118,10 @@ class OnlPlatformBase(object): self.add_info_json("platform_info", "%s/platform-info.json" % self.basedir_onl(), PlatformInfo, required=False) - self.platform_info.update(self.dmi_versions()) + if hasattr(self, "platform_info"): + self.platform_info.update(self.dmi_versions()) + else: + self.add_info_dict("platform_info", self.dmi_versions()) # Find the base platform config if self.platform().startswith('x86-64'): @@ -235,7 +238,11 @@ class OnlPlatformBase(object): if platform.machine() != "x86_64": return {} - import dmidecode + try: + import dmidecode + except ImportError: + return {} + fields = [ { 'name': 'DMI BIOS Version', From add948d33f305d41961ebf07b3983cdd0fb5f238 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 1 Dec 2016 18:20:14 +0000 Subject: [PATCH 133/255] Add force_overrite paramater and return whether the file was re-written or not. This functionality is useful for upgrade scenarios. 
--- .../src/python/onl/bootconfig/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py index a5dde0f0..db49a4bc 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/bootconfig/__init__.py @@ -44,14 +44,18 @@ class OnlBootConfig(object): with open(f, "w") as f: self._writeh(f) - def write(self, dst=None): + def write(self, dst=None, force_overwrite=True): self.validate() if dst: self._writef(dst) + return True else: from onl.mounts import OnlMountContextReadWrite with OnlMountContextReadWrite("ONL-BOOT", logger=None): - self._writef(self.BOOT_CONFIG_DEFAULT) + if not os.path.exists(self.BOOT_CONFIG_DEFAULT) or force_overwrite: + self._writef(self.BOOT_CONFIG_DEFAULT) + return True + return False def __classmethod(self, name, *args): From 3799cce3f0aa4176f354cc992dfa9e70197cb77d Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 2 Dec 2016 21:31:09 +0000 Subject: [PATCH 134/255] Kexec is no longer used. --- builds/any/rootfs/jessie/common/amd64-base-packages.yml | 2 +- builds/any/rootfs/jessie/common/armel-base-packages.yml | 1 - builds/any/rootfs/jessie/common/powerpc-base-packages.yml | 1 - builds/any/rootfs/wheezy/common/all-base-packages.yml | 1 - builds/any/rootfs/wheezy/standard/standard.yml | 2 -- 5 files changed, 1 insertion(+), 6 deletions(-) diff --git a/builds/any/rootfs/jessie/common/amd64-base-packages.yml b/builds/any/rootfs/jessie/common/amd64-base-packages.yml index 82eaf33a..402a4169 100644 --- a/builds/any/rootfs/jessie/common/amd64-base-packages.yml +++ b/builds/any/rootfs/jessie/common/amd64-base-packages.yml @@ -8,7 +8,7 @@ - smartmontools - grub2 - onl-upgrade -- kexec-tools + diff --git a/builds/any/rootfs/jessie/common/armel-base-packages.yml b/builds/any/rootfs/jessie/common/armel-base-packages.yml index 7b68feed..71c41a67 100644 --- a/builds/any/rootfs/jessie/common/armel-base-packages.yml +++ b/builds/any/rootfs/jessie/common/armel-base-packages.yml @@ -1,2 +1 @@ - u-boot-tools -- kexec-tools diff --git a/builds/any/rootfs/jessie/common/powerpc-base-packages.yml b/builds/any/rootfs/jessie/common/powerpc-base-packages.yml index 54e5593f..e876f6bd 100644 --- a/builds/any/rootfs/jessie/common/powerpc-base-packages.yml +++ b/builds/any/rootfs/jessie/common/powerpc-base-packages.yml @@ -4,7 +4,6 @@ # ############################################################ - u-boot-tools -- kexec-tools diff --git a/builds/any/rootfs/wheezy/common/all-base-packages.yml b/builds/any/rootfs/wheezy/common/all-base-packages.yml index eba11381..3f74f183 100644 --- a/builds/any/rootfs/wheezy/common/all-base-packages.yml +++ b/builds/any/rootfs/wheezy/common/all-base-packages.yml @@ -38,7 +38,6 @@ - pciutils - usbutils - mtd-utils -- kexec-tools - i2c-tools - module-init-tools - isc-dhcp-client diff --git a/builds/any/rootfs/wheezy/standard/standard.yml b/builds/any/rootfs/wheezy/standard/standard.yml index 7b58d66e..083fb7a9 100644 --- a/builds/any/rootfs/wheezy/standard/standard.yml +++ b/builds/any/rootfs/wheezy/standard/standard.yml @@ -69,8 +69,6 @@ Configure: - 'nfs-common remove' - 'rpcbind remove' - 'motd remove' - - 'kexec remove' - - 'kexec-load remove' - 'mountall-bootclean.sh remove' - 'mountall.sh remove' - 'checkfs.sh remove' From efe66c755bb75291f1b4bdbcacea90dc886a3bdd Mon Sep 17 00:00:00 2001 
From: Jeffrey Townsend Date: Mon, 5 Dec 2016 23:02:49 +0000 Subject: [PATCH 135/255] Add newline to issue.net. --- tools/onlrfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/onlrfs.py b/tools/onlrfs.py index 8152731f..126ebe30 100755 --- a/tools/onlrfs.py +++ b/tools/onlrfs.py @@ -525,7 +525,7 @@ rm -f /usr/sbin/policy-rc.d fn = os.path.join(dir_, "etc/issue.net") onlu.execute("sudo chmod a+w %s" % fn) with open(fn, "w") as f: - f.write("%s" % issue) + f.write("%s\n" % issue) onlu.execute("sudo chmod a-w %s" % fn) From 0a635a9e3e929f2e3201447a5e1db5af5e6d5dbf Mon Sep 17 00:00:00 2001 From: Zi Zhou Date: Mon, 5 Dec 2016 17:57:53 -0800 Subject: [PATCH 136/255] report correct fan speed when fan speed changes according to thermal plan --- .../onlp/builds/src/module/src/fani.c | 6 ------ .../onlp/builds/src/module/src/sysi.c | 11 +++++++++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/fani.c b/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/fani.c index ac2a0c2b..579d1ad3 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/fani.c @@ -127,12 +127,6 @@ onlp_fani_init(void) return ONLP_STATUS_E_INTERNAL; } - /* - * Bring both fans to max. - * These will be reduced after the first platform management sequence. - */ - onlp_fani_percentage_set(ONLP_FAN_ID_CREATE(1), FAN_PERCENTAGE_MAX); - return ONLP_STATUS_OK; } diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/sysi.c b/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/sysi.c index aed3f4a4..61a86f60 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5610-52x/onlp/builds/src/module/src/sysi.c @@ -127,6 +127,17 @@ static const temp_sensor_threshold_t temp_sensor_threshold_b2f[NUM_OF_CHASSIS_TH #include #include +int +onlp_sysi_platform_manage_init(void) +{ + /* + * Bring the fan to max. + * These will be reduced after the first platform management sequence. + */ + onlp_fani_percentage_set(ONLP_FAN_ID_CREATE(1), FAN_PERCENTAGE_MAX); + return 0; +} + int onlp_sysi_platform_manage_fans(void) { From 2241b65dfa5d16e45b1af57ed98e94c9999eea56 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 7 Dec 2016 17:55:27 +0000 Subject: [PATCH 137/255] Set upgrade ONIE and Firmware upgrade policy from sysconfig. 
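In other words, whether ONIE and firmware upgrades may run unattended is now a vendor sysconfig decision rather than a hard-coded default. A rough illustration of the policy attributes consulted (the attribute paths are the ones introduced below; the import path is an assumption for illustration only):

    from onl.sysconfig import sysconfig    # import path assumed

    print sysconfig.upgrade.onie.auto      # may ONIE be upgraded automatically?
    print sysconfig.upgrade.firmware.auto  # may firmware be upgraded automatically?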
--- .../all/vendor-config-onl/src/python/onl/upgrade/firmware.py | 3 +++ .../base/all/vendor-config-onl/src/python/onl/upgrade/onie.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py index 90635fc6..a2c1e5d1 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/firmware.py @@ -16,6 +16,9 @@ class FirmwareUpgrade(ubase.BaseOnieUpgrade): current_version_key="Current Firmware Version" next_version_key="Next Firmware Version" + def auto_upgrade_default(self): + return sysconfig.upgrade.firmware.auto + def init_versions(self): # Get the current platform firmware version diff --git a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py index 4cf81016..b71e4659 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/upgrade/onie.py @@ -16,6 +16,9 @@ class OnieUpgrade(ubase.BaseOnieUpgrade): current_version_key="Current ONIE Version" next_version_key="Next ONIE Version" + def auto_upgrade_default(self): + return sysconfig.upgrade.onie.auto + def init_versions(self): # Get the current platform ONIE version self.current_version = self.platform.onie_version() From f6cc003469b75e2bd9e8ff7569a670056a4de179 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 7 Dec 2016 20:14:03 +0000 Subject: [PATCH 138/255] Provide common access to machine.conf and the output of onie-syseeprom. Some platforms do not provide direct device access to the ONIE system eeprom. The only way these systems can export their system data under ONL is by scraping it from the output of onie-syseeprom and/or reading fields from machine.conf. These new platform methods can be called in a platform's baseconfig() to populate the cached version of these values for later use, or used to access that cached data at a later time. --- .../src/python/onl/platform/base.py | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index 0b8ffbf2..5d43ce15 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -186,6 +186,54 @@ class OnlPlatformBase(object): def baseconfig(self): return True + def onie_machine_get(self): + mc = self.basedir_onl("etc/onie/machine.json") + if not os.path.exists(mc): + data = {} + mcconf = subprocess.check_output("""onie-shell -c "cat /etc/machine.conf" """, shell=True) + for entry in mcconf.split(): + (k,e,v) = entry.partition('=') + if e: + data[k] = v + + if not os.path.exists(os.path.dirname(mc)): + os.makedirs(os.path.dirname(mc)) + + with open(mc, "w") as f: + f.write(json.dumps(data, indent=2)) + else: + data = json.load(open(mc)) + + return data + + def onie_syseeprom_get(self): + se = self.basedir_onl("etc/onie/eeprom.json") + if not os.path.exists(se): + data = {} + extensions = [] + syseeprom = subprocess.check_output("""onie-shell -c onie-syseeprom""", shell=True) + e = re.compile(r'(.*?) 
(0x[0-9a-fA-F][0-9a-fA-F])[ ]+(\d+) (.*)') + for line in syseeprom.split('\n'): + m = e.match(line) + if m: + value = m.groups(0)[3] + code = m.groups(0)[1].lower() + if code == '0xfd': + extensions.append(value) + else: + data[code] = value + if len(extensions): + data['0xfd'] = extensions + + if not os.path.exists(os.path.dirname(se)): + os.makedirs(os.path.dirname(se)) + + with open(se, "w") as f: + f.write(json.dumps(data, indent=2)) + else: + data = json.load(open(se)) + return data + def platform(self): return self.PLATFORM From 54e4c572a09b570793329d2a889e09fc68a26dfa Mon Sep 17 00:00:00 2001 From: "Sung-Kuang (Max) Chung" Date: Wed, 7 Dec 2016 17:21:30 -0800 Subject: [PATCH 139/255] Added community support email opennetworklinux@googlegroups.com, as support@bigswitch.com "is a support line for commercial Big Switch products for customer with paid licenses." --- builds/any/installer/APKG.yml | 1 + builds/any/rootfs/APKG.yml | 1 + builds/any/swi/APKG.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/builds/any/installer/APKG.yml b/builds/any/installer/APKG.yml index 3ac280ff..1c9c8247 100644 --- a/builds/any/installer/APKG.yml +++ b/builds/any/installer/APKG.yml @@ -10,6 +10,7 @@ common: version: 0.$FNAME_RELEASE_ID copyright: Copyright 2016 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-installer-$BOOTMODE diff --git a/builds/any/rootfs/APKG.yml b/builds/any/rootfs/APKG.yml index d83a385f..e8143127 100644 --- a/builds/any/rootfs/APKG.yml +++ b/builds/any/rootfs/APKG.yml @@ -9,6 +9,7 @@ common: version: 0.$FNAME_RELEASE_ID copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-rootfs diff --git a/builds/any/swi/APKG.yml b/builds/any/swi/APKG.yml index 3614408b..48b81df0 100644 --- a/builds/any/swi/APKG.yml +++ b/builds/any/swi/APKG.yml @@ -10,6 +10,7 @@ common: version: 0.$FNAME_RELEASE_ID copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-swi From a0e4cae7c49449f0b788bf1f10089cc517cad742 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 8 Dec 2016 03:46:36 +0000 Subject: [PATCH 140/255] Support manual uninstall mode as a last resort. 
--- .../all/vendor-config-onl/src/sbin/uninstall | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/sbin/uninstall b/packages/base/all/vendor-config-onl/src/sbin/uninstall index c76464d5..2fb1c9e0 100755 --- a/packages/base/all/vendor-config-onl/src/sbin/uninstall +++ b/packages/base/all/vendor-config-onl/src/sbin/uninstall @@ -3,13 +3,25 @@ set -e uninstall_x86_64() { + if [ "$1" = "manual" ]; then + # Clear any ONIE boot selection settings and default to Install + mkdir -p /mnt/onie-boot + mount `blkid -L ONIE-BOOT` /mnt/onie-boot + rm -rf /mnt/onie-boot/grubenv + umount /mnt/onie-boot + + # Force ONIE boot selection + onlfs mount boot --rw + echo "default=1" >> /mnt/onl/boot/grub/grub.cfg - if [ "$1" = "factory" ]; then - mode=uninstall else - mode=install + if [ "$1" = "factory" ]; then + mode=uninstall + else + mode=install + fi + onl-onie-boot-mode $mode fi - onl-onie-boot-mode $mode } uninstall_uboot() From 1a90e3c514d1ab6252f136b2875e3cd03ad2d0fa Mon Sep 17 00:00:00 2001 From: "Sung-Kuang (Max) Chung" Date: Wed, 7 Dec 2016 21:56:03 -0800 Subject: [PATCH 141/255] Added community support email opennetworklinux@googlegroups.com, as support@bigswitch.com "is a support line for commercial Big Switch products for customer with paid licenses." --- docker/tools/PKG.yml | 1 + packages/base/all/boot.d/PKG.yml | 1 + packages/base/all/initrds/loader-initrd-files/PKG.yml | 1 + packages/base/all/onl-mibs/PKG.yml | 1 + packages/base/all/vendor-config-onl/PKG.yml | 1 + packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml | 1 + packages/base/amd64/kernels/kernel-3.18-x68-64-all/PKG.yml | 1 + packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml | 1 + .../base/amd64/kernels/legacy/kernel-3.9.6-x86-64-all/PKG.yml | 1 + packages/base/amd64/upgrade/PKG.yml | 1 + packages/base/any/faultd/APKG.yml | 1 + packages/base/any/fit/buildroot/APKG.yml | 1 + packages/base/any/fit/loader/APKG.yml | 1 + packages/base/any/initrds/buildroot/APKG.yml | 1 + packages/base/any/initrds/loader/APKG.yml | 1 + packages/base/any/onlp-snmpd/APKG.yml | 1 + packages/base/any/onlp/APKG.yml | 1 + packages/base/any/oom-shim/APKG.yml | 1 + packages/base/any/templates/onlp-platform-any.yml | 1 + packages/base/any/templates/platform-config-platform.yml | 1 + packages/base/any/templates/platform-config-vendor.yml | 1 + packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml | 1 + .../base/armel/kernels/kernel-3.2-deb7-arm-iproc-all/PKG.yml | 1 + .../powerpc/kernels/kernel-3.2-deb7-powerpc-e500v-all/PKG.yml | 1 + .../powerpc/kernels/legacy/kernel-3.8.13-powerpc-e500mc/PKG.yml | 1 + .../powerpc/kernels/legacy/kernel-3.9.6-powerpc-e500v/PKG.yml | 1 + .../accton/powerpc/powerpc-accton-as5710-54x/onlp/PKG.yml | 1 + .../accton/powerpc/powerpc-accton-as6700-32x/onlp/PKG.yml | 1 + .../platforms/accton/x86-64/x86-64-accton-wedge-16x/onlp/PKG.yml | 1 + .../accton/x86-64/x86-64-facebook-wedge100/onlp/PKG.yml | 1 + .../platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml | 1 + 31 files changed, 31 insertions(+) diff --git a/docker/tools/PKG.yml b/docker/tools/PKG.yml index 1c6f35a8..9a7db5b2 100644 --- a/docker/tools/PKG.yml +++ b/docker/tools/PKG.yml @@ -3,6 +3,7 @@ common: version: 1.1.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-docker-tools diff --git a/packages/base/all/boot.d/PKG.yml b/packages/base/all/boot.d/PKG.yml index 
c785401e..f27fae45 100644 --- a/packages/base/all/boot.d/PKG.yml +++ b/packages/base/all/boot.d/PKG.yml @@ -3,6 +3,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-bootd diff --git a/packages/base/all/initrds/loader-initrd-files/PKG.yml b/packages/base/all/initrds/loader-initrd-files/PKG.yml index 989e15e0..bb05f8a1 100644 --- a/packages/base/all/initrds/loader-initrd-files/PKG.yml +++ b/packages/base/all/initrds/loader-initrd-files/PKG.yml @@ -3,6 +3,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-loader-initrd-files diff --git a/packages/base/all/onl-mibs/PKG.yml b/packages/base/all/onl-mibs/PKG.yml index 4838838a..d29037ff 100644 --- a/packages/base/all/onl-mibs/PKG.yml +++ b/packages/base/all/onl-mibs/PKG.yml @@ -3,6 +3,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-mibs diff --git a/packages/base/all/vendor-config-onl/PKG.yml b/packages/base/all/vendor-config-onl/PKG.yml index 80116b56..1429953f 100644 --- a/packages/base/all/vendor-config-onl/PKG.yml +++ b/packages/base/all/vendor-config-onl/PKG.yml @@ -8,6 +8,7 @@ packages: arch: all copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com summary: ONL Base Configuration Package files: diff --git a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml index 92dca0b4..cf68e414 100644 --- a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml +++ b/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml @@ -4,6 +4,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.16+deb8-x86-64-all diff --git a/packages/base/amd64/kernels/kernel-3.18-x68-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.18-x68-64-all/PKG.yml index 04b5fbac..c42a515c 100644 --- a/packages/base/amd64/kernels/kernel-3.18-x68-64-all/PKG.yml +++ b/packages/base/amd64/kernels/kernel-3.18-x68-64-all/PKG.yml @@ -4,6 +4,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.18-x86-64-all diff --git a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml index 7ef66fe0..5fc62c40 100644 --- a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml +++ b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml @@ -4,6 +4,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.2-deb7-x86-64-all diff --git a/packages/base/amd64/kernels/legacy/kernel-3.9.6-x86-64-all/PKG.yml b/packages/base/amd64/kernels/legacy/kernel-3.9.6-x86-64-all/PKG.yml index 16ccb0f1..d226cf57 100644 --- a/packages/base/amd64/kernels/legacy/kernel-3.9.6-x86-64-all/PKG.yml +++ 
b/packages/base/amd64/kernels/legacy/kernel-3.9.6-x86-64-all/PKG.yml @@ -7,6 +7,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.9.6-x86-64-all diff --git a/packages/base/amd64/upgrade/PKG.yml b/packages/base/amd64/upgrade/PKG.yml index fe994816..213c0b3b 100644 --- a/packages/base/amd64/upgrade/PKG.yml +++ b/packages/base/amd64/upgrade/PKG.yml @@ -11,6 +11,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-upgrade diff --git a/packages/base/any/faultd/APKG.yml b/packages/base/any/faultd/APKG.yml index 8a3aecd3..6eb1b970 100644 --- a/packages/base/any/faultd/APKG.yml +++ b/packages/base/any/faultd/APKG.yml @@ -3,6 +3,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com depends: base-files packages: - name: onl-faultd diff --git a/packages/base/any/fit/buildroot/APKG.yml b/packages/base/any/fit/buildroot/APKG.yml index 0eb35982..61eb087d 100644 --- a/packages/base/any/fit/buildroot/APKG.yml +++ b/packages/base/any/fit/buildroot/APKG.yml @@ -7,6 +7,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-buildroot-fit diff --git a/packages/base/any/fit/loader/APKG.yml b/packages/base/any/fit/loader/APKG.yml index f50de736..eed7c9a9 100644 --- a/packages/base/any/fit/loader/APKG.yml +++ b/packages/base/any/fit/loader/APKG.yml @@ -7,6 +7,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-loader-fit diff --git a/packages/base/any/initrds/buildroot/APKG.yml b/packages/base/any/initrds/buildroot/APKG.yml index c59cf6cd..b4362069 100644 --- a/packages/base/any/initrds/buildroot/APKG.yml +++ b/packages/base/any/initrds/buildroot/APKG.yml @@ -14,6 +14,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-buildroot-initrd diff --git a/packages/base/any/initrds/loader/APKG.yml b/packages/base/any/initrds/loader/APKG.yml index bb3b8793..8fe5304e 100644 --- a/packages/base/any/initrds/loader/APKG.yml +++ b/packages/base/any/initrds/loader/APKG.yml @@ -13,6 +13,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-loader-initrd diff --git a/packages/base/any/onlp-snmpd/APKG.yml b/packages/base/any/onlp-snmpd/APKG.yml index f25d43fd..76214600 100644 --- a/packages/base/any/onlp-snmpd/APKG.yml +++ b/packages/base/any/onlp-snmpd/APKG.yml @@ -6,6 +6,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onlp-snmpd diff --git a/packages/base/any/onlp/APKG.yml b/packages/base/any/onlp/APKG.yml index b0d91c6a..a245d8ec 100644 --- a/packages/base/any/onlp/APKG.yml +++ b/packages/base/any/onlp/APKG.yml @@ -14,6 +14,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 
2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onlp diff --git a/packages/base/any/oom-shim/APKG.yml b/packages/base/any/oom-shim/APKG.yml index 6a54841f..19ccd4c3 100644 --- a/packages/base/any/oom-shim/APKG.yml +++ b/packages/base/any/oom-shim/APKG.yml @@ -13,6 +13,7 @@ common: version: 1.0.0 copyright: Copyright 2016 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com diff --git a/packages/base/any/templates/onlp-platform-any.yml b/packages/base/any/templates/onlp-platform-any.yml index 4217f793..e9580aae 100644 --- a/packages/base/any/templates/onlp-platform-any.yml +++ b/packages/base/any/templates/onlp-platform-any.yml @@ -15,6 +15,7 @@ common: arch: $ARCH copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onlp-${PLATFORM}-r0 diff --git a/packages/base/any/templates/platform-config-platform.yml b/packages/base/any/templates/platform-config-platform.yml index 22630018..0eb1edc0 100644 --- a/packages/base/any/templates/platform-config-platform.yml +++ b/packages/base/any/templates/platform-config-platform.yml @@ -13,6 +13,7 @@ packages: arch: $ARCH copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com summary: ONL Platform Configuration Package for the ${PLATFORM} files: diff --git a/packages/base/any/templates/platform-config-vendor.yml b/packages/base/any/templates/platform-config-vendor.yml index ad783f77..e89538fb 100644 --- a/packages/base/any/templates/platform-config-vendor.yml +++ b/packages/base/any/templates/platform-config-vendor.yml @@ -8,6 +8,7 @@ packages: arch: all copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com summary: ONL Configuration Package for ${Vendor} Platforms files: diff --git a/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml index 19302944..73ccabb3 100644 --- a/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml +++ b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml @@ -4,6 +4,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.18.25-arm64-all diff --git a/packages/base/armel/kernels/kernel-3.2-deb7-arm-iproc-all/PKG.yml b/packages/base/armel/kernels/kernel-3.2-deb7-arm-iproc-all/PKG.yml index 74b43f08..68212ba9 100644 --- a/packages/base/armel/kernels/kernel-3.2-deb7-arm-iproc-all/PKG.yml +++ b/packages/base/armel/kernels/kernel-3.2-deb7-arm-iproc-all/PKG.yml @@ -4,6 +4,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.2-deb7-arm-iproc-all diff --git a/packages/base/powerpc/kernels/kernel-3.2-deb7-powerpc-e500v-all/PKG.yml b/packages/base/powerpc/kernels/kernel-3.2-deb7-powerpc-e500v-all/PKG.yml index 8ee6ee59..92613de7 100644 --- a/packages/base/powerpc/kernels/kernel-3.2-deb7-powerpc-e500v-all/PKG.yml +++ b/packages/base/powerpc/kernels/kernel-3.2-deb7-powerpc-e500v-all/PKG.yml @@ -3,6 +3,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: 
support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.2-deb7-powerpc-e500v-all diff --git a/packages/base/powerpc/kernels/legacy/kernel-3.8.13-powerpc-e500mc/PKG.yml b/packages/base/powerpc/kernels/legacy/kernel-3.8.13-powerpc-e500mc/PKG.yml index 5c8e3021..b82c53e9 100644 --- a/packages/base/powerpc/kernels/legacy/kernel-3.8.13-powerpc-e500mc/PKG.yml +++ b/packages/base/powerpc/kernels/legacy/kernel-3.8.13-powerpc-e500mc/PKG.yml @@ -7,6 +7,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.8.13-powerpc-e500mc diff --git a/packages/base/powerpc/kernels/legacy/kernel-3.9.6-powerpc-e500v/PKG.yml b/packages/base/powerpc/kernels/legacy/kernel-3.9.6-powerpc-e500v/PKG.yml index db9b6a3e..77254c81 100644 --- a/packages/base/powerpc/kernels/legacy/kernel-3.9.6-powerpc-e500v/PKG.yml +++ b/packages/base/powerpc/kernels/legacy/kernel-3.9.6-powerpc-e500v/PKG.yml @@ -7,6 +7,7 @@ common: version: 1.0.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onl-kernel-3.9.6-powerpc-e500v diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/PKG.yml b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/PKG.yml index 5fa1f690..33609f93 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/PKG.yml +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/PKG.yml @@ -12,6 +12,7 @@ common: arch: powerpc copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com packages: - name: onlp-${r0_platform}-r0 diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/PKG.yml b/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/PKG.yml index d136d719..a6e6ef18 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/PKG.yml +++ b/packages/platforms/accton/powerpc/powerpc-accton-as6700-32x/onlp/PKG.yml @@ -8,6 +8,7 @@ common: arch: powerpc copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com changelog: Change changes changes., diff --git a/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/onlp/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/onlp/PKG.yml index c4d50cf4..537216ac 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/onlp/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-wedge-16x/onlp/PKG.yml @@ -7,6 +7,7 @@ common: arch: amd64 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com comment: dummy package for ONLP on Wedge packages: - name: onlp-${platform} diff --git a/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/onlp/PKG.yml b/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/onlp/PKG.yml index 10caa7f6..cd4f59d4 100644 --- a/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/onlp/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-facebook-wedge100/onlp/PKG.yml @@ -7,6 +7,7 @@ common: arch: amd64 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com comment: dummy package for ONLP on Wedge 
packages: - name: onlp-${platform} diff --git a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml index 46f47033..0d5a8293 100644 --- a/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml +++ b/packages/platforms/agema/x86-64/x86-64-agema-agc7648/onlp/PKG.yml @@ -7,6 +7,7 @@ common: arch: amd64 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com comment: dummy package for ONLP on Wedge packages: - name: onlp-${platform} From b8c67183eab1c45c5a173451d226dd0aad39cf24 Mon Sep 17 00:00:00 2001 From: brandonchuang Date: Fri, 9 Dec 2016 17:28:10 +0800 Subject: [PATCH 142/255] Fix FAN1 and FAN2 id reversed issue --- .../onlp/builds/src/module/src/fani.c | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as4600-54t/onlp/builds/src/module/src/fani.c b/packages/platforms/accton/powerpc/powerpc-accton-as4600-54t/onlp/builds/src/module/src/fani.c index 5544c9f8..c3e4abf9 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as4600-54t/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as4600-54t/onlp/builds/src/module/src/fani.c @@ -37,10 +37,10 @@ /* FAN status mask */ -#define CPLD_FAN_1_PRESENT_BIT_MASK 0x80 -#define CPLD_FAN_2_PRESENT_BIT_MASK 0x40 -#define CPLD_FAN_1_DIRECTION_BIT_MASK 0x20 -#define CPLD_FAN_2_DIRECTION_BIT_MASK 0x10 +#define CPLD_FAN_1_PRESENT_BIT_MASK 0x40 +#define CPLD_FAN_2_PRESENT_BIT_MASK 0x80 +#define CPLD_FAN_1_DIRECTION_BIT_MASK 0x10 +#define CPLD_FAN_2_DIRECTION_BIT_MASK 0x20 #define CPLD_PSU_FAN_STATUS_BIT_MASK 0x8 #define CPLD_PSU_POWER_GOOD_MASK 0x10 #define CPLD_PSU_PRESENT_BIT_MASK 0x80 @@ -52,13 +52,13 @@ /* FAN status register in the ADT7473 */ -#define I2C_FAN_1_STATUS_REG_TACH_1_LOW 0x28 -#define I2C_FAN_1_STATUS_REG_TACH_1_HIGH 0x29 -#define I2C_FAN_2_STATUS_REG_TACH_3_LOW 0x2C -#define I2C_FAN_2_STATUS_REG_TACH_3_HIGH 0x2D +#define I2C_FAN_1_STATUS_REG_TACH_1_LOW 0x2C +#define I2C_FAN_1_STATUS_REG_TACH_1_HIGH 0x2D +#define I2C_FAN_2_STATUS_REG_TACH_3_LOW 0x28 +#define I2C_FAN_2_STATUS_REG_TACH_3_HIGH 0x29 -#define I2C_FAN_1_DUTY_CYCLE_REG_PWM_1 0x30 -#define I2C_FAN_2_DUTY_CYCLE_REG_PWM_3 0x32 +#define I2C_FAN_1_DUTY_CYCLE_REG_PWM_1 0x32 +#define I2C_FAN_2_DUTY_CYCLE_REG_PWM_3 0x30 #define I2C_AC_PSU_CHICONY_REG_FAN_SPEED 0x90 #define I2C_AC_PSU_CHICONY_REG_FAN_COMMAND 0x3B From 06c2d4bf584ff29eeee30e2647f7f2b846ce2300 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 9 Dec 2016 07:42:30 -0800 Subject: [PATCH 143/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index d9d7d797..935e30c6 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit d9d7d797063780e13c3345b4c488a16bd894b449 +Subproject commit 935e30c6c8fcd48245ec9c2bbe75da563bf59b4c From 65529d39fe17d45c54b8aaeaf024cd787d2310bf Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 10 Dec 2016 15:33:36 +0000 Subject: [PATCH 144/255] Detect both SYS_VENDOR and BOARD_VENDOR. 
--- .../platform-accton-as5512_54x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as5712_54x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as5812_54t-device-drivers.patch | 11 +++++++++-- .../platform-accton-as5812_54x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as6712_32x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as6812_32x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as7512_32x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as7712_32x-device-drivers.patch | 11 +++++++++-- .../platform-accton-as7716_32x-device-drivers.patch | 11 +++++++++-- 9 files changed, 81 insertions(+), 18 deletions(-) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch index da1cad21..af063811 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch @@ -807,7 +807,7 @@ diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c index acf88c9..e50c599 100644 --- a/drivers/hwmon/accton_i2c_cpld.c +++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -255,6 +255,22 @@ int platform_accton_as5812_54t(void) +@@ -255,6 +255,29 @@ int platform_accton_as5812_54t(void) } EXPORT_SYMBOL(platform_accton_as5812_54t); @@ -818,7 +818,14 @@ index acf88c9..e50c599 100644 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS5512", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), ++ }, ++ }, +}; + +int platform_accton_as5512_54x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch index 67bacd03..f7a74202 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch @@ -828,7 +828,7 @@ new file mode 100644 index 0000000..6381db5 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-accton_as5712_54x_cpld.c -@@ -0,0 +1,459 @@ +@@ -0,0 +1,466 @@ +/* + * I2C multiplexer + * @@ -872,7 +872,14 @@ index 0000000..6381db5 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS5712", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), ++ }, ++ }, +}; + +int platform_accton_as5712_54x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch index 5eedbb50..17db04c3 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch @@ -838,7 +838,7 @@ index 3aeb08d..acf88c9 100644 accton_i2c_cpld_remove_client(client); return 0; -@@ -217,6 +239,22 @@ int platform_accton_as7712_32x(void) +@@ -217,6 +239,29 @@ int platform_accton_as7712_32x(void) } EXPORT_SYMBOL(platform_accton_as7712_32x); @@ -849,7 +849,14 @@ index 
3aeb08d..acf88c9 100644 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS5812 54t", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), ++ }, ++ }, +}; + +int platform_accton_as5812_54t(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch index 92c6d201..0b25bf3c 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch @@ -826,7 +826,7 @@ new file mode 100644 index 0000000..e01e557 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-accton_as5812_54x_cpld.c -@@ -0,0 +1,387 @@ +@@ -0,0 +1,394 @@ +/* + * An I2C multiplexer dirver for accton as5812 CPLD + * @@ -871,7 +871,14 @@ index 0000000..e01e557 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS5812-54X", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), ++ }, ++ }, +}; + +int platform_accton_as5812_54x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch index 05377b0d..95ab532b 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch @@ -837,7 +837,7 @@ new file mode 100644 index 0000000..2ec0a59 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-accton_as6712_32x_cpld.c -@@ -0,0 +1,420 @@ +@@ -0,0 +1,427 @@ +/* + * I2C multiplexer + * @@ -881,7 +881,14 @@ index 0000000..2ec0a59 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS6712", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), ++ }, ++ }, +}; + +int platform_accton_as6712_32x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch index b71ad20a..5fe16e5e 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch @@ -829,7 +829,7 @@ new file mode 100644 index 0000000..d668ca4 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-accton_as6812_32x_cpld.c -@@ -0,0 +1,382 @@ +@@ -0,0 +1,389 @@ +/* + * I2C multiplexer for accton as6812 CPLD + * @@ -874,7 +874,14 @@ index 0000000..d668ca4 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS6812", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), ++ }, ++ }, +}; + +int platform_accton_as6812_32x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch index 
fbecedd4..e5028ae3 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch @@ -888,7 +888,7 @@ new file mode 100644 index 0000000..96e3490 --- /dev/null +++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -0,0 +1,209 @@ +@@ -0,0 +1,216 @@ +/* + * A hwmon driver for the accton_i2c_cpld + * @@ -1083,7 +1083,14 @@ index 0000000..96e3490 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS7512", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), ++ }, ++ }, +}; + +int platform_accton_as7512_32x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch index 2f48944c..8bba8fc5 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch @@ -803,7 +803,7 @@ diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c index 96e3490..3aeb08d 100644 --- a/drivers/hwmon/accton_i2c_cpld.c +++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -201,6 +201,22 @@ int platform_accton_as7512_32x(void) +@@ -201,6 +201,29 @@ int platform_accton_as7512_32x(void) } EXPORT_SYMBOL(platform_accton_as7512_32x); @@ -814,7 +814,14 @@ index 96e3490..3aeb08d 100644 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS7712", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), ++ }, ++ }, +}; + +int platform_accton_as7712_32x(void) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch index 51fca17e..d4b5d41c 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch @@ -803,7 +803,7 @@ diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c index e50c599..89e3a0e 100644 --- a/drivers/hwmon/accton_i2c_cpld.c +++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -271,6 +271,22 @@ int platform_accton_as5512_54x(void) +@@ -271,6 +271,29 @@ int platform_accton_as5512_54x(void) } EXPORT_SYMBOL(platform_accton_as5512_54x); @@ -814,7 +814,14 @@ index e50c599..89e3a0e 100644 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), + }, -+ } ++ }, ++ { ++ .ident = "Accton AS7716", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), ++ }, ++ }, +}; + +int platform_accton_as7716_32x(void) From 7e6a18bf4d88f9c63d8213a992d72416c5f0bd6f Mon Sep 17 00:00:00 2001 From: Michael Shych Date: Sun, 11 Dec 2016 14:18:34 +0000 Subject: [PATCH 145/255] Mellanox MSN2700, MSN2100 and MSN2410 platforms ONL support. 
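
Note on the Accton hunks above: each platform's dmi_system_id table gains a second entry keyed on DMI_SYS_VENDOR alongside the original DMI_BOARD_VENDOR entry, so units whose firmware reports the vendor in the system field rather than the board field are still recognized. The sketch below shows, under stated assumptions, how such a table is typically declared and consumed in kernel code; the two entries mirror the AS5512 hunk, while the probe function name and the call to dmi_check_system() are illustrative only and are not taken from this tree.

#include <linux/dmi.h>
#include <linux/module.h>

/*
 * Hypothetical example table: one entry keyed on the board vendor and one on
 * the system vendor, both matching the same product name. The empty entry at
 * the end terminates the list, as dmi_check_system() expects.
 */
static const struct dmi_system_id as5512_dmi_table[] = {
        {
                .ident = "Accton AS5512 (board vendor)",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Accton"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"),
                },
        },
        {
                .ident = "Accton AS5512 (system vendor)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Accton"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"),
                },
        },
        { }
};

/* Illustrative platform check: nonzero when either table entry matches. */
static int example_platform_accton_as5512_54x(void)
{
        return dmi_check_system(as5512_dmi_table);
}

Carrying both entries in one table is presumably what lets a single check cover firmware that populates either DMI field.
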
Signed-off-by: Michael Shych --- .../jessie/common/amd64-base-packages.yml | 5 +- .../configs/x86_64-all/x86_64-all.config | 2 +- .../patches/driver-igb-version-5.3.54.patch | 48795 ++++++++++++++++ ...river-support-intel-igb-bcm5461X-phy.patch | 258 +- .../base/any/kernels/3.16+deb8/patches/series | 3 +- .../platforms/mellanox/vendor-config/Makefile | 1 + .../platforms/mellanox/vendor-config/PKG.yml | 1 + .../src/python/mellanox/__init__.py | 7 + .../x86-64/x86-64-mlnx-msn2100/Makefile | 1 + .../x86-64/x86-64-mlnx-msn2100/onlp/Makefile | 1 + .../x86-64/x86-64-mlnx-msn2100/onlp/PKG.yml | 1 + .../x86-64-mlnx-msn2100/onlp/builds/Makefile | 2 + .../onlp/builds/lib/Makefile | 45 + .../lib/libonlp-x86-64-mlnx-msn2100-r0.mk | 10 + .../builds/lib/libonlp-x86-64-mlnx-msn2100.mk | 10 + .../onlp/builds/lib/x86_64_mlnx_msn2100.mk | 10 + .../onlp/builds/onlpdump/Makefile | 46 + .../onlp/builds/onlpdump/onlpdump.mk | 10 + .../onlp/builds/src/.module | 1 + .../onlp/builds/src/Makefile | 9 + .../onlp/builds/src/README | 6 + .../onlp/builds/src/module/auto/make.mk | 9 + .../src/module/auto/x86_64_mlnx_msn2100.yml | 50 + .../x86_64_mlnx_msn2100/x86_64_mlnx_msn2100.x | 14 + .../x86_64_mlnx_msn2100_config.h | 137 + .../x86_64_mlnx_msn2100_dox.h | 26 + .../x86_64_mlnx_msn2100_porting.h | 107 + .../onlp/builds/src/module/make.mk | 10 + .../onlp/builds/src/module/src/Makefile | 9 + .../onlp/builds/src/module/src/fani.c | 356 + .../onlp/builds/src/module/src/ledi.c | 284 + .../onlp/builds/src/module/src/make.mk | 9 + .../onlp/builds/src/module/src/platform_lib.c | 35 + .../onlp/builds/src/module/src/platform_lib.h | 52 + .../onlp/builds/src/module/src/psui.c | 176 + .../onlp/builds/src/module/src/sfpi.c | 197 + .../onlp/builds/src/module/src/sysi.c | 258 + .../onlp/builds/src/module/src/thermali.c | 169 + .../module/src/x86_64_mlnx_msn2100_config.c | 81 + .../module/src/x86_64_mlnx_msn2100_enums.c | 10 + .../src/module/src/x86_64_mlnx_msn2100_int.h | 12 + .../src/module/src/x86_64_mlnx_msn2100_log.c | 18 + .../src/module/src/x86_64_mlnx_msn2100_log.h | 12 + .../module/src/x86_64_mlnx_msn2100_module.c | 24 + .../src/module/src/x86_64_mlnx_msn2100_ucli.c | 50 + .../onlp/builds/src/x86_64_mlnx_msn2100.mk | 13 + .../platform-config/Makefile | 1 + .../platform-config/r0/Makefile | 1 + .../platform-config/r0/PKG.yml | 1 + .../r0/src/lib/x86-64-mlnx-msn2100-r0.yml | 35 + .../python/x86_64_mlnx_msn2100_r0/__init__.py | 17 + .../x86-64/x86-64-mlnx-msn2410/Makefile | 1 + .../x86-64/x86-64-mlnx-msn2410/onlp/Makefile | 1 + .../x86-64/x86-64-mlnx-msn2410/onlp/PKG.yml | 1 + .../x86-64-mlnx-msn2410/onlp/builds/Makefile | 2 + .../onlp/builds/lib/Makefile | 45 + .../lib/libonlp-x86-64-mlnx-msn2410-r0.mk | 10 + .../builds/lib/libonlp-x86-64-mlnx-msn2410.mk | 10 + .../onlp/builds/lib/x86_64_mlnx_msn2410.mk | 10 + .../onlp/builds/onlpdump/Makefile | 46 + .../onlp/builds/onlpdump/onlpdump.mk | 10 + .../onlp/builds/src/.module | 1 + .../onlp/builds/src/Makefile | 9 + .../onlp/builds/src/README | 6 + .../onlp/builds/src/module/auto/make.mk | 9 + .../src/module/auto/x86_64_mlnx_msn2410.yml | 50 + .../x86_64_mlnx_msn2410/x86_64_mlnx_msn2410.x | 14 + .../x86_64_mlnx_msn2410_config.h | 137 + .../x86_64_mlnx_msn2410_dox.h | 26 + .../x86_64_mlnx_msn2410_porting.h | 107 + .../onlp/builds/src/module/make.mk | 10 + .../onlp/builds/src/module/src/Makefile | 9 + .../onlp/builds/src/module/src/fani.c | 544 + .../onlp/builds/src/module/src/ledi.c | 301 + .../onlp/builds/src/module/src/make.mk | 9 + .../onlp/builds/src/module/src/platform_lib.c | 
80 + .../onlp/builds/src/module/src/platform_lib.h | 56 + .../onlp/builds/src/module/src/psui.c | 202 + .../onlp/builds/src/module/src/sfpi.c | 200 + .../onlp/builds/src/module/src/sysi.c | 266 + .../onlp/builds/src/module/src/thermali.c | 179 + .../module/src/x86_64_mlnx_msn2410_config.c | 81 + .../module/src/x86_64_mlnx_msn2410_enums.c | 10 + .../src/module/src/x86_64_mlnx_msn2410_int.h | 12 + .../src/module/src/x86_64_mlnx_msn2410_log.c | 18 + .../src/module/src/x86_64_mlnx_msn2410_log.h | 12 + .../module/src/x86_64_mlnx_msn2410_module.c | 24 + .../src/module/src/x86_64_mlnx_msn2410_ucli.c | 50 + .../onlp/builds/src/x86_64_mlnx_msn2410.mk | 13 + .../platform-config/Makefile | 1 + .../platform-config/r0/Makefile | 1 + .../platform-config/r0/PKG.yml | 1 + .../r0/src/lib/x86-64-mlnx-msn2410-r0.yml | 35 + .../python/x86_64_mlnx_msn2410_r0/__init__.py | 17 + .../x86-64/x86-64-mlnx-msn2700/Makefile | 1 + .../x86-64/x86-64-mlnx-msn2700/onlp/Makefile | 1 + .../x86-64/x86-64-mlnx-msn2700/onlp/PKG.yml | 1 + .../x86-64-mlnx-msn2700/onlp/builds/Makefile | 2 + .../onlp/builds/lib/Makefile | 45 + .../lib/libonlp-x86-64-mlnx-msn2700-r0.mk | 10 + .../builds/lib/libonlp-x86-64-mlnx-msn2700.mk | 10 + .../onlp/builds/lib/x86_64_mlnx_msn2700.mk | 10 + .../onlp/builds/onlpdump/Makefile | 46 + .../onlp/builds/onlpdump/onlpdump.mk | 10 + .../onlp/builds/src/.module | 1 + .../onlp/builds/src/Makefile | 9 + .../onlp/builds/src/README | 6 + .../onlp/builds/src/module/auto/make.mk | 9 + .../src/module/auto/x86_64_mlnx_msn2700.yml | 50 + .../x86_64_mlnx_msn2700/x86_64_mlnx_msn2700.x | 14 + .../x86_64_mlnx_msn2700_config.h | 137 + .../x86_64_mlnx_msn2700_dox.h | 26 + .../x86_64_mlnx_msn2700_porting.h | 107 + .../onlp/builds/src/module/make.mk | 10 + .../onlp/builds/src/module/src/Makefile | 9 + .../onlp/builds/src/module/src/fani.c | 544 + .../onlp/builds/src/module/src/ledi.c | 301 + .../onlp/builds/src/module/src/make.mk | 9 + .../onlp/builds/src/module/src/platform_lib.c | 80 + .../onlp/builds/src/module/src/platform_lib.h | 58 + .../onlp/builds/src/module/src/psui.c | 202 + .../onlp/builds/src/module/src/sfpi.c | 200 + .../onlp/builds/src/module/src/sysi.c | 266 + .../onlp/builds/src/module/src/thermali.c | 179 + .../module/src/x86_64_mlnx_msn2700_config.c | 81 + .../module/src/x86_64_mlnx_msn2700_enums.c | 10 + .../src/module/src/x86_64_mlnx_msn2700_int.h | 12 + .../src/module/src/x86_64_mlnx_msn2700_log.c | 18 + .../src/module/src/x86_64_mlnx_msn2700_log.h | 12 + .../module/src/x86_64_mlnx_msn2700_module.c | 24 + .../src/module/src/x86_64_mlnx_msn2700_ucli.c | 50 + .../onlp/builds/src/x86_64_mlnx_msn2700.mk | 13 + .../platform-config/Makefile | 1 + .../platform-config/r0/Makefile | 1 + .../platform-config/r0/PKG.yml | 1 + .../r0/src/lib/x86-64-mlnx-msn2700-r0.yml | 35 + .../python/x86_64_mlnx_msn2700_r0/__init__.py | 17 + 137 files changed, 56497 insertions(+), 154 deletions(-) create mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch create mode 100644 packages/platforms/mellanox/vendor-config/Makefile create mode 100644 packages/platforms/mellanox/vendor-config/PKG.yml create mode 100644 packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/PKG.yml create mode 100644 
packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100-r0.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/x86_64_mlnx_msn2100.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/onlpdump.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/.module create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/README create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/x86_64_mlnx_msn2100.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100.x create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_config.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_dox.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_porting.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sfpi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_config.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_enums.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_int.h create mode 100644 
packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_module.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_ucli.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/x86_64_mlnx_msn2100.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/PKG.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/PKG.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410-r0.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/x86_64_mlnx_msn2410.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/onlpdump.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/.module create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/README create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/x86_64_mlnx_msn2410.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410.x create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_config.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_dox.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_porting.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/Makefile create mode 100644 
packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sfpi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_config.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_enums.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_int.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_module.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_ucli.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/x86_64_mlnx_msn2410.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/PKG.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/PKG.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700-r0.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/x86_64_mlnx_msn2700.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/Makefile create mode 100644 
packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/onlpdump.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/.module create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/README create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/x86_64_mlnx_msn2700.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700.x create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_config.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_dox.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_porting.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/make.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sfpi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_config.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_enums.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_int.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.h create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_module.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_ucli.c create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/x86_64_mlnx_msn2700.mk create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/Makefile create mode 100644 
packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/Makefile create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/PKG.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml create mode 100644 packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py diff --git a/builds/any/rootfs/jessie/common/amd64-base-packages.yml b/builds/any/rootfs/jessie/common/amd64-base-packages.yml index 402a4169..b6b63b94 100644 --- a/builds/any/rootfs/jessie/common/amd64-base-packages.yml +++ b/builds/any/rootfs/jessie/common/amd64-base-packages.yml @@ -8,8 +8,7 @@ - smartmontools - grub2 - onl-upgrade - - - +- hw-management +- sx-kernel diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config index 94506877..d0d25a42 100644 --- a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config @@ -1877,7 +1877,7 @@ CONFIG_I2C_ALGOPCA=y # CONFIG_I2C_AMD8111 is not set CONFIG_I2C_I801=y CONFIG_I2C_ISCH=y -CONFIG_I2C_ISMT=y +# CONFIG_I2C_ISMT is not set # CONFIG_I2C_PIIX4 is not set # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_SIS5595 is not set diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch new file mode 100644 index 00000000..a3134c43 --- /dev/null +++ b/packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch @@ -0,0 +1,48795 @@ +diff -Nu a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile +--- a/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 09:20:24.786171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 10:43:55.318238134 +0000 +@@ -32,5 +32,7 @@ + obj-$(CONFIG_IGB) += igb.o + + igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ +- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ +- e1000_i210.o igb_ptp.o igb_hwmon.o ++ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ ++ e1000_i210.o igb_ptp.o igb_hwmon.o \ ++ e1000_manage.o igb_param.o kcompat.o e1000_api.o \ ++ igb_vmdq.o igb_procfs.o igb_debugfs.o +diff -Nu a/drivers/net/ethernet/intel/igb/Module.supported b/drivers/net/ethernet/intel/igb/Module.supported +--- a/drivers/net/ethernet/intel/igb/Module.supported 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/Module.supported 2016-11-13 10:27:24.246224975 +0000 +@@ -0,0 +1 @@ ++igb.ko external +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c +--- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 14:32:08.575567168 +0000 +@@ -1,94 +1,134 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ +- +-/* e1000_82575 +- * e1000_82576 +- */ ++/******************************************************************************* + +-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. + +-#include +-#include +-#include ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ++ * 82575EB Gigabit Network Connection ++ * 82575EB Gigabit Backplane Connection ++ * 82575GB Gigabit Network Connection ++ * 82576 Gigabit Network Connection ++ * 82576 Quad Port Gigabit Mezzanine Adapter ++ * 82580 Gigabit Network Connection ++ * I350 Gigabit Network Connection ++ */ + +-#include "e1000_mac.h" +-#include "e1000_82575.h" ++#include "e1000_api.h" + #include "e1000_i210.h" + +-static s32 igb_get_invariants_82575(struct e1000_hw *); +-static s32 igb_acquire_phy_82575(struct e1000_hw *); +-static void igb_release_phy_82575(struct e1000_hw *); +-static s32 igb_acquire_nvm_82575(struct e1000_hw *); +-static void igb_release_nvm_82575(struct e1000_hw *); +-static s32 igb_check_for_link_82575(struct e1000_hw *); +-static s32 igb_get_cfg_done_82575(struct e1000_hw *); +-static s32 igb_init_hw_82575(struct e1000_hw *); +-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +-static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); +-static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); +-static s32 igb_reset_hw_82575(struct e1000_hw *); +-static s32 igb_reset_hw_82580(struct e1000_hw *); +-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +-static s32 igb_setup_copper_link_82575(struct e1000_hw *); +-static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +-static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, +- u16 *); +-static s32 igb_get_phy_id_82575(struct e1000_hw *); +-static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +-static bool igb_sgmii_active_82575(struct e1000_hw *); +-static s32 
igb_reset_init_script_82575(struct e1000_hw *); +-static s32 igb_read_mac_addr_82575(struct e1000_hw *); +-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); ++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); ++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); ++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); ++static void e1000_release_phy_82575(struct e1000_hw *hw); ++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); ++static void e1000_release_nvm_82575(struct e1000_hw *hw); ++static s32 e1000_check_for_link_82575(struct e1000_hw *hw); ++static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); ++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); ++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, ++ u16 *duplex); ++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); ++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, ++ u16 *data); ++static s32 e1000_reset_hw_82575(struct e1000_hw *hw); ++static s32 e1000_reset_hw_82580(struct e1000_hw *hw); ++static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, ++ u32 offset, u16 *data); ++static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, ++ u32 offset, u16 data); ++static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, ++ bool active); ++static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, ++ bool active); ++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, ++ bool active); ++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); ++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); ++static s32 e1000_get_media_type_82575(struct e1000_hw *hw); ++static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); ++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); ++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, ++ u32 offset, u16 data); ++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); ++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); ++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, ++ u16 *speed, u16 *duplex); ++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); ++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); ++static bool e1000_sgmii_active_82575(struct e1000_hw *hw); ++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); ++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); ++static void e1000_config_collision_dist_82575(struct e1000_hw *hw); ++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); ++static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); ++static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); ++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); ++static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); ++static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); ++static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); ++static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, ++ u16 offset); ++static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, ++ u16 offset); ++static s32 
e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); ++static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); ++static void e1000_clear_vfta_i350(struct e1000_hw *hw); ++ ++static void e1000_i2c_start(struct e1000_hw *hw); ++static void e1000_i2c_stop(struct e1000_hw *hw); ++static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); ++static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); ++static s32 e1000_get_i2c_ack(struct e1000_hw *hw); ++static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); ++static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); ++static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); ++static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); ++static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); ++static bool e1000_get_i2c_data(u32 *i2cctl); ++ + static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; ++#define E1000_82580_RXPBS_TABLE_SIZE \ ++ (sizeof(e1000_82580_rxpbs_table) / \ ++ sizeof(e1000_82580_rxpbs_table[0])) + + /** +- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO ++ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +-static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) ++static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) + { + u32 reg = 0; + bool ext_mdio = false; + ++ DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); ++ + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: +- reg = rd32(E1000_MDIC); ++ reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: +@@ -96,7 +136,7 @@ + case e1000_i354: + case e1000_i210: + case e1000_i211: +- reg = rd32(E1000_MDICNFG); ++ reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: +@@ -106,135 +146,98 @@ + } + + /** +- * igb_check_for_link_media_swap - Check which M88E1112 interface linked +- * @hw: pointer to the HW structure +- * +- * Poll the M88E1112 interfaces to see which interface achieved link. +- */ +-static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +-{ +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; +- u16 data; +- u8 port = 0; +- +- /* Check the copper medium. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); +- if (ret_val) +- return ret_val; +- +- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); +- if (ret_val) +- return ret_val; +- +- if (data & E1000_M88E1112_STATUS_LINK) +- port = E1000_MEDIA_PORT_COPPER; +- +- /* Check the other medium. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); +- if (ret_val) +- return ret_val; +- +- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); +- if (ret_val) +- return ret_val; +- +- /* reset page to 0 */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); +- if (ret_val) +- return ret_val; +- +- if (data & E1000_M88E1112_STATUS_LINK) +- port = E1000_MEDIA_PORT_OTHER; +- +- /* Determine if a swap needs to happen. 
*/ +- if (port && (hw->dev_spec._82575.media_port != port)) { +- hw->dev_spec._82575.media_port = port; +- hw->dev_spec._82575.media_changed = true; +- } else { +- ret_val = igb_check_for_link_82575(hw); +- } +- +- return 0; +-} +- +-/** +- * igb_init_phy_params_82575 - Init PHY func ptrs. ++ * e1000_init_phy_params_82575 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +-static s32 igb_init_phy_params_82575(struct e1000_hw *hw) ++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; + ++ DEBUGFUNC("e1000_init_phy_params_82575"); ++ ++ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; ++ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; ++ + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + ++ phy->ops.power_up = igb_e1000_power_up_phy_copper; ++ phy->ops.power_down = e1000_power_down_phy_copper_82575; ++ + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ phy->ops.acquire = e1000_acquire_phy_82575; ++ phy->ops.check_reset_block = e1000_check_reset_block_generic; ++ phy->ops.commit = e1000_phy_sw_reset_generic; ++ phy->ops.get_cfg_done = e1000_get_cfg_done_82575; ++ phy->ops.release = e1000_release_phy_82575; + +- if (igb_sgmii_active_82575(hw)) { +- phy->ops.reset = igb_phy_hw_reset_sgmii_82575; ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ++ if (e1000_sgmii_active_82575(hw)) { ++ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { +- phy->ops.reset = igb_phy_hw_reset; ++ phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + +- wr32(E1000_CTRL_EXT, ctrl_ext); +- igb_reset_mdicnfg_82580(hw); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ e1000_reset_mdicnfg_82580(hw); + +- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { +- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; +- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; ++ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { ++ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; ++ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: +- phy->ops.read_reg = igb_read_phy_reg_82580; +- phy->ops.write_reg = igb_write_phy_reg_82580; ++ phy->ops.read_reg = e1000_read_phy_reg_82580; ++ phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: +- phy->ops.read_reg = igb_read_phy_reg_gs40g; +- phy->ops.write_reg = igb_write_phy_reg_gs40g; ++ phy->ops.read_reg = e1000_read_phy_reg_gs40g; ++ phy->ops.write_reg = e1000_write_phy_reg_gs40g; + break; + default: +- phy->ops.read_reg = igb_read_phy_reg_igp; +- phy->ops.write_reg = igb_write_phy_reg_igp; ++ phy->ops.read_reg = e1000_read_phy_reg_igp; ++ phy->ops.write_reg = e1000_write_phy_reg_igp; + } + } + +- /* set lan id */ +- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> +- E1000_STATUS_FUNC_SHIFT; +- + /* Set phy->phy_addr and phy->id. 
*/ +- ret_val = igb_get_phy_id_82575(hw); +- if (ret_val) +- return ret_val; ++ ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; +- phy->ops.check_polarity = igb_check_polarity_m88; +- phy->ops.get_phy_info = igb_get_phy_info_m88; +- if (phy->id != M88E1111_I_PHY_ID) ++ phy->ops.check_polarity = igb_e1000_check_polarity_m88; ++ phy->ops.get_info = e1000_get_phy_info_m88; ++ if (phy->id == I347AT4_E_PHY_ID || ++ phy->id == M88E1112_E_PHY_ID || ++ phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = +- igb_get_cable_length_m88_gen2; ++ e1000_get_cable_length_m88_gen2; ++ else if (phy->id == M88E1543_E_PHY_ID || ++ phy->id == M88E1512_E_PHY_ID) ++ phy->ops.get_cable_length = ++ e1000_get_cable_length_m88_gen2; + else +- phy->ops.get_cable_length = igb_get_cable_length_m88; +- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; ++ phy->ops.get_cable_length = e1000_get_cable_length_m88; ++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. */ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; +@@ -256,35 +259,48 @@ + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = +- igb_check_for_link_media_swap; ++ e1000_check_for_link_media_swap; ++ } ++ if (phy->id == M88E1512_E_PHY_ID) { ++ ret_val = e1000_initialize_M88E1512_phy(hw); ++ if (ret_val) ++ goto out; ++ } ++ if (phy->id == M88E1543_E_PHY_ID) { ++ ret_val = e1000_initialize_M88E1543_phy(hw); ++ if (ret_val) ++ goto out; + } + break; + case IGP03E1000_E_PHY_ID: ++ case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; +- phy->ops.get_phy_info = igb_get_phy_info_igp; +- phy->ops.get_cable_length = igb_get_cable_length_igp_2; +- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; +- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; +- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; ++ phy->ops.check_polarity = igb_e1000_check_polarity_igp; ++ phy->ops.get_info = e1000_get_phy_info_igp; ++ phy->ops.get_cable_length = e1000_get_cable_length_igp_2; ++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; ++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; ++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; ++ phy->ops.check_polarity = igb_e1000_check_polarity_82577; + phy->ops.force_speed_duplex = +- igb_phy_force_speed_duplex_82580; +- phy->ops.get_cable_length = igb_get_cable_length_82580; +- phy->ops.get_phy_info = igb_get_phy_info_82580; +- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; +- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; ++ igb_e1000_phy_force_speed_duplex_82577; ++ phy->ops.get_cable_length = igb_e1000_get_cable_length_82577; ++ phy->ops.get_info = igb_e1000_get_phy_info_82577; ++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; ++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; +- phy->ops.check_polarity = igb_check_polarity_m88; +- phy->ops.get_phy_info = igb_get_phy_info_m88; +- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; +- phy->ops.set_d0_lplu_state = 
igb_set_d0_lplu_state_82580; +- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; +- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; ++ phy->ops.check_polarity = igb_e1000_check_polarity_m88; ++ phy->ops.get_info = e1000_get_phy_info_m88; ++ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; ++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; ++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; ++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; +@@ -296,19 +312,21 @@ + } + + /** +- * igb_init_nvm_params_82575 - Init NVM func ptrs. ++ * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +-static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) ++s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + ++ DEBUGFUNC("e1000_init_nvm_params_82575"); ++ + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); +- +- /* Added to a constant, "size" becomes the left-shift value ++ /* ++ * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; +@@ -320,433 +338,272 @@ + size = 15; + + nvm->word_size = 1 << size; +- nvm->opcode_bits = 8; +- nvm->delay_usec = 1; ++ if (hw->mac.type < e1000_i210) { ++ nvm->opcode_bits = 8; ++ nvm->delay_usec = 1; ++ ++ switch (nvm->override) { ++ case e1000_nvm_override_spi_large: ++ nvm->page_size = 32; ++ nvm->address_bits = 16; ++ break; ++ case e1000_nvm_override_spi_small: ++ nvm->page_size = 8; ++ nvm->address_bits = 8; ++ break; ++ default: ++ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; ++ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? ++ 16 : 8; ++ break; ++ } ++ if (nvm->word_size == (1 << 15)) ++ nvm->page_size = 128; + +- switch (nvm->override) { +- case e1000_nvm_override_spi_large: +- nvm->page_size = 32; +- nvm->address_bits = 16; +- break; +- case e1000_nvm_override_spi_small: +- nvm->page_size = 8; +- nvm->address_bits = 8; +- break; +- default: +- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; +- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+- 16 : 8; +- break; +- } +- if (nvm->word_size == (1 << 15)) +- nvm->page_size = 128; +- +- nvm->type = e1000_nvm_eeprom_spi; +- +- /* NVM Function Pointers */ +- nvm->ops.acquire = igb_acquire_nvm_82575; +- nvm->ops.release = igb_release_nvm_82575; +- nvm->ops.write = igb_write_nvm_spi; +- nvm->ops.validate = igb_validate_nvm_checksum; +- nvm->ops.update = igb_update_nvm_checksum; ++ nvm->type = e1000_nvm_eeprom_spi; ++ } else { ++ nvm->type = e1000_nvm_flash_hw; ++ } ++ ++ /* Function Pointers */ ++ nvm->ops.acquire = e1000_acquire_nvm_82575; ++ nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) +- nvm->ops.read = igb_read_nvm_eerd; ++ nvm->ops.read = e1000_read_nvm_eerd; + else +- nvm->ops.read = igb_read_nvm_spi; ++ nvm->ops.read = e1000_read_nvm_spi; ++ ++ nvm->ops.write = e1000_write_nvm_spi; ++ nvm->ops.validate = e1000_validate_nvm_checksum_generic; ++ nvm->ops.update = e1000_update_nvm_checksum_generic; ++ nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: +- nvm->ops.validate = igb_validate_nvm_checksum_82580; +- nvm->ops.update = igb_update_nvm_checksum_82580; ++ nvm->ops.validate = e1000_validate_nvm_checksum_82580; ++ nvm->ops.update = e1000_update_nvm_checksum_82580; + break; +- case e1000_i354: + case e1000_i350: +- nvm->ops.validate = igb_validate_nvm_checksum_i350; +- nvm->ops.update = igb_update_nvm_checksum_i350; ++ case e1000_i354: ++ nvm->ops.validate = e1000_validate_nvm_checksum_i350; ++ nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_init_mac_params_82575 - Init MAC func ptrs. ++ * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +-static s32 igb_init_mac_params_82575(struct e1000_hw *hw) ++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + ++ DEBUGFUNC("e1000_init_mac_params_82575"); ++ ++ /* Derives media type */ ++ e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; ++ /* Set uta register count */ ++ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 
0 : 128; + /* Set rar entry count */ +- switch (mac->type) { +- case e1000_82576: ++ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; ++ if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; +- break; +- case e1000_82580: ++ if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; +- break; +- case e1000_i350: +- case e1000_i354: ++ if (mac->type == e1000_i350 || mac->type == e1000_i354) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; +- break; +- default: +- mac->rar_entry_count = E1000_RAR_ENTRIES_82575; +- break; +- } +- /* reset */ +- if (mac->type >= e1000_82580) +- mac->ops.reset_hw = igb_reset_hw_82580; +- else +- mac->ops.reset_hw = igb_reset_hw_82575; + +- if (mac->type >= e1000_i210) { +- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; +- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; ++ /* Enable EEE default settings for EEE supported devices */ ++ if (mac->type >= e1000_i350) ++ dev_spec->eee_disable = false; + +- } else { +- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; +- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; +- } ++ /* Allow a single clear of the SW semaphore on I210 and newer */ ++ if (mac->type >= e1000_i210) ++ dev_spec->clear_semaphore_once = true; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; +- /* Set if manageability features are enabled. */ ++ /* FWSM register */ ++ mac->has_fwsm = true; ++ /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = +- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) +- ? true : false; +- /* enable EEE on i350 parts and later parts */ +- if (mac->type >= e1000_i350) +- dev_spec->eee_disable = false; ++ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); ++ ++ /* Function pointers */ ++ ++ /* bus type/speed/width */ ++ mac->ops.get_bus_info = igb_e1000_get_bus_info_pcie_generic; ++ /* reset */ ++ if (mac->type >= e1000_82580) ++ mac->ops.reset_hw = e1000_reset_hw_82580; + else +- dev_spec->eee_disable = true; +- /* Allow a single clear of the SW semaphore on I210 and newer */ +- if (mac->type >= e1000_i210) +- dev_spec->clear_semaphore_once = true; ++ mac->ops.reset_hw = e1000_reset_hw_82575; ++ /* hw initialization */ ++ if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) ++ mac->ops.init_hw = e1000_init_hw_i210; ++ else ++ mac->ops.init_hw = e1000_init_hw_82575; ++ /* link setup */ ++ mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) +- ? igb_setup_copper_link_82575 +- : igb_setup_serdes_link_82575; +- +- if (mac->type == e1000_82580) { +- switch (hw->device_id) { +- /* feature not supported on these id's */ +- case E1000_DEV_ID_DH89XXCC_SGMII: +- case E1000_DEV_ID_DH89XXCC_SERDES: +- case E1000_DEV_ID_DH89XXCC_BACKPLANE: +- case E1000_DEV_ID_DH89XXCC_SFP: +- break; +- default: +- hw->dev_spec._82575.mas_capable = true; +- break; +- } ++ ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; ++ /* physical interface shutdown */ ++ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; ++ /* physical interface power up */ ++ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; ++ /* check for link */ ++ mac->ops.check_for_link = e1000_check_for_link_82575; ++ /* read mac address */ ++ mac->ops.read_mac_addr = e1000_read_mac_addr_82575; ++ /* configure collision distance */ ++ mac->ops.config_collision_dist = e1000_config_collision_dist_82575; ++ /* multicast address update */ ++ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; ++ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { ++ /* writing VFTA */ ++ mac->ops.write_vfta = e1000_write_vfta_i350; ++ /* clearing VFTA */ ++ mac->ops.clear_vfta = e1000_clear_vfta_i350; ++ } else { ++ /* writing VFTA */ ++ mac->ops.write_vfta = igb_e1000_write_vfta_generic; ++ /* clearing VFTA */ ++ mac->ops.clear_vfta = igb_e1000_clear_vfta_generic; ++ } ++ if (hw->mac.type >= e1000_82580) ++ mac->ops.validate_mdi_setting = ++ e1000_validate_mdi_setting_crossover_generic; ++ /* ID LED init */ ++ mac->ops.id_led_init = e1000_id_led_init_generic; ++ /* blink LED */ ++ mac->ops.blink_led = e1000_blink_led_generic; ++ /* setup LED */ ++ mac->ops.setup_led = e1000_setup_led_generic; ++ /* cleanup LED */ ++ mac->ops.cleanup_led = e1000_cleanup_led_generic; ++ /* turn on/off LED */ ++ mac->ops.led_on = e1000_led_on_generic; ++ mac->ops.led_off = e1000_led_off_generic; ++ /* clear hardware counters */ ++ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; ++ /* link info */ ++ mac->ops.get_link_up_info = e1000_get_link_up_info_82575; ++ /* get thermal sensor data */ ++ mac->ops.get_thermal_sensor_data = ++ e1000_get_thermal_sensor_data_generic; ++ mac->ops.init_thermal_sensor_thresh = ++ e1000_init_thermal_sensor_thresh_generic; ++ /* acquire SW_FW sync */ ++ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; ++ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; ++ if (mac->type >= e1000_i210) { ++ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; ++ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } +- return 0; ++ ++ /* set lan id for port to determine which phy lock to use */ ++ hw->mac.ops.set_lan_id(hw); ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_set_sfp_media_type_82575 - derives SFP module media type. ++ * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * +- * The media type is chosen based on SFP module. +- * compatibility flags retrieved from SFP ID EEPROM. ++ * Called to initialize all function pointers and parameters. 
+ **/ +-static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) ++void e1000_init_function_pointers_82575(struct e1000_hw *hw) + { +- s32 ret_val = E1000_ERR_CONFIG; +- u32 ctrl_ext = 0; +- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; +- u8 tranceiver_type = 0; +- s32 timeout = 3; ++ DEBUGFUNC("e1000_init_function_pointers_82575"); + +- /* Turn I2C interface ON and power on sfp cage */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; +- wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); ++ hw->mac.ops.init_params = e1000_init_mac_params_82575; ++ hw->nvm.ops.init_params = e1000_init_nvm_params_82575; ++ hw->phy.ops.init_params = e1000_init_phy_params_82575; ++ hw->mbx.ops.init_params = e1000_init_mbx_params_pf; ++} + +- wrfl(); ++/** ++ * e1000_acquire_phy_82575 - Acquire rights to access PHY ++ * @hw: pointer to the HW structure ++ * ++ * Acquire access rights to the correct PHY. ++ **/ ++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) ++{ ++ u16 mask = E1000_SWFW_PHY0_SM; + +- /* Read SFP module data */ +- while (timeout) { +- ret_val = igb_read_sfp_data_byte(hw, +- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), +- &tranceiver_type); +- if (ret_val == 0) +- break; +- msleep(100); +- timeout--; +- } +- if (ret_val != 0) +- goto out; ++ DEBUGFUNC("e1000_acquire_phy_82575"); + +- ret_val = igb_read_sfp_data_byte(hw, +- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), +- (u8 *)eth_flags); +- if (ret_val != 0) +- goto out; ++ if (hw->bus.func == E1000_FUNC_1) ++ mask = E1000_SWFW_PHY1_SM; ++ else if (hw->bus.func == E1000_FUNC_2) ++ mask = E1000_SWFW_PHY2_SM; ++ else if (hw->bus.func == E1000_FUNC_3) ++ mask = E1000_SWFW_PHY3_SM; + +- /* Check if there is some SFP module plugged and powered */ +- if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || +- (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { +- dev_spec->module_plugged = true; +- if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { +- hw->phy.media_type = e1000_media_type_internal_serdes; +- } else if (eth_flags->e100_base_fx) { +- dev_spec->sgmii_active = true; +- hw->phy.media_type = e1000_media_type_internal_serdes; +- } else if (eth_flags->e1000_base_t) { +- dev_spec->sgmii_active = true; +- hw->phy.media_type = e1000_media_type_copper; +- } else { +- hw->phy.media_type = e1000_media_type_unknown; +- hw_dbg("PHY module has not been recognized\n"); +- goto out; +- } +- } else { +- hw->phy.media_type = e1000_media_type_unknown; +- } +- ret_val = 0; +-out: +- /* Restore I2C interface setting */ +- wr32(E1000_CTRL_EXT, ctrl_ext); +- return ret_val; ++ return hw->mac.ops.acquire_swfw_sync(hw, mask); + } + +-static s32 igb_get_invariants_82575(struct e1000_hw *hw) ++/** ++ * e1000_release_phy_82575 - Release rights to access PHY ++ * @hw: pointer to the HW structure ++ * ++ * A wrapper to release access rights to the correct PHY. 
++ **/ ++static void e1000_release_phy_82575(struct e1000_hw *hw) + { +- struct e1000_mac_info *mac = &hw->mac; +- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- s32 ret_val; +- u32 ctrl_ext = 0; +- u32 link_mode = 0; ++ u16 mask = E1000_SWFW_PHY0_SM; + +- switch (hw->device_id) { +- case E1000_DEV_ID_82575EB_COPPER: +- case E1000_DEV_ID_82575EB_FIBER_SERDES: +- case E1000_DEV_ID_82575GB_QUAD_COPPER: +- mac->type = e1000_82575; +- break; +- case E1000_DEV_ID_82576: +- case E1000_DEV_ID_82576_NS: +- case E1000_DEV_ID_82576_NS_SERDES: +- case E1000_DEV_ID_82576_FIBER: +- case E1000_DEV_ID_82576_SERDES: +- case E1000_DEV_ID_82576_QUAD_COPPER: +- case E1000_DEV_ID_82576_QUAD_COPPER_ET2: +- case E1000_DEV_ID_82576_SERDES_QUAD: +- mac->type = e1000_82576; +- break; +- case E1000_DEV_ID_82580_COPPER: +- case E1000_DEV_ID_82580_FIBER: +- case E1000_DEV_ID_82580_QUAD_FIBER: +- case E1000_DEV_ID_82580_SERDES: +- case E1000_DEV_ID_82580_SGMII: +- case E1000_DEV_ID_82580_COPPER_DUAL: +- case E1000_DEV_ID_DH89XXCC_SGMII: +- case E1000_DEV_ID_DH89XXCC_SERDES: +- case E1000_DEV_ID_DH89XXCC_BACKPLANE: +- case E1000_DEV_ID_DH89XXCC_SFP: +- mac->type = e1000_82580; +- break; +- case E1000_DEV_ID_I350_COPPER: +- case E1000_DEV_ID_I350_FIBER: +- case E1000_DEV_ID_I350_SERDES: +- case E1000_DEV_ID_I350_SGMII: +- mac->type = e1000_i350; +- break; +- case E1000_DEV_ID_I210_COPPER: +- case E1000_DEV_ID_I210_FIBER: +- case E1000_DEV_ID_I210_SERDES: +- case E1000_DEV_ID_I210_SGMII: +- case E1000_DEV_ID_I210_COPPER_FLASHLESS: +- case E1000_DEV_ID_I210_SERDES_FLASHLESS: +- mac->type = e1000_i210; +- break; +- case E1000_DEV_ID_I211_COPPER: +- mac->type = e1000_i211; +- break; +- case E1000_DEV_ID_I354_BACKPLANE_1GBPS: +- case E1000_DEV_ID_I354_SGMII: +- case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: +- mac->type = e1000_i354; +- break; +- default: +- return -E1000_ERR_MAC_INIT; +- break; +- } ++ DEBUGFUNC("e1000_release_phy_82575"); + +- /* Set media type */ +- /* The 82575 uses bits 22:23 for link mode. The mode can be changed +- * based on the EEPROM. We cannot rely upon device ID. There +- * is no distinguishable difference between fiber and internal +- * SerDes mode on the 82575. There can be an external PHY attached +- * on the SGMII interface. For this, we'll set sgmii_active to true. +- */ +- hw->phy.media_type = e1000_media_type_copper; +- dev_spec->sgmii_active = false; +- dev_spec->module_plugged = false; ++ if (hw->bus.func == E1000_FUNC_1) ++ mask = E1000_SWFW_PHY1_SM; ++ else if (hw->bus.func == E1000_FUNC_2) ++ mask = E1000_SWFW_PHY2_SM; ++ else if (hw->bus.func == E1000_FUNC_3) ++ mask = E1000_SWFW_PHY3_SM; + +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ hw->mac.ops.release_swfw_sync(hw, mask); ++} + +- link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; +- switch (link_mode) { +- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: +- hw->phy.media_type = e1000_media_type_internal_serdes; +- break; +- case E1000_CTRL_EXT_LINK_MODE_SGMII: +- /* Get phy control interface type set (MDIO vs. I2C)*/ +- if (igb_sgmii_uses_mdio_82575(hw)) { +- hw->phy.media_type = e1000_media_type_copper; +- dev_spec->sgmii_active = true; +- break; +- } +- /* fall through for I2C based SGMII */ +- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: +- /* read media type from SFP EEPROM */ +- ret_val = igb_set_sfp_media_type_82575(hw); +- if ((ret_val != 0) || +- (hw->phy.media_type == e1000_media_type_unknown)) { +- /* If media type was not identified then return media +- * type defined by the CTRL_EXT settings. 
+- */ +- hw->phy.media_type = e1000_media_type_internal_serdes; ++/** ++ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Reads the PHY register at offset using the serial gigabit media independent ++ * interface and stores the retrieved information in data. ++ **/ ++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, ++ u16 *data) ++{ ++ s32 ret_val = -E1000_ERR_PARAM; + +- if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { +- hw->phy.media_type = e1000_media_type_copper; +- dev_spec->sgmii_active = true; +- } ++ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + +- break; +- } ++ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { ++ DEBUGOUT1("PHY Address %u is out of range\n", offset); ++ goto out; ++ } + +- /* do not change link mode for 100BaseFX */ +- if (dev_spec->eth_flags.e100_base_fx) +- break; ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ goto out; + +- /* change current link mode setting */ +- ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; +- +- if (hw->phy.media_type == e1000_media_type_copper) +- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; +- else +- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; +- +- wr32(E1000_CTRL_EXT, ctrl_ext); +- +- break; +- default: +- break; +- } +- +- /* mac initialization and operations */ +- ret_val = igb_init_mac_params_82575(hw); +- if (ret_val) +- goto out; +- +- /* NVM initialization */ +- ret_val = igb_init_nvm_params_82575(hw); +- switch (hw->mac.type) { +- case e1000_i210: +- case e1000_i211: +- ret_val = igb_init_nvm_params_i210(hw); +- break; +- default: +- break; +- } +- +- if (ret_val) +- goto out; +- +- /* if part supports SR-IOV then initialize mailbox parameters */ +- switch (mac->type) { +- case e1000_82576: +- case e1000_i350: +- igb_init_mbx_params_pf(hw); +- break; +- default: +- break; +- } +- +- /* setup PHY parameters */ +- ret_val = igb_init_phy_params_82575(hw); +- +-out: +- return ret_val; +-} +- +-/** +- * igb_acquire_phy_82575 - Acquire rights to access PHY +- * @hw: pointer to the HW structure +- * +- * Acquire access rights to the correct PHY. This is a +- * function pointer entry point called by the api module. +- **/ +-static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +-{ +- u16 mask = E1000_SWFW_PHY0_SM; +- +- if (hw->bus.func == E1000_FUNC_1) +- mask = E1000_SWFW_PHY1_SM; +- else if (hw->bus.func == E1000_FUNC_2) +- mask = E1000_SWFW_PHY2_SM; +- else if (hw->bus.func == E1000_FUNC_3) +- mask = E1000_SWFW_PHY3_SM; +- +- return hw->mac.ops.acquire_swfw_sync(hw, mask); +-} +- +-/** +- * igb_release_phy_82575 - Release rights to access PHY +- * @hw: pointer to the HW structure +- * +- * A wrapper to release access rights to the correct PHY. This is a +- * function pointer entry point called by the api module. 
+- **/ +-static void igb_release_phy_82575(struct e1000_hw *hw) +-{ +- u16 mask = E1000_SWFW_PHY0_SM; +- +- if (hw->bus.func == E1000_FUNC_1) +- mask = E1000_SWFW_PHY1_SM; +- else if (hw->bus.func == E1000_FUNC_2) +- mask = E1000_SWFW_PHY2_SM; +- else if (hw->bus.func == E1000_FUNC_3) +- mask = E1000_SWFW_PHY3_SM; +- +- hw->mac.ops.release_swfw_sync(hw, mask); +-} +- +-/** +- * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii +- * @hw: pointer to the HW structure +- * @offset: register offset to be read +- * @data: pointer to the read data +- * +- * Reads the PHY register at offset using the serial gigabit media independent +- * interface and stores the retrieved information in data. +- **/ +-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, +- u16 *data) +-{ +- s32 ret_val = -E1000_ERR_PARAM; +- +- if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { +- hw_dbg("PHY Address %u is out of range\n", offset); +- goto out; +- } +- +- ret_val = hw->phy.ops.acquire(hw); +- if (ret_val) +- goto out; +- +- ret_val = igb_read_phy_reg_i2c(hw, offset, data); ++ ret_val = e1000_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -755,7 +612,7 @@ + } + + /** +- * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii ++ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset +@@ -763,14 +620,15 @@ + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, ++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) + { + s32 ret_val = -E1000_ERR_PARAM; + ++ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { +- hw_dbg("PHY Address %d is out of range\n", offset); ++ DEBUGOUT1("PHY Address %d is out of range\n", offset); + goto out; + } + +@@ -778,7 +636,7 @@ + if (ret_val) + goto out; + +- ret_val = igb_write_phy_reg_i2c(hw, offset, data); ++ ret_val = e1000_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -787,41 +645,44 @@ + } + + /** +- * igb_get_phy_id_82575 - Retrieve PHY addr and id ++ * e1000_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +-static s32 igb_get_phy_id_82575(struct e1000_hw *hw) ++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + +- /* Extra read required for some PHY's on i354 */ ++ DEBUGFUNC("e1000_get_phy_id_82575"); ++ ++ /* some i354 devices need an extra read for phy id */ + if (hw->mac.type == e1000_i354) +- igb_get_phy_id(hw); ++ e1000_get_phy_id(hw); + +- /* For SGMII PHYs, we try the list of possible addresses until ++ /* ++ * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
+ */ +- if (!(igb_sgmii_active_82575(hw))) { ++ if (!e1000_sgmii_active_82575(hw)) { + phy->addr = 1; +- ret_val = igb_get_phy_id(hw); ++ ret_val = e1000_get_phy_id(hw); + goto out; + } + +- if (igb_sgmii_uses_mdio_82575(hw)) { ++ if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: +- mdic = rd32(E1000_MDIC); ++ mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; +@@ -830,7 +691,7 @@ + case e1000_i354: + case e1000_i210: + case e1000_i211: +- mdic = rd32(E1000_MDICNFG); ++ mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; +@@ -839,31 +700,35 @@ + goto out; + break; + } +- ret_val = igb_get_phy_id(hw); ++ ret_val = e1000_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); +- wrfl(); +- msleep(300); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ++ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(300); + +- /* The address field in the I2CCMD register is 3 bits and 0 is invalid. ++ /* ++ * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { +- ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); +- if (ret_val == 0) { +- hw_dbg("Vendor ID 0x%08X read at address %u\n", +- phy_id, phy->addr); +- /* At the time of this writing, The M88 part is ++ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); ++ if (ret_val == E1000_SUCCESS) { ++ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", ++ phy_id, phy->addr); ++ /* ++ * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { +- hw_dbg("PHY address %u was unreadable\n", phy->addr); ++ DEBUGOUT1("PHY address %u was unreadable\n", ++ phy->addr); + } + } + +@@ -871,49 +736,60 @@ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; +- goto out; + } else { +- ret_val = igb_get_phy_id(hw); ++ ret_val = e1000_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ +- wr32(E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + out: + return ret_val; + } + + /** +- * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset ++ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) ++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) + { +- s32 ret_val; ++ s32 ret_val = E1000_SUCCESS; ++ struct e1000_phy_info *phy = &hw->phy; + +- /* This isn't a true "hard" reset, but is the only reset ++ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); ++ ++ /* ++ * This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + +- hw_dbg("Soft resetting SGMII attached PHY...\n"); ++ DEBUGOUT("Soft resetting SGMII attached PHY...\n"); ++ ++ if (!(hw->phy.ops.write_reg)) ++ goto out; + +- /* SFP documentation requires the following to configure the SPF module ++ /* ++ * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. 
+ */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = hw->phy.ops.commit(hw); ++ if (ret_val) ++ goto out; + ++ if (phy->id == M88E1512_E_PHY_ID) ++ ret_val = e1000_initialize_M88E1512_phy(hw); + out: + return ret_val; + } + + /** +- * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state ++ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * +@@ -925,12 +801,17 @@ + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) ++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; ++ s32 ret_val = E1000_SUCCESS; + u16 data; + ++ DEBUGFUNC("e1000_set_d0_lplu_state_82575"); ++ ++ if (!(hw->phy.ops.read_reg)) ++ goto out; ++ + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; +@@ -938,47 +819,52 @@ + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, +- data); ++ data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, +- &data); ++ &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, +- data); ++ data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, +- data); +- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used ++ data); ++ /* ++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, &data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, &data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ data); + if (ret_val) + goto out; + } +@@ -989,7 +875,7 @@ + } + + /** +- * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state ++ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * +@@ -1001,12 +887,14 @@ + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) ++static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- u16 data; ++ u32 data; + +- data = rd32(E1000_82580_PHY_POWER_MGMT); ++ DEBUGFUNC("e1000_set_d0_lplu_state_82580"); ++ ++ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; +@@ -1016,7 +904,8 @@ + } else { + data &= ~E1000_82580_PM_D0_LPLU; + +- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used ++ /* ++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. +@@ -1024,14 +913,15 @@ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) +- data &= ~E1000_82580_PM_SPD; } ++ data &= ~E1000_82580_PM_SPD; ++ } + +- wr32(E1000_82580_PHY_POWER_MGMT, data); +- return 0; ++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); ++ return E1000_SUCCESS; + } + + /** +- * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 ++ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * +@@ -1044,16 +934,19 @@ + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) ++s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- u16 data; ++ u32 data; + +- data = rd32(E1000_82580_PHY_POWER_MGMT); ++ DEBUGFUNC("e1000_set_d3_lplu_state_82580"); ++ ++ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; +- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used ++ /* ++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. +@@ -1070,12 +963,12 @@ + data &= ~E1000_82580_PM_SPD; + } + +- wr32(E1000_82580_PHY_POWER_MGMT, data); +- return 0; ++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); ++ return E1000_SUCCESS; + } + + /** +- * igb_acquire_nvm_82575 - Request for access to EEPROM ++ * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. +@@ -1083,148 +976,183 @@ + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ **/ +-static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) ++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) + { +- s32 ret_val; ++ s32 ret_val = E1000_SUCCESS; + +- ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); ++ DEBUGFUNC("e1000_acquire_nvm_82575"); ++ ++ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + +- ret_val = igb_acquire_nvm(hw); ++ /* ++ * Check if there is some access ++ * error this access may hook on ++ */ ++ if (hw->mac.type == e1000_i350) { ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); ++ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | ++ E1000_EECD_TIMEOUT)) { ++ /* Clear all access error flags */ ++ E1000_WRITE_REG(hw, E1000_EECD, eecd | ++ E1000_EECD_ERROR_CLR); ++ DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); ++ } ++ } ++ ++ if (hw->mac.type == e1000_82580) { ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); ++ if (eecd & E1000_EECD_BLOCKED) { ++ /* Clear access error flag */ ++ E1000_WRITE_REG(hw, E1000_EECD, eecd | ++ E1000_EECD_BLOCKED); ++ DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); ++ } ++ } + ++ ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) +- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); ++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + + out: + return ret_val; + } + + /** +- * igb_release_nvm_82575 - Release exclusive access to EEPROM ++ * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +-static void igb_release_nvm_82575(struct e1000_hw *hw) ++static void e1000_release_nvm_82575(struct e1000_hw *hw) + { +- igb_release_nvm(hw); +- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); ++ DEBUGFUNC("e1000_release_nvm_82575"); ++ ++ e1000_release_nvm_generic(hw); ++ ++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + } + + /** +- * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore ++ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) ++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; +- s32 ret_val = 0; +- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ ++ s32 ret_val = E1000_SUCCESS; ++ s32 i = 0, timeout = 200; ++ ++ DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + + while (i < timeout) { +- if (igb_get_hw_semaphore(hw)) { ++ if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + +- /* Firmware currently using resource (fwmask) ++ /* ++ * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ +- igb_put_hw_semaphore(hw); +- mdelay(5); ++ e1000_put_hw_semaphore_generic(hw); ++ msec_delay_irq(5); + i++; + } + + if (i == timeout) { +- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); ++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + +- igb_put_hw_semaphore(hw); ++ e1000_put_hw_semaphore_generic(hw); + + out: + return ret_val; + } + + /** +- * igb_release_swfw_sync_82575 - Release SW/FW semaphore ++ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +-static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) ++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + +- while (igb_get_hw_semaphore(hw) != 0) ++ DEBUGFUNC("e1000_release_swfw_sync_82575"); ++ ++ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + +- igb_put_hw_semaphore(hw); ++ e1000_put_hw_semaphore_generic(hw); + } + + /** +- * igb_get_cfg_done_82575 - Read config done bit ++ * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns +- * 0. If we were to return with error, EEPROM-less silicon ++ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. 
+ **/ +-static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) ++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) + { + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + +- if (hw->bus.func == 1) ++ DEBUGFUNC("e1000_get_cfg_done_82575"); ++ ++ if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; +- + while (timeout) { +- if (rd32(E1000_EEMNGCTL) & mask) ++ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + timeout--; + } + if (!timeout) +- hw_dbg("MNG configuration cycle has not completed.\n"); ++ DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ +- if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && ++ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) +- igb_phy_init_script_igp3(hw); ++ e1000_phy_init_script_igp3(hw); + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_link_up_info_82575 - Get link speed/duplex info ++ * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex +@@ -1233,87 +1161,156 @@ + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +-static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, ++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_get_link_up_info_82575"); ++ + if (hw->phy.media_type != e1000_media_type_copper) +- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, ++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else +- ret_val = igb_get_speed_and_duplex_copper(hw, speed, ++ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; + } + + /** +- * igb_check_for_link_82575 - Check for link ++ * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +-static s32 igb_check_for_link_82575(struct e1000_hw *hw) ++static s32 e1000_check_for_link_82575(struct e1000_hw *hw) + { + s32 ret_val; + u16 speed, duplex; + ++ DEBUGFUNC("e1000_check_for_link_82575"); ++ + if (hw->phy.media_type != e1000_media_type_copper) { +- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, +- &duplex); +- /* Use this flag to determine if link needs to be checked or +- * not. If we have link clear the flag so that we do not ++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, ++ &duplex); ++ /* ++ * Use this flag to determine if link needs to be checked or ++ * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + +- /* Configure Flow Control now that Auto-Neg has completed. ++ /* ++ * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ +- ret_val = igb_config_fc_after_link_up(hw); ++ ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) +- hw_dbg("Error configuring flow control\n"); ++ DEBUGOUT("Error configuring flow control\n"); + } else { +- ret_val = igb_check_for_copper_link(hw); ++ ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; + } + + /** +- * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown ++ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked ++ * @hw: pointer to the HW structure ++ * ++ * Poll the M88E1112 interfaces to see which interface achieved link. ++ */ ++static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data; ++ u8 port = 0; ++ ++ DEBUGFUNC("e1000_check_for_link_media_swap"); ++ ++ /* Check for copper. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); ++ if (ret_val) ++ return ret_val; ++ ++ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); ++ if (ret_val) ++ return ret_val; ++ ++ if (data & E1000_M88E1112_STATUS_LINK) ++ port = E1000_MEDIA_PORT_COPPER; ++ ++ /* Check for other. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); ++ if (ret_val) ++ return ret_val; ++ ++ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); ++ if (ret_val) ++ return ret_val; ++ ++ if (data & E1000_M88E1112_STATUS_LINK) ++ port = E1000_MEDIA_PORT_OTHER; ++ ++ /* Determine if a swap needs to happen. */ ++ if (port && (hw->dev_spec._82575.media_port != port)) { ++ hw->dev_spec._82575.media_port = port; ++ hw->dev_spec._82575.media_changed = true; ++ } ++ ++ if (port == E1000_MEDIA_PORT_COPPER) { ++ /* reset page to 0 */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); ++ if (ret_val) ++ return ret_val; ++ e1000_check_for_link_82575(hw); ++ } else { ++ e1000_check_for_link_82575(hw); ++ /* reset page to 0 */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +-void igb_power_up_serdes_link_82575(struct e1000_hw *hw) ++static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) + { + u32 reg; + ++ DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && +- !igb_sgmii_active_82575(hw)) ++ !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ +- reg = rd32(E1000_PCS_CFG0); ++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; +- wr32(E1000_PCS_CFG0, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ +- reg = rd32(E1000_CTRL_EXT); ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; +- wr32(E1000_CTRL_EXT, reg); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ +- wrfl(); +- usleep_range(1000, 2000); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(1); + } + + /** +- * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex ++ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex +@@ -1321,28 +1318,26 @@ + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers 
provided. + **/ +-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, +- u16 *duplex) ++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, ++ u16 *speed, u16 *duplex) + { + struct e1000_mac_info *mac = &hw->mac; +- u32 pcs, status; ++ u32 pcs; ++ u32 status; + +- /* Set up defaults for the return values of this function */ +- mac->serdes_has_link = false; +- *speed = 0; +- *duplex = 0; ++ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + +- /* Read the PCS Status register for link state. For non-copper mode, ++ /* ++ * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ +- pcs = rd32(E1000_PCS_LSTAT); ++ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + +- /* The link up bit determines when link is up on autoneg. The sync ok +- * gets set once both sides sync up and agree upon link. Stable link +- * can be determined by checking for both link up and link sync ok ++ /* ++ * The link up bit determines when link is up on autoneg. + */ +- if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { ++ if (pcs & E1000_PCS_LSTS_LINK_OK) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ +@@ -1359,192 +1354,202 @@ + else + *duplex = HALF_DUPLEX; + +- /* Check if it is an I354 2.5Gb backplane connection. */ ++ /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { +- status = rd32(E1000_STATUS); ++ status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; +- hw_dbg("2500 Mbs, "); +- hw_dbg("Full Duplex\n"); ++ DEBUGOUT("2500 Mbs, "); ++ DEBUGOUT("Full Duplex\n"); + } + } + ++ } else { ++ mac->serdes_has_link = false; ++ *speed = 0; ++ *duplex = 0; + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_shutdown_serdes_link_82575 - Remove link during power down ++ * e1000_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * +- * In the case of fiber serdes, shut down optics and PCS on driver unload ++ * In the case of serdes shut down sfp and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) ++void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) + { + u32 reg; + +- if (hw->phy.media_type != e1000_media_type_internal_serdes && +- igb_sgmii_active_82575(hw)) ++ DEBUGFUNC("e1000_shutdown_serdes_link_82575"); ++ ++ if ((hw->phy.media_type != e1000_media_type_internal_serdes) && ++ !e1000_sgmii_active_82575(hw)) + return; + +- if (!igb_enable_mng_pass_thru(hw)) { ++ if (!igb_e1000_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ +- reg = rd32(E1000_PCS_CFG0); ++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; +- wr32(E1000_PCS_CFG0, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* shutdown the laser */ +- reg = rd32(E1000_CTRL_EXT); ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; +- wr32(E1000_CTRL_EXT, reg); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ +- wrfl(); +- usleep_range(1000, 2000); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(1); + } ++ ++ return; + } + + /** +- * igb_reset_hw_82575 - Reset hardware ++ * e1000_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * +- * This resets the hardware into a known state. This is a +- * function pointer entry point called by the api module. ++ * This resets the hardware into a known state. + **/ +-static s32 igb_reset_hw_82575(struct e1000_hw *hw) ++static s32 e1000_reset_hw_82575(struct e1000_hw *hw) + { + u32 ctrl; + s32 ret_val; + +- /* Prevent the PCI-E bus from sticking if there is no TLP connection ++ DEBUGFUNC("e1000_reset_hw_82575"); ++ ++ /* ++ * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ +- ret_val = igb_disable_pcie_master(hw); ++ ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) +- hw_dbg("PCI-E Master disable polling has failed.\n"); ++ DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ +- ret_val = igb_set_pcie_completion_timeout(hw); ++ ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) +- hw_dbg("PCI-E Set completion timeout has failed.\n"); ++ DEBUGOUT("PCI-E Set completion timeout has failed.\n"); + +- hw_dbg("Masking off all interrupts\n"); +- wr32(E1000_IMC, 0xffffffff); ++ DEBUGOUT("Masking off all interrupts\n"); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + +- wr32(E1000_RCTL, 0); +- wr32(E1000_TCTL, E1000_TCTL_PSP); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_RCTL, 0); ++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); ++ E1000_WRITE_FLUSH(hw); + +- usleep_range(10000, 20000); ++ msec_delay(10); + +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + +- hw_dbg("Issuing a global reset to MAC\n"); +- wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); ++ DEBUGOUT("Issuing a global reset to MAC\n"); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + +- ret_val = igb_get_auto_rd_done(hw); ++ ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { +- /* When auto config read does not complete, do not ++ /* ++ * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
+ */ +- hw_dbg("Auto Read Done did not complete\n"); ++ DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ +- if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) +- igb_reset_init_script_82575(hw); ++ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) ++ e1000_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ +- wr32(E1000_IMC, 0xffffffff); +- rd32(E1000_ICR); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); ++ E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ +- ret_val = igb_check_alt_mac_addr(hw); ++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); + + return ret_val; + } + + /** +- * igb_init_hw_82575 - Initialize hardware ++ * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +-static s32 igb_init_hw_82575(struct e1000_hw *hw) ++s32 e1000_init_hw_82575(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + +- if ((hw->mac.type >= e1000_i210) && +- !(igb_get_flash_presence_i210(hw))) { +- ret_val = igb_pll_workaround_i210(hw); +- if (ret_val) +- return ret_val; +- } ++ DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ +- ret_val = igb_id_led_init(hw); ++ ret_val = mac->ops.id_led_init(hw); + if (ret_val) { +- hw_dbg("Error initializing identification LED\n"); ++ DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ +- hw_dbg("Initializing the IEEE VLAN\n"); +- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) +- igb_clear_vfta_i350(hw); +- else +- igb_clear_vfta(hw); ++ DEBUGOUT("Initializing the IEEE VLAN\n"); ++ mac->ops.clear_vfta(hw); + + /* Setup the receive address */ +- igb_init_rx_addrs(hw, rar_count); ++ e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ +- hw_dbg("Zeroing the MTA\n"); ++ DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) +- array_wr32(E1000_MTA, i, 0); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ +- hw_dbg("Zeroing the UTA\n"); ++ DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) +- array_wr32(E1000_UTA, i, 0); ++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ +- ret_val = igb_setup_link(hw); ++ ret_val = mac->ops.setup_link(hw); + +- /* Clear all of the statistics registers (clear on read). It is ++ /* Set the default MTU size */ ++ hw->dev_spec._82575.mtu = 1500; ++ ++ /* ++ * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ +- igb_clear_hw_cntrs_82575(hw); ++ e1000_clear_hw_cntrs_82575(hw); ++ + return ret_val; + } + + /** +- * igb_setup_copper_link_82575 - Configure copper link settings ++ * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. 
+ **/ +-static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) ++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) + { + u32 ctrl; +- s32 ret_val; ++ s32 ret_val; + u32 phpm_reg; + +- ctrl = rd32(E1000_CTRL); ++ DEBUGFUNC("e1000_setup_copper_link_82575"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { +@@ -1552,25 +1557,25 @@ + case e1000_i350: + case e1000_i210: + case e1000_i211: +- phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); ++ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; +- wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); ++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + +- ret_val = igb_setup_serdes_link_82575(hw); ++ ret_val = e1000_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + +- if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { ++ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ +- msleep(300); ++ msec_delay(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { +- hw_dbg("Error resetting the PHY.\n"); ++ DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } +@@ -1580,20 +1585,22 @@ + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: +- ret_val = igb_copper_link_setup_m88_gen2(hw); ++ ret_val = e1000_copper_link_setup_m88_gen2(hw); + break; + default: +- ret_val = igb_copper_link_setup_m88(hw); ++ ret_val = e1000_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: +- ret_val = igb_copper_link_setup_igp(hw); ++ ret_val = e1000_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: +- ret_val = igb_copper_link_setup_82580(hw); ++ ret_val = igb_e1000_copper_link_setup_82577(hw); + break; + default: + ret_val = -E1000_ERR_PHY; +@@ -1603,13 +1610,13 @@ + if (ret_val) + goto out; + +- ret_val = igb_setup_copper_link(hw); ++ ret_val = e1000_setup_copper_link_generic(hw); + out: + return ret_val; + } + + /** +- * igb_setup_serdes_link_82575 - Setup link for serdes ++ * e1000_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is +@@ -1617,45 +1624,40 @@ + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +-static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) ++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) + { + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 data; + ++ DEBUGFUNC("e1000_setup_serdes_link_82575"); ++ + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && +- !igb_sgmii_active_82575(hw)) ++ !e1000_sgmii_active_82575(hw)) + return ret_val; + +- +- /* On the 82575, SerDes loopback mode persists until it is ++ /* ++ * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ +- wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); ++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + +- /* power on the sfp cage if present and turn on I2C */ +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ /* power on the sfp cage if present */ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; +- ctrl_ext |= E1000_CTRL_I2C_ENA; +- wr32(E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +- ctrl_reg = rd32(E1000_CTRL); ++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + +- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { +- /* set both sw defined pins */ ++ /* set both sw defined pins on 82575/82576*/ ++ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + +- /* Set switch control to serdes energy detect */ +- reg = rd32(E1000_CONNSW); +- reg |= E1000_CONNSW_ENRGSRC; +- wr32(E1000_CONNSW, reg); +- } +- +- reg = rd32(E1000_PCS_LCTL); ++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; +@@ -1670,12 +1672,13 @@ + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; ++ /* fall through to default case */ + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { +- hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); ++ DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + +@@ -1683,27 +1686,29 @@ + pcs_autoneg = false; + } + +- /* non-SGMII modes only supports a speed of 1000/Full for the ++ /* ++ * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | +- E1000_CTRL_FD | E1000_CTRL_FRCDPX; ++ E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + +- wr32(E1000_CTRL, ctrl_reg); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + +- /* New SerDes mode allows for forcing speed or autonegotiating speed ++ /* ++ * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | +- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); ++ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ +@@ -1714,8 +1719,9 @@ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ +- anadv_reg = rd32(E1000_PCS_ANADV); ++ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); ++ + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: +@@ -1728,251 +1734,480 @@ + default: + break; + } +- wr32(E1000_PCS_ANADV, anadv_reg); + +- hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); ++ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); ++ ++ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ +- reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ ++ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + +- hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); ++ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + +- wr32(E1000_PCS_LCTL, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + +- if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) +- igb_force_mac_fc(hw); ++ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) ++ e1000_force_mac_fc_generic(hw); + + return ret_val; + } + + /** +- * igb_sgmii_active_82575 - Return sgmii state ++ * e1000_get_media_type_82575 - derives current media type. + * @hw: pointer to the HW structure + * +- * 82575 silicon has a serialized gigabit media independent interface (sgmii) +- * which can be enabled for use in the embedded applications. Simply +- * return the current state of the sgmii interface. ++ * The media type is chosen reflecting few settings. ++ * The following are taken into account: ++ * - link mode set in the current port Init Control Word #3 ++ * - current link mode settings in CSR register ++ * - MDIO vs. I2C PHY control interface chosen ++ * - SFP module media type + **/ +-static bool igb_sgmii_active_82575(struct e1000_hw *hw) ++static s32 e1000_get_media_type_82575(struct e1000_hw *hw) + { + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- return dev_spec->sgmii_active; +-} +- +-/** +- * igb_reset_init_script_82575 - Inits HW defaults after reset +- * @hw: pointer to the HW structure +- * +- * Inits recommended HW defaults after a reset when there is no EEPROM +- * detected. This is only for the 82575. 
+- **/ +-static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +-{ +- if (hw->mac.type == e1000_82575) { +- hw_dbg("Running reset init script for 82575\n"); +- /* SerDes configuration via SERDESCTRL */ +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); ++ s32 ret_val = E1000_SUCCESS; ++ u32 ctrl_ext = 0; ++ u32 link_mode = 0; + +- /* CCM configuration via CCMCTL register */ +- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); +- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); ++ /* Set internal phy as default */ ++ dev_spec->sgmii_active = false; ++ dev_spec->module_plugged = false; + +- /* PCIe lanes configuration */ +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); ++ /* Get CSR setting */ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + +- /* PCIe PLL Configuration */ +- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); +- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); +- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); +- } ++ /* extract link mode setting */ ++ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + +- return 0; +-} ++ switch (link_mode) { ++ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: ++ hw->phy.media_type = e1000_media_type_internal_serdes; ++ break; ++ case E1000_CTRL_EXT_LINK_MODE_GMII: ++ hw->phy.media_type = e1000_media_type_copper; ++ break; ++ case E1000_CTRL_EXT_LINK_MODE_SGMII: ++ /* Get phy control interface type set (MDIO vs. I2C)*/ ++ if (e1000_sgmii_uses_mdio_82575(hw)) { ++ hw->phy.media_type = e1000_media_type_copper; ++ dev_spec->sgmii_active = true; ++ break; ++ } ++ /* fall through for I2C based SGMII */ ++ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: ++ /* read media type from SFP EEPROM */ ++ ret_val = e1000_set_sfp_media_type_82575(hw); ++ if ((ret_val != E1000_SUCCESS) || ++ (hw->phy.media_type == e1000_media_type_unknown)) { ++ /* ++ * If media type was not identified then return media ++ * type defined by the CTRL_EXT settings. ++ */ ++ hw->phy.media_type = e1000_media_type_internal_serdes; + +-/** +- * igb_read_mac_addr_82575 - Read device MAC address +- * @hw: pointer to the HW structure +- **/ +-static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +-{ +- s32 ret_val = 0; ++ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { ++ hw->phy.media_type = e1000_media_type_copper; ++ dev_spec->sgmii_active = true; ++ } + +- /* If there's an alternate MAC address place it in RAR0 +- * so that it will override the Si installed default perm +- * address. +- */ +- ret_val = igb_check_alt_mac_addr(hw); +- if (ret_val) +- goto out; ++ break; ++ } + +- ret_val = igb_read_mac_addr(hw); ++ /* do not change link mode for 100BaseFX */ ++ if (dev_spec->eth_flags.e100_base_fx) ++ break; ++ ++ /* change current link mode setting */ ++ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; ++ ++ if (hw->phy.media_type == e1000_media_type_copper) ++ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; ++ else ++ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ++ ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ ++ break; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_set_sfp_media_type_82575 - derives SFP module media type. 
++ * @hw: pointer to the HW structure ++ * ++ * The media type is chosen based on SFP module. ++ * compatibility flags retrieved from SFP ID EEPROM. ++ **/ ++static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_ERR_CONFIG; ++ u32 ctrl_ext = 0; ++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; ++ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; ++ u8 tranceiver_type = 0; ++ s32 timeout = 3; ++ ++ /* Turn I2C interface ON and power on sfp cage */ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); ++ ++ E1000_WRITE_FLUSH(hw); ++ ++ /* Read SFP module data */ ++ while (timeout) { ++ ret_val = e1000_read_sfp_data_byte(hw, ++ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), ++ &tranceiver_type); ++ if (ret_val == E1000_SUCCESS) ++ break; ++ msec_delay(100); ++ timeout--; ++ } ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ ++ ret_val = e1000_read_sfp_data_byte(hw, ++ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), ++ (u8 *)eth_flags); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ ++ /* Check if there is some SFP module plugged and powered */ ++ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || ++ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { ++ dev_spec->module_plugged = true; ++ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { ++ hw->phy.media_type = e1000_media_type_internal_serdes; ++ } else if (eth_flags->e100_base_fx) { ++ dev_spec->sgmii_active = true; ++ hw->phy.media_type = e1000_media_type_internal_serdes; ++ } else if (eth_flags->e1000_base_t) { ++ dev_spec->sgmii_active = true; ++ hw->phy.media_type = e1000_media_type_copper; ++ } else { ++ hw->phy.media_type = e1000_media_type_unknown; ++ DEBUGOUT("PHY module has not been recognized\n"); ++ goto out; ++ } ++ } else { ++ hw->phy.media_type = e1000_media_type_unknown; ++ } ++ ret_val = E1000_SUCCESS; ++out: ++ /* Restore I2C interface setting */ ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ return ret_val; ++} ++ ++/** ++ * e1000_valid_led_default_82575 - Verify a valid default LED config ++ * @hw: pointer to the HW structure ++ * @data: pointer to the NVM (EEPROM) ++ * ++ * Read the EEPROM for the current default LED configuration. If the ++ * LED configuration is not valid, set to a valid LED configuration. ++ **/ ++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_valid_led_default_82575"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ goto out; ++ } ++ ++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { ++ switch (hw->phy.media_type) { ++ case e1000_media_type_internal_serdes: ++ *data = ID_LED_DEFAULT_82575_SERDES; ++ break; ++ case e1000_media_type_copper: ++ default: ++ *data = ID_LED_DEFAULT; ++ break; ++ } ++ } ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_sgmii_active_82575 - Return sgmii state ++ * @hw: pointer to the HW structure ++ * ++ * 82575 silicon has a serialized gigabit media independent interface (sgmii) ++ * which can be enabled for use in the embedded applications. Simply ++ * return the current state of the sgmii interface. 
++ **/ ++static bool e1000_sgmii_active_82575(struct e1000_hw *hw) ++{ ++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; ++ return dev_spec->sgmii_active; ++} ++ ++/** ++ * e1000_reset_init_script_82575 - Inits HW defaults after reset ++ * @hw: pointer to the HW structure ++ * ++ * Inits recommended HW defaults after a reset when there is no EEPROM ++ * detected. This is only for the 82575. ++ **/ ++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) ++{ ++ DEBUGFUNC("e1000_reset_init_script_82575"); ++ ++ if (hw->mac.type == e1000_82575) { ++ DEBUGOUT("Running reset init script for 82575\n"); ++ /* SerDes configuration via SERDESCTRL */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); ++ ++ /* CCM configuration via CCMCTL register */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); ++ ++ /* PCIe lanes configuration */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); ++ ++ /* PCIe PLL Configuration */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_mac_addr_82575 - Read device MAC address ++ * @hw: pointer to the HW structure ++ **/ ++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_read_mac_addr_82575"); ++ ++ /* ++ * If there's an alternate MAC address place it in RAR0 ++ * so that it will override the Si installed default perm ++ * address. ++ */ ++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = igb_e1000_read_mac_addr_generic(hw); + + out: + return ret_val; + } + + /** +- * igb_power_down_phy_copper_82575 - Remove link during PHY power down ++ * e1000_config_collision_dist_82575 - Configure collision distance ++ * @hw: pointer to the HW structure ++ * ++ * Configures the collision distance to the default value and is used ++ * during link setup. ++ **/ ++static void e1000_config_collision_dist_82575(struct e1000_hw *hw) ++{ ++ u32 tctl_ext; ++ ++ DEBUGFUNC("e1000_config_collision_dist_82575"); ++ ++ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); ++ ++ tctl_ext &= ~E1000_TCTL_EXT_COLD; ++ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; ++ ++ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); ++ E1000_WRITE_FLUSH(hw); ++} ++ ++/** ++ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. 
+ **/ +-void igb_power_down_phy_copper_82575(struct e1000_hw *hw) ++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) + { ++ struct e1000_phy_info *phy = &hw->phy; ++ ++ if (!(phy->ops.check_reset_block)) ++ return; ++ + /* If the management interface is not enabled, then power down */ +- if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) +- igb_power_down_phy_copper(hw); ++ if (!(igb_e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) ++ igb_e1000_power_down_phy_copper(hw); ++ ++ return; + } + + /** +- * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters ++ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +-static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) ++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) + { +- igb_clear_hw_cntrs_base(hw); ++ DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + +- rd32(E1000_PRC64); +- rd32(E1000_PRC127); +- rd32(E1000_PRC255); +- rd32(E1000_PRC511); +- rd32(E1000_PRC1023); +- rd32(E1000_PRC1522); +- rd32(E1000_PTC64); +- rd32(E1000_PTC127); +- rd32(E1000_PTC255); +- rd32(E1000_PTC511); +- rd32(E1000_PTC1023); +- rd32(E1000_PTC1522); +- +- rd32(E1000_ALGNERRC); +- rd32(E1000_RXERRC); +- rd32(E1000_TNCRS); +- rd32(E1000_CEXTERR); +- rd32(E1000_TSCTC); +- rd32(E1000_TSCTFC); +- +- rd32(E1000_MGTPRC); +- rd32(E1000_MGTPDC); +- rd32(E1000_MGTPTC); +- +- rd32(E1000_IAC); +- rd32(E1000_ICRXOC); +- +- rd32(E1000_ICRXPTC); +- rd32(E1000_ICRXATC); +- rd32(E1000_ICTXPTC); +- rd32(E1000_ICTXATC); +- rd32(E1000_ICTXQEC); +- rd32(E1000_ICTXQMTC); +- rd32(E1000_ICRXDMTC); +- +- rd32(E1000_CBTMPC); +- rd32(E1000_HTDPMC); +- rd32(E1000_CBRMPC); +- rd32(E1000_RPTHC); +- rd32(E1000_HGPTC); +- rd32(E1000_HTCBDPC); +- rd32(E1000_HGORCL); +- rd32(E1000_HGORCH); +- rd32(E1000_HGOTCL); +- rd32(E1000_HGOTCH); +- rd32(E1000_LENERRS); ++ e1000_clear_hw_cntrs_base_generic(hw); ++ ++ E1000_READ_REG(hw, E1000_PRC64); ++ E1000_READ_REG(hw, E1000_PRC127); ++ E1000_READ_REG(hw, E1000_PRC255); ++ E1000_READ_REG(hw, E1000_PRC511); ++ E1000_READ_REG(hw, E1000_PRC1023); ++ E1000_READ_REG(hw, E1000_PRC1522); ++ E1000_READ_REG(hw, E1000_PTC64); ++ E1000_READ_REG(hw, E1000_PTC127); ++ E1000_READ_REG(hw, E1000_PTC255); ++ E1000_READ_REG(hw, E1000_PTC511); ++ E1000_READ_REG(hw, E1000_PTC1023); ++ E1000_READ_REG(hw, E1000_PTC1522); ++ ++ E1000_READ_REG(hw, E1000_ALGNERRC); ++ E1000_READ_REG(hw, E1000_RXERRC); ++ E1000_READ_REG(hw, E1000_TNCRS); ++ E1000_READ_REG(hw, E1000_CEXTERR); ++ E1000_READ_REG(hw, E1000_TSCTC); ++ E1000_READ_REG(hw, E1000_TSCTFC); ++ ++ E1000_READ_REG(hw, E1000_MGTPRC); ++ E1000_READ_REG(hw, E1000_MGTPDC); ++ E1000_READ_REG(hw, E1000_MGTPTC); ++ ++ E1000_READ_REG(hw, E1000_IAC); ++ E1000_READ_REG(hw, E1000_ICRXOC); ++ ++ E1000_READ_REG(hw, E1000_ICRXPTC); ++ E1000_READ_REG(hw, E1000_ICRXATC); ++ E1000_READ_REG(hw, E1000_ICTXPTC); ++ E1000_READ_REG(hw, E1000_ICTXATC); ++ E1000_READ_REG(hw, E1000_ICTXQEC); ++ E1000_READ_REG(hw, E1000_ICTXQMTC); ++ E1000_READ_REG(hw, E1000_ICRXDMTC); ++ ++ E1000_READ_REG(hw, E1000_CBTMPC); ++ E1000_READ_REG(hw, E1000_HTDPMC); ++ E1000_READ_REG(hw, E1000_CBRMPC); ++ E1000_READ_REG(hw, E1000_RPTHC); ++ E1000_READ_REG(hw, E1000_HGPTC); ++ E1000_READ_REG(hw, E1000_HTCBDPC); ++ E1000_READ_REG(hw, E1000_HGORCL); ++ E1000_READ_REG(hw, E1000_HGORCH); ++ E1000_READ_REG(hw, E1000_HGOTCL); ++ E1000_READ_REG(hw, E1000_HGOTCH); ++ E1000_READ_REG(hw, 
E1000_LENERRS); + + /* This register should not be read in copper configurations */ +- if (hw->phy.media_type == e1000_media_type_internal_serdes || +- igb_sgmii_active_82575(hw)) +- rd32(E1000_SCVPC); ++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) || ++ e1000_sgmii_active_82575(hw)) ++ E1000_READ_REG(hw, E1000_SCVPC); + } + + /** +- * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable ++ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * +- * After rx enable if managability is enabled then there is likely some ++ * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +-void igb_rx_fifo_flush_82575(struct e1000_hw *hw) ++void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) + { + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + ++ DEBUGFUNC("e1000_rx_fifo_flush_82575"); ++ ++ /* disable IPv6 options as per hardware errata */ ++ rfctl = E1000_READ_REG(hw, E1000_RFCTL); ++ rfctl |= E1000_RFCTL_IPV6_EX_DIS; ++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); ++ + if (hw->mac.type != e1000_82575 || +- !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) ++ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + +- /* Disable all RX queues */ ++ /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { +- rxdctl[i] = rd32(E1000_RXDCTL(i)); +- wr32(E1000_RXDCTL(i), +- rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); ++ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(i), ++ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { +- usleep_range(1000, 2000); ++ msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) +- rx_enabled |= rd32(E1000_RXDCTL(i)); ++ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) +- hw_dbg("Queue disable timed out after 10ms\n"); ++ DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ +- rfctl = rd32(E1000_RFCTL); +- wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); ++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + +- rlpml = rd32(E1000_RLPML); +- wr32(E1000_RLPML, 0); ++ rlpml = E1000_READ_REG(hw, E1000_RLPML); ++ E1000_WRITE_REG(hw, E1000_RLPML, 0); + +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + +- wr32(E1000_RCTL, temp_rctl); +- wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); +- wrfl(); +- usleep_range(2000, 3000); ++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(2); + +- /* Enable RX queues that were previously enabled and restore our ++ /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) +- wr32(E1000_RXDCTL(i), rxdctl[i]); +- wr32(E1000_RCTL, rctl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ E1000_WRITE_FLUSH(hw); + +- wr32(E1000_RLPML, rlpml); +- wr32(E1000_RFCTL, rfctl); ++ E1000_WRITE_REG(hw, E1000_RLPML, rlpml); ++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ +- rd32(E1000_ROC); +- rd32(E1000_RNBC); +- rd32(E1000_MPC); ++ E1000_READ_REG(hw, E1000_ROC); ++ E1000_READ_REG(hw, E1000_RNBC); ++ E1000_READ_REG(hw, E1000_MPC); + } + + /** +- * igb_set_pcie_completion_timeout - set pci-e completion timeout ++ * e1000_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, +@@ -1981,17 +2216,18 @@ + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
+ **/ +-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) ++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) + { +- u32 gcr = rd32(E1000_GCR); +- s32 ret_val = 0; ++ u32 gcr = E1000_READ_REG(hw, E1000_GCR); ++ s32 ret_val = E1000_SUCCESS; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + +- /* if capabilities version is type 1 we can write the ++ /* ++ * if capababilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { +@@ -1999,36 +2235,37 @@ + goto out; + } + +- /* for version 2 capabilities we need to write the config space ++ /* ++ * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ +- ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, +- &pcie_devctl2); ++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, ++ &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + +- ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, +- &pcie_devctl2); ++ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, ++ &pcie_devctl2); + out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + +- wr32(E1000_GCR, gcr); ++ E1000_WRITE_REG(hw, E1000_GCR, gcr); + return ret_val; + } + + /** +- * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing ++ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) ++void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) + { + u32 reg_val, reg_offset; + +@@ -2044,7 +2281,7 @@ + return; + } + +- reg_val = rd32(reg_offset); ++ reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); +@@ -2056,66 +2293,67 @@ + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } +- wr32(reg_offset, reg_val); ++ E1000_WRITE_REG(hw, reg_offset, reg_val); + } + + /** +- * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback ++ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
+ **/ +-void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) ++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) + { + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: +- dtxswc = rd32(E1000_DTXSWC); ++ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; +- wr32(E1000_DTXSWC, dtxswc); ++ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; +- case e1000_i354: + case e1000_i350: +- dtxswc = rd32(E1000_TXSWC); ++ case e1000_i354: ++ dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; +- wr32(E1000_TXSWC, dtxswc); ++ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + ++ + } + + /** +- * igb_vmdq_set_replication_pf - enable or disable vmdq replication ++ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +-void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) ++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) + { +- u32 vt_ctl = rd32(E1000_VT_CTL); ++ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + +- wr32(E1000_VT_CTL, vt_ctl); ++ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); + } + + /** +- * igb_read_phy_reg_82580 - Read 82580 MDI control register ++ * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data +@@ -2123,15 +2361,17 @@ + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +-static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) ++static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_read_phy_reg_82580"); ++ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + +- ret_val = igb_read_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -2140,23 +2380,24 @@ + } + + /** +- * igb_write_phy_reg_82580 - Write 82580 MDI control register ++ * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
+ **/ +-static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) ++static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_write_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + +- ret_val = igb_write_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -2165,123 +2406,133 @@ + } + + /** +- * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits ++ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) ++static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + ++ DEBUGFUNC("e1000_reset_mdicnfg_82580"); ++ + if (hw->mac.type != e1000_82580) + goto out; +- if (!igb_sgmii_active_82575(hw)) ++ if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + goto out; + } + +- mdicnfg = rd32(E1000_MDICNFG); ++ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; +- wr32(E1000_MDICNFG, mdicnfg); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + out: + return ret_val; + } + + /** +- * igb_reset_hw_82580 - Reset hardware ++ * e1000_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +-static s32 igb_reset_hw_82580(struct e1000_hw *hw) ++static s32 e1000_reset_hw_82580(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + ++ DEBUGFUNC("e1000_reset_hw_82580"); ++ + hw->dev_spec._82575.global_device_reset = false; + +- /* due to hw errata, global device reset doesn't always +- * work on 82580 +- */ ++ /* 82580 does not reliably do global_device_reset due to hw errata */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + +- /* Prevent the PCI-E bus from sticking if there is no TLP connection ++ /* ++ * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
+ */ +- ret_val = igb_disable_pcie_master(hw); ++ ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) +- hw_dbg("PCI-E Master disable polling has failed.\n"); ++ DEBUGOUT("PCI-E Master disable polling has failed.\n"); + +- hw_dbg("Masking off all interrupts\n"); +- wr32(E1000_IMC, 0xffffffff); +- wr32(E1000_RCTL, 0); +- wr32(E1000_TCTL, E1000_TCTL_PSP); +- wrfl(); ++ DEBUGOUT("Masking off all interrupts\n"); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); ++ E1000_WRITE_REG(hw, E1000_RCTL, 0); ++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); ++ E1000_WRITE_FLUSH(hw); + +- usleep_range(10000, 11000); ++ msec_delay(10); + + /* Determine whether or not a global dev reset is requested */ +- if (global_device_reset && +- hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) ++ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, ++ swmbsw_mask)) + global_device_reset = false; + +- if (global_device_reset && +- !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) ++ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & ++ E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + +- wr32(E1000_CTRL, ctrl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +- /* Add delay to insure DEV_RST has time to complete */ +- if (global_device_reset) +- usleep_range(5000, 6000); ++ switch (hw->device_id) { ++ case E1000_DEV_ID_DH89XXCC_SGMII: ++ break; ++ default: ++ E1000_WRITE_FLUSH(hw); ++ break; ++ } ++ ++ /* Add delay to insure DEV_RST or RST has time to complete */ ++ msec_delay(5); + +- ret_val = igb_get_auto_rd_done(hw); ++ ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { +- /* When auto config read does not complete, do not ++ /* ++ * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ +- hw_dbg("Auto Read Done did not complete\n"); ++ DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ +- wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); ++ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ +- wr32(E1000_IMC, 0xffffffff); +- rd32(E1000_ICR); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); ++ E1000_READ_REG(hw, E1000_ICR); + +- ret_val = igb_reset_mdicnfg_82580(hw); ++ ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) +- hw_dbg("Could not reset MDICNFG based on EEPROM\n"); ++ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ +- ret_val = igb_check_alt_mac_addr(hw); ++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) +@@ -2291,7 +2542,7 @@ + } + + /** +- * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size ++ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. 
+@@ -2300,398 +2551,1222 @@ + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +-u16 igb_rxpbs_adjust_82580(u32 data) ++u16 e1000_rxpbs_adjust_82580(u32 data) + { + u16 ret_val = 0; + +- if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) ++ if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + +- return ret_val; ++ return ret_val; ++} ++ ++/** ++ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM ++ * checksum ++ * @hw: pointer to the HW structure ++ * @offset: offset in words of the checksum protected region ++ * ++ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM ++ * and then verifies that the sum of the EEPROM is equal to 0xBABA. ++ **/ ++s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u16 checksum = 0; ++ u16 i, nvm_data; ++ ++ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); ++ ++ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { ++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ goto out; ++ } ++ checksum += nvm_data; ++ } ++ ++ if (checksum != (u16) NVM_SUM) { ++ DEBUGOUT("NVM Checksum Invalid\n"); ++ ret_val = -E1000_ERR_NVM; ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_update_nvm_checksum_with_offset - Update EEPROM ++ * checksum ++ * @hw: pointer to the HW structure ++ * @offset: offset in words of the checksum protected region ++ * ++ * Updates the EEPROM checksum by reading/adding each word of the EEPROM ++ * up to the checksum. Then calculates the EEPROM checksum and writes the ++ * value to the EEPROM. ++ **/ ++s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) ++{ ++ s32 ret_val; ++ u16 checksum = 0; ++ u16 i, nvm_data; ++ ++ DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); ++ ++ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { ++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error while updating checksum.\n"); ++ goto out; ++ } ++ checksum += nvm_data; ++ } ++ checksum = (u16) NVM_SUM - checksum; ++ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, ++ &checksum); ++ if (ret_val) ++ DEBUGOUT("NVM Write Error while updating checksum.\n"); ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Calculates the EEPROM section checksum by reading/adding each word of ++ * the EEPROM and then verifies that the sum of the EEPROM is ++ * equal to 0xBABA. ++ **/ ++static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 eeprom_regions_count = 1; ++ u16 j, nvm_data; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_validate_nvm_checksum_82580"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ goto out; ++ } ++ ++ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { ++ /* if chekcsums compatibility bit is set validate checksums ++ * for all 4 ports. 
*/ ++ eeprom_regions_count = 4; ++ } ++ ++ for (j = 0; j < eeprom_regions_count; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_validate_nvm_checksum_with_offset(hw, ++ nvm_offset); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Updates the EEPROM section checksums for all 4 ports by reading/adding ++ * each word of the EEPROM up to the checksum. Then calculates the EEPROM ++ * checksum and writes the value to the EEPROM. ++ **/ ++static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 j, nvm_data; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_update_nvm_checksum_82580"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); ++ goto out; ++ } ++ ++ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { ++ /* set compatibility bit to validate checksums appropriately */ ++ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; ++ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, ++ &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); ++ goto out; ++ } ++ } ++ ++ for (j = 0; j < 4; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); ++ if (ret_val) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Calculates the EEPROM section checksum by reading/adding each word of ++ * the EEPROM and then verifies that the sum of the EEPROM is ++ * equal to 0xBABA. ++ **/ ++static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u16 j; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_validate_nvm_checksum_i350"); ++ ++ for (j = 0; j < 4; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_validate_nvm_checksum_with_offset(hw, ++ nvm_offset); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Updates the EEPROM section checksums for all 4 ports by reading/adding ++ * each word of the EEPROM up to the checksum. Then calculates the EEPROM ++ * checksum and writes the value to the EEPROM. 
++ **/ ++static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u16 j; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_update_nvm_checksum_i350"); ++ ++ for (j = 0; j < 4; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * __e1000_access_emi_reg - Read/write EMI register ++ * @hw: pointer to the HW structure ++ * @addr: EMI address to program ++ * @data: pointer to value to read/write from/to the EMI address ++ * @read: boolean flag to indicate read or write ++ **/ ++static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, ++ u16 *data, bool read) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("__e1000_access_emi_reg"); ++ ++ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); ++ if (ret_val) ++ return ret_val; ++ ++ if (read) ++ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); ++ else ++ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_read_emi_reg - Read Extended Management Interface register ++ * @hw: pointer to the HW structure ++ * @addr: EMI address to program ++ * @data: value to be read from the EMI address ++ **/ ++s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) ++{ ++ DEBUGFUNC("e1000_read_emi_reg"); ++ ++ return __e1000_access_emi_reg(hw, addr, data, true); ++} ++ ++/** ++ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY ++ * @hw: pointer to the HW structure ++ * ++ * Initialize Marvell 1512 to work correctly with Avoton. ++ **/ ++s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_initialize_M88E1512_phy"); ++ ++ /* Check if this is correct PHY. */ ++ if (phy->id != M88E1512_E_PHY_ID) ++ goto out; ++ ++ /* Switch to PHY page 0xFF. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0xFB. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0x12. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); ++ if (ret_val) ++ goto out; ++ ++ /* Change mode to SGMII-to-Copper */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); ++ if (ret_val) ++ goto out; ++ ++ /* Return the PHY to page 0. 
*/ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.commit(hw); ++ if (ret_val) { ++ DEBUGOUT("Error committing the PHY changes\n"); ++ return ret_val; ++ } ++ ++ msec_delay(1000); ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY ++ * @hw: pointer to the HW structure ++ * ++ * Initialize Marvell 1543 to work correctly with Avoton. ++ **/ ++s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_initialize_M88E1543_phy"); ++ ++ /* Check if this is correct PHY. */ ++ if (phy->id != M88E1543_E_PHY_ID) ++ goto out; ++ ++ /* Switch to PHY page 0xFF. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0xFB. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0x12. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); ++ if (ret_val) ++ goto out; ++ ++ /* Change mode to SGMII-to-Copper */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 1. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); ++ if (ret_val) ++ goto out; ++ ++ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); ++ if (ret_val) ++ goto out; ++ ++ /* Return the PHY to page 0. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.commit(hw); ++ if (ret_val) { ++ DEBUGOUT("Error committing the PHY changes\n"); ++ return ret_val; ++ } ++ ++ msec_delay(1000); ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_set_eee_i350 - Enable/disable EEE support ++ * @hw: pointer to the HW structure ++ * @adv1g: boolean flag enabling 1G EEE advertisement ++ * @adv100m: boolean flag enabling 100M EEE advertisement ++ * ++ * Enable/disable EEE based on setting in dev_spec structure. 
++ * ++ **/ ++s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) ++{ ++ u32 ipcnfg, eeer; ++ ++ DEBUGFUNC("e1000_set_eee_i350"); ++ ++ if ((hw->mac.type < e1000_i350) || ++ (hw->phy.media_type != e1000_media_type_copper)) ++ goto out; ++ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); ++ eeer = E1000_READ_REG(hw, E1000_EEER); ++ ++ /* enable or disable per user setting */ ++ if (!(hw->dev_spec._82575.eee_disable)) { ++ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); ++ ++ if (adv100M) ++ ipcnfg |= E1000_IPCNFG_EEE_100M_AN; ++ else ++ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; ++ ++ if (adv1G) ++ ipcnfg |= E1000_IPCNFG_EEE_1G_AN; ++ else ++ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; ++ ++ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | ++ E1000_EEER_LPI_FC); ++ ++ /* This bit should not be set in normal operation. */ ++ if (eee_su & E1000_EEE_SU_LPI_CLK_STP) ++ DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); ++ } else { ++ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); ++ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | ++ E1000_EEER_LPI_FC); ++ } ++ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); ++ E1000_WRITE_REG(hw, E1000_EEER, eeer); ++ E1000_READ_REG(hw, E1000_IPCNFG); ++ E1000_READ_REG(hw, E1000_EEER); ++out: ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_eee_i354 - Enable/disable EEE support ++ * @hw: pointer to the HW structure ++ * @adv1g: boolean flag enabling 1G EEE advertisement ++ * @adv100m: boolean flag enabling 100M EEE advertisement ++ * ++ * Enable/disable EEE legacy mode based on setting in dev_spec structure. ++ * ++ **/ ++s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ u16 phy_data; ++ ++ DEBUGFUNC("e1000_set_eee_i354"); ++ ++ if ((hw->phy.media_type != e1000_media_type_copper) || ++ ((phy->id != M88E1543_E_PHY_ID) && ++ (phy->id != M88E1512_E_PHY_ID))) ++ goto out; ++ ++ if (!hw->dev_spec._82575.eee_disable) { ++ /* Switch to PHY page 18. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, ++ phy_data); ++ if (ret_val) ++ goto out; ++ ++ /* Return the PHY to page 0. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); ++ if (ret_val) ++ goto out; ++ ++ /* Turn on EEE advertisement. */ ++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ if (adv100M) ++ phy_data |= E1000_EEE_ADV_100_SUPPORTED; ++ else ++ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; ++ ++ if (adv1G) ++ phy_data |= E1000_EEE_ADV_1000_SUPPORTED; ++ else ++ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; ++ ++ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ phy_data); ++ } else { ++ /* Turn off EEE advertisement. 
*/ ++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | ++ E1000_EEE_ADV_1000_SUPPORTED); ++ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ phy_data); ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_get_eee_status_i354 - Get EEE status ++ * @hw: pointer to the HW structure ++ * @status: EEE status ++ * ++ * Get EEE status by guessing based on whether Tx or Rx LPI indications have ++ * been received. ++ **/ ++s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ u16 phy_data; ++ ++ DEBUGFUNC("e1000_get_eee_status_i354"); ++ ++ /* Check if EEE is supported on this device. */ ++ if ((hw->phy.media_type != e1000_media_type_copper) || ++ ((phy->id != M88E1543_E_PHY_ID) && ++ (phy->id != M88E1512_E_PHY_ID))) ++ goto out; ++ ++ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, ++ E1000_PCS_STATUS_DEV_I354, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | ++ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; ++ ++out: ++ return ret_val; ++} ++ ++/* Due to a hw errata, if the host tries to configure the VFTA register ++ * while performing queries from the BMC or DMA, then the VFTA in some ++ * cases won't be written. ++ */ ++ ++/** ++ * e1000_clear_vfta_i350 - Clear VLAN filter table ++ * @hw: pointer to the HW structure ++ * ++ * Clears the register array which contains the VLAN filter table by ++ * setting all the values to 0. ++ **/ ++void e1000_clear_vfta_i350(struct e1000_hw *hw) ++{ ++ u32 offset; ++ int i; ++ ++ DEBUGFUNC("e1000_clear_vfta_350"); ++ ++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { ++ for (i = 0; i < 10; i++) ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); ++ ++ E1000_WRITE_FLUSH(hw); ++ } ++} ++ ++/** ++ * e1000_write_vfta_i350 - Write value to VLAN filter table ++ * @hw: pointer to the HW structure ++ * @offset: register offset in VLAN filter table ++ * @value: register value written to VLAN filter table ++ * ++ * Writes value at the given offset in the register array which stores ++ * the VLAN filter table. 
++ **/ ++void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) ++{ ++ int i; ++ ++ DEBUGFUNC("e1000_write_vfta_350"); ++ ++ for (i = 0; i < 10; i++) ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); ++ ++ E1000_WRITE_FLUSH(hw); ++} ++ ++/** ++ * e1000_set_i2c_bb - Enable I2C bit-bang ++ * @hw: pointer to the HW structure ++ * ++ * Enable I2C bit-bang interface ++ * ++ **/ ++s32 e1000_set_i2c_bb(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u32 ctrl_ext, i2cparams; ++ ++ DEBUGFUNC("e1000_set_i2c_bb"); ++ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ctrl_ext |= E1000_CTRL_I2C_ENA; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_FLUSH(hw); ++ ++ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ i2cparams |= E1000_I2CBB_EN; ++ i2cparams |= E1000_I2C_DATA_OE_N; ++ i2cparams |= E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); ++ E1000_WRITE_FLUSH(hw); ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @dev_addr: device address ++ * @data: value read ++ * ++ * Performs byte read operation over I2C interface at ++ * a specified device address. ++ **/ ++s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) ++{ ++ s32 status = E1000_SUCCESS; ++ u32 max_retry = 10; ++ u32 retry = 1; ++ u16 swfw_mask = 0; ++ ++ bool nack = true; ++ ++ DEBUGFUNC("e1000_read_i2c_byte_generic"); ++ ++ swfw_mask = E1000_SWFW_PHY0_SM; ++ ++ do { ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) ++ != E1000_SUCCESS) { ++ status = E1000_ERR_SWFW_SYNC; ++ goto read_byte_out; ++ } ++ ++ e1000_i2c_start(hw); ++ ++ /* Device Address and write indication */ ++ status = e1000_clock_out_i2c_byte(hw, dev_addr); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_byte(hw, byte_offset); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ e1000_i2c_start(hw); ++ ++ /* Device Address and read indication */ ++ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_in_i2c_byte(hw, data); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_bit(hw, nack); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ e1000_i2c_stop(hw); ++ break; ++ ++fail: ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ msec_delay(100); ++ e1000_i2c_bus_clear(hw); ++ retry++; ++ if (retry < max_retry) ++ DEBUGOUT("I2C byte read error - Retrying.\n"); ++ else ++ DEBUGOUT("I2C byte read error.\n"); ++ ++ } while (retry < max_retry); ++ ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ ++read_byte_out: ++ ++ return status; ++} ++ ++/** ++ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: device address ++ * @data: value to write ++ * ++ * Performs byte write operation over I2C interface at ++ * a specified device address. 
++ **/ ++s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) ++{ ++ s32 status = E1000_SUCCESS; ++ u32 max_retry = 1; ++ u32 retry = 0; ++ u16 swfw_mask = 0; ++ ++ DEBUGFUNC("e1000_write_i2c_byte_generic"); ++ ++ swfw_mask = E1000_SWFW_PHY0_SM; ++ ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { ++ status = E1000_ERR_SWFW_SYNC; ++ goto write_byte_out; ++ } ++ ++ do { ++ e1000_i2c_start(hw); ++ ++ status = e1000_clock_out_i2c_byte(hw, dev_addr); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_byte(hw, byte_offset); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_byte(hw, data); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ e1000_i2c_stop(hw); ++ break; ++ ++fail: ++ e1000_i2c_bus_clear(hw); ++ retry++; ++ if (retry < max_retry) ++ DEBUGOUT("I2C byte write error - Retrying.\n"); ++ else ++ DEBUGOUT("I2C byte write error.\n"); ++ } while (retry < max_retry); ++ ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ ++write_byte_out: ++ ++ return status; ++} ++ ++/** ++ * e1000_i2c_start - Sets I2C start condition ++ * @hw: pointer to hardware structure ++ * ++ * Sets I2C start condition (High -> Low on SDA while SCL is High) ++ **/ ++static void e1000_i2c_start(struct e1000_hw *hw) ++{ ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ ++ DEBUGFUNC("e1000_i2c_start"); ++ ++ /* Start condition must begin with data and clock high */ ++ e1000_set_i2c_data(hw, &i2cctl, 1); ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Setup time for start condition (4.7us) */ ++ usec_delay(E1000_I2C_T_SU_STA); ++ ++ e1000_set_i2c_data(hw, &i2cctl, 0); ++ ++ /* Hold time for start condition (4us) */ ++ usec_delay(E1000_I2C_T_HD_STA); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ usec_delay(E1000_I2C_T_LOW); ++ + } + + /** +- * igb_validate_nvm_checksum_with_offset - Validate EEPROM +- * checksum +- * @hw: pointer to the HW structure +- * @offset: offset in words of the checksum protected region ++ * e1000_i2c_stop - Sets I2C stop condition ++ * @hw: pointer to hardware structure + * +- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM +- * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
++ * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +-static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, +- u16 offset) ++static void e1000_i2c_stop(struct e1000_hw *hw) + { +- s32 ret_val = 0; +- u16 checksum = 0; +- u16 i, nvm_data; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { +- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; +- } +- checksum += nvm_data; +- } ++ DEBUGFUNC("e1000_i2c_stop"); + +- if (checksum != (u16) NVM_SUM) { +- hw_dbg("NVM Checksum Invalid\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; +- } ++ /* Stop condition must begin with data low and clock high */ ++ e1000_set_i2c_data(hw, &i2cctl, 0); ++ e1000_raise_i2c_clk(hw, &i2cctl); + +-out: +- return ret_val; ++ /* Setup time for stop condition (4us) */ ++ usec_delay(E1000_I2C_T_SU_STO); ++ ++ e1000_set_i2c_data(hw, &i2cctl, 1); ++ ++ /* bus free time between stop and start (4.7us)*/ ++ usec_delay(E1000_I2C_T_BUF); + } + + /** +- * igb_update_nvm_checksum_with_offset - Update EEPROM +- * checksum +- * @hw: pointer to the HW structure +- * @offset: offset in words of the checksum protected region ++ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C ++ * @hw: pointer to hardware structure ++ * @data: data byte to clock in + * +- * Updates the EEPROM checksum by reading/adding each word of the EEPROM +- * up to the checksum. Then calculates the EEPROM checksum and writes the +- * value to the EEPROM. ++ * Clocks in one byte data via I2C data/clock + **/ +-static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) ++static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) + { +- s32 ret_val; +- u16 checksum = 0; +- u16 i, nvm_data; ++ s32 i; ++ bool bit = 0; + +- for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { +- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error while updating checksum.\n"); +- goto out; +- } +- checksum += nvm_data; ++ DEBUGFUNC("e1000_clock_in_i2c_byte"); ++ ++ *data = 0; ++ for (i = 7; i >= 0; i--) { ++ e1000_clock_in_i2c_bit(hw, &bit); ++ *data |= bit << i; + } +- checksum = (u16) NVM_SUM - checksum; +- ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, +- &checksum); +- if (ret_val) +- hw_dbg("NVM Write Error while updating checksum.\n"); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C ++ * @hw: pointer to hardware structure ++ * @data: data byte clocked out + * +- * Calculates the EEPROM section checksum by reading/adding each word of +- * the EEPROM and then verifies that the sum of the EEPROM is +- * equal to 0xBABA. 
++ * Clocks out one byte data via I2C data/clock + **/ +-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) ++static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) + { +- s32 ret_val = 0; +- u16 eeprom_regions_count = 1; +- u16 j, nvm_data; +- u16 nvm_offset; ++ s32 status = E1000_SUCCESS; ++ s32 i; ++ u32 i2cctl; ++ bool bit = 0; + +- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; +- } ++ DEBUGFUNC("e1000_clock_out_i2c_byte"); + +- if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { +- /* if checksums compatibility bit is set validate checksums +- * for all 4 ports. +- */ +- eeprom_regions_count = 4; +- } ++ for (i = 7; i >= 0; i--) { ++ bit = (data >> i) & 0x1; ++ status = e1000_clock_out_i2c_bit(hw, bit); + +- for (j = 0; j < eeprom_regions_count; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_validate_nvm_checksum_with_offset(hw, +- nvm_offset); +- if (ret_val != 0) +- goto out; ++ if (status != E1000_SUCCESS) ++ break; + } + +-out: +- return ret_val; ++ /* Release SDA line (set high) */ ++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ ++ i2cctl |= E1000_I2C_DATA_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); ++ E1000_WRITE_FLUSH(hw); ++ ++ return status; + } + + /** +- * igb_update_nvm_checksum_82580 - Update EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_get_i2c_ack - Polls for I2C ACK ++ * @hw: pointer to hardware structure + * +- * Updates the EEPROM section checksums for all 4 ports by reading/adding +- * each word of the EEPROM up to the checksum. Then calculates the EEPROM +- * checksum and writes the value to the EEPROM. ++ * Clocks in/out one bit via I2C data/clock + **/ +-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) ++static s32 e1000_get_i2c_ack(struct e1000_hw *hw) + { +- s32 ret_val; +- u16 j, nvm_data; +- u16 nvm_offset; ++ s32 status = E1000_SUCCESS; ++ u32 i = 0; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ u32 timeout = 10; ++ bool ack = true; + +- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); +- goto out; +- } ++ DEBUGFUNC("e1000_get_i2c_ack"); + +- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { +- /* set compatibility bit to validate checksums appropriately */ +- nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; +- ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, +- &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); +- goto out; +- } ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ /* Wait until SCL returns high */ ++ for (i = 0; i < timeout; i++) { ++ usec_delay(1); ++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ if (i2cctl & E1000_I2C_CLK_IN) ++ break; + } ++ if (!(i2cctl & E1000_I2C_CLK_IN)) ++ return E1000_ERR_I2C; + +- for (j = 0; j < 4; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); +- if (ret_val) +- goto out; ++ ack = e1000_get_i2c_data(&i2cctl); ++ if (ack) { ++ DEBUGOUT("I2C ack was not received.\n"); ++ status = E1000_ERR_I2C; + } + +-out: +- return ret_val; ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ usec_delay(E1000_I2C_T_LOW); ++ ++ return status; + } + + /** +- * igb_validate_nvm_checksum_i350 - 
Validate EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock ++ * @hw: pointer to hardware structure ++ * @data: read data value + * +- * Calculates the EEPROM section checksum by reading/adding each word of +- * the EEPROM and then verifies that the sum of the EEPROM is +- * equal to 0xBABA. ++ * Clocks in one bit via I2C data/clock + **/ +-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) ++static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) + { +- s32 ret_val = 0; +- u16 j; +- u16 nvm_offset; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- for (j = 0; j < 4; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_validate_nvm_checksum_with_offset(hw, +- nvm_offset); +- if (ret_val != 0) +- goto out; +- } ++ DEBUGFUNC("e1000_clock_in_i2c_bit"); + +-out: +- return ret_val; ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ *data = e1000_get_i2c_data(&i2cctl); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ usec_delay(E1000_I2C_T_LOW); ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_update_nvm_checksum_i350 - Update EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock ++ * @hw: pointer to hardware structure ++ * @data: data value to write + * +- * Updates the EEPROM section checksums for all 4 ports by reading/adding +- * each word of the EEPROM up to the checksum. Then calculates the EEPROM +- * checksum and writes the value to the EEPROM. ++ * Clocks out one bit via I2C data/clock + **/ +-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) ++static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) + { +- s32 ret_val = 0; +- u16 j; +- u16 nvm_offset; ++ s32 status; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- for (j = 0; j < 4; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); +- if (ret_val != 0) +- goto out; ++ DEBUGFUNC("e1000_clock_out_i2c_bit"); ++ ++ status = e1000_set_i2c_data(hw, &i2cctl, data); ++ if (status == E1000_SUCCESS) { ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us. ++ * This also takes care of the data hold time. 
++ */ ++ usec_delay(E1000_I2C_T_LOW); ++ } else { ++ status = E1000_ERR_I2C; ++ DEBUGOUT1("I2C data was not set to %X\n", data); + } + +-out: +- return ret_val; ++ return status; + } +- + /** +- * __igb_access_emi_reg - Read/write EMI register +- * @hw: pointer to the HW structure +- * @addr: EMI address to program +- * @data: pointer to value to read/write from/to the EMI address +- * @read: boolean flag to indicate read or write ++ * e1000_raise_i2c_clk - Raises the I2C SCL clock ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Raises the I2C clock line '0'->'1' + **/ +-static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, +- u16 *data, bool read) ++static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) + { +- s32 ret_val = 0; +- +- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); +- if (ret_val) +- return ret_val; ++ DEBUGFUNC("e1000_raise_i2c_clk"); + +- if (read) +- ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); +- else +- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); ++ *i2cctl |= E1000_I2C_CLK_OUT; ++ *i2cctl &= ~E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); ++ E1000_WRITE_FLUSH(hw); + +- return ret_val; ++ /* SCL rise time (1000ns) */ ++ usec_delay(E1000_I2C_T_RISE); + } + + /** +- * igb_read_emi_reg - Read Extended Management Interface register +- * @hw: pointer to the HW structure +- * @addr: EMI address to program +- * @data: value to be read from the EMI address ++ * e1000_lower_i2c_clk - Lowers the I2C SCL clock ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Lowers the I2C clock line '1'->'0' + **/ +-s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) ++static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) + { +- return __igb_access_emi_reg(hw, addr, data, true); ++ ++ DEBUGFUNC("e1000_lower_i2c_clk"); ++ ++ *i2cctl &= ~E1000_I2C_CLK_OUT; ++ *i2cctl &= ~E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); ++ E1000_WRITE_FLUSH(hw); ++ ++ /* SCL fall time (300ns) */ ++ usec_delay(E1000_I2C_T_FALL); + } + + /** +- * igb_set_eee_i350 - Enable/disable EEE support +- * @hw: pointer to the HW structure +- * +- * Enable/disable EEE based on setting in dev_spec structure. ++ * e1000_set_i2c_data - Sets the I2C data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * @data: I2C data value (0 or 1) to set + * ++ * Sets the I2C data bit + **/ +-s32 igb_set_eee_i350(struct e1000_hw *hw) ++static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) + { +- u32 ipcnfg, eeer; ++ s32 status = E1000_SUCCESS; + +- if ((hw->mac.type < e1000_i350) || +- (hw->phy.media_type != e1000_media_type_copper)) +- goto out; +- ipcnfg = rd32(E1000_IPCNFG); +- eeer = rd32(E1000_EEER); ++ DEBUGFUNC("e1000_set_i2c_data"); + +- /* enable or disable per user setting */ +- if (!(hw->dev_spec._82575.eee_disable)) { +- u32 eee_su = rd32(E1000_EEE_SU); ++ if (data) ++ *i2cctl |= E1000_I2C_DATA_OUT; ++ else ++ *i2cctl &= ~E1000_I2C_DATA_OUT; + +- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); +- eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | +- E1000_EEER_LPI_FC); ++ *i2cctl &= ~E1000_I2C_DATA_OE_N; ++ *i2cctl |= E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); ++ E1000_WRITE_FLUSH(hw); + +- /* This bit should not be set in normal operation. 
*/ +- if (eee_su & E1000_EEE_SU_LPI_CLK_STP) +- hw_dbg("LPI Clock Stop Bit should not be set!\n"); ++ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ ++ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + +- } else { +- ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | +- E1000_IPCNFG_EEE_100M_AN); +- eeer &= ~(E1000_EEER_TX_LPI_EN | +- E1000_EEER_RX_LPI_EN | +- E1000_EEER_LPI_FC); ++ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ if (data != e1000_get_i2c_data(i2cctl)) { ++ status = E1000_ERR_I2C; ++ DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } +- wr32(E1000_IPCNFG, ipcnfg); +- wr32(E1000_EEER, eeer); +- rd32(E1000_IPCNFG); +- rd32(E1000_EEER); +-out: + +- return 0; ++ return status; + } + + /** +- * igb_set_eee_i354 - Enable/disable EEE support +- * @hw: pointer to the HW structure +- * +- * Enable/disable EEE legacy mode based on setting in dev_spec structure. ++ * e1000_get_i2c_data - Reads the I2C SDA data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register + * ++ * Returns the I2C data bit value + **/ +-s32 igb_set_eee_i354(struct e1000_hw *hw) ++static bool e1000_get_i2c_data(u32 *i2cctl) + { +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; +- u16 phy_data; +- +- if ((hw->phy.media_type != e1000_media_type_copper) || +- (phy->id != M88E1543_E_PHY_ID)) +- goto out; +- +- if (!hw->dev_spec._82575.eee_disable) { +- /* Switch to PHY page 18. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); +- if (ret_val) +- goto out; +- +- ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, +- &phy_data); +- if (ret_val) +- goto out; +- +- phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; +- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, +- phy_data); +- if (ret_val) +- goto out; +- +- /* Return the PHY to page 0. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); +- if (ret_val) +- goto out; +- +- /* Turn on EEE advertisement. */ +- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- &phy_data); +- if (ret_val) +- goto out; ++ bool data; + +- phy_data |= E1000_EEE_ADV_100_SUPPORTED | +- E1000_EEE_ADV_1000_SUPPORTED; +- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- phy_data); +- } else { +- /* Turn off EEE advertisement. */ +- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- &phy_data); +- if (ret_val) +- goto out; ++ DEBUGFUNC("e1000_get_i2c_data"); + +- phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | +- E1000_EEE_ADV_1000_SUPPORTED); +- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- phy_data); +- } ++ if (*i2cctl & E1000_I2C_DATA_IN) ++ data = 1; ++ else ++ data = 0; + +-out: +- return ret_val; ++ return data; + } + + /** +- * igb_get_eee_status_i354 - Get EEE status +- * @hw: pointer to the HW structure +- * @status: EEE status ++ * e1000_i2c_bus_clear - Clears the I2C bus ++ * @hw: pointer to hardware structure + * +- * Get EEE status by guessing based on whether Tx or Rx LPI indications have +- * been received. ++ * Clears the I2C bus by sending nine clock pulses. ++ * Used when data line is stuck low. 
+ **/ +-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) ++void e1000_i2c_bus_clear(struct e1000_hw *hw) + { +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; +- u16 phy_data; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ u32 i; + +- /* Check if EEE is supported on this device. */ +- if ((hw->phy.media_type != e1000_media_type_copper) || +- (phy->id != M88E1543_E_PHY_ID)) +- goto out; ++ DEBUGFUNC("e1000_i2c_bus_clear"); + +- ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, +- E1000_PCS_STATUS_DEV_I354, +- &phy_data); +- if (ret_val) +- goto out; ++ e1000_i2c_start(hw); + +- *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | +- E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; ++ e1000_set_i2c_data(hw, &i2cctl, 1); + +-out: +- return ret_val; ++ for (i = 0; i < 9; i++) { ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Min high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Min low period of clock is 4.7us*/ ++ usec_delay(E1000_I2C_T_LOW); ++ } ++ ++ e1000_i2c_start(hw); ++ ++ /* Put the i2c bus back to default state */ ++ e1000_i2c_stop(hw); + } + + static const u8 e1000_emc_temp_data[4] = { +@@ -2707,14 +3782,13 @@ + E1000_EMC_DIODE3_THERM_LIMIT + }; + +-#ifdef CONFIG_IGB_HWMON + /** +- * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data ++ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +-static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) ++s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw) + { + u16 ets_offset; + u16 ets_cfg; +@@ -2725,17 +3799,19 @@ + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + ++ DEBUGFUNC("e1000_get_thermal_sensor_data_generic"); ++ + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + +- data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); ++ data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ +- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); ++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) +- return 0; ++ return E1000_SUCCESS; + +- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); ++ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; +@@ -2745,7 +3821,7 @@ + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { +- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); ++ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> +@@ -2757,17 +3833,17 @@ + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds ++ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +-static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) ++s32 
e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) + { + u16 ets_offset; + u16 ets_cfg; +@@ -2780,6 +3856,8 @@ + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + ++ DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic"); ++ + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + +@@ -2787,16 +3865,16 @@ + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = +- (rd32(E1000_THHIGHTC) & 0xFF); ++ (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = +- (rd32(E1000_THLOWTC) & 0xFF); ++ (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ +- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); ++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) +- return 0; ++ return E1000_SUCCESS; + +- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); ++ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; +@@ -2806,7 +3884,7 @@ + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { +- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); ++ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> +@@ -2825,41 +3903,5 @@ + low_thresh_delta; + } + } +- return 0; ++ return E1000_SUCCESS; + } +- +-#endif +-static struct e1000_mac_operations e1000_mac_ops_82575 = { +- .init_hw = igb_init_hw_82575, +- .check_for_link = igb_check_for_link_82575, +- .rar_set = igb_rar_set, +- .read_mac_addr = igb_read_mac_addr_82575, +- .get_speed_and_duplex = igb_get_link_up_info_82575, +-#ifdef CONFIG_IGB_HWMON +- .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, +- .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +-#endif +-}; +- +-static struct e1000_phy_operations e1000_phy_ops_82575 = { +- .acquire = igb_acquire_phy_82575, +- .get_cfg_done = igb_get_cfg_done_82575, +- .release = igb_release_phy_82575, +- .write_i2c_byte = igb_write_i2c_byte, +- .read_i2c_byte = igb_read_i2c_byte, +-}; +- +-static struct e1000_nvm_operations e1000_nvm_ops_82575 = { +- .acquire = igb_acquire_nvm_82575, +- .read = igb_read_nvm_eerd, +- .release = igb_release_nvm_82575, +- .write = igb_write_nvm_spi, +-}; +- +-const struct e1000_info e1000_82575_info = { +- .get_invariants = igb_get_invariants_82575, +- .mac_ops = &e1000_mac_ops_82575, +- .phy_ops = &e1000_phy_ops_82575, +- .nvm_ops = &e1000_nvm_ops_82575, +-}; +- +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h +--- a/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,67 +1,149 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_82575_H_ + #define _E1000_82575_H_ + +-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +-void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +-void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +-void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +-s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, +- u8 *data); +-s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, +- u8 data); +- +-#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ +- (ID_LED_DEF1_DEF2 << 8) | \ +- (ID_LED_DEF1_DEF2 << 4) | \ +- (ID_LED_OFF1_ON2)) +- +-#define E1000_RAR_ENTRIES_82575 16 +-#define E1000_RAR_ENTRIES_82576 24 +-#define E1000_RAR_ENTRIES_82580 24 +-#define E1000_RAR_ENTRIES_I350 32 +- +-#define E1000_SW_SYNCH_MB 0x00000100 +-#define E1000_STAT_DEV_RST_SET 0x00100000 +-#define E1000_CTRL_DEV_RST 0x20000000 +- +-/* SRRCTL bit definitions */ +-#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +-#define E1000_SRRCTL_DROP_EN 0x80000000 +-#define E1000_SRRCTL_TIMESTAMP 0x40000000 ++#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ ++ (ID_LED_DEF1_DEF2 << 8) | \ ++ (ID_LED_DEF1_DEF2 << 4) | \ ++ (ID_LED_OFF1_ON2)) ++/* ++ * Receive Address Register Count ++ * Number of high/low register pairs in the RAR. The RAR (Receive Address ++ * Registers) holds the directed and multicast addresses that we monitor. ++ * These entries are also used for MAC-based filtering. ++ */ ++/* ++ * For 82576, there are an additional set of RARs that begin at an offset ++ * separate from the first set of RARs. 
++ */ ++#define E1000_RAR_ENTRIES_82575 16 ++#define E1000_RAR_ENTRIES_82576 24 ++#define E1000_RAR_ENTRIES_82580 24 ++#define E1000_RAR_ENTRIES_I350 32 ++#define E1000_SW_SYNCH_MB 0x00000100 ++#define E1000_STAT_DEV_RST_SET 0x00100000 ++#define E1000_CTRL_DEV_RST 0x20000000 ++ ++struct e1000_adv_data_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ union { ++ u32 data; ++ struct { ++ u32 datalen:16; /* Data buffer length */ ++ u32 rsvd:4; ++ u32 dtyp:4; /* Descriptor type */ ++ u32 dcmd:8; /* Descriptor command */ ++ } config; ++ } lower; ++ union { ++ u32 data; ++ struct { ++ u32 status:4; /* Descriptor status */ ++ u32 idx:4; ++ u32 popts:6; /* Packet Options */ ++ u32 paylen:18; /* Payload length */ ++ } options; ++ } upper; ++}; + ++#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ ++#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ ++#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ ++#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ ++#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ ++#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ ++#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ ++#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ ++#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ ++#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ ++#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ ++#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ ++#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ ++/* Extended Device Control */ ++#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ ++ ++struct e1000_adv_context_desc { ++ union { ++ u32 ip_config; ++ struct { ++ u32 iplen:9; ++ u32 maclen:7; ++ u32 vlan_tag:16; ++ } fields; ++ } ip_setup; ++ u32 seq_num; ++ union { ++ u64 l4_config; ++ struct { ++ u32 mkrloc:9; ++ u32 tucmd:11; ++ u32 dtyp:4; ++ u32 adv:8; ++ u32 rsvd:4; ++ u32 idx:4; ++ u32 l4len:8; ++ u32 mss:16; ++ } fields; ++ } l4_setup; ++}; + +-#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +-#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +-#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +-#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +-#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 ++/* SRRCTL bit definitions */ ++#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ ++#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 ++#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ ++#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 ++#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 ++#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 ++#define E1000_SRRCTL_TIMESTAMP 0x40000000 ++#define E1000_SRRCTL_DROP_EN 0x80000000 ++ ++#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F ++#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 ++ ++#define E1000_TX_HEAD_WB_ENABLE 0x1 ++#define E1000_TX_SEQNUM_WB_ENABLE 0x2 ++ ++#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 ++#define E1000_MRQC_ENABLE_VMDQ 0x00000003 ++#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 ++#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 ++#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 ++#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 
++#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 ++ ++#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 ++#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ ++ E1000_VMRCTL_MIRROR_PORT_SHIFT) ++#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) ++#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) ++#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + + #define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ +@@ -75,42 +157,114 @@ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + ++#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE ++#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE ++ ++#define EIMS_ENABLE_MASK ( \ ++ E1000_EIMS_RX_QUEUE | \ ++ E1000_EIMS_TX_QUEUE | \ ++ E1000_EIMS_TCP_TIMER | \ ++ E1000_EIMS_OTHER) ++ + /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +-#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +-#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ ++#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ ++#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ ++#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ ++#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ ++#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ ++#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ ++#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ ++#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ ++#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ ++#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + + /* Receive Descriptor - Advanced */ + union e1000_adv_rx_desc { + struct { +- __le64 pkt_addr; /* Packet buffer address */ +- __le64 hdr_addr; /* Header buffer address */ ++ __le64 pkt_addr; /* Packet buffer address */ ++ __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { +- struct { +- __le16 pkt_info; /* RSS type, Packet type */ +- __le16 hdr_info; /* Split Head, buf len */ ++ union { ++ __le32 data; ++ struct { ++ __le16 pkt_info; /*RSS type, Pkt type*/ ++ /* Split Header, header buffer len */ ++ __le16 hdr_info; ++ } hs_rss; + } lo_dword; + union { +- __le32 rss; /* RSS Hash */ ++ __le32 rss; /* RSS Hash */ + struct { +- __le16 ip_id; /* IP id */ +- __le16 csum; /* Packet Checksum */ ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { +- __le32 status_error; /* ext status/error */ +- __le16 length; /* Packet length */ +- __le16 vlan; /* VLAN tag */ ++ __le32 status_error; /* ext status/error */ ++ __le16 length; /* Packet length */ ++ __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ + }; + +-#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +-#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +-#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +-#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ ++#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F ++#define E1000_RXDADV_RSSTYPE_SHIFT 12 ++#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 ++#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 ++#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 ++#define E1000_RXDADV_SPH 0x8000 ++#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ ++#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ ++#define E1000_RXDADV_ERR_HBO 0x00800000 ++ ++/* RSS Hash results */ ++#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 ++#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 ++#define 
E1000_RXDADV_RSSTYPE_IPV4 0x00000002 ++#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 ++#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 ++#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 ++#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 ++#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 ++#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 ++#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 ++ ++/* RSS Packet Types as indicated in the receive descriptor */ ++#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 ++#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 ++#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 ++#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ ++#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ ++#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ ++#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ ++#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ ++#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ ++#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ ++#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ ++ ++#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ ++#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ ++#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ ++#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ ++#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ ++#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ ++ ++/* LinkSec results */ ++/* Security Processing bit Indication */ ++#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 ++#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 ++#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 ++#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 ++#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 ++ ++#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 ++#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 ++#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 ++#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 ++#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + + /* Transmit Descriptor - Advanced */ + union e1000_adv_tx_desc { +@@ -127,16 +281,26 @@ + }; + + /* Adv Transmit Descriptor Config Masks */ +-#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ ++#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ ++#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ ++#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ ++#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ ++#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ ++#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ ++#define 
E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ ++#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ ++#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ ++#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ ++#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ ++#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ ++#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ ++#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ ++#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ ++#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ ++/* 1st & Last TSO-full iSCSI PDU*/ ++#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 ++#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ ++#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + + /* Context descriptors */ + struct e1000_adv_tx_context_desc { +@@ -146,127 +310,174 @@ + __le32 mss_l4len_idx; + }; + +-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ ++#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ ++#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ ++#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ ++#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ ++#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ ++#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ ++#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ ++#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ + /* IPSec Encrypt Enable for ESP */ +-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ ++#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 ++/* Req requires Markers and CRC */ ++#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 ++#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ ++#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + /* Adv ctxt IPSec SA IDX mask */ ++#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF + /* Adv ctxt IPSec ESP len mask */ ++#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + + /* Additional Transmit Descriptor Control definitions */ +-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ ++#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ ++#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ + /* Tx Queue Arbitration Priority 0=low, 1=high */ ++#define E1000_TXDCTL_PRIORITY 0x08000000 + + /* Additional Receive Descriptor Control definitions */ +-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ ++#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ ++#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
wbk flushing */ + + /* Direct Cache Access (DCA) definitions */ +-#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ ++#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ ++#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +- +-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +- +-/* Additional DCA related definitions, note change in position of CPUID */ +-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +-#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +-#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ ++#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ ++#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ ++ ++#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ ++#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ ++#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ ++#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ ++#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ ++ ++#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ ++#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ ++#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ ++#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ ++#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ ++ ++#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ ++#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ ++#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ ++#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ ++ ++/* Additional interrupt register bit definitions */ ++#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ ++#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ ++#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + + /* ETQF register bit definitions */ +-#define E1000_ETQF_FILTER_ENABLE (1 << 26) +-#define E1000_ETQF_1588 (1 << 30) ++#define E1000_ETQF_FILTER_ENABLE (1 << 26) ++#define E1000_ETQF_IMM_INT (1 << 29) ++#define E1000_ETQF_1588 (1 << 30) ++#define E1000_ETQF_QUEUE_ENABLE (1 << 31) ++/* ++ * ETQF filter list: one static filter per filter consumer. This is ++ * to avoid filter collisions later. Add new filters ++ * here!! 
++ * ++ * Current filters: ++ * EAPOL 802.1x (0x888e): Filter 0 ++ */ ++#define E1000_ETQF_FILTER_EAPOL 0 + +-/* FTQF register bit definitions */ +-#define E1000_FTQF_VF_BP 0x00008000 +-#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +-#define E1000_FTQF_MASK 0xF0000000 +-#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 +- +-#define E1000_NVM_APME_82575 0x0400 +-#define MAX_NUM_VFS 8 +- +-#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +-#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +-#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ ++#define E1000_FTQF_VF_BP 0x00008000 ++#define E1000_FTQF_1588_TIME_STAMP 0x08000000 ++#define E1000_FTQF_MASK 0xF0000000 ++#define E1000_FTQF_MASK_PROTO_BP 0x10000000 ++#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 ++#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 ++#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 ++ ++#define E1000_NVM_APME_82575 0x0400 ++#define MAX_NUM_VFS 7 ++ ++#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ ++#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ ++#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ ++#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 ++#define E1000_DTXSWC_LLE_SHIFT 16 ++#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + + /* Easy defines for setting default pool, would normally be left a zero */ +-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +-#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) ++#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 ++#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + + /* Other useful VMD_CTL register defines */ +-#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +-#define E1000_VT_CTL_VM_REPL_EN (1 << 30) ++#define E1000_VT_CTL_IGNORE_MAC (1 << 28) ++#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) ++#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + + /* Per VM Offload register setup */ +-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +-#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +-#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +-#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +-#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +-#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +-#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +-#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +-#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +-#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ +- +-#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +-#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +-#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ +- +-#define E1000_VLVF_ARRAY_SIZE 32 +-#define E1000_VLVF_VLANID_MASK 0x00000FFF +-#define E1000_VLVF_POOLSEL_SHIFT 12 +-#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +-#define E1000_VLVF_LVLAN 0x00100000 +-#define E1000_VLVF_VLANID_ENABLE 0x80000000 +- +-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +-#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never 
insert VLAN tag */ +- +-#define E1000_IOVCTL 0x05BBC +-#define E1000_IOVCTL_REUSE_VFQ 0x00000001 +- +-#define E1000_RPLOLR_STRVLAN 0x40000000 +-#define E1000_RPLOLR_STRCRC 0x80000000 +- +-#define E1000_DTXCTL_8023LL 0x0004 +-#define E1000_DTXCTL_VLAN_ADDED 0x0008 +-#define E1000_DTXCTL_OOS_ENABLE 0x0010 +-#define E1000_DTXCTL_MDP_EN 0x0020 +-#define E1000_DTXCTL_SPOOF_INT 0x0040 ++#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ ++#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ ++#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ ++#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ ++#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ ++#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ ++#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ ++#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ ++#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ ++#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ ++ ++#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ ++#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ ++#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ ++#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ ++#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ ++ ++#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ ++#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ ++ ++#define E1000_VLVF_ARRAY_SIZE 32 ++#define E1000_VLVF_VLANID_MASK 0x00000FFF ++#define E1000_VLVF_POOLSEL_SHIFT 12 ++#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) ++#define E1000_VLVF_LVLAN 0x00100000 ++#define E1000_VLVF_VLANID_ENABLE 0x80000000 ++ ++#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ ++#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ ++ ++#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++ ++#define E1000_IOVCTL 0x05BBC ++#define E1000_IOVCTL_REUSE_VFQ 0x00000001 ++ ++#define E1000_RPLOLR_STRVLAN 0x40000000 ++#define E1000_RPLOLR_STRCRC 0x80000000 ++ ++#define E1000_TCTL_EXT_COLD 0x000FFC00 ++#define E1000_TCTL_EXT_COLD_SHIFT 10 ++ ++#define E1000_DTXCTL_8023LL 0x0004 ++#define E1000_DTXCTL_VLAN_ADDED 0x0008 ++#define E1000_DTXCTL_OOS_ENABLE 0x0010 ++#define E1000_DTXCTL_MDP_EN 0x0020 ++#define E1000_DTXCTL_SPOOF_INT 0x0040 + + #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +-#define ALL_QUEUES 0xFFFF +- +-/* RX packet buffer size defines */ +-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +-void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +-void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +-u16 igb_rxpbs_adjust_82580(u32 data); +-s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +-s32 igb_set_eee_i350(struct e1000_hw *); +-s32 igb_set_eee_i354(struct e1000_hw *); +-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); ++#define ALL_QUEUES 0xFFFF + ++/* Rx packet buffer size defines */ ++#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F ++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); ++void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); ++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); ++s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); ++s32 e1000_init_hw_82575(struct 
e1000_hw *hw); ++ ++void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); ++u16 e1000_rxpbs_adjust_82580(u32 data); ++s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); ++s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); ++s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); ++s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); ++s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); ++s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw); + #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 + #define E1000_EMC_INTERNAL_DATA 0x00 + #define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +@@ -276,4 +487,26 @@ + #define E1000_EMC_DIODE2_THERM_LIMIT 0x1A + #define E1000_EMC_DIODE3_DATA 0x2A + #define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +-#endif ++ ++s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw); ++s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw); ++ ++/* I2C SDA and SCL timing parameters for standard mode */ ++#define E1000_I2C_T_HD_STA 4 ++#define E1000_I2C_T_LOW 5 ++#define E1000_I2C_T_HIGH 4 ++#define E1000_I2C_T_SU_STA 5 ++#define E1000_I2C_T_HD_DATA 5 ++#define E1000_I2C_T_SU_DATA 1 ++#define E1000_I2C_T_RISE 1 ++#define E1000_I2C_T_FALL 1 ++#define E1000_I2C_T_SU_STO 4 ++#define E1000_I2C_T_BUF 5 ++ ++s32 e1000_set_i2c_bb(struct e1000_hw *hw); ++s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++void e1000_i2c_bus_clear(struct e1000_hw *hw); ++#endif /* _E1000_82575_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.c b/drivers/net/ethernet/intel/igb/e1000_api.c +--- a/drivers/net/ethernet/intel/igb/e1000_api.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_api.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,1184 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++/** ++ * e1000_init_mac_params - Initialize MAC function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the MAC ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. 
++ **/ ++s32 e1000_init_mac_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->mac.ops.init_params) { ++ ret_val = hw->mac.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("MAC Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("mac.init_mac_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_init_nvm_params - Initialize NVM function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the NVM ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. ++ **/ ++s32 e1000_init_nvm_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->nvm.ops.init_params) { ++ ret_val = hw->nvm.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("NVM Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("nvm.init_nvm_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_init_phy_params - Initialize PHY function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the PHY ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. ++ **/ ++s32 e1000_init_phy_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->phy.ops.init_params) { ++ ret_val = hw->phy.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("PHY Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("phy.init_phy_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_init_mbx_params - Initialize mailbox function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the PHY ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. ++ **/ ++s32 e1000_init_mbx_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->mbx.ops.init_params) { ++ ret_val = hw->mbx.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("Mailbox Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("mbx.init_mbx_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_set_mac_type - Sets MAC type ++ * @hw: pointer to the HW structure ++ * ++ * This function sets the mac type of the adapter based on the ++ * device ID stored in the hw structure. ++ * MUST BE FIRST FUNCTION CALLED (explicitly or through ++ * e1000_setup_init_funcs()). 
++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_set_mac_type(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("igb_e1000_set_mac_type"); ++ ++ switch (hw->device_id) { ++ case E1000_DEV_ID_82575EB_COPPER: ++ case E1000_DEV_ID_82575EB_FIBER_SERDES: ++ case E1000_DEV_ID_82575GB_QUAD_COPPER: ++ mac->type = e1000_82575; ++ break; ++ case E1000_DEV_ID_82576: ++ case E1000_DEV_ID_82576_FIBER: ++ case E1000_DEV_ID_82576_SERDES: ++ case E1000_DEV_ID_82576_QUAD_COPPER: ++ case E1000_DEV_ID_82576_QUAD_COPPER_ET2: ++ case E1000_DEV_ID_82576_NS: ++ case E1000_DEV_ID_82576_NS_SERDES: ++ case E1000_DEV_ID_82576_SERDES_QUAD: ++ mac->type = e1000_82576; ++ break; ++ case E1000_DEV_ID_82580_COPPER: ++ case E1000_DEV_ID_82580_FIBER: ++ case E1000_DEV_ID_82580_SERDES: ++ case E1000_DEV_ID_82580_SGMII: ++ case E1000_DEV_ID_82580_COPPER_DUAL: ++ case E1000_DEV_ID_82580_QUAD_FIBER: ++ case E1000_DEV_ID_DH89XXCC_SGMII: ++ case E1000_DEV_ID_DH89XXCC_SERDES: ++ case E1000_DEV_ID_DH89XXCC_BACKPLANE: ++ case E1000_DEV_ID_DH89XXCC_SFP: ++ mac->type = e1000_82580; ++ break; ++ case E1000_DEV_ID_I350_COPPER: ++ case E1000_DEV_ID_I350_FIBER: ++ case E1000_DEV_ID_I350_SERDES: ++ case E1000_DEV_ID_I350_SGMII: ++ case E1000_DEV_ID_I350_DA4: ++ mac->type = e1000_i350; ++ break; ++ case E1000_DEV_ID_I210_COPPER_FLASHLESS: ++ case E1000_DEV_ID_I210_SERDES_FLASHLESS: ++ case E1000_DEV_ID_I210_COPPER: ++ case E1000_DEV_ID_I210_COPPER_OEM1: ++ case E1000_DEV_ID_I210_COPPER_IT: ++ case E1000_DEV_ID_I210_FIBER: ++ case E1000_DEV_ID_I210_SERDES: ++ case E1000_DEV_ID_I210_SGMII: ++ mac->type = e1000_i210; ++ break; ++ case E1000_DEV_ID_I211_COPPER: ++ mac->type = e1000_i211; ++ break; ++ ++ case E1000_DEV_ID_I354_BACKPLANE_1GBPS: ++ case E1000_DEV_ID_I354_SGMII: ++ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: ++ mac->type = e1000_i354; ++ break; ++ default: ++ /* Should never have loaded on this device */ ++ ret_val = -E1000_ERR_MAC_INIT; ++ break; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_setup_init_funcs - Initializes function pointers ++ * @hw: pointer to the HW structure ++ * @init_device: true will initialize the rest of the function pointers ++ * getting the device ready for use. false will only set ++ * MAC type and the function pointers for the other init ++ * functions. Passing false will not generate any hardware ++ * reads or writes. ++ * ++ * This function must be called by a driver in order to use the rest ++ * of the 'shared' code files. Called by drivers only. ++ **/ ++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) ++{ ++ s32 ret_val; ++ ++ /* Can't do much good without knowing the MAC type. */ ++ ret_val = igb_e1000_set_mac_type(hw); ++ if (ret_val) { ++ DEBUGOUT("ERROR: MAC type could not be set properly.\n"); ++ goto out; ++ } ++ ++ if (!hw->hw_addr) { ++ DEBUGOUT("ERROR: Registers not mapped\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ goto out; ++ } ++ ++ /* ++ * Init function pointers to generic implementations. We do this first ++ * allowing a driver module to override it afterward. ++ */ ++ e1000_init_mac_ops_generic(hw); ++ e1000_init_phy_ops_generic(hw); ++ e1000_init_nvm_ops_generic(hw); ++ e1000_init_mbx_ops_generic(hw); ++ ++ /* ++ * Set up the init function pointers. These are functions within the ++ * adapter family file that sets up function pointers for the rest of ++ * the functions in that family. 
++ */ ++ switch (hw->mac.type) { ++ case e1000_82575: ++ case e1000_82576: ++ case e1000_82580: ++ case e1000_i350: ++ case e1000_i354: ++ e1000_init_function_pointers_82575(hw); ++ break; ++ case e1000_i210: ++ case e1000_i211: ++ e1000_init_function_pointers_i210(hw); ++ break; ++ default: ++ DEBUGOUT("Hardware not supported\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ break; ++ } ++ ++ /* ++ * Initialize the rest of the function pointers. These require some ++ * register reads/writes in some cases. ++ */ ++ if (!(ret_val) && init_device) { ++ ret_val = e1000_init_mac_params(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = e1000_init_nvm_params(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = e1000_init_phy_params(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = e1000_init_mbx_params(hw); ++ if (ret_val) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_get_bus_info - Obtain bus information for adapter ++ * @hw: pointer to the HW structure ++ * ++ * This will obtain information about the HW bus for which the ++ * adapter is attached and stores it in the hw structure. This is a ++ * function pointer entry point called by drivers. ++ **/ ++ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_bus_info(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.get_bus_info) ++ return hw->mac.ops.get_bus_info(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_clear_vfta - Clear VLAN filter table ++ * @hw: pointer to the HW structure ++ * ++ * This clears the VLAN filter table on the adapter. This is a function ++ * pointer entry point called by drivers. ++ **/ ++void e1000_clear_vfta(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.clear_vfta) ++ hw->mac.ops.clear_vfta(hw); ++} ++ ++/** ++ * igb_e1000_write_vfta - Write value to VLAN filter table ++ * @hw: pointer to the HW structure ++ * @offset: the 32-bit offset in which to write the value to. ++ * @value: the 32-bit value to write at location offset. ++ * ++ * This writes a 32-bit value to a 32-bit offset in the VLAN filter ++ * table. This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) ++{ ++ if (hw->mac.ops.write_vfta) ++ hw->mac.ops.write_vfta(hw, offset, value); ++} ++ ++/** ++ * e1000_update_mc_addr_list - Update Multicast addresses ++ * @hw: pointer to the HW structure ++ * @mc_addr_list: array of multicast addresses to program ++ * @mc_addr_count: number of multicast addresses to program ++ * ++ * Updates the Multicast Table Array. ++ * The caller must have a packed mc_addr_list of multicast addresses. ++ **/ ++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count) ++{ ++ if (hw->mac.ops.update_mc_addr_list) ++ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, ++ mc_addr_count); ++} ++ ++/** ++ * igb_e1000_force_mac_fc - Force MAC flow control ++ * @hw: pointer to the HW structure ++ * ++ * Force the MAC's flow control settings. Currently no func pointer exists ++ * and all implementations are handled in the generic version of this ++ * function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_force_mac_fc(struct e1000_hw *hw) ++{ ++ return e1000_force_mac_fc_generic(hw); ++} ++ ++/** ++ * igb_e1000_check_for_link - Check/Store link connection ++ * @hw: pointer to the HW structure ++ * ++ * This checks the link condition of the adapter and stores the ++ * results in the hw->mac structure. 
This is a function pointer entry ++ * point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_for_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.check_for_link) ++ return hw->mac.ops.check_for_link(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_check_mng_mode - Check management mode ++ * @hw: pointer to the HW structure ++ * ++ * This checks if the adapter has manageability enabled. ++ * This is a function pointer entry point called by drivers. ++ **/ ++bool e1000_check_mng_mode(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.check_mng_mode) ++ return hw->mac.ops.check_mng_mode(hw); ++ ++ return false; ++} ++ ++/** ++ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface ++ * @length: size of the buffer ++ * ++ * Writes the DHCP information to the host interface. ++ **/ ++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) ++{ ++ return e1000_mng_write_dhcp_info_generic(hw, buffer, length); ++} ++ ++/** ++ * igb_e1000_reset_hw - Reset hardware ++ * @hw: pointer to the HW structure ++ * ++ * This resets the hardware into a known state. This is a function pointer ++ * entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_reset_hw(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.reset_hw) ++ return hw->mac.ops.reset_hw(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_init_hw - Initialize hardware ++ * @hw: pointer to the HW structure ++ * ++ * This inits the hardware readying it for operation. This is a function ++ * pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_init_hw(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.init_hw) ++ return hw->mac.ops.init_hw(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_setup_link - Configures link and flow control ++ * @hw: pointer to the HW structure ++ * ++ * This configures link and flow control settings for the adapter. This ++ * is a function pointer entry point called by drivers. While modules can ++ * also call this, they probably call their own version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_setup_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.setup_link) ++ return hw->mac.ops.setup_link(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_get_speed_and_duplex - Returns current speed and duplex ++ * @hw: pointer to the HW structure ++ * @speed: pointer to a 16-bit value to store the speed ++ * @duplex: pointer to a 16-bit value to store the duplex. ++ * ++ * This returns the speed and duplex of the adapter in the two 'out' ++ * variables passed in. This is a function pointer entry point called ++ * by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) ++{ ++ if (hw->mac.ops.get_link_up_info) ++ return hw->mac.ops.get_link_up_info(hw, speed, duplex); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_setup_led - Configures SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This prepares the SW controllable LED for use and saves the current state ++ * of the LED so it can be later restored. This is a function pointer entry ++ * point called by drivers. 
++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_setup_led(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.setup_led) ++ return hw->mac.ops.setup_led(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_cleanup_led - Restores SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This restores the SW controllable LED to the value saved off by ++ * igb_e1000_setup_led. This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_cleanup_led(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.cleanup_led) ++ return hw->mac.ops.cleanup_led(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_blink_led - Blink SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This starts the adapter LED blinking. Request the LED to be setup first ++ * and cleaned up after. This is a function pointer entry point called by ++ * drivers. ++ **/ ++s32 e1000_blink_led(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.blink_led) ++ return hw->mac.ops.blink_led(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_id_led_init - store LED configurations in SW ++ * @hw: pointer to the HW structure ++ * ++ * Initializes the LED config in SW. This is a function pointer entry point ++ * called by drivers. ++ **/ ++s32 e1000_id_led_init(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.id_led_init) ++ return hw->mac.ops.id_led_init(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_led_on - Turn on SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * Turns the SW defined LED on. This is a function pointer entry point ++ * called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_led_on(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.led_on) ++ return hw->mac.ops.led_on(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_led_off - Turn off SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * Turns the SW defined LED off. This is a function pointer entry point ++ * called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_led_off(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.led_off) ++ return hw->mac.ops.led_off(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_reset_adaptive - Reset adaptive IFS ++ * @hw: pointer to the HW structure ++ * ++ * Resets the adaptive IFS. Currently no func pointer exists and all ++ * implementations are handled in the generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_reset_adaptive(struct e1000_hw *hw) ++{ ++ e1000_reset_adaptive_generic(hw); ++} ++ ++/** ++ * igb_e1000_update_adaptive - Update adaptive IFS ++ * @hw: pointer to the HW structure ++ * ++ * Updates adapter IFS. Currently no func pointer exists and all ++ * implementations are handled in the generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_update_adaptive(struct e1000_hw *hw) ++{ ++ e1000_update_adaptive_generic(hw); ++} ++ ++/** ++ * e1000_disable_pcie_master - Disable PCI-Express master access ++ * @hw: pointer to the HW structure ++ * ++ * Disables PCI-Express master access and verifies there are no pending ++ * requests. Currently no func pointer exists and all implementations are ++ * handled in the generic version of this function. 
++ **/ ++s32 e1000_disable_pcie_master(struct e1000_hw *hw) ++{ ++ return e1000_disable_pcie_master_generic(hw); ++} ++ ++/** ++ * igb_e1000_config_collision_dist - Configure collision distance ++ * @hw: pointer to the HW structure ++ * ++ * Configures the collision distance to the default value and is used ++ * during link setup. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_config_collision_dist(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.config_collision_dist) ++ hw->mac.ops.config_collision_dist(hw); ++} ++ ++/** ++ * igb_e1000_rar_set - Sets a receive address register ++ * @hw: pointer to the HW structure ++ * @addr: address to set the RAR to ++ * @index: the RAR to set ++ * ++ * Sets a Receive Address Register (RAR) to the specified address. ++ **/ ++/* Changed name, duplicated with e1000 */ ++int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) ++{ ++ if (hw->mac.ops.rar_set) ++ return hw->mac.ops.rar_set(hw, addr, index); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state ++ * @hw: pointer to the HW structure ++ * ++ * Ensures that the MDI/MDIX SW state is valid. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.validate_mdi_setting) ++ return hw->mac.ops.validate_mdi_setting(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_hash_mc_addr - Determines address location in multicast table ++ * @hw: pointer to the HW structure ++ * @mc_addr: Multicast address to hash. ++ * ++ * This hashes an address to determine its location in the multicast ++ * table. Currently no func pointer exists and all implementations ++ * are handled in the generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) ++{ ++ return e1000_hash_mc_addr_generic(hw, mc_addr); ++} ++ ++/** ++ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX ++ * @hw: pointer to the HW structure ++ * ++ * Enables packet filtering on transmit packets if manageability is enabled ++ * and host interface is enabled. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) ++{ ++ return e1000_enable_tx_pkt_filtering_generic(hw); ++} ++ ++/** ++ * e1000_mng_host_if_write - Writes to the manageability host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface buffer ++ * @length: size of the buffer ++ * @offset: location in the buffer to write to ++ * @sum: sum of the data (not checksum) ++ * ++ * This function writes the buffer content at the offset given on the host if. ++ * It also does alignment considerations to do the writes in most efficient ++ * way. Also fills up the sum of the buffer in *buffer parameter. ++ **/ ++s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, ++ u16 offset, u8 *sum) ++{ ++ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); ++} ++ ++/** ++ * e1000_mng_write_cmd_header - Writes manageability command header ++ * @hw: pointer to the HW structure ++ * @hdr: pointer to the host interface command header ++ * ++ * Writes the command header after does the checksum calculation. 
++ **/ ++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr) ++{ ++ return e1000_mng_write_cmd_header_generic(hw, hdr); ++} ++ ++/** ++ * e1000_mng_enable_host_if - Checks host interface is enabled ++ * @hw: pointer to the HW structure ++ * ++ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND ++ * ++ * This function checks whether the HOST IF is enabled for command operation ++ * and also checks whether the previous command is completed. It busy waits ++ * if the previous command has not yet completed. ++ **/ ++s32 e1000_mng_enable_host_if(struct e1000_hw *hw) ++{ ++ return e1000_mng_enable_host_if_generic(hw); ++} ++ ++/** ++ * e1000_check_reset_block - Verifies PHY can be reset ++ * @hw: pointer to the HW structure ++ * ++ * Checks if the PHY is in a state that can be reset or if manageability ++ * has it tied up. This is a function pointer entry point called by drivers. ++ **/ ++s32 e1000_check_reset_block(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.check_reset_block) ++ return hw->phy.ops.check_reset_block(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_read_phy_reg - Reads PHY register ++ * @hw: pointer to the HW structure ++ * @offset: the register to read ++ * @data: the buffer to store the 16-bit read. ++ * ++ * Reads the PHY register and returns the value in data. ++ * This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ if (hw->phy.ops.read_reg) ++ return hw->phy.ops.read_reg(hw, offset, data); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_write_phy_reg - Writes PHY register ++ * @hw: pointer to the HW structure ++ * @offset: the register to write ++ * @data: the value to write. ++ * ++ * Writes the PHY register at offset with the value in data. ++ * This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ if (hw->phy.ops.write_reg) ++ return hw->phy.ops.write_reg(hw, offset, data); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_release_phy - Generic release PHY ++ * @hw: pointer to the HW structure ++ * ++ * Return if silicon family does not require a semaphore when accessing the ++ * PHY. ++ **/ ++void e1000_release_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.release) ++ hw->phy.ops.release(hw); ++} ++ ++/** ++ * e1000_acquire_phy - Generic acquire PHY ++ * @hw: pointer to the HW structure ++ * ++ * Return success if silicon family does not require a semaphore when ++ * accessing the PHY. ++ **/ ++s32 e1000_acquire_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.acquire) ++ return hw->phy.ops.acquire(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_kmrn_reg - Reads register using Kumeran interface ++ * @hw: pointer to the HW structure ++ * @offset: the register to read ++ * @data: the location to store the 16-bit value read. ++ * ++ * Reads a register out of the Kumeran interface. Currently no func pointer ++ * exists and all implementations are handled in the generic version of ++ * this function.
++ **/ ++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return e1000_read_kmrn_reg_generic(hw, offset, data); ++} ++ ++/** ++ * e1000_write_kmrn_reg - Writes register using Kumeran interface ++ * @hw: pointer to the HW structure ++ * @offset: the register to write ++ * @data: the value to write. ++ * ++ * Writes a register to the Kumeran interface. Currently no func pointer ++ * exists and all implementations are handled in the generic version of ++ * this function. ++ **/ ++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return e1000_write_kmrn_reg_generic(hw, offset, data); ++} ++ ++/** ++ * e1000_get_cable_length - Retrieves cable length estimation ++ * @hw: pointer to the HW structure ++ * ++ * This function estimates the cable length and stores them in ++ * hw->phy.min_length and hw->phy.max_length. This is a function pointer ++ * entry point called by drivers. ++ **/ ++s32 e1000_get_cable_length(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.get_cable_length) ++ return hw->phy.ops.get_cable_length(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_get_phy_info - Retrieves PHY information from registers ++ * @hw: pointer to the HW structure ++ * ++ * This function gets some information from various PHY registers and ++ * populates hw->phy values with it. This is a function pointer entry ++ * point called by drivers. ++ **/ ++s32 e1000_get_phy_info(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.get_info) ++ return hw->phy.ops.get_info(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_phy_hw_reset - Hard PHY reset ++ * @hw: pointer to the HW structure ++ * ++ * Performs a hard PHY reset. This is a function pointer entry point called ++ * by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.reset) ++ return hw->phy.ops.reset(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_phy_commit - Soft PHY reset ++ * @hw: pointer to the HW structure ++ * ++ * Performs a soft PHY reset on those that apply. This is a function pointer ++ * entry point called by drivers. ++ **/ ++s32 e1000_phy_commit(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.commit) ++ return hw->phy.ops.commit(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_d0_lplu_state - Sets low power link up state for D0 ++ * @hw: pointer to the HW structure ++ * @active: boolean used to enable/disable lplu ++ * ++ * Success returns 0, Failure returns 1 ++ * ++ * The low power link up (lplu) state is set to the power management level D0 ++ * and SmartSpeed is disabled when active is true, else clear lplu for D0 ++ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU ++ * is used during Dx states where the power conservation is most important. ++ * During driver activity, SmartSpeed should be enabled so performance is ++ * maintained. This is a function pointer entry point called by drivers. 
++ **/ ++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) ++{ ++ if (hw->phy.ops.set_d0_lplu_state) ++ return hw->phy.ops.set_d0_lplu_state(hw, active); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_d3_lplu_state - Sets low power link up state for D3 ++ * @hw: pointer to the HW structure ++ * @active: boolean used to enable/disable lplu ++ * ++ * Success returns 0, Failure returns 1 ++ * ++ * The low power link up (lplu) state is set to the power management level D3 ++ * and SmartSpeed is disabled when active is true, else clear lplu for D3 ++ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU ++ * is used during Dx states where the power conservation is most important. ++ * During driver activity, SmartSpeed should be enabled so performance is ++ * maintained. This is a function pointer entry point called by drivers. ++ **/ ++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) ++{ ++ if (hw->phy.ops.set_d3_lplu_state) ++ return hw->phy.ops.set_d3_lplu_state(hw, active); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_read_mac_addr - Reads MAC address ++ * @hw: pointer to the HW structure ++ * ++ * Reads the MAC address out of the adapter and stores it in the HW structure. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_mac_addr(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.read_mac_addr) ++ return hw->mac.ops.read_mac_addr(hw); ++ ++ return igb_e1000_read_mac_addr_generic(hw); ++} ++ ++/** ++ * e1000_read_pba_string - Read device part number string ++ * @hw: pointer to the HW structure ++ * @pba_num: pointer to device part number ++ * @pba_num_size: size of part number buffer ++ * ++ * Reads the product board assembly (PBA) number from the EEPROM and stores ++ * the value in pba_num. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) ++{ ++ return igb_e1000_read_pba_string_generic(hw, pba_num, pba_num_size); ++} ++ ++/** ++ * e1000_read_pba_length - Read device part number string length ++ * @hw: pointer to the HW structure ++ * @pba_num_size: size of part number buffer ++ * ++ * Reads the product board assembly (PBA) number length from the EEPROM and ++ * stores the value in pba_num_size. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) ++{ ++ return e1000_read_pba_length_generic(hw, pba_num_size); ++} ++ ++/** ++ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum ++ * @hw: pointer to the HW structure ++ * ++ * Validates the NVM checksum is correct. This is a function pointer entry ++ * point called by drivers. ++ **/ ++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) ++{ ++ if (hw->nvm.ops.validate) ++ return hw->nvm.ops.validate(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum ++ * @hw: pointer to the HW structure ++ * ++ * Updates the NVM checksum. Currently no func pointer exists and all ++ * implementations are handled in the generic version of this function.
++ **/ ++s32 e1000_update_nvm_checksum(struct e1000_hw *hw) ++{ ++ if (hw->nvm.ops.update) ++ return hw->nvm.ops.update(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_reload_nvm - Reloads EEPROM ++ * @hw: pointer to the HW structure ++ * ++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the ++ * extended control register. ++ **/ ++void e1000_reload_nvm(struct e1000_hw *hw) ++{ ++ if (hw->nvm.ops.reload) ++ hw->nvm.ops.reload(hw); ++} ++ ++/** ++ * e1000_read_nvm - Reads NVM (EEPROM) ++ * @hw: pointer to the HW structure ++ * @offset: the word offset to read ++ * @words: number of 16-bit words to read ++ * @data: pointer to the properly sized buffer for the data. ++ * ++ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function ++ * pointer entry point called by drivers. ++ **/ ++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++{ ++ if (hw->nvm.ops.read) ++ return hw->nvm.ops.read(hw, offset, words, data); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_write_nvm - Writes to NVM (EEPROM) ++ * @hw: pointer to the HW structure ++ * @offset: the word offset to read ++ * @words: number of 16-bit words to write ++ * @data: pointer to the properly sized buffer for the data. ++ * ++ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function ++ * pointer entry point called by drivers. ++ **/ ++s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++{ ++ if (hw->nvm.ops.write) ++ return hw->nvm.ops.write(hw, offset, words, data); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register ++ * @hw: pointer to the HW structure ++ * @reg: 32bit register offset ++ * @offset: the register to write ++ * @data: the value to write. ++ * ++ * Writes the PHY register at offset with the value in data. ++ * This is a function pointer entry point called by drivers. ++ **/ ++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, ++ u8 data) ++{ ++ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); ++} ++ ++/** ++ * igb_e1000_power_up_phy - Restores link in case of PHY power down ++ * @hw: pointer to the HW structure ++ * ++ * The phy may be powered down to save power, to turn off link when the ++ * driver is unloaded, or wake on lan is not enabled (among others). ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_power_up_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.power_up) ++ hw->phy.ops.power_up(hw); ++ ++ igb_e1000_setup_link(hw); ++} ++ ++/** ++ * e1000_power_down_phy - Power down PHY ++ * @hw: pointer to the HW structure ++ * ++ * The phy may be powered down to save power, to turn off link when the ++ * driver is unloaded, or wake on lan is not enabled (among others). ++ **/ ++void e1000_power_down_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.power_down) ++ hw->phy.ops.power_down(hw); ++} ++ ++/** ++ * e1000_power_up_fiber_serdes_link - Power up serdes link ++ * @hw: pointer to the HW structure ++ * ++ * Power on the optics and PCS. ++ **/ ++void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.power_up_serdes) ++ hw->mac.ops.power_up_serdes(hw); ++} ++ ++/** ++ * e1000_shutdown_fiber_serdes_link - Remove link during power down ++ * @hw: pointer to the HW structure ++ * ++ * Shutdown the optics and PCS on driver unload. 
++ **/ ++void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.shutdown_serdes) ++ hw->mac.ops.shutdown_serdes(hw); ++} ++ ++/** ++ * e1000_get_thermal_sensor_data - Gathers thermal sensor data ++ * @hw: pointer to hardware structure ++ * ++ * Updates the temperatures in mac.thermal_sensor_data ++ **/ ++s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.get_thermal_sensor_data) ++ return hw->mac.ops.get_thermal_sensor_data(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds ++ * @hw: pointer to hardware structure ++ * ++ * Sets the thermal sensor thresholds according to the NVM map ++ **/ ++s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.init_thermal_sensor_thresh) ++ return hw->mac.ops.init_thermal_sensor_thresh(hw); ++ ++ return E1000_SUCCESS; ++} ++ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.h b/drivers/net/ethernet/intel/igb/e1000_api.h +--- a/drivers/net/ethernet/intel/igb/e1000_api.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_api.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,152 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _E1000_API_H_ ++#define _E1000_API_H_ ++ ++#include "e1000_hw.h" ++ ++extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); ++extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); ++extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); ++extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); ++extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); ++extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); ++ ++s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); ++s32 igb_e1000_set_mac_type(struct e1000_hw *hw); ++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); ++s32 e1000_init_mac_params(struct e1000_hw *hw); ++s32 e1000_init_nvm_params(struct e1000_hw *hw); ++s32 e1000_init_phy_params(struct e1000_hw *hw); ++s32 e1000_init_mbx_params(struct e1000_hw *hw); ++s32 igb_e1000_get_bus_info(struct e1000_hw *hw); ++void e1000_clear_vfta(struct e1000_hw *hw); ++void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); ++s32 igb_e1000_force_mac_fc(struct e1000_hw *hw); ++s32 igb_e1000_check_for_link(struct e1000_hw *hw); ++s32 igb_e1000_reset_hw(struct e1000_hw *hw); ++s32 igb_e1000_init_hw(struct e1000_hw *hw); ++s32 igb_e1000_setup_link(struct e1000_hw *hw); ++s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); ++s32 e1000_disable_pcie_master(struct e1000_hw *hw); ++void igb_e1000_config_collision_dist(struct e1000_hw *hw); ++int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); ++u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); ++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count); ++s32 igb_e1000_setup_led(struct e1000_hw *hw); ++s32 igb_e1000_cleanup_led(struct e1000_hw *hw); ++s32 e1000_check_reset_block(struct e1000_hw *hw); ++s32 e1000_blink_led(struct e1000_hw *hw); ++s32 igb_e1000_led_on(struct e1000_hw *hw); ++s32 igb_e1000_led_off(struct e1000_hw *hw); ++s32 e1000_id_led_init(struct e1000_hw *hw); ++void igb_e1000_reset_adaptive(struct e1000_hw *hw); ++void igb_e1000_update_adaptive(struct e1000_hw *hw); ++s32 e1000_get_cable_length(struct e1000_hw *hw); ++s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw); ++s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, ++ u8 data); ++s32 e1000_get_phy_info(struct e1000_hw *hw); ++void e1000_release_phy(struct e1000_hw *hw); ++s32 e1000_acquire_phy(struct e1000_hw *hw); ++s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw); ++s32 e1000_phy_commit(struct e1000_hw *hw); ++void igb_e1000_power_up_phy(struct e1000_hw *hw); ++void e1000_power_down_phy(struct e1000_hw *hw); ++s32 igb_e1000_read_mac_addr(struct e1000_hw *hw); ++s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); ++s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); ++void e1000_reload_nvm(struct e1000_hw *hw); ++s32 e1000_update_nvm_checksum(struct e1000_hw *hw); ++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); ++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); ++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); ++s32 
e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); ++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); ++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); ++bool e1000_check_mng_mode(struct e1000_hw *hw); ++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); ++s32 e1000_mng_enable_host_if(struct e1000_hw *hw); ++s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, ++ u16 offset, u8 *sum); ++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr); ++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); ++s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw); ++s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw); ++ ++/* ++ * TBI_ACCEPT macro definition: ++ * ++ * This macro requires: ++ * a = a pointer to struct e1000_hw ++ * status = the 8 bit status field of the Rx descriptor with EOP set ++ * errors = the 8 bit error field of the Rx descriptor with EOP set ++ * length = the sum of all the length fields of the Rx descriptors that ++ * make up the current frame ++ * last_byte = the last byte of the frame DMAed by the hardware ++ * min_frame_size = the minimum frame length we want to accept. ++ * max_frame_size = the maximum frame length we want to accept. ++ * ++ * This macro is a conditional that should be used in the interrupt ++ * handler's Rx processing routine when RxErrors have been detected. ++ * ++ * Typical use: ++ * ... ++ * if (TBI_ACCEPT) { ++ * accept_frame = true; ++ * e1000_tbi_adjust_stats(adapter, MacAddress); ++ * frame_length--; ++ * } else { ++ * accept_frame = false; ++ * } ++ * ... ++ */ ++ ++/* The carrier extension symbol, as received by the NIC. */ ++#define CARRIER_EXTENSION 0x0F ++ ++#define TBI_ACCEPT(a, status, errors, length, last_byte, \ ++ min_frame_size, max_frame_size) \ ++ (e1000_tbi_sbp_enabled_82543(a) && \ ++ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ ++ ((last_byte) == CARRIER_EXTENSION) && \ ++ (((status) & E1000_RXD_STAT_VP) ? \ ++ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ ++ ((length) <= ((max_frame_size) + 1))) : \ ++ (((length) > (min_frame_size)) && \ ++ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) ++ ++#ifndef E1000_MAX ++#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b)) ++#endif ++#ifndef E1000_DIVIDE_ROUND_UP ++#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ ++#endif ++#endif /* _E1000_API_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h +--- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,25 +1,26 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . 
+- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_DEFINES_H_ + #define _E1000_DEFINES_H_ +@@ -30,38 +31,55 @@ + + /* Definitions for power management and wakeup registers */ + /* Wake Up Control */ +-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ ++#define E1000_WUC_APME 0x00000001 /* APM Enable */ ++#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ ++#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ ++#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ ++#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + + /* Wake Up Filter Control */ +-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ ++#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ ++#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ ++#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ ++#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ ++#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ ++#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ ++#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ ++#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ ++ ++/* Wake Up Status */ ++#define E1000_WUS_LNKC E1000_WUFC_LNKC ++#define E1000_WUS_MAG E1000_WUFC_MAG ++#define E1000_WUS_EX E1000_WUFC_EX ++#define E1000_WUS_MC E1000_WUFC_MC ++#define E1000_WUS_BC E1000_WUFC_BC + + /* Extended Device Control */ +-#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +-#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +-#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +-#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +- ++#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ ++#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ ++#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ ++#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable 
Pin 3 data */ ++#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ ++#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ ++#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ ++#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ + /* Physical Func Reset Done Indication */ + #define E1000_CTRL_EXT_PFRSTD 0x00004000 + #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ ++#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ ++#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ ++#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ + #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 ++/* Offset of the link mode field in Ctrl Ext register */ ++#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 + #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 + #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +-#define E1000_CTRL_EXT_EIAME 0x01000000 ++#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 ++#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 ++#define E1000_CTRL_EXT_EIAME 0x01000000 + #define E1000_CTRL_EXT_IRCA 0x00000001 +-/* Interrupt delay cancellation */ +-/* Driver loaded bit for FW */ +-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +-/* Interrupt acknowledge Auto-mask */ +-/* Clear Interrupt timers after IMS clear */ +-/* packet buffer parity error detection enabled */ +-/* descriptor FIFO parity error detection enable */ ++#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ ++#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ + #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ + #define E1000_CTRL_EXT_PHYPDEN 0x00100000 + #define E1000_I2CCMD_REG_ADDR_SHIFT 16 +@@ -74,322 +92,446 @@ + #define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) + #define E1000_MAX_SGMII_PHY_REG_ADDR 255 + #define E1000_I2CCMD_PHY_TIMEOUT 200 +-#define E1000_IVAR_VALID 0x80 +-#define E1000_GPIE_NSICR 0x00000001 +-#define E1000_GPIE_MSIX_MODE 0x00000010 +-#define E1000_GPIE_EIAME 0x40000000 +-#define E1000_GPIE_PBA 0x80000000 ++#define E1000_IVAR_VALID 0x80 ++#define E1000_GPIE_NSICR 0x00000001 ++#define E1000_GPIE_MSIX_MODE 0x00000010 ++#define E1000_GPIE_EIAME 0x40000000 ++#define E1000_GPIE_PBA 0x80000000 + + /* Receive Descriptor bit definitions */ +-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +-#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ +- +-#define E1000_RXDEXT_STATERR_LB 0x00040000 +-#define E1000_RXDEXT_STATERR_CE 0x01000000 +-#define E1000_RXDEXT_STATERR_SE 0x02000000 +-#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +-#define E1000_RXDEXT_STATERR_CXE 0x10000000 +-#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +-#define E1000_RXDEXT_STATERR_IPE 0x40000000 +-#define E1000_RXDEXT_STATERR_RXE 0x80000000 ++#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ ++#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ ++#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ ++#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ ++#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ ++#define 
E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ ++#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ ++#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ ++#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ ++#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ ++#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ ++#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ ++#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ ++#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ ++#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ ++#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ ++#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ ++#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ ++#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ ++ ++#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ ++#define E1000_RXDEXT_STATERR_LB 0x00040000 ++#define E1000_RXDEXT_STATERR_CE 0x01000000 ++#define E1000_RXDEXT_STATERR_SE 0x02000000 ++#define E1000_RXDEXT_STATERR_SEQ 0x04000000 ++#define E1000_RXDEXT_STATERR_CXE 0x10000000 ++#define E1000_RXDEXT_STATERR_TCPE 0x20000000 ++#define E1000_RXDEXT_STATERR_IPE 0x40000000 ++#define E1000_RXDEXT_STATERR_RXE 0x80000000 ++ ++/* mask to determine if packets should be dropped due to frame errors */ ++#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ ++ E1000_RXD_ERR_CE | \ ++ E1000_RXD_ERR_SE | \ ++ E1000_RXD_ERR_SEQ | \ ++ E1000_RXD_ERR_CXE | \ ++ E1000_RXD_ERR_RXE) + + /* Same mask, but for extended and packet split descriptors */ + #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ +- E1000_RXDEXT_STATERR_CE | \ +- E1000_RXDEXT_STATERR_SE | \ +- E1000_RXDEXT_STATERR_SEQ | \ +- E1000_RXDEXT_STATERR_CXE | \ ++ E1000_RXDEXT_STATERR_CE | \ ++ E1000_RXDEXT_STATERR_SE | \ ++ E1000_RXDEXT_STATERR_SEQ | \ ++ E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +-#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +-#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +-#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 ++#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 ++#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 ++#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 ++#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 ++#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 ++#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + ++#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + + /* Management Control */ +-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +-#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +-/* Enable Neighbor Discovery Filtering */ +-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ ++#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ ++#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ ++#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ ++#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ ++#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + /* Enable MAC address filtering */ +-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 ++#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 ++/* Enable MNG packets to host memory */ ++#define E1000_MANC_EN_MNG2HOST 0x00200000 ++ ++#define E1000_MANC2H_PORT_623 0x00000020 
/* Port 0x26f */ ++#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ ++#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ ++#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + + /* Receive Control */ +-#define E1000_RCTL_EN 0x00000002 /* enable */ +-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +-#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ ++#define E1000_RCTL_RST 0x00000001 /* Software reset */ ++#define E1000_RCTL_EN 0x00000002 /* enable */ ++#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ ++#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ ++#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ ++#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ ++#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ ++#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ ++#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ ++#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ ++#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ ++#define E1000_RCTL_RDMTS_HEX 0x00010000 ++#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX ++#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ ++#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ ++#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ ++/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ ++#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ ++#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ ++#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ ++#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ ++/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ ++#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ ++#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ ++#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ ++#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ ++#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ ++#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ ++#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ ++#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ ++#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ ++#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + + /* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & +- * E1000_PSRCTL_BSIZE0_MASK) | +- * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & +- * 
E1000_PSRCTL_BSIZE1_MASK) | +- * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & +- * E1000_PSRCTL_BSIZE2_MASK) | +- * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; +- * E1000_PSRCTL_BSIZE3_MASK)) ++ * E1000_PSRCTL_BSIZE0_MASK) | ++ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & ++ * E1000_PSRCTL_BSIZE1_MASK) | ++ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & ++ * E1000_PSRCTL_BSIZE2_MASK) | ++ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; ++ * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +-#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +-#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +-#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +-#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 +- +-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ ++#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F ++#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 ++#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 ++#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 ++ ++#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ ++#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ ++#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ ++#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + + /* SWFW_SYNC Definitions */ +-#define E1000_SWFW_EEP_SM 0x1 +-#define E1000_SWFW_PHY0_SM 0x2 +-#define E1000_SWFW_PHY1_SM 0x4 +-#define E1000_SWFW_PHY2_SM 0x20 +-#define E1000_SWFW_PHY3_SM 0x40 ++#define E1000_SWFW_EEP_SM 0x01 ++#define E1000_SWFW_PHY0_SM 0x02 ++#define E1000_SWFW_PHY1_SM 0x04 ++#define E1000_SWFW_CSR_SM 0x08 ++#define E1000_SWFW_PHY2_SM 0x20 ++#define E1000_SWFW_PHY3_SM 0x40 ++#define E1000_SWFW_SW_MNG_SM 0x400 + +-/* FACTPS Definitions */ + /* Device Control */ +-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +-/* Defined polarity of Dock/Undock indication in SDP[0] */ +-/* Reset both PHY ports, through PHYRST_N pin */ +-/* enable link status from external LINK_0 and LINK_1 pins */ +-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +-#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +-#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +-#define E1000_CTRL_RST 0x04000000 /* Global reset */ +-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +-/* Initiate an interrupt to manageability engine */ +-#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ +- +-/* Bit definitions for the Management Data IO (MDIO) and Management Data +- * Clock (MDC) pins in the Device Control Register. +- */ ++#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ ++#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ ++#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ ++#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ ++#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ ++#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ ++#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ ++#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ ++#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ ++#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ ++#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ ++#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ ++#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ ++#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ ++#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ ++#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ ++#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ ++#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ ++#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ ++#define E1000_CTRL_RST 0x04000000 /* Global reset */ ++#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ ++#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ ++#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ ++#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ ++#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +-#define E1000_CONNSW_ENRGSRC 0x4 ++#define E1000_CONNSW_ENRGSRC 0x4 + #define E1000_CONNSW_PHYSD 0x400 + #define E1000_CONNSW_PHY_PDN 0x800 + #define E1000_CONNSW_SERDESD 0x200 + #define E1000_CONNSW_AUTOSENSE_CONF 0x2 + #define E1000_CONNSW_AUTOSENSE_EN 0x1 +-#define E1000_PCS_CFG_PCS_EN 8 +-#define E1000_PCS_LCTL_FLV_LINK_UP 1 +-#define E1000_PCS_LCTL_FSV_100 2 +-#define E1000_PCS_LCTL_FSV_1000 4 +-#define E1000_PCS_LCTL_FDV_FULL 8 +-#define E1000_PCS_LCTL_FSD 0x10 +-#define E1000_PCS_LCTL_FORCE_LINK 0x20 +-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +-#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +-#define E1000_PCS_LCTL_AN_RESTART 0x20000 +-#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +-#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 +- +-#define E1000_PCS_LSTS_LINK_OK 1 +-#define E1000_PCS_LSTS_SPEED_100 2 +-#define E1000_PCS_LSTS_SPEED_1000 4 +-#define E1000_PCS_LSTS_DUPLEX_FULL 8 +-#define E1000_PCS_LSTS_SYNK_OK 0x10 ++#define E1000_PCS_CFG_PCS_EN 8 ++#define E1000_PCS_LCTL_FLV_LINK_UP 1 ++#define E1000_PCS_LCTL_FSV_10 0 ++#define E1000_PCS_LCTL_FSV_100 2 ++#define E1000_PCS_LCTL_FSV_1000 4 ++#define E1000_PCS_LCTL_FDV_FULL 8 ++#define E1000_PCS_LCTL_FSD 0x10 ++#define E1000_PCS_LCTL_FORCE_LINK 0x20 ++#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 ++#define E1000_PCS_LCTL_AN_ENABLE 0x10000 ++#define E1000_PCS_LCTL_AN_RESTART 0x20000 ++#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 ++#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 ++ ++#define E1000_PCS_LSTS_LINK_OK 1 ++#define E1000_PCS_LSTS_SPEED_100 2 ++#define E1000_PCS_LSTS_SPEED_1000 4 ++#define E1000_PCS_LSTS_DUPLEX_FULL 8 ++#define E1000_PCS_LSTS_SYNK_OK 0x10 ++#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + + /* Device Status */ +-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +-#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +-#define E1000_STATUS_FUNC_SHIFT 2 +-#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +-/* Change in Dock/Undock state. Clear on write '0'. 
*/ +-/* Status of Master requests. */ +-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +-/* BMC external code execution disabled */ +- ++#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ ++#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ ++#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ ++#define E1000_STATUS_FUNC_SHIFT 2 ++#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ ++#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ ++#define E1000_STATUS_SPEED_MASK 0x000000C0 ++#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ ++#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ ++#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ ++#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ ++#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ ++#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ + #define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ + #define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +-/* Constants used to intrepret the masked PCI-X bus speed. */ + +-#define SPEED_10 10 +-#define SPEED_100 100 +-#define SPEED_1000 1000 +-#define SPEED_2500 2500 +-#define HALF_DUPLEX 1 +-#define FULL_DUPLEX 2 +- +- +-#define ADVERTISE_10_HALF 0x0001 +-#define ADVERTISE_10_FULL 0x0002 +-#define ADVERTISE_100_HALF 0x0004 +-#define ADVERTISE_100_FULL 0x0008 +-#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +-#define ADVERTISE_1000_FULL 0x0020 ++#define SPEED_10 10 ++#define SPEED_100 100 ++#define SPEED_1000 1000 ++#define SPEED_2500 2500 ++#define HALF_DUPLEX 1 ++#define FULL_DUPLEX 2 ++ ++#define ADVERTISE_10_HALF 0x0001 ++#define ADVERTISE_10_FULL 0x0002 ++#define ADVERTISE_100_HALF 0x0004 ++#define ADVERTISE_100_FULL 0x0008 ++#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ ++#define ADVERTISE_1000_FULL 0x0020 + + /* 1000/H is not supported, nor spec-compliant. 
*/ +-#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ +- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ +- ADVERTISE_1000_FULL) +-#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ +- ADVERTISE_100_HALF | ADVERTISE_100_FULL) +-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +-#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ +- ADVERTISE_1000_FULL) +-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) ++#define E1000_ALL_SPEED_DUPLEX ( \ ++ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ++ ADVERTISE_100_FULL | ADVERTISE_1000_FULL) ++#define E1000_ALL_NOT_GIG ( \ ++ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ++ ADVERTISE_100_FULL) ++#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) ++#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) ++#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +-#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX ++#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + + /* LED Control */ +-#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +-#define E1000_LEDCTL_LED0_BLINK 0x00000080 + #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F ++#define E1000_LEDCTL_LED0_MODE_SHIFT 0 + #define E1000_LEDCTL_LED0_IVRT 0x00000040 ++#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +-#define E1000_LEDCTL_MODE_LED_ON 0xE +-#define E1000_LEDCTL_MODE_LED_OFF 0xF ++#define E1000_LEDCTL_MODE_LED_ON 0xE ++#define E1000_LEDCTL_MODE_LED_OFF 0xF + + /* Transmit Descriptor bit definitions */ +-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +-/* Extended desc bits for Linksec and timesync */ ++#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ ++#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ ++#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ ++#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ ++#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ ++#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ ++#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ ++#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ ++#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ ++#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ ++#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ ++#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ ++#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ ++#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ ++#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ ++#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ ++#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ ++#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ ++#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ ++#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ ++#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + + /* Transmit Control */ +-#define 
E1000_TCTL_EN 0x00000002 /* enable tx */ +-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +- +-/* DMA Coalescing register fields */ +-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +-#define E1000_DMACR_DMACTHR_SHIFT 16 +-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +-#define E1000_DMACR_DMAC_LX_SHIFT 28 +-#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +-/* DMA Coalescing BMC-to-OS Watchdog Enable */ +-#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 +- +-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ +- +-#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ +- +-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ ++#define E1000_TCTL_EN 0x00000002 /* enable Tx */ ++#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ ++#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ ++#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ ++#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ ++#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ +- +-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +-#define E1000_FCRTC_RTH_COAL_SHIFT 4 +-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ +- +-/* Timestamp in Rx buffer */ +-#define E1000_RXPBS_CFG_TS_EN 0x80000000 +- +-#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +-#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ ++/* Transmit Arbitration Count */ ++#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + + /* SerDes Control */ +-#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 ++#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 ++#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + + /* Receive Checksum Control */ +-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +-#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ ++#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ ++#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ ++#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ ++#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ ++#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + + /* Header split receive */ +-#define E1000_RFCTL_LEF 0x00040000 ++#define E1000_RFCTL_NFSW_DIS 0x00000040 ++#define E1000_RFCTL_NFSR_DIS 0x00000080 ++#define E1000_RFCTL_ACK_DIS 0x00001000 ++#define E1000_RFCTL_EXTEN 0x00008000 ++#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 ++#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 ++#define E1000_RFCTL_LEF 0x00040000 + + /* Collision related configuration parameters */ +-#define E1000_COLLISION_THRESHOLD 15 +-#define E1000_CT_SHIFT 4 +-#define E1000_COLLISION_DISTANCE 63 +-#define E1000_COLD_SHIFT 12 ++#define E1000_COLLISION_THRESHOLD 15 ++#define E1000_CT_SHIFT 4 ++#define 
E1000_COLLISION_DISTANCE 63 ++#define E1000_COLD_SHIFT 12 ++ ++/* Default values for the transmit IPG register */ ++#define DEFAULT_82543_TIPG_IPGT_FIBER 9 ++#define DEFAULT_82543_TIPG_IPGT_COPPER 8 ++ ++#define E1000_TIPG_IPGT_MASK 0x000003FF ++ ++#define DEFAULT_82543_TIPG_IPGR1 8 ++#define E1000_TIPG_IPGR1_SHIFT 10 ++ ++#define DEFAULT_82543_TIPG_IPGR2 6 ++#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 ++#define E1000_TIPG_IPGR2_SHIFT 20 + + /* Ethertype field values */ +-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ ++#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ ++ ++#define ETHERNET_FCS_SIZE 4 ++#define MAX_JUMBO_FRAME_SIZE 0x3F00 ++/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ ++#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 ++#define E1000_TX_PTR_GAP 0x1F ++ ++/* Extended Configuration Control and Size */ ++#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 ++#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 ++#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 ++#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 ++#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 ++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 ++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 ++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 ++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 ++ ++#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 ++#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 ++#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 ++#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +-#define MAX_JUMBO_FRAME_SIZE 0x3F00 ++#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + + /* PBA constants */ +-#define E1000_PBA_34K 0x0022 +-#define E1000_PBA_64K 0x0040 /* 64KB */ ++#define E1000_PBA_8K 0x0008 /* 8KB */ ++#define E1000_PBA_10K 0x000A /* 10KB */ ++#define E1000_PBA_12K 0x000C /* 12KB */ ++#define E1000_PBA_14K 0x000E /* 14KB */ ++#define E1000_PBA_16K 0x0010 /* 16KB */ ++#define E1000_PBA_18K 0x0012 ++#define E1000_PBA_20K 0x0014 ++#define E1000_PBA_22K 0x0016 ++#define E1000_PBA_24K 0x0018 ++#define E1000_PBA_26K 0x001A ++#define E1000_PBA_30K 0x001E ++#define E1000_PBA_32K 0x0020 ++#define E1000_PBA_34K 0x0022 ++#define E1000_PBA_35K 0x0023 ++#define E1000_PBA_38K 0x0026 ++#define E1000_PBA_40K 0x0028 ++#define E1000_PBA_48K 0x0030 /* 48KB */ ++#define E1000_PBA_64K 0x0040 /* 64KB */ ++ ++#define E1000_PBA_RXA_MASK 0xFFFF ++ ++#define E1000_PBS_16K E1000_PBA_16K ++ ++/* Uncorrectable/correctable ECC Error counts and enable bits */ ++#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF ++#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 ++#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 ++#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 ++ ++#define IFS_MAX 80 ++#define IFS_MIN 40 ++#define IFS_RATIO 4 ++#define IFS_STEP 10 ++#define MIN_NUM_XMITS 1000 + + /* SW Semaphore Register */ +-#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +-#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ ++#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ ++#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ ++#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ ++ ++#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + + /* Interrupt Cause Read */ +-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +-#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +-#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +-#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ ++#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ ++#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ ++#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ ++#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ ++#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ ++#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ ++#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ ++#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ ++#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ ++#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ ++#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ ++#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ ++#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ ++#define E1000_ICR_TXD_LOW 0x00008000 ++#define E1000_ICR_MNG 0x00040000 /* Manageability event */ ++#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ ++#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ ++#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ + /* If this bit asserted, the driver should claim the interrupt */ +-#define E1000_ICR_INT_ASSERTED 0x80000000 +-/* LAN connected device generates an interrupt */ +-#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ ++#define E1000_ICR_INT_ASSERTED 0x80000000 ++#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ ++#define E1000_ICR_FER 0x00400000 /* Fatal Error */ ++ ++#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ ++#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + + /* Extended Interrupt Cause Read */ +-#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +-#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +-#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +-#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +-#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +-#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +-#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +-#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +-#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ ++#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ ++#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ ++#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ ++#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ ++#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ ++#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ ++#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ ++#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ ++#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ ++#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + /* TCP Timer */ ++#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ ++#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ ++#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ ++#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + + /* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: +@@ -404,194 +546,207 @@ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ +- E1000_IMS_LSC | \ +- E1000_IMS_DOUTSYNC) ++ E1000_IMS_LSC) + + /* Interrupt Mask Set */ +-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +-#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +-#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +-#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +-#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ ++#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ ++#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ ++#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ ++#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ ++#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ ++#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ ++#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ ++#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ ++#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW ++#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ ++#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ ++#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ ++#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ ++#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + ++#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ ++#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ + /* Extended Interrupt Mask Set */ +-#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ ++#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ ++#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ ++#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ ++#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ ++#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ ++#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ ++#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ ++#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ ++#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ ++#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + + /* Interrupt Cause Set */ +-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +-#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ ++#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ ++#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ ++#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ + + /* Extended Interrupt Cause Set */ +-/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +-#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ ++#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ ++#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ ++#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ ++#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ ++#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ ++#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ ++#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ ++#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ ++#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ ++#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + ++#define E1000_EITR_ITR_INT_MASK 0x0000FFFF ++/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ ++#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ ++#define E1000_EITR_INTERVAL 0x00007FFC + + /* Transmit Descriptor Control */ ++#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ ++#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ ++#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ ++#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ ++#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ ++#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ + /* Enable the counting of descriptors still to be processed. */ ++#define E1000_TXDCTL_COUNT_DESC 0x00400000 + + /* Flow Control Constants */ +-#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +-#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +-#define FLOW_CONTROL_TYPE 0x8808 +- +-/* Transmit Config Word */ +-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ ++#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 ++#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 ++#define FLOW_CONTROL_TYPE 0x8808 + + /* 802.1q VLAN Packet Size */ +-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +-#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ ++#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ ++#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +-/* Receive Address */ +-/* Number of high/low register pairs in the RAR. The RAR (Receive Address ++/* Receive Address ++ * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +-#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +-#define E1000_RAL_MAC_ADDR_LEN 4 +-#define E1000_RAH_MAC_ADDR_LEN 2 +-#define E1000_RAH_POOL_MASK 0x03FC0000 +-#define E1000_RAH_POOL_1 0x00040000 ++#define E1000_RAR_ENTRIES 15 ++#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ ++#define E1000_RAL_MAC_ADDR_LEN 4 ++#define E1000_RAH_MAC_ADDR_LEN 2 ++#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 ++#define E1000_RAH_POOL_1 0x00040000 + + /* Error Codes */ +-#define E1000_ERR_NVM 1 +-#define E1000_ERR_PHY 2 +-#define E1000_ERR_CONFIG 3 +-#define E1000_ERR_PARAM 4 +-#define E1000_ERR_MAC_INIT 5 +-#define E1000_ERR_RESET 9 +-#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +-#define E1000_BLK_PHY_RESET 12 +-#define E1000_ERR_SWFW_SYNC 13 +-#define E1000_NOT_IMPLEMENTED 14 +-#define E1000_ERR_MBX 15 +-#define E1000_ERR_INVALID_ARGUMENT 16 +-#define E1000_ERR_NO_SPACE 17 +-#define E1000_ERR_NVM_PBA_SECTION 18 +-#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +-#define E1000_ERR_I2C 20 ++#define E1000_SUCCESS 0 ++#define E1000_ERR_NVM 1 ++#define E1000_ERR_PHY 2 ++#define E1000_ERR_CONFIG 3 ++#define E1000_ERR_PARAM 4 ++#define E1000_ERR_MAC_INIT 5 ++#define E1000_ERR_PHY_TYPE 6 ++#define E1000_ERR_RESET 9 ++#define E1000_ERR_MASTER_REQUESTS_PENDING 10 ++#define E1000_ERR_HOST_INTERFACE_COMMAND 11 ++#define E1000_BLK_PHY_RESET 12 ++#define E1000_ERR_SWFW_SYNC 13 ++#define E1000_NOT_IMPLEMENTED 14 ++#define E1000_ERR_MBX 15 ++#define E1000_ERR_INVALID_ARGUMENT 16 ++#define E1000_ERR_NO_SPACE 17 ++#define E1000_ERR_NVM_PBA_SECTION 18 ++#define E1000_ERR_I2C 19 ++#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 + + /* Loop limit on how long we wait for auto-negotiation to complete */ +-#define COPPER_LINK_UP_LIMIT 10 +-#define PHY_AUTO_NEG_LIMIT 45 +-#define PHY_FORCE_LIMIT 20 ++#define FIBER_LINK_UP_LIMIT 50 ++#define COPPER_LINK_UP_LIMIT 10 ++#define PHY_AUTO_NEG_LIMIT 45 ++#define PHY_FORCE_LIMIT 20 + /* Number of 100 microseconds we wait for PCI Express master disable */ +-#define MASTER_DISABLE_TIMEOUT 800 ++#define MASTER_DISABLE_TIMEOUT 800 + /* Number of milliseconds we wait for PHY configuration done after MAC reset */ +-#define PHY_CFG_TIMEOUT 100 ++#define PHY_CFG_TIMEOUT 100 + /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ ++#define MDIO_OWNERSHIP_TIMEOUT 10 + /* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +-#define AUTO_READ_DONE_TIMEOUT 10 ++#define AUTO_READ_DONE_TIMEOUT 10 + + /* Flow Control */ +-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ ++#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ ++#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ ++#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +-#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +-#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ ++/* Transmit Configuration Word */ ++#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ ++#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ ++#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ ++#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ ++#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +-#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +-#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +-#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +-#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +-#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +-#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +-#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +-#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ +- +-#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +-#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +-#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +-#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 +- +-#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +-#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +-#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +-#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +-#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +-#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 +- +-#define E1000_TIMINCA_16NS_SHIFT 24 +- +-/* Time Sync Interrupt Cause/Mask Register Bits */ +- +-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ +- +-#define TSYNC_INTERRUPTS TSINTR_TXTS +-#define E1000_TSICR_TXTS TSINTR_TXTS +- +-/* TSAUXC Configuration Bits */ +-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ +-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. 
*/ +-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ +-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ +-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ +-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ +- +-/* SDP Configuration Bits */ +-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ +-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. 
*/ +- +-#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +-#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +-#define E1000_MDICNFG_PHY_MASK 0x03E00000 +-#define E1000_MDICNFG_PHY_SHIFT 21 ++/* Receive Configuration Word */ ++#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ ++#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ ++#define E1000_RXCW_C 0x20000000 /* Receive config */ ++#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ ++ ++#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ ++#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ ++ ++#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ ++#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ ++#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 ++#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 ++#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 ++#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 ++#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A ++#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ ++#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ ++ ++#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF ++#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 ++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 ++#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 ++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 ++#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 ++ ++#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 ++#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 ++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 ++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 ++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 ++#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 ++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 ++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 ++#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 ++#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 ++#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 ++ ++#define E1000_TIMINCA_16NS_SHIFT 24 ++#define E1000_TIMINCA_INCPERIOD_SHIFT 24 ++#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF ++ ++#define E1000_TSICR_TXTS 0x00000002 ++#define E1000_TSIM_TXTS 0x00000002 ++/* TUPLE Filtering Configuration */ ++#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ ++#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ ++#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ ++/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ ++#define E1000_TTQF_PROTOCOL_TCP 0x0 ++/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ ++#define E1000_TTQF_PROTOCOL_UDP 0x1 ++/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ ++#define E1000_TTQF_PROTOCOL_SCTP 0x2 ++#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ ++#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ ++#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ ++#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ ++#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ ++#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ ++#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ ++#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ ++ ++#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ ++#define E1000_MDICNFG_COM_MDIO 
0x40000000 /* MDI shared w/ lan 0 */ ++#define E1000_MDICNFG_PHY_MASK 0x03E00000 ++#define E1000_MDICNFG_PHY_SHIFT 21 + + #define E1000_MEDIA_PORT_COPPER 1 + #define E1000_MEDIA_PORT_OTHER 2 +@@ -604,95 +759,209 @@ + #define E1000_M88E1112_PAGE_ADDR 0x16 + #define E1000_M88E1112_STATUS 0x01 + ++#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ ++#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ ++#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ ++#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ ++#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ ++ ++/* I350 EEE defines */ ++#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ ++#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ ++#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ ++#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ ++#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ ++/* EEE status */ ++#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ ++#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ ++#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ ++#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ ++#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ ++#define E1000_M88E1543_EEE_CTRL_1 0x0 ++#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ ++#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ ++#define E1000_EEE_ADV_DEV_I354 7 ++#define E1000_EEE_ADV_ADDR_I354 60 ++#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ ++#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ ++#define E1000_PCS_STATUS_DEV_I354 3 ++#define E1000_PCS_STATUS_ADDR_I354 1 ++#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 ++#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 ++#define E1000_M88E1512_CFG_REG_1 0x0010 ++#define E1000_M88E1512_CFG_REG_2 0x0011 ++#define E1000_M88E1512_CFG_REG_3 0x0007 ++#define E1000_M88E1512_MODE 0x0014 ++#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ ++#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ ++#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ + /* PCI Express Control */ +-#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +-#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +-#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +-#define E1000_GCR_CAP_VER2 0x00040000 +- +-/* mPHY Address Control and Data Registers */ +-#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +-#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +-#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ +- +-/* mPHY PCS CLK Register */ +-#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +-/* mPHY Near End Digital Loopback Override Bit */ +-#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 +- +-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +-#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 ++#define E1000_GCR_RXD_NO_SNOOP 0x00000001 ++#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 ++#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 ++#define E1000_GCR_TXD_NO_SNOOP 0x00000008 ++#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 ++#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 ++#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 ++#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 ++#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 ++#define E1000_GCR_CAP_VER2 0x00040000 ++ ++#define 
PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ ++ E1000_GCR_RXDSCW_NO_SNOOP | \ ++ E1000_GCR_RXDSCR_NO_SNOOP | \ ++ E1000_GCR_TXD_NO_SNOOP | \ ++ E1000_GCR_TXDSCW_NO_SNOOP | \ ++ E1000_GCR_TXDSCR_NO_SNOOP) ++ ++#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ ++ ++/* mPHY address control and data registers */ ++#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ ++#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 ++#define E1000_MPHY_DATA 0x0E10 /* Data Register */ ++ ++/* AFE CSR Offset for PCS CLK */ ++#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 ++/* Override for near end digital loopback. */ ++#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + + /* PHY Control Register */ +-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +-#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +-#define MII_CR_SPEED_1000 0x0040 +-#define MII_CR_SPEED_100 0x2000 +-#define MII_CR_SPEED_10 0x0000 ++#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ ++#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ ++#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ ++#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ ++#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ ++#define MII_CR_POWER_DOWN 0x0800 /* Power down */ ++#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ ++#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ ++#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ ++#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ ++#define MII_CR_SPEED_1000 0x0040 ++#define MII_CR_SPEED_100 0x2000 ++#define MII_CR_SPEED_10 0x0000 + + /* PHY Status Register */ +-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ ++#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ ++#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ ++#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ ++#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ ++#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ ++#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ ++#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ ++#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ ++#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ ++#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ ++#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ ++#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ ++#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ ++#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ ++#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + + /* Autoneg Advertisement Register */ +-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ ++#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ ++#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ ++#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ ++#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ ++#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ ++#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ ++#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ ++#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ ++#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ ++#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + + /* Link Partner Ability Register (Base Page) */ +-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ ++#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ ++#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ ++#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ ++#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ ++#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ ++#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ ++#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ ++#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ ++#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ ++#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ ++#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + + /* Autoneg Expansion Register */ ++#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ ++#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ ++#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ ++#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ ++#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ + + /* 1000BASE-T Control Register */ +-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ +- /* 0=Configure PHY as Slave */ +-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ +- /* 0=Automatic Master/Slave config */ ++#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ ++#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ ++#define CR_1000T_FD_CAPS 0x0200 /* 
Advertise 1000T FD capability */ ++/* 1=Repeater/switch device port 0=DTE device */ ++#define CR_1000T_REPEATER_DTE 0x0400 ++/* 1=Configure PHY as Master 0=Configure PHY as Slave */ ++#define CR_1000T_MS_VALUE 0x0800 ++/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ ++#define CR_1000T_MS_ENABLE 0x1000 ++#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ ++#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ ++#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ ++#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ ++#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + + /* 1000BASE-T Status Register */ +-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ ++#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ ++#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ ++#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ ++#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ ++#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ ++#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ ++#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ ++#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + ++#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + + /* PHY 1000 MII Register/Bit Definitions */ + /* PHY Registers defined by IEEE */ +-#define PHY_CONTROL 0x00 /* Control Register */ +-#define PHY_STATUS 0x01 /* Status Register */ +-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ ++#define PHY_CONTROL 0x00 /* Control Register */ ++#define PHY_STATUS 0x01 /* Status Register */ ++#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ ++#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ ++#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ ++#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ ++#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ ++#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ ++#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ ++#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ ++#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ ++#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ ++ ++#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + + /* NVM Control */ +-#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +-#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +-#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +-#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +-#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +-#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +-#define E1000_EECD_PRES 0x00000100 /* NVM Present */ ++#define E1000_EECD_SK 0x00000001 /* NVM Clock */ ++#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ ++#define E1000_EECD_DI 0x00000004 /* NVM Data In */ ++#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ ++#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ ++#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ ++#define E1000_EECD_PRES 0x00000100 /* NVM Present */ ++#define E1000_EECD_SIZE 
0x00000200 /* NVM Size (0=64 word 1=256 word) */ ++#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ ++#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ ++#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ ++#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ + /* NVM Addressing bits based on type 0=small, 1=large */ +-#define E1000_EECD_ADDR_BITS 0x00000400 +-#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +-#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +-#define E1000_EECD_SIZE_EX_SHIFT 11 ++#define E1000_EECD_ADDR_BITS 0x00000400 ++#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ ++#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ ++#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ ++#define E1000_EECD_SIZE_EX_SHIFT 11 ++#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ ++#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ ++#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ ++#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ ++#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ + #define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ ++#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ + #define E1000_FLUDONE_ATTEMPTS 20000 + #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + #define E1000_I210_FIFO_SEL_RX 0x00 +@@ -700,53 +969,32 @@ + #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) + #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 + #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 ++ + #define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ + /* Secure FLASH mode requires removing MSb */ + #define E1000_I210_FW_PTR_MASK 0x7FFF + /* Firmware code revision field word offset*/ + #define E1000_I210_FW_VER_OFFSET 328 +-#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +-#define E1000_FLUDONE_ATTEMPTS 20000 +-#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +-#define E1000_I210_FIFO_SEL_RX 0x00 +-#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +-#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +-#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +-#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +- + +-/* Offset to data in NVM read/write registers */ +-#define E1000_NVM_RW_REG_DATA 16 +-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +-#define E1000_NVM_RW_REG_START 1 /* Start operation */ +-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ ++#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ ++#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ ++#define E1000_NVM_RW_REG_START 1 /* Start operation */ ++#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ ++#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ ++#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ ++#define E1000_FLASH_UPDATES 2000 + + /* NVM Word Offsets */ +-#define NVM_COMPAT 0x0003 +-#define 
NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +-#define NVM_VERSION 0x0005 +-#define NVM_INIT_CONTROL2_REG 0x000F +-#define NVM_INIT_CONTROL3_PORT_B 0x0014 +-#define NVM_INIT_CONTROL3_PORT_A 0x0024 +-#define NVM_ALT_MAC_ADDR_PTR 0x0037 +-#define NVM_CHECKSUM_REG 0x003F +-#define NVM_COMPATIBILITY_REG_3 0x0003 +-#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +-#define NVM_MAC_ADDR 0x0000 +-#define NVM_SUB_DEV_ID 0x000B +-#define NVM_SUB_VEN_ID 0x000C +-#define NVM_DEV_ID 0x000D +-#define NVM_VEN_ID 0x000E +-#define NVM_INIT_CTRL_2 0x000F +-#define NVM_INIT_CTRL_4 0x0013 +-#define NVM_LED_1_CFG 0x001C +-#define NVM_LED_0_2_CFG 0x001F +-#define NVM_ETRACK_WORD 0x0042 +-#define NVM_ETRACK_HIWORD 0x0043 +-#define NVM_COMB_VER_OFF 0x0083 +-#define NVM_COMB_VER_PTR 0x003d ++#define NVM_COMPAT 0x0003 ++#define NVM_ID_LED_SETTINGS 0x0004 ++#define NVM_VERSION 0x0005 ++#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 ++#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 ++#define NVM_FUTURE_INIT_WORD1 0x0019 ++#define NVM_ETRACK_WORD 0x0042 ++#define NVM_ETRACK_HIWORD 0x0043 ++#define NVM_COMB_VER_OFF 0x0083 ++#define NVM_COMB_VER_PTR 0x003d + + /* NVM version defines */ + #define NVM_MAJOR_MASK 0xF000 +@@ -763,6 +1011,31 @@ + #define NVM_HEX_CONV 16 + #define NVM_HEX_TENS 10 + ++/* FW version defines */ ++/* Offset of "Loader patch ptr" in Firmware Header */ ++#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 ++/* Patch generation hour & minutes */ ++#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 ++/* Patch generation month & day */ ++#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 ++/* Patch generation year */ ++#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 ++/* Patch major & minor numbers */ ++#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 ++ ++#define NVM_MAC_ADDR 0x0000 ++#define NVM_SUB_DEV_ID 0x000B ++#define NVM_SUB_VEN_ID 0x000C ++#define NVM_DEV_ID 0x000D ++#define NVM_VEN_ID 0x000E ++#define NVM_INIT_CTRL_2 0x000F ++#define NVM_INIT_CTRL_4 0x0013 ++#define NVM_LED_1_CFG 0x001C ++#define NVM_LED_0_2_CFG 0x001F ++ ++#define NVM_COMPAT_VALID_CSUM 0x0001 ++#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 ++ + #define NVM_ETS_CFG 0x003E + #define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 + #define NVM_ETS_LTHRES_DELTA_SHIFT 6 +@@ -775,236 +1048,292 @@ + #define NVM_ETS_DATA_INDEX_MASK 0x0300 + #define NVM_ETS_DATA_INDEX_SHIFT 8 + #define NVM_ETS_DATA_HTHRESH_MASK 0x00FF ++#define NVM_INIT_CONTROL2_REG 0x000F ++#define NVM_INIT_CONTROL3_PORT_B 0x0014 ++#define NVM_INIT_3GIO_3 0x001A ++#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 ++#define NVM_INIT_CONTROL3_PORT_A 0x0024 ++#define NVM_CFG 0x0012 ++#define NVM_ALT_MAC_ADDR_PTR 0x0037 ++#define NVM_CHECKSUM_REG 0x003F ++#define NVM_COMPATIBILITY_REG_3 0x0003 ++#define NVM_COMPATIBILITY_BIT_MASK 0x8000 ++ ++#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ ++#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ ++#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ ++#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +-#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +-#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +-#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +-#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ +- +-#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) ++#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? 
(0x40 + (0x40 * (a))) : 0) + + /* Mask bits for fields in Word 0x24 of the NVM */ +-#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +-#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ ++#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ ++#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ ++/* Offset of Link Mode bits for 82575/82576 */ ++#define NVM_WORD24_LNK_MODE_OFFSET 8 ++/* Offset of Link Mode bits for 82580 up */ ++#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + + /* Mask bits for fields in Word 0x0f of the NVM */ +-#define NVM_WORD0F_PAUSE_MASK 0x3000 +-#define NVM_WORD0F_ASM_DIR 0x2000 ++#define NVM_WORD0F_PAUSE_MASK 0x3000 ++#define NVM_WORD0F_PAUSE 0x1000 ++#define NVM_WORD0F_ASM_DIR 0x2000 + + /* Mask bits for fields in Word 0x1a of the NVM */ ++#define NVM_WORD1A_ASPM_MASK 0x000C + +-/* length of string needed to store part num */ +-#define E1000_PBANUM_LENGTH 11 ++/* Mask bits for fields in Word 0x03 of the EEPROM */ ++#define NVM_COMPAT_LOM 0x0800 ++ ++/* length of string needed to store PBA number */ ++#define E1000_PBANUM_LENGTH 11 + + /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +-#define NVM_SUM 0xBABA ++#define NVM_SUM 0xBABA + +-#define NVM_PBA_OFFSET_0 8 +-#define NVM_PBA_OFFSET_1 9 ++/* PBA (printed board assembly) number words */ ++#define NVM_PBA_OFFSET_0 8 ++#define NVM_PBA_OFFSET_1 9 ++#define NVM_PBA_PTR_GUARD 0xFAFA + #define NVM_RESERVED_WORD 0xFFFF +-#define NVM_PBA_PTR_GUARD 0xFAFA +-#define NVM_WORD_SIZE_BASE_SHIFT 6 +- +-/* NVM Commands - Microwire */ ++#define NVM_WORD_SIZE_BASE_SHIFT 6 + + /* NVM Commands - SPI */ +-#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +-#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +-#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +-#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +-#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +-#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ ++#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ ++#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ ++#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ ++#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ ++#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ ++#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + + /* SPI NVM Status Register */ +-#define NVM_STATUS_RDY_SPI 0x01 ++#define NVM_STATUS_RDY_SPI 0x01 + + /* Word definitions for ID LED Settings */ +-#define ID_LED_RESERVED_0000 0x0000 +-#define ID_LED_RESERVED_FFFF 0xFFFF +-#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ +- (ID_LED_OFF1_OFF2 << 8) | \ +- (ID_LED_DEF1_DEF2 << 4) | \ +- (ID_LED_DEF1_DEF2)) +-#define ID_LED_DEF1_DEF2 0x1 +-#define ID_LED_DEF1_ON2 0x2 +-#define ID_LED_DEF1_OFF2 0x3 +-#define ID_LED_ON1_DEF2 0x4 +-#define ID_LED_ON1_ON2 0x5 +-#define ID_LED_ON1_OFF2 0x6 +-#define ID_LED_OFF1_DEF2 0x7 +-#define ID_LED_OFF1_ON2 0x8 +-#define ID_LED_OFF1_OFF2 0x9 +- +-#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +-#define IGP_ACTIVITY_LED_ENABLE 0x0300 +-#define IGP_LED3_MODE 0x07000000 ++#define ID_LED_RESERVED_0000 0x0000 ++#define ID_LED_RESERVED_FFFF 0xFFFF ++#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ ++ (ID_LED_OFF1_OFF2 << 8) | \ ++ (ID_LED_DEF1_DEF2 << 4) | \ ++ (ID_LED_DEF1_DEF2)) ++#define ID_LED_DEF1_DEF2 0x1 ++#define ID_LED_DEF1_ON2 0x2 ++#define ID_LED_DEF1_OFF2 0x3 ++#define ID_LED_ON1_DEF2 0x4 ++#define 
ID_LED_ON1_ON2 0x5 ++#define ID_LED_ON1_OFF2 0x6 ++#define ID_LED_OFF1_DEF2 0x7 ++#define ID_LED_OFF1_ON2 0x8 ++#define ID_LED_OFF1_OFF2 0x9 ++ ++#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF ++#define IGP_ACTIVITY_LED_ENABLE 0x0300 ++#define IGP_LED3_MODE 0x07000000 + + /* PCI/PCI-X/PCI-EX Config space */ +-#define PCIE_DEVICE_CONTROL2 0x28 +-#define PCIE_DEVICE_CONTROL2_16ms 0x0005 ++#define PCIX_COMMAND_REGISTER 0xE6 ++#define PCIX_STATUS_REGISTER_LO 0xE8 ++#define PCIX_STATUS_REGISTER_HI 0xEA ++#define PCI_HEADER_TYPE_REGISTER 0x0E ++#define PCIE_LINK_STATUS 0x12 ++#define PCIE_DEVICE_CONTROL2 0x28 ++ ++#define PCIX_COMMAND_MMRBC_MASK 0x000C ++#define PCIX_COMMAND_MMRBC_SHIFT 0x2 ++#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 ++#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 ++#define PCIX_STATUS_HI_MMRBC_4K 0x3 ++#define PCIX_STATUS_HI_MMRBC_2K 0x2 ++#define PCIX_STATUS_LO_FUNC_MASK 0x7 ++#define PCI_HEADER_TYPE_MULTIFUNC 0x80 ++#define PCIE_LINK_WIDTH_MASK 0x3F0 ++#define PCIE_LINK_WIDTH_SHIFT 4 ++#define PCIE_LINK_SPEED_MASK 0x0F ++#define PCIE_LINK_SPEED_2500 0x01 ++#define PCIE_LINK_SPEED_5000 0x02 ++#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +-#define PHY_REVISION_MASK 0xFFFFFFF0 +-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +-#define MAX_PHY_MULTI_PAGE_REG 0xF ++#ifndef ETH_ADDR_LEN ++#define ETH_ADDR_LEN 6 ++#endif ++ ++#define PHY_REVISION_MASK 0xFFFFFFF0 ++#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ ++#define MAX_PHY_MULTI_PAGE_REG 0xF + +-/* Bit definitions for valid PHY IDs. */ +-/* I = Integrated ++/* Bit definitions for valid PHY IDs. ++ * I = Integrated + * E = External + */ +-#define M88E1111_I_PHY_ID 0x01410CC0 +-#define M88E1112_E_PHY_ID 0x01410C90 +-#define I347AT4_E_PHY_ID 0x01410DC0 +-#define IGP03E1000_E_PHY_ID 0x02A80390 +-#define I82580_I_PHY_ID 0x015403A0 +-#define I350_I_PHY_ID 0x015403B0 +-#define M88_VENDOR 0x0141 +-#define I210_I_PHY_ID 0x01410C00 +-#define M88E1543_E_PHY_ID 0x01410EA0 ++#define M88E1000_E_PHY_ID 0x01410C50 ++#define M88E1000_I_PHY_ID 0x01410C30 ++#define M88E1011_I_PHY_ID 0x01410C20 ++#define IGP01E1000_I_PHY_ID 0x02A80380 ++#define M88E1111_I_PHY_ID 0x01410CC0 ++#define M88E1543_E_PHY_ID 0x01410EA0 ++#define M88E1512_E_PHY_ID 0x01410DD0 ++#define M88E1112_E_PHY_ID 0x01410C90 ++#define I347AT4_E_PHY_ID 0x01410DC0 ++#define M88E1340M_E_PHY_ID 0x01410DF0 ++#define GG82563_E_PHY_ID 0x01410CA0 ++#define IGP03E1000_E_PHY_ID 0x02A80390 ++#define IFE_E_PHY_ID 0x02A80330 ++#define IFE_PLUS_E_PHY_ID 0x02A80320 ++#define IFE_C_E_PHY_ID 0x02A80310 ++#define I82580_I_PHY_ID 0x015403A0 ++#define I350_I_PHY_ID 0x015403B0 ++#define I210_I_PHY_ID 0x01410C00 ++#define IGP04E1000_E_PHY_ID 0x02A80391 ++#define M88_VENDOR 0x0141 + + /* M88E1000 Specific Registers */ +-#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +-#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ ++#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ ++#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ ++#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ ++#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +-#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +-#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ ++#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ ++#define M88E1000_PHY_GEN_CONTROL 0x1E /* 
meaning depends on reg 29 */ + + /* M88E1000 PHY Specific Control Register */ +-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +-/* 1=CLK125 low, 0=CLK125 toggling */ +-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ +- /* Manual MDI configuration */ +-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ ++#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ ++/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ ++#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 ++#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ + /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +-#define M88E1000_PSCR_AUTO_X_1000T 0x0040 ++#define M88E1000_PSCR_AUTO_X_1000T 0x0040 + /* Auto crossover enabled all speeds */ +-#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold +- * 0=Normal 10BASE-T Rx Threshold +- */ +-/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ ++#define M88E1000_PSCR_AUTO_X_MODE 0x0060 ++#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + + /* M88E1000 PHY Specific Status Register */ +-#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +-#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +-#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ ++#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ ++#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ ++#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ + /* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +-#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +-#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +-#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ +- +-#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 +- +-/* M88E1000 Extended PHY Specific Control Register */ +-/* 1 = Lost lock detect enabled. 
+- * Will assert lost lock and bring +- * link down if idle not seen +- * within 1ms in 1000BASE-T +- */ ++#define M88E1000_PSSR_CABLE_LENGTH 0x0380 ++#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ ++#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ ++#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ ++#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ ++ ++#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 ++ + /* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 ++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 ++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 + /* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +-#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ ++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 ++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 ++#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ ++ ++/* Intel I347AT4 Registers */ ++#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ ++#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ ++#define I347AT4_PAGE_SELECT 0x16 + +-/* Intel i347-AT4 Registers */ ++/* I347AT4 Extended PHY Specific Control Register */ + +-#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +-#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +-#define I347AT4_PAGE_SELECT 0x16 +- +-/* i347-AT4 Extended PHY Specific Control Register */ +- +-/* Number of times we will attempt to autonegotiate before downshifting if we +- * are the master ++/* Number of times we will attempt to autonegotiate before downshifting if we ++ * are the master + */ +-#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +-#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +-#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +-#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +-#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +-#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +-#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +-#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +-#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +-#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 ++#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 ++#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 ++#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 ++#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 ++#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 ++#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 ++#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 ++#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 ++#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 ++#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +-/* i347-AT4 PHY Cable Diagnostics Control */ +-#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ ++/* I347AT4 PHY Cable Diagnostics Control */ ++#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +-/* Marvell 1112 only registers */ +-#define M88E1112_VCT_DSP_DISTANCE 0x001A ++/* M88E1112 only registers */ ++#define M88E1112_VCT_DSP_DISTANCE 0x001A + + /* M88EC018 Rev 2 specific DownShift settings */ +-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 ++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 ++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 ++ ++/* Bits... 
++ * 15-5: page ++ * 4-0: register offset ++ */ ++#define GG82563_PAGE_SHIFT 5 ++#define GG82563_REG(page, reg) \ ++ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) ++#define GG82563_MIN_ALT_REG 30 ++ ++/* GG82563 Specific Registers */ ++#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ ++#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ ++#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ ++#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ ++ ++/* MAC Specific Control Register */ ++#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) ++ ++#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ ++ ++/* Page 193 - Port Control Registers */ ++/* Kumeran Mode Control */ ++#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) ++#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ ++ ++/* Page 194 - KMRN Registers */ ++#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + + /* MDI Control */ +-#define E1000_MDIC_DATA_MASK 0x0000FFFF +-#define E1000_MDIC_REG_MASK 0x001F0000 +-#define E1000_MDIC_REG_SHIFT 16 +-#define E1000_MDIC_PHY_MASK 0x03E00000 +-#define E1000_MDIC_PHY_SHIFT 21 +-#define E1000_MDIC_OP_WRITE 0x04000000 +-#define E1000_MDIC_OP_READ 0x08000000 +-#define E1000_MDIC_READY 0x10000000 +-#define E1000_MDIC_INT_EN 0x20000000 +-#define E1000_MDIC_ERROR 0x40000000 +-#define E1000_MDIC_DEST 0x80000000 +- +-/* Thermal Sensor */ +-#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +-#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ +- +-/* Energy Efficient Ethernet */ +-#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +-#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +-#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +-#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +-#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +-#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +-#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +-#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +-#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +-#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +-#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +-#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +-#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +-#define E1000_M88E1543_EEE_CTRL_1 0x0 +-#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +-#define E1000_EEE_ADV_DEV_I354 7 +-#define E1000_EEE_ADV_ADDR_I354 60 +-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +-#define E1000_PCS_STATUS_DEV_I354 3 +-#define E1000_PCS_STATUS_ADDR_I354 1 +-#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +-#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +-#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 ++#define E1000_MDIC_REG_MASK 0x001F0000 ++#define E1000_MDIC_REG_SHIFT 16 ++#define E1000_MDIC_PHY_MASK 0x03E00000 ++#define E1000_MDIC_PHY_SHIFT 21 ++#define E1000_MDIC_OP_WRITE 0x04000000 ++#define E1000_MDIC_OP_READ 0x08000000 ++#define E1000_MDIC_READY 0x10000000 ++#define E1000_MDIC_ERROR 0x40000000 ++#define E1000_MDIC_DEST 0x80000000 + + /* SerDes Control */ +-#define E1000_GEN_CTL_READY 0x80000000 
+-#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +-#define E1000_GEN_POLL_TIMEOUT 640 +- +-#define E1000_VFTA_ENTRY_SHIFT 5 +-#define E1000_VFTA_ENTRY_MASK 0x7F +-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F +- +-/* DMA Coalescing register fields */ +-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ ++#define E1000_GEN_CTL_READY 0x80000000 ++#define E1000_GEN_CTL_ADDRESS_SHIFT 8 ++#define E1000_GEN_POLL_TIMEOUT 640 ++ ++/* LinkSec register fields */ ++#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 ++#define E1000_LSECTXCAP_SUM_SHIFT 16 ++#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 ++#define E1000_LSECRXCAP_SUM_SHIFT 16 ++ ++#define E1000_LSECTXCTRL_EN_MASK 0x00000003 ++#define E1000_LSECTXCTRL_DISABLE 0x0 ++#define E1000_LSECTXCTRL_AUTH 0x1 ++#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 ++#define E1000_LSECTXCTRL_AISCI 0x00000020 ++#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 ++#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 ++ ++#define E1000_LSECRXCTRL_EN_MASK 0x0000000C ++#define E1000_LSECRXCTRL_EN_SHIFT 2 ++#define E1000_LSECRXCTRL_DISABLE 0x0 ++#define E1000_LSECRXCTRL_CHECK 0x1 ++#define E1000_LSECRXCTRL_STRICT 0x2 ++#define E1000_LSECRXCTRL_DROP 0x3 ++#define E1000_LSECRXCTRL_PLSH 0x00000040 ++#define E1000_LSECRXCTRL_RP 0x00000080 ++#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + + /* Tx Rate-Scheduler Config fields */ + #define E1000_RTTBCNRC_RS_ENA 0x80000000 +@@ -1013,4 +1342,70 @@ + #define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +-#endif ++/* DMA Coalescing register fields */ ++/* DMA Coalescing Watchdog Timer */ ++#define E1000_DMACR_DMACWT_MASK 0x00003FFF ++/* DMA Coalescing Rx Threshold */ ++#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 ++#define E1000_DMACR_DMACTHR_SHIFT 16 ++/* Lx when no PCIe transactions */ ++#define E1000_DMACR_DMAC_LX_MASK 0x30000000 ++#define E1000_DMACR_DMAC_LX_SHIFT 28 ++#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ ++/* DMA Coalescing BMC-to-OS Watchdog Enable */ ++#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 ++ ++/* DMA Coalescing Transmit Threshold */ ++#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF ++ ++#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ ++ ++/* Rx Traffic Rate Threshold */ ++#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF ++/* Rx packet rate in current window */ ++#define E1000_DMCRTRH_LRPRCW 0x80000000 ++ ++/* DMA Coal Rx Traffic Current Count */ ++#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF ++ ++/* Flow ctrl Rx Threshold High val */ ++#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 ++#define E1000_FCRTC_RTH_COAL_SHIFT 4 ++/* Lx power decision based on DMA coal */ ++#define E1000_PCIEMISC_LX_DECISION 0x00000080 ++ ++#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ ++#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ ++#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ ++#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ ++#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ ++ ++/* Proxy Filter Control */ ++#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ ++#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ ++#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ ++#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ ++#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ ++#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ ++#define E1000_PROXYFC_IPV6 0x00000080 /* 
Directed IPv6 Enable */ ++#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ ++#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ ++/* Proxy Status */ ++#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ ++ ++/* Firmware Status */ ++#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ ++/* VF Control */ ++#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ ++ ++#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ ++/* Lan ID bit field offset in status register */ ++#define E1000_STATUS_LAN_ID_OFFSET 2 ++#define E1000_VFTA_ENTRIES 128 ++#ifndef E1000_UNUSEDARG ++#define E1000_UNUSEDARG ++#endif /* E1000_UNUSEDARG */ ++#ifndef ERROR_REPORT ++#define ERROR_REPORT(fmt) do { } while (0) ++#endif /* ERROR_REPORT */ ++#endif /* _E1000_DEFINES_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h +--- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,33 +1,31 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_HW_H_ + #define _E1000_HW_H_ + +-#include +-#include +-#include +-#include +- ++#include "e1000_osdep.h" + #include "e1000_regs.h" + #include "e1000_defines.h" + +@@ -50,15 +48,14 @@ + #define E1000_DEV_ID_82580_SGMII 0x1511 + #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 + #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +-#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +-#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +-#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + #define E1000_DEV_ID_I350_COPPER 0x1521 + #define E1000_DEV_ID_I350_FIBER 0x1522 + #define E1000_DEV_ID_I350_SERDES 0x1523 + #define E1000_DEV_ID_I350_SGMII 0x1524 ++#define E1000_DEV_ID_I350_DA4 0x1546 + #define E1000_DEV_ID_I210_COPPER 0x1533 ++#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 ++#define E1000_DEV_ID_I210_COPPER_IT 0x1535 + #define E1000_DEV_ID_I210_FIBER 0x1536 + #define E1000_DEV_ID_I210_SERDES 0x1537 + #define E1000_DEV_ID_I210_SGMII 0x1538 +@@ -68,19 +65,26 @@ + #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 + #define E1000_DEV_ID_I354_SGMII 0x1F41 + #define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 ++#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 ++#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A ++#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C ++#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +-#define E1000_REVISION_2 2 +-#define E1000_REVISION_4 4 +- +-#define E1000_FUNC_0 0 +-#define E1000_FUNC_1 1 +-#define E1000_FUNC_2 2 +-#define E1000_FUNC_3 3 +- +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 ++#define E1000_REVISION_0 0 ++#define E1000_REVISION_1 1 ++#define E1000_REVISION_2 2 ++#define E1000_REVISION_3 3 ++#define E1000_REVISION_4 4 ++ ++#define E1000_FUNC_0 0 ++#define E1000_FUNC_1 1 ++#define E1000_FUNC_2 2 ++#define E1000_FUNC_3 3 ++ ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + + enum e1000_mac_type { + e1000_undefined = 0, +@@ -127,6 +131,7 @@ + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, ++ e1000_phy_vf, + e1000_phy_i210, + }; + +@@ -181,6 +186,177 @@ + e1000_fc_default = 0xFF + }; + ++enum e1000_ms_type { ++ e1000_ms_hw_default = 0, ++ e1000_ms_force_master, ++ e1000_ms_force_slave, ++ e1000_ms_auto ++}; ++ ++enum e1000_smart_speed { ++ e1000_smart_speed_default = 0, ++ e1000_smart_speed_on, ++ e1000_smart_speed_off ++}; ++ ++enum e1000_serdes_link_state { ++ e1000_serdes_link_down = 0, ++ e1000_serdes_link_autoneg_progress, ++ e1000_serdes_link_autoneg_complete, ++ e1000_serdes_link_forced_up ++}; ++ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++#endif ++/* Receive Descriptor */ ++struct e1000_rx_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ __le16 length; /* Length of data DMAed into data buffer */ ++ __le16 csum; /* Packet checksum */ ++ u8 status; /* Descriptor status */ ++ u8 errors; /* Descriptor Errors */ ++ __le16 special; ++}; ++ ++/* Receive Descriptor - Extended */ ++union e1000_rx_desc_extended { ++ struct { ++ __le64 buffer_addr; ++ __le64 reserved; ++ } read; ++ struct { ++ struct { ++ __le32 mrq; /* Multiple 
Rx Queues */ ++ union { ++ __le32 rss; /* RSS Hash */ ++ struct { ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ ++ } csum_ip; ++ } hi_dword; ++ } lower; ++ struct { ++ __le32 status_error; /* ext status/error */ ++ __le16 length; ++ __le16 vlan; /* VLAN tag */ ++ } upper; ++ } wb; /* writeback */ ++}; ++ ++#define MAX_PS_BUFFERS 4 ++ ++/* Number of packet split data buffers (not including the header buffer) */ ++#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) ++ ++/* Receive Descriptor - Packet Split */ ++union e1000_rx_desc_packet_split { ++ struct { ++ /* one buffer for protocol header(s), three data buffers */ ++ __le64 buffer_addr[MAX_PS_BUFFERS]; ++ } read; ++ struct { ++ struct { ++ __le32 mrq; /* Multiple Rx Queues */ ++ union { ++ __le32 rss; /* RSS Hash */ ++ struct { ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ ++ } csum_ip; ++ } hi_dword; ++ } lower; ++ struct { ++ __le32 status_error; /* ext status/error */ ++ __le16 length0; /* length of buffer 0 */ ++ __le16 vlan; /* VLAN tag */ ++ } middle; ++ struct { ++ __le16 header_status; ++ /* length of buffers 1-3 */ ++ __le16 length[PS_PAGE_BUFFERS]; ++ } upper; ++ __le64 reserved; ++ } wb; /* writeback */ ++}; ++ ++/* Transmit Descriptor */ ++struct e1000_tx_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ union { ++ __le32 data; ++ struct { ++ __le16 length; /* Data buffer length */ ++ u8 cso; /* Checksum offset */ ++ u8 cmd; /* Descriptor control */ ++ } flags; ++ } lower; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 css; /* Checksum start */ ++ __le16 special; ++ } fields; ++ } upper; ++}; ++ ++/* Offload Context Descriptor */ ++struct e1000_context_desc { ++ union { ++ __le32 ip_config; ++ struct { ++ u8 ipcss; /* IP checksum start */ ++ u8 ipcso; /* IP checksum offset */ ++ __le16 ipcse; /* IP checksum end */ ++ } ip_fields; ++ } lower_setup; ++ union { ++ __le32 tcp_config; ++ struct { ++ u8 tucss; /* TCP checksum start */ ++ u8 tucso; /* TCP checksum offset */ ++ __le16 tucse; /* TCP checksum end */ ++ } tcp_fields; ++ } upper_setup; ++ __le32 cmd_and_length; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 hdr_len; /* Header length */ ++ __le16 mss; /* Maximum segment size */ ++ } fields; ++ } tcp_seg_setup; ++}; ++ ++/* Offload data descriptor */ ++struct e1000_data_desc { ++ __le64 buffer_addr; /* Address of the descriptor's buffer address */ ++ union { ++ __le32 data; ++ struct { ++ __le16 length; /* Data buffer length */ ++ u8 typ_len_ext; ++ u8 cmd; ++ } flags; ++ } lower; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 popts; /* Packet Options */ ++ __le16 special; ++ } fields; ++ } upper; ++}; ++ + /* Statistics counters collected by the MAC */ + struct e1000_hw_stats { + u64 crcerrs; +@@ -289,7 +465,7 @@ + u8 checksum; + }; + +-#define E1000_HI_MAX_DATA_LENGTH 252 ++#define E1000_HI_MAX_DATA_LENGTH 252 + struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +@@ -304,7 +480,7 @@ + u16 command_length; + }; + +-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 ++#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 + struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +@@ -313,52 +489,95 @@ + #include "e1000_mac.h" + #include "e1000_phy.h" + #include "e1000_nvm.h" ++#include "e1000_manage.h" + #include 
"e1000_mbx.h" + ++/* Function pointers for the MAC. */ + struct e1000_mac_operations { +- s32 (*check_for_link)(struct e1000_hw *); +- s32 (*reset_hw)(struct e1000_hw *); +- s32 (*init_hw)(struct e1000_hw *); ++ s32 (*init_params)(struct e1000_hw *); ++ s32 (*id_led_init)(struct e1000_hw *); ++ s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); +- s32 (*setup_physical_interface)(struct e1000_hw *); +- void (*rar_set)(struct e1000_hw *, u8 *, u32); +- s32 (*read_mac_addr)(struct e1000_hw *); +- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); +- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); +- void (*release_swfw_sync)(struct e1000_hw *, u16); +-#ifdef CONFIG_IGB_HWMON ++ s32 (*check_for_link)(struct e1000_hw *); ++ s32 (*cleanup_led)(struct e1000_hw *); ++ void (*clear_hw_cntrs)(struct e1000_hw *); ++ void (*clear_vfta)(struct e1000_hw *); ++ s32 (*get_bus_info)(struct e1000_hw *); ++ void (*set_lan_id)(struct e1000_hw *); ++ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); ++ s32 (*led_on)(struct e1000_hw *); ++ s32 (*led_off)(struct e1000_hw *); ++ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); ++ s32 (*reset_hw)(struct e1000_hw *); ++ s32 (*init_hw)(struct e1000_hw *); ++ void (*shutdown_serdes)(struct e1000_hw *); ++ void (*power_up_serdes)(struct e1000_hw *); ++ s32 (*setup_link)(struct e1000_hw *); ++ s32 (*setup_physical_interface)(struct e1000_hw *); ++ s32 (*setup_led)(struct e1000_hw *); ++ void (*write_vfta)(struct e1000_hw *, u32, u32); ++ void (*config_collision_dist)(struct e1000_hw *); ++ int (*rar_set)(struct e1000_hw *, u8*, u32); ++ s32 (*read_mac_addr)(struct e1000_hw *); ++ s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +-#endif +- ++ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); ++ void (*release_swfw_sync)(struct e1000_hw *, u16); + }; + ++/* When to use various PHY register access functions: ++ * ++ * Func Caller ++ * Function Does Does When to use ++ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ * X_reg L,P,A n/a for simple PHY reg accesses ++ * X_reg_locked P,A L for multiple accesses of different regs ++ * on different pages ++ * X_reg_page A L,P for multiple accesses of different regs ++ * on the same page ++ * ++ * Where X=[read|write], L=locking, P=sets page, A=register access ++ * ++ */ + struct e1000_phy_operations { +- s32 (*acquire)(struct e1000_hw *); +- s32 (*check_polarity)(struct e1000_hw *); +- s32 (*check_reset_block)(struct e1000_hw *); +- s32 (*force_speed_duplex)(struct e1000_hw *); +- s32 (*get_cfg_done)(struct e1000_hw *hw); +- s32 (*get_cable_length)(struct e1000_hw *); +- s32 (*get_phy_info)(struct e1000_hw *); +- s32 (*read_reg)(struct e1000_hw *, u32, u16 *); ++ s32 (*init_params)(struct e1000_hw *); ++ s32 (*acquire)(struct e1000_hw *); ++ s32 (*check_polarity)(struct e1000_hw *); ++ s32 (*check_reset_block)(struct e1000_hw *); ++ s32 (*commit)(struct e1000_hw *); ++ s32 (*force_speed_duplex)(struct e1000_hw *); ++ s32 (*get_cfg_done)(struct e1000_hw *hw); ++ s32 (*get_cable_length)(struct e1000_hw *); ++ s32 (*get_info)(struct e1000_hw *); ++ s32 (*set_page)(struct e1000_hw *, u16); ++ s32 (*read_reg)(struct e1000_hw *, u32, u16 *); ++ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); ++ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); +- s32 (*reset)(struct e1000_hw *); +- s32 
(*set_d0_lplu_state)(struct e1000_hw *, bool); +- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); +- s32 (*write_reg)(struct e1000_hw *, u32, u16); ++ s32 (*reset)(struct e1000_hw *); ++ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); ++ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); ++ s32 (*write_reg)(struct e1000_hw *, u32, u16); ++ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); ++ s32 (*write_reg_page)(struct e1000_hw *, u32, u16); ++ void (*power_up)(struct e1000_hw *); ++ void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); + }; + ++/* Function pointers for the NVM. */ + struct e1000_nvm_operations { +- s32 (*acquire)(struct e1000_hw *); +- s32 (*read)(struct e1000_hw *, u16, u16, u16 *); ++ s32 (*init_params)(struct e1000_hw *); ++ s32 (*acquire)(struct e1000_hw *); ++ s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); +- s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +- s32 (*update)(struct e1000_hw *); +- s32 (*validate)(struct e1000_hw *); +- s32 (*valid_led_default)(struct e1000_hw *, u16 *); ++ void (*reload)(struct e1000_hw *); ++ s32 (*update)(struct e1000_hw *); ++ s32 (*valid_led_default)(struct e1000_hw *, u16 *); ++ s32 (*validate)(struct e1000_hw *); ++ s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + }; + + #define E1000_MAX_SENSORS 3 +@@ -374,49 +593,45 @@ + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; + }; + +-struct e1000_info { +- s32 (*get_invariants)(struct e1000_hw *); +- struct e1000_mac_operations *mac_ops; +- struct e1000_phy_operations *phy_ops; +- struct e1000_nvm_operations *nvm_ops; +-}; +- +-extern const struct e1000_info e1000_82575_info; +- + struct e1000_mac_info { + struct e1000_mac_operations ops; +- +- u8 addr[6]; +- u8 perm_addr[6]; ++ u8 addr[ETH_ADDR_LEN]; ++ u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + ++ u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; ++ u32 tx_packet_delta; + u32 txcw; + ++ u16 current_ifs_val; ++ u16 ifs_max_val; ++ u16 ifs_min_val; ++ u16 ifs_ratio; ++ u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +- #define MAX_MTA_REG 128 ++#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; ++ bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; +- bool disable_hw_init_bits; + bool get_link_status; +- bool ifs_params_forced; + bool in_ifs_mode; +- bool report_tx_early; ++ enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +@@ -424,7 +639,6 @@ + + struct e1000_phy_info { + struct e1000_phy_operations ops; +- + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; +@@ -477,20 +691,19 @@ + enum e1000_bus_speed speed; + enum e1000_bus_width width; + +- u32 snoop; +- + u16 func; + u16 pci_cmd_word; + }; + + struct e1000_fc_info { +- u32 high_water; /* Flow control high-water mark */ +- u32 low_water; /* Flow control low-water mark */ +- u16 pause_time; /* Flow control pause timer */ +- bool send_xon; /* Flow control send XON */ +- bool strict_ieee; /* Strict IEEE mode */ +- enum e1000_fc_mode current_mode; /* Type of flow control */ +- enum e1000_fc_mode requested_mode; 
++ u32 high_water; /* Flow control high-water mark */ ++ u32 low_water; /* Flow control low-water mark */ ++ u16 pause_time; /* Flow control pause timer */ ++ u16 refresh_time; /* Flow control refresh timer */ ++ bool send_xon; /* Flow control send XON */ ++ bool strict_ieee; /* Strict IEEE mode */ ++ enum e1000_fc_mode current_mode; /* FC mode in effect */ ++ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ + }; + + struct e1000_mbx_operations { +@@ -525,12 +738,17 @@ + bool sgmii_active; + bool global_device_reset; + bool eee_disable; +- bool clear_semaphore_once; +- struct e1000_sfp_flags eth_flags; + bool module_plugged; ++ bool clear_semaphore_once; ++ u32 mtu; ++ struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; +- bool mas_capable; ++}; ++ ++struct e1000_dev_spec_vf { ++ u32 vf_number; ++ u32 v2p_mailbox; + }; + + struct e1000_hw { +@@ -549,7 +767,8 @@ + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { +- struct e1000_dev_spec_82575 _82575; ++ struct e1000_dev_spec_82575 _82575; ++ struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; +@@ -560,14 +779,13 @@ + u8 revision_id; + }; + +-struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +-#define hw_dbg(format, arg...) \ +- netdev_dbg(igb_get_hw_dev(hw), format, ##arg) ++#include "e1000_82575.h" ++#include "e1000_i210.h" + + /* These functions must be implemented by drivers */ +-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); ++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); ++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); ++void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); ++void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +-void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +-void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +-#endif /* _E1000_HW_H_ */ ++#endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c +--- a/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,107 +1,40 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* + +-/* e1000_i210 +- * e1000_i211 +- */ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. 
+ +-#include +-#include ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-#include "e1000_hw.h" +-#include "e1000_i210.h" ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. + +-static s32 igb_update_flash_i210(struct e1000_hw *hw); ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". + +-/** +- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore +- * @hw: pointer to the HW structure +- * +- * Acquire the HW semaphore to access the PHY or NVM +- */ +-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +-{ +- u32 swsm; +- s32 timeout = hw->nvm.word_size + 1; +- s32 i = 0; ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +- /* Get the SW semaphore */ +- while (i < timeout) { +- swsm = rd32(E1000_SWSM); +- if (!(swsm & E1000_SWSM_SMBI)) +- break; ++*******************************************************************************/ + +- udelay(50); +- i++; +- } ++#include "e1000_api.h" + +- if (i == timeout) { +- /* In rare circumstances, the SW semaphore may already be held +- * unintentionally. Clear the semaphore once before giving up. +- */ +- if (hw->dev_spec._82575.clear_semaphore_once) { +- hw->dev_spec._82575.clear_semaphore_once = false; +- igb_put_hw_semaphore(hw); +- for (i = 0; i < timeout; i++) { +- swsm = rd32(E1000_SWSM); +- if (!(swsm & E1000_SWSM_SMBI)) +- break; + +- udelay(50); +- } +- } +- +- /* If we do not have the semaphore here, we have to give up. */ +- if (i == timeout) { +- hw_dbg("Driver can't access device - SMBI bit is set.\n"); +- return -E1000_ERR_NVM; +- } +- } +- +- /* Get the FW semaphore. */ +- for (i = 0; i < timeout; i++) { +- swsm = rd32(E1000_SWSM); +- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); +- +- /* Semaphore acquired if bit latched */ +- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) +- break; +- +- udelay(50); +- } +- +- if (i == timeout) { +- /* Release semaphores */ +- igb_put_hw_semaphore(hw); +- hw_dbg("Driver can't access the NVM\n"); +- return -E1000_ERR_NVM; +- } +- +- return 0; +-} ++static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); ++static void e1000_release_nvm_i210(struct e1000_hw *hw); ++static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); ++static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data); ++static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); ++static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + + /** +- * igb_acquire_nvm_i210 - Request for access to EEPROM ++ * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. +@@ -109,93 +42,178 @@ + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ **/ +-static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) ++static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) + { +- return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_acquire_nvm_i210"); ++ ++ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); ++ ++ return ret_val; + } + + /** +- * igb_release_nvm_i210 - Release exclusive access to EEPROM ++ * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +-static void igb_release_nvm_i210(struct e1000_hw *hw) ++static void e1000_release_nvm_i210(struct e1000_hw *hw) + { +- igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); ++ DEBUGFUNC("e1000_release_nvm_i210"); ++ ++ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + } + + /** +- * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore ++ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) ++s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + ++ DEBUGFUNC("e1000_acquire_swfw_sync_i210"); ++ + while (i < timeout) { +- if (igb_get_hw_semaphore_i210(hw)) { ++ if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + +- /* Firmware currently using resource (fwmask) */ +- igb_put_hw_semaphore(hw); +- mdelay(5); ++ /* ++ * Firmware currently using resource (fwmask) ++ * or other software thread using resource (swmask) ++ */ ++ e1000_put_hw_semaphore_generic(hw); ++ msec_delay_irq(5); + i++; + } + + if (i == timeout) { +- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); ++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); ++ ++ e1000_put_hw_semaphore_generic(hw); + +- igb_put_hw_semaphore(hw); + out: + return ret_val; + } + + /** +- * igb_release_swfw_sync_i210 - Release SW/FW semaphore ++ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
+ **/ +-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) ++void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + +- while (igb_get_hw_semaphore_i210(hw)) ++ DEBUGFUNC("e1000_release_swfw_sync_i210"); ++ ++ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + +- igb_put_hw_semaphore(hw); ++ e1000_put_hw_semaphore_generic(hw); + } + + /** +- * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register ++ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore ++ * @hw: pointer to the HW structure ++ * ++ * Acquire the HW semaphore to access the PHY or NVM ++ **/ ++static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) ++{ ++ u32 swsm; ++ s32 timeout = hw->nvm.word_size + 1; ++ s32 i = 0; ++ ++ DEBUGFUNC("e1000_get_hw_semaphore_i210"); ++ ++ /* Get the SW semaphore */ ++ while (i < timeout) { ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ if (!(swsm & E1000_SWSM_SMBI)) ++ break; ++ ++ usec_delay(50); ++ i++; ++ } ++ ++ if (i == timeout) { ++ /* In rare circumstances, the SW semaphore may already be held ++ * unintentionally. Clear the semaphore once before giving up. ++ */ ++ if (hw->dev_spec._82575.clear_semaphore_once) { ++ hw->dev_spec._82575.clear_semaphore_once = false; ++ e1000_put_hw_semaphore_generic(hw); ++ for (i = 0; i < timeout; i++) { ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ if (!(swsm & E1000_SWSM_SMBI)) ++ break; ++ ++ usec_delay(50); ++ } ++ } ++ ++ /* If we do not have the semaphore here, we have to give up. */ ++ if (i == timeout) { ++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); ++ return -E1000_ERR_NVM; ++ } ++ } ++ ++ /* Get the FW semaphore. */ ++ for (i = 0; i < timeout; i++) { ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); ++ ++ /* Semaphore acquired if bit latched */ ++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) ++ break; ++ ++ usec_delay(50); ++ } ++ ++ if (i == timeout) { ++ /* Release semaphores */ ++ e1000_put_hw_semaphore_generic(hw); ++ DEBUGOUT("Driver can't access the NVM\n"); ++ return -E1000_ERR_NVM; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read +@@ -204,28 +222,74 @@ + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +-static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, +- u16 *data) ++s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data) + { +- s32 status = 0; ++ s32 status = E1000_SUCCESS; + u16 i, count; + ++ DEBUGFUNC("e1000_read_nvm_srrd_i210"); ++ + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient +- * to read in bursts than synchronizing access for each word. +- */ ++ * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 
+ E1000_EERD_EEWR_MAX_COUNT : (words - i); +- if (!(hw->nvm.ops.acquire(hw))) { +- status = igb_read_nvm_eerd(hw, offset, count, ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { ++ status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + +- if (status) ++ if (status != E1000_SUCCESS) ++ break; ++ } ++ ++ return status; ++} ++ ++/** ++ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR ++ * @hw: pointer to the HW structure ++ * @offset: offset within the Shadow RAM to be written to ++ * @words: number of words to write ++ * @data: 16 bit word(s) to be written to the Shadow RAM ++ * ++ * Writes data to Shadow RAM at offset using EEWR register. ++ * ++ * If e1000_update_nvm_checksum is not called after this function , the ++ * data will not be committed to FLASH and also Shadow RAM will most likely ++ * contain an invalid checksum. ++ * ++ * If error code is returned, data and Shadow RAM may be inconsistent - buffer ++ * partially written. ++ **/ ++s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data) ++{ ++ s32 status = E1000_SUCCESS; ++ u16 i, count; ++ ++ DEBUGFUNC("e1000_write_nvm_srwr_i210"); ++ ++ /* We cannot hold synchronization semaphores for too long, ++ * because of forceful takeover procedure. However it is more efficient ++ * to write in bursts than synchronizing access for each word. */ ++ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { ++ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? ++ E1000_EERD_EEWR_MAX_COUNT : (words - i); ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { ++ status = e1000_write_nvm_srwr(hw, offset, count, ++ data + i); ++ hw->nvm.ops.release(hw); ++ } else { ++ status = E1000_ERR_SWFW_SYNC; ++ } ++ ++ if (status != E1000_SUCCESS) + break; + } + +@@ -233,7 +297,7 @@ + } + + /** +- * igb_write_nvm_srwr - Write to Shadow Ram using EEWR ++ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write +@@ -241,23 +305,26 @@ + * + * Writes data to Shadow Ram at offset using EEWR register. + * +- * If igb_update_nvm_checksum is not called after this function , the ++ * If e1000_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, ++static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + +- /* A check for invalid values: offset too large, too many words, ++ DEBUGFUNC("e1000_write_nvm_srwr"); ++ ++ /* ++ * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } +@@ -267,19 +334,19 @@ + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + +- wr32(E1000_SRWR, eewr); ++ E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & +- rd32(E1000_SRWR)) { +- ret_val = 0; ++ E1000_READ_REG(hw, E1000_SRWR)) { ++ ret_val = E1000_SUCCESS; + break; + } +- udelay(5); +- } ++ usec_delay(5); ++ } + +- if (ret_val) { +- hw_dbg("Shadow RAM write EEWR timed out\n"); ++ if (ret_val != E1000_SUCCESS) { ++ DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } +@@ -288,52 +355,7 @@ + return ret_val; + } + +-/** +- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR +- * @hw: pointer to the HW structure +- * @offset: offset within the Shadow RAM to be written to +- * @words: number of words to write +- * @data: 16 bit word(s) to be written to the Shadow RAM +- * +- * Writes data to Shadow RAM at offset using EEWR register. +- * +- * If e1000_update_nvm_checksum is not called after this function , the +- * data will not be committed to FLASH and also Shadow RAM will most likely +- * contain an invalid checksum. +- * +- * If error code is returned, data and Shadow RAM may be inconsistent - buffer +- * partially written. +- **/ +-static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, +- u16 *data) +-{ +- s32 status = 0; +- u16 i, count; +- +- /* We cannot hold synchronization semaphores for too long, +- * because of forceful takeover procedure. However it is more efficient +- * to write in bursts than synchronizing access for each word. +- */ +- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { +- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? +- E1000_EERD_EEWR_MAX_COUNT : (words - i); +- if (!(hw->nvm.ops.acquire(hw))) { +- status = igb_write_nvm_srwr(hw, offset, count, +- data + i); +- hw->nvm.ops.release(hw); +- } else { +- status = E1000_ERR_SWFW_SYNC; +- } +- +- if (status) +- break; +- } +- +- return status; +-} +- +-/** +- * igb_read_invm_word_i210 - Reads OTP ++/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read +@@ -341,15 +363,17 @@ + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. 
+ **/ +-static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) ++static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) + { + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + ++ DEBUGFUNC("e1000_read_invm_word_i210"); ++ + for (i = 0; i < E1000_INVM_SIZE; i++) { +- invm_dword = rd32(E1000_INVM_DATA_REG(i)); ++ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) +@@ -362,75 +386,76 @@ + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); +- hw_dbg("Read INVM Word 0x%02x = %x\n", ++ DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); +- status = 0; ++ status = E1000_SUCCESS; + break; + } + } + } +- if (status) +- hw_dbg("Requested word 0x%02x not found in OTP\n", address); ++ if (status != E1000_SUCCESS) ++ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; + } + +-/** +- * igb_read_invm_i210 - Read invm wrapper function for I210/I211 ++/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure +- * @words: number of words to read ++ * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. + **/ +-static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, +- u16 words __always_unused, u16 *data) ++static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, ++ u16 E1000_UNUSEDARG words, u16 *data) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); +- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); ++ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); +- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, ++ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); +- if (ret_val) +- hw_dbg("MAC Addr not found in iNVM\n"); ++ if (ret_val != E1000_SUCCESS) ++ DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != 
E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: +@@ -446,7 +471,7 @@ + *data = hw->vendor_id; + break; + default: +- hw_dbg("NVM word 0x%02x is not mapped.\n", offset); ++ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } +@@ -454,14 +479,15 @@ + } + + /** +- * igb_read_invm_version - Reads iNVM version and image type ++ * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. + **/ +-s32 igb_read_invm_version(struct e1000_hw *hw, +- struct e1000_fw_version *invm_ver) { ++s32 e1000_read_invm_version(struct e1000_hw *hw, ++ struct e1000_fw_version *invm_ver) ++{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; +@@ -472,9 +498,11 @@ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + ++ DEBUGFUNC("e1000_read_invm_version"); ++ + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { +- invm_dword = rd32(E1000_INVM_DATA_REG(i)); ++ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + +@@ -486,17 +514,18 @@ + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; +- status = 0; ++ status = E1000_SUCCESS; + break; + } +- /* Check if we have odd version location ++ /* ++ * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && +@@ -504,21 +533,22 @@ + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; +- status = 0; ++ status = E1000_SUCCESS; + break; + } +- /* Check if we have even version location ++ /* ++ * Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + } + +- if (!status) { ++ if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; +@@ -531,7 +561,7 @@ + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + /* Check if we have image type in first location used */ +@@ -540,7 +570,7 @@ + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + } +@@ -548,27 +578,30 @@ + } + + /** +- * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum ++ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW 
structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +-static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) ++s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) + { +- s32 status = 0; ++ s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + +- if (!(hw->nvm.ops.acquire(hw))) { ++ DEBUGFUNC("e1000_validate_nvm_checksum_i210"); ++ ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + +- /* Replace the read function with semaphore grabbing with ++ /* ++ * Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; +- hw->nvm.ops.read = igb_read_nvm_eerd; ++ hw->nvm.ops.read = e1000_read_nvm_eerd; + +- status = igb_validate_nvm_checksum(hw); ++ status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; +@@ -581,147 +614,208 @@ + return status; + } + ++ + /** +- * igb_update_nvm_checksum_i210 - Update EEPROM checksum ++ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +-static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) ++s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + +- /* Read the first word from the EEPROM. If this times out or fails, do ++ DEBUGFUNC("e1000_update_nvm_checksum_i210"); ++ ++ /* ++ * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ +- ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("EEPROM read failed\n"); ++ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); ++ if (ret_val != E1000_SUCCESS) { ++ DEBUGOUT("EEPROM read failed\n"); + goto out; + } + +- if (!(hw->nvm.ops.acquire(hw))) { +- /* Do not use hw->nvm.ops.write, hw->nvm.ops.read ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { ++ /* ++ * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { +- ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); ++ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); +- hw_dbg("NVM Read Error while updating checksum.\n"); ++ DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; +- ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, ++ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); +- if (ret_val) { ++ if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); +- hw_dbg("NVM Write Error while updating checksum.\n"); ++ DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + +- ret_val = igb_update_flash_i210(hw); ++ ret_val = e1000_update_flash_i210(hw); + } else { +- ret_val = -E1000_ERR_SWFW_SYNC; ++ ret_val = E1000_ERR_SWFW_SYNC; ++ } ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_get_flash_presence_i210 - Check if flash device is detected. ++ * @hw: pointer to the HW structure ++ * ++ **/ ++bool e1000_get_flash_presence_i210(struct e1000_hw *hw) ++{ ++ u32 eec = 0; ++ bool ret_val = false; ++ ++ DEBUGFUNC("e1000_get_flash_presence_i210"); ++ ++ eec = E1000_READ_REG(hw, E1000_EECD); ++ ++ if (eec & E1000_EECD_FLASH_DETECTED_I210) ++ ret_val = true; ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_update_flash_i210 - Commit EEPROM to the flash ++ * @hw: pointer to the HW structure ++ * ++ **/ ++s32 e1000_update_flash_i210(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u32 flup; ++ ++ DEBUGFUNC("e1000_update_flash_i210"); ++ ++ ret_val = e1000_pool_flash_update_done_i210(hw); ++ if (ret_val == -E1000_ERR_NVM) { ++ DEBUGOUT("Flash update time out\n"); ++ goto out; + } ++ ++ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; ++ E1000_WRITE_REG(hw, E1000_EECD, flup); ++ ++ ret_val = e1000_pool_flash_update_done_i210(hw); ++ if (ret_val == E1000_SUCCESS) ++ DEBUGOUT("Flash update complete\n"); ++ else ++ DEBUGOUT("Flash update time out\n"); ++ + out: + return ret_val; + } + + /** +- * igb_pool_flash_update_done_i210 - Pool FLUDONE status. ++ * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) ++s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) + { + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + ++ DEBUGFUNC("e1000_pool_flash_update_done_i210"); ++ + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { +- reg = rd32(E1000_EECD); ++ reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + break; + } +- udelay(5); ++ usec_delay(5); + } + + return ret_val; + } + + /** +- * igb_get_flash_presence_i210 - Check if flash device is detected. ++ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * ++ * Initialize the i210/i211 NVM parameters and function pointers. 
+ **/ +-bool igb_get_flash_presence_i210(struct e1000_hw *hw) ++static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) + { +- u32 eec = 0; +- bool ret_val = false; ++ s32 ret_val; ++ struct e1000_nvm_info *nvm = &hw->nvm; + +- eec = rd32(E1000_EECD); +- if (eec & E1000_EECD_FLASH_DETECTED_I210) +- ret_val = true; ++ DEBUGFUNC("e1000_init_nvm_params_i210"); + ++ ret_val = e1000_init_nvm_params_82575(hw); ++ nvm->ops.acquire = e1000_acquire_nvm_i210; ++ nvm->ops.release = e1000_release_nvm_i210; ++ nvm->ops.valid_led_default = e1000_valid_led_default_i210; ++ if (e1000_get_flash_presence_i210(hw)) { ++ hw->nvm.type = e1000_nvm_flash_hw; ++ nvm->ops.read = e1000_read_nvm_srrd_i210; ++ nvm->ops.write = e1000_write_nvm_srwr_i210; ++ nvm->ops.validate = e1000_validate_nvm_checksum_i210; ++ nvm->ops.update = e1000_update_nvm_checksum_i210; ++ } else { ++ hw->nvm.type = e1000_nvm_invm; ++ nvm->ops.read = e1000_read_invm_i210; ++ nvm->ops.write = e1000_null_write_nvm; ++ nvm->ops.validate = e1000_null_ops_generic; ++ nvm->ops.update = e1000_null_ops_generic; ++ } + return ret_val; + } + + /** +- * igb_update_flash_i210 - Commit EEPROM to the flash ++ * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * ++ * Called to initialize all function pointers and parameters. + **/ +-static s32 igb_update_flash_i210(struct e1000_hw *hw) ++void e1000_init_function_pointers_i210(struct e1000_hw *hw) + { +- s32 ret_val = 0; +- u32 flup; +- +- ret_val = igb_pool_flash_update_done_i210(hw); +- if (ret_val == -E1000_ERR_NVM) { +- hw_dbg("Flash update time out\n"); +- goto out; +- } ++ e1000_init_function_pointers_82575(hw); ++ hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + +- flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; +- wr32(E1000_EECD, flup); +- +- ret_val = igb_pool_flash_update_done_i210(hw); +- if (ret_val) +- hw_dbg("Flash update complete\n"); +- else +- hw_dbg("Flash update time out\n"); +- +-out: +- return ret_val; ++ return; + } + + /** +- * igb_valid_led_default_i210 - Verify a valid default LED config ++ * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) ++static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_valid_led_default_i210"); ++ + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + goto out; + } + +@@ -741,17 +835,19 @@ + } + + /** +- * __igb_access_xmdio_reg - Read/write XMDIO register ++ * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +-static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, +- u8 dev_addr, u16 *data, bool read) ++static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, ++ u8 dev_addr, u16 *data, bool read) + { +- s32 ret_val = 0; ++ s32 ret_val; ++ ++ DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) +@@ -782,67 +878,41 @@ + } + + /** +- * igb_read_xmdio_reg - Read XMDIO register ++ * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) ++s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) + { +- return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); ++ DEBUGFUNC("e1000_read_xmdio_reg"); ++ ++ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); + } + + /** +- * igb_write_xmdio_reg - Write XMDIO register ++ * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +-{ +- return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +-} +- +-/** +- * igb_init_nvm_params_i210 - Init NVM func ptrs. +- * @hw: pointer to the HW structure +- **/ +-s32 igb_init_nvm_params_i210(struct e1000_hw *hw) ++s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) + { +- s32 ret_val = 0; +- struct e1000_nvm_info *nvm = &hw->nvm; ++ DEBUGFUNC("e1000_read_xmdio_reg"); + +- nvm->ops.acquire = igb_acquire_nvm_i210; +- nvm->ops.release = igb_release_nvm_i210; +- nvm->ops.valid_led_default = igb_valid_led_default_i210; +- +- /* NVM Function Pointers */ +- if (igb_get_flash_presence_i210(hw)) { +- hw->nvm.type = e1000_nvm_flash_hw; +- nvm->ops.read = igb_read_nvm_srrd_i210; +- nvm->ops.write = igb_write_nvm_srwr_i210; +- nvm->ops.validate = igb_validate_nvm_checksum_i210; +- nvm->ops.update = igb_update_nvm_checksum_i210; +- } else { +- hw->nvm.type = e1000_nvm_invm; +- nvm->ops.read = igb_read_invm_i210; +- nvm->ops.write = NULL; +- nvm->ops.validate = NULL; +- nvm->ops.update = NULL; +- } +- return ret_val; ++ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); + } + + /** +- * igb_pll_workaround_i210 ++ * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +-s32 igb_pll_workaround_i210(struct e1000_hw *hw) ++static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) + { + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; +@@ -850,53 +920,104 @@ + int i; + + /* Get and set needed register values */ +- wuc = rd32(E1000_WUC); +- mdicnfg = rd32(E1000_MDICNFG); ++ wuc = E1000_READ_REG(hw, E1000_WUC); ++ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; +- wr32(E1000_MDICNFG, reg_val); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ +- ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, +- &nvm_word); +- if (ret_val) ++ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, ++ &nvm_word); ++ if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ +- igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | ++ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ +- ctrl = rd32(E1000_CTRL); +- wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); +- wr32(E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +- wr32(E1000_WUC, 0); ++ E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); +- wr32(E1000_EEARBC_I210, reg_val); ++ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + +- igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); ++ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; +- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); +- usleep_range(1000, 2000); ++ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); ++ msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; +- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); ++ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); +- wr32(E1000_EEARBC_I210, reg_val); ++ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ +- wr32(E1000_WUC, wuc); ++ E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ +- wr32(E1000_MDICNFG, mdicnfg); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); ++ return ret_val; ++} ++ ++/** ++ * e1000_get_cfg_done_i210 - Read config done bit ++ * @hw: pointer to the HW structure ++ * ++ * Read the management control register for the config done bit for ++ * completion status. NOTE: silicon which is EEPROM-less will fail trying ++ * to read the config done bit, so an error is *ONLY* logged and returns ++ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon ++ * would not be able to be reset or change link. 
++ **/ ++static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) ++{ ++ s32 timeout = PHY_CFG_TIMEOUT; ++ u32 mask = E1000_NVM_CFG_DONE_PORT_0; ++ ++ DEBUGFUNC("e1000_get_cfg_done_i210"); ++ ++ while (timeout) { ++ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) ++ break; ++ msec_delay(1); ++ timeout--; ++ } ++ if (!timeout) ++ DEBUGOUT("MNG configuration cycle has not completed.\n"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_init_hw_i210 - Init hw for I210/I211 ++ * @hw: pointer to the HW structure ++ * ++ * Called to initialize hw for i210 hw family. ++ **/ ++s32 e1000_init_hw_i210(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_init_hw_i210"); ++ if ((hw->mac.type >= e1000_i210) && ++ !(e1000_get_flash_presence_i210(hw))) { ++ ret_val = e1000_pll_workaround_i210(hw); ++ if (ret_val != E1000_SUCCESS) ++ return ret_val; ++ } ++ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; ++ ret_val = e1000_init_hw_82575(hw); + return ret_val; + } +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h +--- a/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,39 +1,47 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_I210_H_ + #define _E1000_I210_H_ + +-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +-s32 igb_read_invm_version(struct e1000_hw *hw, +- struct e1000_fw_version *invm_ver); +-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +-s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +-bool igb_get_flash_presence_i210(struct e1000_hw *hw); +-s32 igb_pll_workaround_i210(struct e1000_hw *hw); ++bool e1000_get_flash_presence_i210(struct e1000_hw *hw); ++s32 e1000_update_flash_i210(struct e1000_hw *hw); ++s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); ++s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); ++s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, ++ u16 words, u16 *data); ++s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, ++ u16 words, u16 *data); ++s32 e1000_read_invm_version(struct e1000_hw *hw, ++ struct e1000_fw_version *invm_ver); ++s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); ++void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); ++s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, ++ u16 *data); ++s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, ++ u16 data); ++s32 e1000_init_hw_i210(struct e1000_hw *hw); + + #define E1000_STM_OPCODE 0xDB00 + #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 +@@ -56,15 +64,15 @@ + + #define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 + #define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +-#define E1000_INVM_ULT_BYTES_SIZE 8 +-#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +-#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +-#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +-#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 +- +-#define E1000_INVM_MAJOR_MASK 0x3F0 +-#define E1000_INVM_MINOR_MASK 0xF +-#define E1000_INVM_MAJOR_SHIFT 4 ++#define E1000_INVM_ULT_BYTES_SIZE 8 ++#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 ++#define E1000_INVM_VER_FIELD_ONE 0x1FF8 ++#define E1000_INVM_VER_FIELD_TWO 0x7FE000 ++#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 ++ ++#define E1000_INVM_MAJOR_MASK 0x3F0 ++#define E1000_INVM_MINOR_MASK 0xF ++#define E1000_INVM_MAJOR_SHIFT 4 + + #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ +@@ -73,7 +81,7 @@ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +-/* NVM offset defaults for i211 device */ ++/* NVM offset defaults for I211 devices */ + #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 + #define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 + #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c +--- a/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,68 +1,179 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. 
+- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); ++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); ++static void e1000_config_collision_dist_generic(struct e1000_hw *hw); ++static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); ++ ++/** ++ * e1000_init_mac_ops_generic - Initialize MAC function pointers ++ * @hw: pointer to the HW structure + * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++ * Setups up the function pointers to no-op functions ++ **/ ++void e1000_init_mac_ops_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ DEBUGFUNC("e1000_init_mac_ops_generic"); ++ ++ /* General Setup */ ++ mac->ops.init_params = e1000_null_ops_generic; ++ mac->ops.init_hw = e1000_null_ops_generic; ++ mac->ops.reset_hw = e1000_null_ops_generic; ++ mac->ops.setup_physical_interface = e1000_null_ops_generic; ++ mac->ops.get_bus_info = e1000_null_ops_generic; ++ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; ++ mac->ops.read_mac_addr = igb_e1000_read_mac_addr_generic; ++ mac->ops.config_collision_dist = e1000_config_collision_dist_generic; ++ mac->ops.clear_hw_cntrs = e1000_null_mac_generic; ++ /* LED */ ++ mac->ops.cleanup_led = e1000_null_ops_generic; ++ mac->ops.setup_led = e1000_null_ops_generic; ++ mac->ops.blink_led = e1000_null_ops_generic; ++ mac->ops.led_on = e1000_null_ops_generic; ++ mac->ops.led_off = e1000_null_ops_generic; ++ /* LINK */ ++ mac->ops.setup_link = e1000_null_ops_generic; ++ mac->ops.get_link_up_info = e1000_null_link_info; ++ mac->ops.check_for_link = e1000_null_ops_generic; ++ /* Management */ ++ mac->ops.check_mng_mode = e1000_null_mng_mode; ++ /* VLAN, MC, etc. 
*/ ++ mac->ops.update_mc_addr_list = e1000_null_update_mc; ++ mac->ops.clear_vfta = e1000_null_mac_generic; ++ mac->ops.write_vfta = e1000_null_write_vfta; ++ mac->ops.rar_set = e1000_rar_set_generic; ++ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; ++} ++ ++/** ++ * e1000_null_ops_generic - No-op function, returns 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_ops_generic"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_mac_generic - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_mac_generic"); ++ return; ++} + +-#include +-#include +-#include +-#include +-#include ++/** ++ * e1000_null_link_info - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) ++{ ++ DEBUGFUNC("e1000_null_link_info"); ++ return E1000_SUCCESS; ++} + +-#include "e1000_mac.h" ++/** ++ * e1000_null_mng_mode - No-op function, return false ++ * @hw: pointer to the HW structure ++ **/ ++bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_mng_mode"); ++ return false; ++} + +-#include "igb.h" ++/** ++ * e1000_null_update_mc - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) ++{ ++ DEBUGFUNC("e1000_null_update_mc"); ++ return; ++} + +-static s32 igb_set_default_fc(struct e1000_hw *hw); +-static s32 igb_set_fc_watermarks(struct e1000_hw *hw); ++/** ++ * e1000_null_write_vfta - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) ++{ ++ DEBUGFUNC("e1000_null_write_vfta"); ++ return; ++} + + /** +- * igb_get_bus_info_pcie - Get PCIe bus information ++ * e1000_null_rar_set - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) ++{ ++ DEBUGFUNC("e1000_null_rar_set"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
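
The bus-info routine below pulls the bus speed and width out of the PCIe Link Status capability register. As a small illustration of that decode (field layout per the PCIe spec: current link speed in bits 3:0, negotiated link width in bits 9:4; the macro and function names here are made up for the sketch):

#include <stdint.h>
#include <stdio.h>

#define LNKSTA_SPEED_MASK  0x000Fu   /* current link speed, bits 3:0 */
#define LNKSTA_WIDTH_MASK  0x03F0u   /* negotiated link width, bits 9:4 */
#define LNKSTA_WIDTH_SHIFT 4

static void decode_link_status(uint16_t lnksta)
{
    unsigned width = (lnksta & LNKSTA_WIDTH_MASK) >> LNKSTA_WIDTH_SHIFT;
    const char *speed;

    switch (lnksta & LNKSTA_SPEED_MASK) {
    case 1:  speed = "2.5GT/s"; break;
    case 2:  speed = "5.0GT/s"; break;
    default: speed = "unknown"; break;  /* the hunk below treats other encodings as unknown too */
    }
    printf("PCIe link: x%u @ %s\n", width, speed);
}

int main(void)
{
    decode_link_status(0x0041);  /* a x4 link trained at 2.5 GT/s */
    return 0;
}
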
+ **/ +-s32 igb_get_bus_info_pcie(struct e1000_hw *hw) ++s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) + { ++ struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; +- u32 reg; + u16 pcie_link_status; + ++ DEBUGFUNC("igb_e1000_get_bus_info_pcie_generic"); ++ + bus->type = e1000_bus_type_pci_express; + +- ret_val = igb_read_pcie_cap_reg(hw, +- PCI_EXP_LNKSTA, +- &pcie_link_status); ++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, ++ &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { +- switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { +- case PCI_EXP_LNKSTA_CLS_2_5GB: ++ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { ++ case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; +- case PCI_EXP_LNKSTA_CLS_5_0GB: ++ case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: +@@ -71,75 +182,70 @@ + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & +- PCI_EXP_LNKSTA_NLW) >> +- PCI_EXP_LNKSTA_NLW_SHIFT); ++ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + +- reg = rd32(E1000_STATUS); +- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; ++ mac->ops.set_lan_id(hw); + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_clear_vfta - Clear VLAN filter table ++ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices ++ * + * @hw: pointer to the HW structure + * +- * Clears the register array which contains the VLAN filter table by +- * setting all the values to 0. ++ * Determines the LAN function id by reading memory-mapped registers ++ * and swaps the port value if requested. + **/ +-void igb_clear_vfta(struct e1000_hw *hw) ++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) + { +- u32 offset; ++ struct e1000_bus_info *bus = &hw->bus; ++ u32 reg; + +- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { +- array_wr32(E1000_VFTA, offset, 0); +- wrfl(); +- } ++ /* The status register reports the correct function number ++ * for the device regardless of function swap state. ++ */ ++ reg = E1000_READ_REG(hw, E1000_STATUS); ++ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + } + + /** +- * igb_write_vfta - Write value to VLAN filter table ++ * igb_e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure +- * @offset: register offset in VLAN filter table +- * @value: register value written to VLAN filter table + * +- * Writes value at the given offset in the register array which stores +- * the VLAN filter table. ++ * Sets the LAN function id to zero for a single port device. + **/ +-static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw) + { +- array_wr32(E1000_VFTA, offset, value); +- wrfl(); +-} ++ struct e1000_bus_info *bus = &hw->bus; + +-/* Due to a hw errata, if the host tries to configure the VFTA register +- * while performing queries from the BMC or DMA, then the VFTA in some +- * cases won't be written. +- */ ++ bus->func = 0; ++} + + /** +- * igb_clear_vfta_i350 - Clear VLAN filter table ++ * igb_e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. 
+ **/ +-void igb_clear_vfta_i350(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_clear_vfta_generic(struct e1000_hw *hw) + { + u32 offset; +- int i; + +- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { +- for (i = 0; i < 10; i++) +- array_wr32(E1000_VFTA, offset, 0); ++ DEBUGFUNC("igb_e1000_clear_vfta_generic"); + +- wrfl(); ++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); ++ E1000_WRITE_FLUSH(hw); + } + } + + /** +- * igb_write_vfta_i350 - Write value to VLAN filter table ++ * igb_e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table +@@ -147,113 +253,85 @@ + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +-static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) + { +- int i; +- +- for (i = 0; i < 10; i++) +- array_wr32(E1000_VFTA, offset, value); ++ DEBUGFUNC("igb_e1000_write_vfta_generic"); + +- wrfl(); ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_init_rx_addrs - Initialize receive address's ++ * e1000_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * +- * Setups the receive address registers by setting the base receive address ++ * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. 
+ **/ +-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) ++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) + { + u32 i; +- u8 mac_addr[ETH_ALEN] = {0}; ++ u8 mac_addr[ETH_ADDR_LEN] = {0}; ++ ++ DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ +- hw_dbg("Programming MAC Address into RAR[0]\n"); ++ DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ +- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); ++ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); + } + + /** +- * igb_vfta_set - enable or disable vlan in VLAN filter table +- * @hw: pointer to the HW structure +- * @vid: VLAN id to add or remove +- * @add: if true add filter, if false remove +- * +- * Sets or clears a bit in the VLAN filter table array based on VLAN id +- * and if we are adding or removing the filter +- **/ +-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +-{ +- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; +- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); +- u32 vfta; +- struct igb_adapter *adapter = hw->back; +- s32 ret_val = 0; +- +- vfta = adapter->shadow_vfta[index]; +- +- /* bit was set/cleared before we started */ +- if ((!!(vfta & mask)) == add) { +- ret_val = -E1000_ERR_CONFIG; +- } else { +- if (add) +- vfta |= mask; +- else +- vfta &= ~mask; +- } +- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) +- igb_write_vfta_i350(hw, index, vfta); +- else +- igb_write_vfta(hw, index, vfta); +- adapter->shadow_vfta[index] = vfta; +- +- return ret_val; +-} +- +-/** +- * igb_check_alt_mac_addr - Check for alternate MAC addr ++ * igb_e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent +- * address and must override the actual permanent MAC address. If an +- * alternate MAC address is found it is saved in the hw struct and +- * programmed into RAR0 and the function returns success, otherwise the +- * function returns an error. ++ * address and must override the actual permanent MAC address. If an ++ * alternate MAC address is found it is programmed into RAR0, replacing ++ * the permanent address that was installed into RAR0 by the Si on reset. ++ * This function will return SUCCESS unless it encounters an error while ++ * reading the EEPROM. + **/ +-s32 igb_check_alt_mac_addr(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) + { + u32 i; +- s32 ret_val = 0; ++ s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; +- u8 alt_mac_addr[ETH_ALEN]; ++ u8 alt_mac_addr[ETH_ADDR_LEN]; ++ ++ DEBUGFUNC("igb_e1000_check_alt_mac_addr_generic"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); ++ if (ret_val) ++ return ret_val; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. 
+ */ + if (hw->mac.type >= e1000_82580) +- goto out; ++ return E1000_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, +- &nvm_alt_mac_addr_offset); ++ &nvm_alt_mac_addr_offset); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ +- goto out; ++ return E1000_SUCCESS; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; +@@ -262,12 +340,12 @@ + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; +- for (i = 0; i < ETH_ALEN; i += 2) { ++ for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); +@@ -275,9 +353,9 @@ + } + + /* if multicast bit is set, the alternate address will not be used */ +- if (is_multicast_ether_addr(alt_mac_addr)) { +- hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); +- goto out; ++ if (alt_mac_addr[0] & 0x01) { ++ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); ++ return E1000_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the +@@ -286,12 +364,11 @@ + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_rar_set - Set receive address register ++ * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register +@@ -299,16 +376,17 @@ + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) ++static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) + { + u32 rar_low, rar_high; + ++ DEBUGFUNC("e1000_rar_set_generic"); ++ + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ +- rar_low = ((u32) addr[0] | +- ((u32) addr[1] << 8) | +- ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); ++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + +@@ -320,60 +398,29 @@ + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ +- wr32(E1000_RAL(index), rar_low); +- wrfl(); +- wr32(E1000_RAH(index), rar_high); +- wrfl(); +-} ++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); ++ E1000_WRITE_FLUSH(hw); ++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); ++ E1000_WRITE_FLUSH(hw); + +-/** +- * igb_mta_set - Set multicast filter table address +- * @hw: pointer to the HW structure +- * @hash_value: determines the MTA register and bit to set +- * +- * The multicast table address is a register array of 32-bit registers. +- * The hash_value is used to determine what register the bit is in, the +- * current value is read, the new bit is OR'd in and the new value is +- * written back into the register. +- **/ +-void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +-{ +- u32 hash_bit, hash_reg, mta; +- +- /* The MTA is a register array of 32-bit registers. 
It is +- * treated like an array of (32*mta_reg_count) bits. We want to +- * set bit BitArray[hash_value]. So we figure out what register +- * the bit is in, read it, OR in the new bit, then write +- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a +- * mask to bits 31:5 of the hash value which gives us the +- * register we're modifying. The hash bit within that register +- * is determined by the lower 5 bits of the hash value. +- */ +- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); +- hash_bit = hash_value & 0x1F; +- +- mta = array_rd32(E1000_MTA, hash_reg); +- +- mta |= (1 << hash_bit); +- +- array_wr32(E1000_MTA, hash_reg, mta); +- wrfl(); ++ return E1000_SUCCESS; + } + + /** +- * igb_hash_mc_addr - Generate a multicast hash value ++ * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine +- * the multicast filter table array address and new table value. See +- * igb_mta_set() ++ * the multicast filter table array address and new table value. + **/ +-static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) ++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) + { + u32 hash_value, hash_mask; + u8 bit_shift = 0; + ++ DEBUGFUNC("e1000_hash_mc_addr_generic"); ++ + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + +@@ -401,7 +448,7 @@ + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 +- * LSB MSB ++ * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 +@@ -430,7 +477,7 @@ + } + + /** +- * igb_update_mc_addr_list - Update Multicast addresses ++ * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program +@@ -438,150 +485,412 @@ + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
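
The comment quoted above works the hash out by hand for 01:AA:00:12:34:56. The shift-and-mask computation, and the way the resulting hash selects one bit in the multicast table, condense into a few lines; this standalone sketch (names and parameters simplified for illustration) reproduces the case 0 value of 0x563 and the case 1 value of 0xAC6:

#include <stdint.h>
#include <stdio.h>

/* Hash the last two MAC bytes as described in the hunk above: bit_shift starts
 * where (hash_mask >> bit_shift) == 0xFF and grows with the filter type
 * (0, 1, 2 or 4 extra shifts for types 0-3), then bytes 4 and 5 are combined.
 */
static uint32_t mc_hash(const uint8_t *mac, uint32_t mta_reg_count, int filter_type)
{
    uint32_t hash_mask = mta_reg_count * 32 - 1;
    uint8_t bit_shift = 0;

    while ((hash_mask >> bit_shift) != 0xFF)
        bit_shift++;
    bit_shift += (filter_type == 3) ? 4 : filter_type;

    return hash_mask & ((mac[4] >> (8 - bit_shift)) |
                        ((uint32_t)mac[5] << bit_shift));
}

int main(void)
{
    const uint8_t mac[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
    uint32_t mta_reg_count = 128;   /* 128 x 32-bit MTA registers = 4096 hash bits */
    uint32_t hash = mc_hash(mac, mta_reg_count, 0);

    /* Filter types 0 and 1 give 0x563 and 0xAC6, matching the comment above. */
    printf("type 0: 0x%03X  type 1: 0x%03X\n", hash, mc_hash(mac, mta_reg_count, 1));

    /* The hash picks one bit in the table: the upper bits choose the MTA
     * register, the low 5 bits choose the bit within that 32-bit register. */
    printf("MTA[%u] |= 1 << %u\n", (hash >> 5) & (mta_reg_count - 1), hash & 0x1F);
    return 0;
}
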
+ **/ +-void igb_update_mc_addr_list(struct e1000_hw *hw, +- u8 *mc_addr_list, u32 mc_addr_count) ++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, ++ u8 *mc_addr_list, u32 mc_addr_count) + { + u32 hash_value, hash_bit, hash_reg; + int i; + ++ DEBUGFUNC("e1000_update_mc_addr_list_generic"); ++ + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { +- hash_value = igb_hash_mc_addr(hw, mc_addr_list); ++ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); +- mc_addr_list += (ETH_ALEN); ++ mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) +- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); +- wrfl(); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_clear_hw_cntrs_base - Clear base hardware counters ++ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value ++ * @hw: pointer to the HW structure ++ * ++ * In certain situations, a system BIOS may report that the PCIx maximum ++ * memory read byte count (MMRBC) value is higher than than the actual ++ * value. We check the PCIx command register with the current PCIx status ++ * register. ++ **/ ++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) ++{ ++ u16 cmd_mmrbc; ++ u16 pcix_cmd; ++ u16 pcix_stat_hi_word; ++ u16 stat_mmrbc; ++ ++ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); ++ ++ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ ++ if (hw->bus.type != e1000_bus_type_pcix) ++ return; ++ ++ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); ++ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); ++ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> ++ PCIX_COMMAND_MMRBC_SHIFT; ++ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> ++ PCIX_STATUS_HI_MMRBC_SHIFT; ++ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) ++ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; ++ if (cmd_mmrbc > stat_mmrbc) { ++ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; ++ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; ++ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); ++ } ++} ++ ++/** ++ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ **/ +-void igb_clear_hw_cntrs_base(struct e1000_hw *hw) ++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) + { +- rd32(E1000_CRCERRS); +- rd32(E1000_SYMERRS); +- rd32(E1000_MPC); +- rd32(E1000_SCC); +- rd32(E1000_ECOL); +- rd32(E1000_MCC); +- rd32(E1000_LATECOL); +- rd32(E1000_COLC); +- rd32(E1000_DC); +- rd32(E1000_SEC); +- rd32(E1000_RLEC); +- rd32(E1000_XONRXC); +- rd32(E1000_XONTXC); +- rd32(E1000_XOFFRXC); +- rd32(E1000_XOFFTXC); +- rd32(E1000_FCRUC); +- rd32(E1000_GPRC); +- rd32(E1000_BPRC); +- rd32(E1000_MPRC); +- rd32(E1000_GPTC); +- rd32(E1000_GORCL); +- rd32(E1000_GORCH); +- rd32(E1000_GOTCL); +- rd32(E1000_GOTCH); +- rd32(E1000_RNBC); +- rd32(E1000_RUC); +- rd32(E1000_RFC); +- rd32(E1000_ROC); +- rd32(E1000_RJC); +- rd32(E1000_TORL); +- rd32(E1000_TORH); +- rd32(E1000_TOTL); +- rd32(E1000_TOTH); +- rd32(E1000_TPR); +- rd32(E1000_TPT); +- rd32(E1000_MPTC); +- rd32(E1000_BPTC); ++ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); ++ ++ E1000_READ_REG(hw, E1000_CRCERRS); ++ E1000_READ_REG(hw, E1000_SYMERRS); ++ E1000_READ_REG(hw, E1000_MPC); ++ E1000_READ_REG(hw, E1000_SCC); ++ E1000_READ_REG(hw, E1000_ECOL); ++ E1000_READ_REG(hw, E1000_MCC); ++ E1000_READ_REG(hw, E1000_LATECOL); ++ E1000_READ_REG(hw, E1000_COLC); ++ E1000_READ_REG(hw, E1000_DC); ++ E1000_READ_REG(hw, E1000_SEC); ++ E1000_READ_REG(hw, E1000_RLEC); ++ E1000_READ_REG(hw, E1000_XONRXC); ++ E1000_READ_REG(hw, E1000_XONTXC); ++ E1000_READ_REG(hw, E1000_XOFFRXC); ++ E1000_READ_REG(hw, E1000_XOFFTXC); ++ E1000_READ_REG(hw, E1000_FCRUC); ++ E1000_READ_REG(hw, E1000_GPRC); ++ E1000_READ_REG(hw, E1000_BPRC); ++ E1000_READ_REG(hw, E1000_MPRC); ++ E1000_READ_REG(hw, E1000_GPTC); ++ E1000_READ_REG(hw, E1000_GORCL); ++ E1000_READ_REG(hw, E1000_GORCH); ++ E1000_READ_REG(hw, E1000_GOTCL); ++ E1000_READ_REG(hw, E1000_GOTCH); ++ E1000_READ_REG(hw, E1000_RNBC); ++ E1000_READ_REG(hw, E1000_RUC); ++ E1000_READ_REG(hw, E1000_RFC); ++ E1000_READ_REG(hw, E1000_ROC); ++ E1000_READ_REG(hw, E1000_RJC); ++ E1000_READ_REG(hw, E1000_TORL); ++ E1000_READ_REG(hw, E1000_TORH); ++ E1000_READ_REG(hw, E1000_TOTL); ++ E1000_READ_REG(hw, E1000_TOTH); ++ E1000_READ_REG(hw, E1000_TPR); ++ E1000_READ_REG(hw, E1000_TPT); ++ E1000_READ_REG(hw, E1000_MPTC); ++ E1000_READ_REG(hw, E1000_BPTC); + } + + /** +- * igb_check_for_copper_link - Check for link (Copper) ++ * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +-s32 igb_check_for_copper_link(struct e1000_hw *hw) ++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + ++ DEBUGFUNC("e1000_check_for_copper_link"); ++ + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ +- if (!mac->get_link_status) { +- ret_val = 0; +- goto out; +- } ++ if (!mac->get_link_status) ++ return E1000_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) +- goto out; /* No link detected */ ++ return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ +- igb_check_downshift(hw); ++ e1000_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ +- if (!mac->autoneg) { +- ret_val = -E1000_ERR_CONFIG; +- goto out; +- } ++ if (!mac->autoneg) ++ return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ +- igb_config_collision_dist(hw); ++ mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ +- ret_val = igb_config_fc_after_link_up(hw); ++ ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) +- hw_dbg("Error configuring flow control\n"); ++ DEBUGOUT("Error configuring flow control\n"); + +-out: + return ret_val; + } + + /** +- * igb_setup_link - Setup flow control and link settings ++ * e1000_check_for_fiber_link_generic - Check for link (Fiber) ++ * @hw: pointer to the HW structure ++ * ++ * Checks for link up on the hardware. If link is not up and we have ++ * a signal, then we need to force link up. ++ **/ ++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 rxcw; ++ u32 ctrl; ++ u32 status; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_check_for_fiber_link_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ ++ /* If we don't have link (auto-negotiation failed or link partner ++ * cannot auto-negotiate), the cable is plugged in (we have signal), ++ * and our link partner is not trying to auto-negotiate with us (we ++ * are receiving idles or data), we need to force link up. We also ++ * need to give auto-negotiation time to complete, in case the cable ++ * was just plugged in. The autoneg_failed flag does this. ++ */ ++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ ++ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && ++ !(rxcw & E1000_RXCW_C)) { ++ if (!mac->autoneg_failed) { ++ mac->autoneg_failed = true; ++ return E1000_SUCCESS; ++ } ++ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); ++ ++ /* Disable auto-negotiation in the TXCW register */ ++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); ++ ++ /* Force link-up and also force full-duplex. */ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ ++ /* Configure Flow Control after forcing link up. 
*/ ++ ret_val = e1000_config_fc_after_link_up_generic(hw); ++ if (ret_val) { ++ DEBUGOUT("Error configuring flow control\n"); ++ return ret_val; ++ } ++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { ++ /* If we are forcing link and we are receiving /C/ ordered ++ * sets, re-enable auto-negotiation in the TXCW register ++ * and disable forced link in the Device Control register ++ * in an attempt to auto-negotiate with our link partner. ++ */ ++ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); ++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); ++ ++ mac->serdes_has_link = true; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_check_for_serdes_link_generic - Check for link (Serdes) ++ * @hw: pointer to the HW structure ++ * ++ * Checks for link up on the hardware. If link is not up and we have ++ * a signal, then we need to force link up. ++ **/ ++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 rxcw; ++ u32 ctrl; ++ u32 status; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_check_for_serdes_link_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ ++ /* If we don't have link (auto-negotiation failed or link partner ++ * cannot auto-negotiate), and our link partner is not trying to ++ * auto-negotiate with us (we are receiving idles or data), ++ * we need to force link up. We also need to give auto-negotiation ++ * time to complete. ++ */ ++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ ++ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { ++ if (!mac->autoneg_failed) { ++ mac->autoneg_failed = true; ++ return E1000_SUCCESS; ++ } ++ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); ++ ++ /* Disable auto-negotiation in the TXCW register */ ++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); ++ ++ /* Force link-up and also force full-duplex. */ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ ++ /* Configure Flow Control after forcing link up. */ ++ ret_val = e1000_config_fc_after_link_up_generic(hw); ++ if (ret_val) { ++ DEBUGOUT("Error configuring flow control\n"); ++ return ret_val; ++ } ++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { ++ /* If we are forcing link and we are receiving /C/ ordered ++ * sets, re-enable auto-negotiation in the TXCW register ++ * and disable forced link in the Device Control register ++ * in an attempt to auto-negotiate with our link partner. ++ */ ++ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); ++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); ++ ++ mac->serdes_has_link = true; ++ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { ++ /* If we force link for non-auto-negotiation switch, check ++ * link status based on MAC synchronization for internal ++ * serdes media type. ++ */ ++ /* SYNCH bit and IV bit are sticky. 
*/ ++ usec_delay(10); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ if (rxcw & E1000_RXCW_SYNCH) { ++ if (!(rxcw & E1000_RXCW_IV)) { ++ mac->serdes_has_link = true; ++ DEBUGOUT("SERDES: Link up - forced.\n"); ++ } ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - force failed.\n"); ++ } ++ } ++ ++ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if (status & E1000_STATUS_LU) { ++ /* SYNCH bit and IV bit are sticky, so reread rxcw. */ ++ usec_delay(10); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ if (rxcw & E1000_RXCW_SYNCH) { ++ if (!(rxcw & E1000_RXCW_IV)) { ++ mac->serdes_has_link = true; ++ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); ++ } ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - no sync.\n"); ++ } ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - autoneg failed\n"); ++ } ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_default_fc_generic - Set flow control default values ++ * @hw: pointer to the HW structure ++ * ++ * Read the EEPROM for the default values for flow control and store the ++ * values. ++ **/ ++static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 nvm_data; ++ u16 nvm_offset = 0; ++ ++ DEBUGFUNC("e1000_set_default_fc_generic"); ++ ++ /* Read and store word 0x0F of the EEPROM. This word contains bits ++ * that determine the hardware's default PAUSE (flow control) mode, ++ * a bit that determines whether the HW defaults to enabling or ++ * disabling auto-negotiation, and the direction of the ++ * SW defined pins. If there is no SW over-ride of the flow ++ * control setting, then the variable hw->fc will ++ * be initialized based on a value in the EEPROM. ++ */ ++ if (hw->mac.type == e1000_i350) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); ++ ret_val = hw->nvm.ops.read(hw, ++ NVM_INIT_CONTROL2_REG + ++ nvm_offset, ++ 1, &nvm_data); ++ } else { ++ ret_val = hw->nvm.ops.read(hw, ++ NVM_INIT_CONTROL2_REG, ++ 1, &nvm_data); ++ } ++ ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) ++ hw->fc.requested_mode = e1000_fc_none; ++ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == ++ NVM_WORD0F_ASM_DIR) ++ hw->fc.requested_mode = e1000_fc_tx_pause; ++ else ++ hw->fc.requested_mode = e1000_fc_full; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow +@@ -590,91 +899,260 @@ + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +-s32 igb_setup_link(struct e1000_hw *hw) ++s32 e1000_setup_link_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ +- if (igb_check_reset_block(hw)) +- goto out; ++ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) ++ return E1000_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. 
+ */ + if (hw->fc.requested_mode == e1000_fc_default) { +- ret_val = igb_set_default_fc(hw); ++ ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) +- goto out; ++ return ret_val; + } + +- /* We want to save off the original Flow Control configuration just +- * in case we get disconnected and then reconnected into a different +- * hub or switch with different Flow Control capabilities. ++ /* Save off the requested flow control mode for use later. Depending ++ * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + +- hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); ++ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", ++ hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) +- goto out; ++ return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ +- hw_dbg("Initializing the Flow Control address, type and timer regs\n"); +- wr32(E1000_FCT, FLOW_CONTROL_TYPE); +- wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); +- wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); ++ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); ++ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); ++ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); ++ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); ++ ++ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + +- wr32(E1000_FCTTV, hw->fc.pause_time); ++ return e1000_set_fc_watermarks_generic(hw); ++} + +- ret_val = igb_set_fc_watermarks(hw); ++/** ++ * e1000_commit_fc_settings_generic - Configure flow control ++ * @hw: pointer to the HW structure ++ * ++ * Write the flow control settings to the Transmit Config Word Register (TXCW) ++ * base on the flow control settings in e1000_mac_info. ++ **/ ++static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 txcw; + +-out: ++ DEBUGFUNC("e1000_commit_fc_settings_generic"); ++ ++ /* Check for a software override of the flow control settings, and ++ * setup the device accordingly. If auto-negotiation is enabled, then ++ * software will have to set the "PAUSE" bits to the correct value in ++ * the Transmit Config Word Register (TXCW) and re-start auto- ++ * negotiation. However, if auto-negotiation is disabled, then ++ * software will have to manually configure the two flow control enable ++ * bits in the CTRL register. ++ * ++ * The possible values of the "fc" parameter are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but we ++ * do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++ */ ++ switch (hw->fc.current_mode) { ++ case e1000_fc_none: ++ /* Flow control completely disabled by a software over-ride. */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); ++ break; ++ case e1000_fc_rx_pause: ++ /* Rx Flow control is enabled and Tx Flow control is disabled ++ * by a software over-ride. 
Since there really isn't a way to ++ * advertise that we are capable of Rx Pause ONLY, we will ++ * advertise that we support both symmetric and asymmetric Rx ++ * PAUSE. Later, we will disable the adapter's ability to send ++ * PAUSE frames. ++ */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); ++ break; ++ case e1000_fc_tx_pause: ++ /* Tx Flow control is enabled, and Rx Flow control is disabled, ++ * by a software over-ride. ++ */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); ++ break; ++ case e1000_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by a software ++ * over-ride. ++ */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); ++ break; ++ default: ++ DEBUGOUT("Flow control param set incorrectly\n"); ++ return -E1000_ERR_CONFIG; ++ break; ++ } ++ ++ E1000_WRITE_REG(hw, E1000_TXCW, txcw); ++ mac->txcw = txcw; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_poll_fiber_serdes_link_generic - Poll for link up ++ * @hw: pointer to the HW structure ++ * ++ * Polls for link up by reading the status register, if link fails to come ++ * up with auto-negotiation, then the link is forced if a signal is detected. ++ **/ ++static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 i, status; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); ++ ++ /* If we have a signal (the cable is plugged in, or assumed true for ++ * serdes media) then poll for a "Link-Up" indication in the Device ++ * Status Register. Time-out if a link isn't seen in 500 milliseconds ++ * seconds (Auto-negotiation should complete in less than 500 ++ * milliseconds even if the other end is doing it in SW). ++ */ ++ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { ++ msec_delay(10); ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if (status & E1000_STATUS_LU) ++ break; ++ } ++ if (i == FIBER_LINK_UP_LIMIT) { ++ DEBUGOUT("Never got a valid link from auto-neg!!!\n"); ++ mac->autoneg_failed = true; ++ /* AutoNeg failed to achieve a link, so we'll call ++ * mac->check_for_link. This routine will force the ++ * link up if we detect a signal. This will allow us to ++ * communicate with non-autonegotiating link partners. ++ */ ++ ret_val = mac->ops.check_for_link(hw); ++ if (ret_val) { ++ DEBUGOUT("Error while checking for link\n"); ++ return ret_val; ++ } ++ mac->autoneg_failed = false; ++ } else { ++ mac->autoneg_failed = false; ++ DEBUGOUT("Valid Link Found\n"); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes ++ * @hw: pointer to the HW structure ++ * ++ * Configures collision distance and flow control for fiber and serdes ++ * links. Upon successful setup, poll for link. ++ **/ ++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) ++{ ++ u32 ctrl; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ++ /* Take the link out of reset */ ++ ctrl &= ~E1000_CTRL_LRST; ++ ++ hw->mac.ops.config_collision_dist(hw); ++ ++ ret_val = e1000_commit_fc_settings_generic(hw); ++ if (ret_val) ++ return ret_val; ++ ++ /* Since auto-negotiation is enabled, take the link out of reset (the ++ * link will be in reset, because we previously reset the chip). This ++ * will restart auto-negotiation. 
If auto-negotiation is successful ++ * then the link-up status bit will be set and the flow control enable ++ * bits (RFCE and TFCE) will be set according to their negotiated value. ++ */ ++ DEBUGOUT("Auto-negotiation enabled\n"); ++ ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(1); ++ ++ /* For these adapters, the SW definable pin 1 is set when the optics ++ * detect a signal. If we have a signal, then poll for a "Link-Up" ++ * indication. ++ */ ++ if (hw->phy.media_type == e1000_media_type_internal_serdes || ++ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { ++ ret_val = e1000_poll_fiber_serdes_link_generic(hw); ++ } else { ++ DEBUGOUT("No signal detected\n"); ++ } + + return ret_val; + } + + /** +- * igb_config_collision_dist - Configure collision distance ++ * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used +- * during link setup. Currently no func pointer exists and all +- * implementations are handled in the generic version of this function. ++ * during link setup. + **/ +-void igb_config_collision_dist(struct e1000_hw *hw) ++static void e1000_config_collision_dist_generic(struct e1000_hw *hw) + { + u32 tctl; + +- tctl = rd32(E1000_TCTL); ++ DEBUGFUNC("e1000_config_collision_dist_generic"); ++ ++ tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + +- wr32(E1000_TCTL, tctl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_TCTL, tctl); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_set_fc_watermarks - Set flow control high/low watermarks ++ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame +- * tansmission as well. ++ * transmission as well. + **/ +-static s32 igb_set_fc_watermarks(struct e1000_hw *hw) ++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; + u32 fcrtl = 0, fcrth = 0; + ++ DEBUGFUNC("e1000_set_fc_watermarks_generic"); ++ + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the +@@ -692,61 +1170,14 @@ + + fcrth = hw->fc.high_water; + } +- wr32(E1000_FCRTL, fcrtl); +- wr32(E1000_FCRTH, fcrth); ++ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); ++ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_set_default_fc - Set flow control default values +- * @hw: pointer to the HW structure +- * +- * Read the EEPROM for the default values for flow control and store the +- * values. +- **/ +-static s32 igb_set_default_fc(struct e1000_hw *hw) +-{ +- s32 ret_val = 0; +- u16 lan_offset; +- u16 nvm_data; +- +- /* Read and store word 0x0F of the EEPROM. This word contains bits +- * that determine the hardware's default PAUSE (flow control) mode, +- * a bit that determines whether the HW defaults to enabling or +- * disabling auto-negotiation, and the direction of the +- * SW defined pins. If there is no SW over-ride of the flow +- * control setting, then the variable hw->fc will +- * be initialized based on a value in the EEPROM. 
+- */ +- if (hw->mac.type == e1000_i350) { +- lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); +- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG +- + lan_offset, 1, &nvm_data); +- } else { +- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, +- 1, &nvm_data); +- } +- +- if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; +- } +- +- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) +- hw->fc.requested_mode = e1000_fc_none; +- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == +- NVM_WORD0F_ASM_DIR) +- hw->fc.requested_mode = e1000_fc_tx_pause; +- else +- hw->fc.requested_mode = e1000_fc_full; +- +-out: +- return ret_val; +-} +- +-/** +- * igb_force_mac_fc - Force the MAC's flow control settings ++ * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the +@@ -755,12 +1186,13 @@ + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +-s32 igb_force_mac_fc(struct e1000_hw *hw) ++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) + { + u32 ctrl; +- s32 ret_val = 0; + +- ctrl = rd32(E1000_CTRL); ++ DEBUGFUNC("e1000_force_mac_fc_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY +@@ -776,10 +1208,10 @@ + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). +- * 3: Both Rx and TX flow control (symmetric) is enabled. ++ * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ +- hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); ++ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: +@@ -797,19 +1229,17 @@ + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: +- hw_dbg("Flow control param set incorrectly\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Flow control param set incorrectly\n"); ++ return -E1000_ERR_CONFIG; + } + +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_config_fc_after_link_up - Configures flow control after link ++ * e1000_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the +@@ -818,29 +1248,32 @@ + * and did not fail, then we configure flow control based on our link + * partner. + **/ +-s32 igb_config_fc_after_link_up(struct e1000_hw *hw) ++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + ++ DEBUGFUNC("e1000_config_fc_after_link_up_generic"); ++ + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. 
+ */ + if (mac->autoneg_failed) { +- if (hw->phy.media_type == e1000_media_type_internal_serdes) +- ret_val = igb_force_mac_fc(hw); ++ if (hw->phy.media_type == e1000_media_type_fiber || ++ hw->phy.media_type == e1000_media_type_internal_serdes) ++ ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) +- ret_val = igb_force_mac_fc(hw); ++ ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { +- hw_dbg("Error forcing flow control settings\n"); +- goto out; ++ DEBUGOUT("Error forcing flow control settings\n"); ++ return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is +@@ -853,18 +1286,16 @@ + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ +- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, +- &mii_status_reg); ++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) +- goto out; +- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, +- &mii_status_reg); ++ return ret_val; ++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) +- goto out; ++ return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { +- hw_dbg("Copper PHY and Auto Neg has not completed.\n"); +- goto out; ++ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); ++ return ret_val; + } + + /* The AutoNeg process has completed, so we now need to +@@ -874,13 +1305,13 @@ + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, +- &mii_nway_adv_reg); ++ &mii_nway_adv_reg); + if (ret_val) +- goto out; ++ return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, +- &mii_nway_lp_ability_reg); ++ &mii_nway_lp_ability_reg); + if (ret_val) +- goto out; ++ return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base +@@ -917,18 +1348,18 @@ + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { +- /* Now we need to check if the user selected RX ONLY ++ /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise +- * FULL flow control because we could not advertise RX ++ * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to +- * turn OFF the TRANSMISSION of PAUSE frames. ++ * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; +- hw_dbg("Flow Control = FULL.\n"); ++ DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = RX PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. +@@ -943,7 +1374,7 @@ + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; +- hw_dbg("Flow Control = TX PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * +@@ -957,46 +1388,23 @@ + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = RX PAUSE frames only.\n"); +- } +- /* Per the IEEE spec, at this point flow control should be +- * disabled. 
However, we want to consider that we could +- * be connected to a legacy switch that doesn't advertise +- * desired flow control, but can be forced on the link +- * partner. So if we advertised no flow control, that is +- * what we will resolve to. If we advertised some kind of +- * receive capability (Rx Pause Only or Full Flow Control) +- * and the link partner advertised none, we will configure +- * ourselves to enable Rx Flow Control only. We can do +- * this safely for two reasons: If the link partner really +- * didn't want flow control enabled, and we enable Rx, no +- * harm done since we won't be receiving any PAUSE frames +- * anyway. If the intent on the link partner was to have +- * flow control enabled, then by us enabling RX only, we +- * can at least receive pause frames and process them. +- * This is a good idea because in most cases, since we are +- * predominantly a server NIC, more times than not we will +- * be asked to delay transmission of packets than asking +- * our link partner to pause transmission of frames. +- */ +- else if ((hw->fc.requested_mode == e1000_fc_none) || +- (hw->fc.requested_mode == e1000_fc_tx_pause) || +- (hw->fc.strict_ieee)) { +- hw->fc.current_mode = e1000_fc_none; +- hw_dbg("Flow Control = NONE.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { +- hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = RX PAUSE frames only.\n"); ++ /* Per the IEEE spec, at this point flow control ++ * should be disabled. ++ */ ++ hw->fc.current_mode = e1000_fc_none; ++ DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ +- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); ++ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { +- hw_dbg("Error getting link speed and duplex\n"); +- goto out; ++ DEBUGOUT("Error getting link speed and duplex\n"); ++ return ret_val; + } + + if (duplex == HALF_DUPLEX) +@@ -1005,26 +1413,27 @@ + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ +- ret_val = igb_force_mac_fc(hw); ++ ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { +- hw_dbg("Error forcing flow control settings\n"); +- goto out; ++ DEBUGOUT("Error forcing flow control settings\n"); ++ return ret_val; + } + } ++ + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ +- if ((hw->phy.media_type == e1000_media_type_internal_serdes) +- && mac->autoneg) { ++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) && ++ mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ +- pcs_status_reg = rd32(E1000_PCS_LSTAT); ++ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { +- hw_dbg("PCS Auto Neg has not completed.\n"); ++ DEBUGOUT("PCS Auto Neg has not completed.\n"); + return ret_val; + } + +@@ -1034,8 +1443,8 @@ + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. 
+ */ +- pcs_adv_reg = rd32(E1000_PCS_ANADV); +- pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); ++ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); ++ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base +@@ -1080,10 +1489,10 @@ + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; +- hw_dbg("Flow Control = FULL.\n"); ++ DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = Rx PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. +@@ -1098,7 +1507,7 @@ + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; +- hw_dbg("Flow Control = Tx PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * +@@ -1112,35 +1521,34 @@ + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = Rx PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; +- hw_dbg("Flow Control = NONE.\n"); ++ DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ +- pcs_ctrl_reg = rd32(E1000_PCS_LCTL); ++ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; +- wr32(E1000_PCS_LCTL, pcs_ctrl_reg); ++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); + +- ret_val = igb_force_mac_fc(hw); ++ ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { +- hw_dbg("Error forcing flow control settings\n"); ++ DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex ++ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex +@@ -1148,172 +1556,185 @@ + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ **/ +-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, +- u16 *duplex) ++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, ++ u16 *duplex) + { + u32 status; + +- status = rd32(E1000_STATUS); ++ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); ++ ++ status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; +- hw_dbg("1000 Mbs, "); ++ DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; +- hw_dbg("100 Mbs, "); ++ DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; +- hw_dbg("10 Mbs, "); ++ DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; +- hw_dbg("Full Duplex\n"); ++ DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; +- hw_dbg("Half Duplex\n"); ++ DEBUGOUT("Half Duplex\n"); + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_hw_semaphore - Acquire hardware semaphore ++ * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex ++ * @hw: pointer to the HW structure ++ * @speed: stores the current speed ++ * @duplex: stores the current duplex ++ * ++ * Sets the speed and duplex to gigabit full duplex (the only possible option) ++ * for fiber/serdes links. ++ **/ ++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 *speed, u16 *duplex) ++{ ++ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); ++ ++ *speed = SPEED_1000; ++ *duplex = FULL_DUPLEX; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +-s32 igb_get_hw_semaphore(struct e1000_hw *hw) ++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) + { + u32 swsm; +- s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + ++ DEBUGFUNC("e1000_get_hw_semaphore_generic"); ++ + /* Get the SW semaphore */ + while (i < timeout) { +- swsm = rd32(E1000_SWSM); ++ swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + +- udelay(50); ++ usec_delay(50); + i++; + } + + if (i == timeout) { +- hw_dbg("Driver can't access device - SMBI bit is set.\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); ++ return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { +- swsm = rd32(E1000_SWSM); +- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ +- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) ++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + +- udelay(50); ++ usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ +- igb_put_hw_semaphore(hw); +- hw_dbg("Driver can't access the NVM\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ e1000_put_hw_semaphore_generic(hw); ++ DEBUGOUT("Driver can't access the NVM\n"); ++ return -E1000_ERR_NVM; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_put_hw_semaphore - Release hardware semaphore ++ * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +-void igb_put_hw_semaphore(struct e1000_hw *hw) ++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) + { + u32 swsm; + +- swsm = rd32(E1000_SWSM); ++ DEBUGFUNC("e1000_put_hw_semaphore_generic"); ++ ++ swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + +- wr32(E1000_SWSM, swsm); ++ E1000_WRITE_REG(hw, E1000_SWSM, swsm); + } + + /** +- * igb_get_auto_rd_done - Check for auto read completion ++ * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +-s32 igb_get_auto_rd_done(struct e1000_hw *hw) ++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) + { + s32 i = 0; +- s32 ret_val = 0; + ++ DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { +- if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) ++ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { +- hw_dbg("Auto read by HW from NVM has not completed.\n"); +- ret_val = -E1000_ERR_RESET; +- goto out; ++ DEBUGOUT("Auto read by HW from NVM has not completed.\n"); ++ return -E1000_ERR_RESET; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_valid_led_default - Verify a valid default LED config ++ * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +-static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) ++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_valid_led_default_generic"); ++ + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + +- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { +- switch (hw->phy.media_type) { +- case e1000_media_type_internal_serdes: +- *data = ID_LED_DEFAULT_82575_SERDES; +- break; +- case e1000_media_type_copper: +- default: +- *data = ID_LED_DEFAULT; +- break; +- } +- } +-out: +- return ret_val; ++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) ++ *data = ID_LED_DEFAULT; ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_id_led_init - ++ * e1000_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +-s32 igb_id_led_init(struct e1000_hw *hw) ++s32 e1000_id_led_init_generic(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; +@@ -1323,17 +1744,13 @@ + u16 data, i, temp; + const u16 led_mask = 0x0F; + +- /* i210 and i211 devices have different LED mechanism */ +- if ((hw->mac.type == e1000_i210) || +- (hw->mac.type == e1000_i211)) +- ret_val = igb_valid_led_default_i210(hw, &data); +- else +- ret_val = igb_valid_led_default(hw, &data); ++ DEBUGFUNC("e1000_id_led_init_generic"); + ++ ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) +- goto out; ++ return ret_val; + +- mac->ledctl_default = rd32(E1000_LEDCTL); ++ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + +@@ -1375,34 +1792,69 @@ + } + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_cleanup_led - Set LED config to default operation ++ * e1000_setup_led_generic - Configures SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This prepares the SW controllable LED for use and saves the current state ++ * of the LED so it can be later restored. ++ **/ ++s32 e1000_setup_led_generic(struct e1000_hw *hw) ++{ ++ u32 ledctl; ++ ++ DEBUGFUNC("e1000_setup_led_generic"); ++ ++ if (hw->mac.ops.setup_led != e1000_setup_led_generic) ++ return -E1000_ERR_CONFIG; ++ ++ if (hw->phy.media_type == e1000_media_type_fiber) { ++ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ++ hw->mac.ledctl_default = ledctl; ++ /* Turn off LED0 */ ++ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | ++ E1000_LEDCTL_LED0_MODE_MASK); ++ ledctl |= (E1000_LEDCTL_MODE_LED_OFF << ++ E1000_LEDCTL_LED0_MODE_SHIFT); ++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); ++ } else if (hw->phy.media_type == e1000_media_type_copper) { ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. 
+ **/ +-s32 igb_cleanup_led(struct e1000_hw *hw) ++s32 e1000_cleanup_led_generic(struct e1000_hw *hw) + { +- wr32(E1000_LEDCTL, hw->mac.ledctl_default); +- return 0; ++ DEBUGFUNC("e1000_cleanup_led_generic"); ++ ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); ++ return E1000_SUCCESS; + } + + /** +- * igb_blink_led - Blink LED ++ * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * +- * Blink the led's which are set to be on. ++ * Blink the LEDs which are set to be on. + **/ +-s32 igb_blink_led(struct e1000_hw *hw) ++s32 e1000_blink_led_generic(struct e1000_hw *hw) + { + u32 ledctl_blink = 0; + u32 i; + ++ DEBUGFUNC("e1000_blink_led_generic"); ++ + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | +@@ -1432,100 +1884,239 @@ + } + } + +- wr32(E1000_LEDCTL, ledctl_blink); ++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_led_on_generic - Turn LED on ++ * @hw: pointer to the HW structure ++ * ++ * Turn LED on. ++ **/ ++s32 e1000_led_on_generic(struct e1000_hw *hw) ++{ ++ u32 ctrl; ++ ++ DEBUGFUNC("e1000_led_on_generic"); ++ ++ switch (hw->phy.media_type) { ++ case e1000_media_type_fiber: ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl &= ~E1000_CTRL_SWDPIN0; ++ ctrl |= E1000_CTRL_SWDPIO0; ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ break; ++ case e1000_media_type_copper: ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); ++ break; ++ default: ++ break; ++ } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_led_off - Turn LED off ++ * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +-s32 igb_led_off(struct e1000_hw *hw) ++s32 e1000_led_off_generic(struct e1000_hw *hw) + { ++ u32 ctrl; ++ ++ DEBUGFUNC("e1000_led_off_generic"); ++ + switch (hw->phy.media_type) { ++ case e1000_media_type_fiber: ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl |= E1000_CTRL_SWDPIN0; ++ ctrl |= E1000_CTRL_SWDPIO0; ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ break; + case e1000_media_type_copper: +- wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + +- return 0; ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities ++ * @hw: pointer to the HW structure ++ * @no_snoop: bitmap of snoop events ++ * ++ * Set the PCI-express register to snoop for events enabled in 'no_snoop'. ++ **/ ++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) ++{ ++ u32 gcr; ++ ++ DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); ++ ++ if (hw->bus.type != e1000_bus_type_pci_express) ++ return; ++ ++ if (no_snoop) { ++ gcr = E1000_READ_REG(hw, E1000_GCR); ++ gcr &= ~(PCIE_NO_SNOOP_ALL); ++ gcr |= no_snoop; ++ E1000_WRITE_REG(hw, E1000_GCR, gcr); ++ } + } + + /** +- * igb_disable_pcie_master - Disables PCI-express master access ++ * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * +- * Returns 0 (0) if successful, else returns -10 ++ * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
+ **/ +-s32 igb_disable_pcie_master(struct e1000_hw *hw) ++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) + { + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) +- goto out; ++ return E1000_SUCCESS; + +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { +- if (!(rd32(E1000_STATUS) & +- E1000_STATUS_GIO_MASTER_ENABLE)) ++ if (!(E1000_READ_REG(hw, E1000_STATUS) & ++ E1000_STATUS_GIO_MASTER_ENABLE) || ++ E1000_REMOVED(hw->hw_addr)) + break; +- udelay(100); ++ usec_delay(100); + timeout--; + } + + if (!timeout) { +- hw_dbg("Master requests are pending.\n"); +- ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; +- goto out; ++ DEBUGOUT("Master requests are pending.\n"); ++ return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_validate_mdi_setting - Verify MDI/MDIx settings ++ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * +- * Verify that when not using auto-negotitation that MDI/MDIx is correctly +- * set, which is forced to MDI mode only. ++ * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +-s32 igb_validate_mdi_setting(struct e1000_hw *hw) ++void e1000_reset_adaptive_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ struct e1000_mac_info *mac = &hw->mac; + +- /* All MDI settings are supported on 82580 and newer. */ +- if (hw->mac.type >= e1000_82580) +- goto out; ++ DEBUGFUNC("e1000_reset_adaptive_generic"); ++ ++ if (!mac->adaptive_ifs) { ++ DEBUGOUT("Not in Adaptive IFS mode!\n"); ++ return; ++ } ++ ++ mac->current_ifs_val = 0; ++ mac->ifs_min_val = IFS_MIN; ++ mac->ifs_max_val = IFS_MAX; ++ mac->ifs_step_size = IFS_STEP; ++ mac->ifs_ratio = IFS_RATIO; ++ ++ mac->in_ifs_mode = false; ++ E1000_WRITE_REG(hw, E1000_AIT, 0); ++} ++ ++/** ++ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing ++ * @hw: pointer to the HW structure ++ * ++ * Update the Adaptive Interframe Spacing Throttle value based on the ++ * time between transmitted packets and time between collisions. ++ **/ ++void e1000_update_adaptive_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ ++ DEBUGFUNC("e1000_update_adaptive_generic"); ++ ++ if (!mac->adaptive_ifs) { ++ DEBUGOUT("Not in Adaptive IFS mode!\n"); ++ return; ++ } ++ ++ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { ++ if (mac->tx_packet_delta > MIN_NUM_XMITS) { ++ mac->in_ifs_mode = true; ++ if (mac->current_ifs_val < mac->ifs_max_val) { ++ if (!mac->current_ifs_val) ++ mac->current_ifs_val = mac->ifs_min_val; ++ else ++ mac->current_ifs_val += ++ mac->ifs_step_size; ++ E1000_WRITE_REG(hw, E1000_AIT, ++ mac->current_ifs_val); ++ } ++ } ++ } else { ++ if (mac->in_ifs_mode && ++ (mac->tx_packet_delta <= MIN_NUM_XMITS)) { ++ mac->current_ifs_val = 0; ++ mac->in_ifs_mode = false; ++ E1000_WRITE_REG(hw, E1000_AIT, 0); ++ } ++ } ++} ++ ++/** ++ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings ++ * @hw: pointer to the HW structure ++ * ++ * Verify that when not using auto-negotiation that MDI/MDIx is correctly ++ * set, which is forced to MDI mode only. 
++ **/ ++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) ++{ ++ DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { +- hw_dbg("Invalid MDI setting detected\n"); ++ DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ return -E1000_ERR_CONFIG; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register ++ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings ++ * @hw: pointer to the HW structure ++ * ++ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced ++ * operation. ++ **/ ++s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to +@@ -1535,72 +2126,28 @@ + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, +- u32 offset, u8 data) ++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, ++ u32 offset, u8 data) + { + u32 i, regvalue = 0; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); +- wr32(reg, regvalue); ++ E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { +- udelay(5); +- regvalue = rd32(reg); ++ usec_delay(5); ++ regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { +- hw_dbg("Reg %08x did not indicate ready\n", reg); +- ret_val = -E1000_ERR_PHY; +- goto out; +- } +- +-out: +- return ret_val; +-} +- +-/** +- * igb_enable_mng_pass_thru - Enable processing of ARP's +- * @hw: pointer to the HW structure +- * +- * Verifies the hardware needs to leave interface enabled so that frames can +- * be directed to and from the management interface. 
+- **/ +-bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +-{ +- u32 manc; +- u32 fwsm, factps; +- bool ret_val = false; +- +- if (!hw->mac.asf_firmware_present) +- goto out; +- +- manc = rd32(E1000_MANC); +- +- if (!(manc & E1000_MANC_RCV_TCO_EN)) +- goto out; +- +- if (hw->mac.arc_subsystem_valid) { +- fwsm = rd32(E1000_FWSM); +- factps = rd32(E1000_FACTPS); +- +- if (!(factps & E1000_FACTPS_MNGCG) && +- ((fwsm & E1000_FWSM_MODE_MASK) == +- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { +- ret_val = true; +- goto out; +- } +- } else { +- if ((manc & E1000_MANC_SMBUS_EN) && +- !(manc & E1000_MANC_ASF_EN)) { +- ret_val = true; +- goto out; +- } ++ DEBUGOUT1("Reg %08x did not indicate ready\n", reg); ++ return -E1000_ERR_PHY; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h +--- a/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,87 +1,81 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* + +-#ifndef _E1000_MAC_H_ +-#define _E1000_MAC_H_ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-#include "e1000_hw.h" ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
+ +-#include "e1000_phy.h" +-#include "e1000_nvm.h" +-#include "e1000_defines.h" +-#include "e1000_i210.h" +- +-/* Functions that should not be called directly from drivers but can be used +- * by other files in this 'shared code' +- */ +-s32 igb_blink_led(struct e1000_hw *hw); +-s32 igb_check_for_copper_link(struct e1000_hw *hw); +-s32 igb_cleanup_led(struct e1000_hw *hw); +-s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +-s32 igb_disable_pcie_master(struct e1000_hw *hw); +-s32 igb_force_mac_fc(struct e1000_hw *hw); +-s32 igb_get_auto_rd_done(struct e1000_hw *hw); +-s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +-s32 igb_get_hw_semaphore(struct e1000_hw *hw); +-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, +- u16 *duplex); +-s32 igb_id_led_init(struct e1000_hw *hw); +-s32 igb_led_off(struct e1000_hw *hw); +-void igb_update_mc_addr_list(struct e1000_hw *hw, +- u8 *mc_addr_list, u32 mc_addr_count); +-s32 igb_setup_link(struct e1000_hw *hw); +-s32 igb_validate_mdi_setting(struct e1000_hw *hw); +-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, +- u32 offset, u8 data); +- +-void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +-void igb_clear_vfta(struct e1000_hw *hw); +-void igb_clear_vfta_i350(struct e1000_hw *hw); +-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +-void igb_config_collision_dist(struct e1000_hw *hw); +-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +-void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +-void igb_put_hw_semaphore(struct e1000_hw *hw); +-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +-s32 igb_check_alt_mac_addr(struct e1000_hw *hw); +- +-bool igb_enable_mng_pass_thru(struct e1000_hw *hw); +- +-enum e1000_mng_mode { +- e1000_mng_mode_none = 0, +- e1000_mng_mode_asf, +- e1000_mng_mode_pt, +- e1000_mng_mode_ipmi, +- e1000_mng_mode_host_if_only +-}; ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". + +-#define E1000_FACTPS_MNGCG 0x20000000 ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +-#define E1000_FWSM_MODE_MASK 0xE +-#define E1000_FWSM_MODE_SHIFT 1 ++*******************************************************************************/ + +-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 ++#ifndef _E1000_MAC_H_ ++#define _E1000_MAC_H_ + +-void e1000_init_function_pointers_82575(struct e1000_hw *hw); ++void e1000_init_mac_ops_generic(struct e1000_hw *hw); ++#ifndef E1000_REMOVED ++#define E1000_REMOVED(a) (0) ++#endif /* E1000_REMOVED */ ++void e1000_null_mac_generic(struct e1000_hw *hw); ++s32 e1000_null_ops_generic(struct e1000_hw *hw); ++s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); ++bool e1000_null_mng_mode(struct e1000_hw *hw); ++void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); ++void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); ++int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); ++s32 e1000_blink_led_generic(struct e1000_hw *hw); ++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); ++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); ++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); ++s32 e1000_cleanup_led_generic(struct e1000_hw *hw); ++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); ++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); ++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); ++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); ++s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); ++void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw); ++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); ++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, ++ u16 *duplex); ++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, ++ u16 *speed, u16 *duplex); ++s32 e1000_id_led_init_generic(struct e1000_hw *hw); ++s32 e1000_led_on_generic(struct e1000_hw *hw); ++s32 e1000_led_off_generic(struct e1000_hw *hw); ++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, ++ u8 *mc_addr_list, u32 mc_addr_count); ++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); ++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); ++s32 e1000_setup_led_generic(struct e1000_hw *hw); ++s32 e1000_setup_link_generic(struct e1000_hw *hw); ++s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); ++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, ++ u32 offset, u8 data); ++ ++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); ++ ++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); ++void igb_e1000_clear_vfta_generic(struct e1000_hw *hw); ++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); ++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); ++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); ++s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); ++void e1000_reset_adaptive_generic(struct e1000_hw *hw); ++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); ++void e1000_update_adaptive_generic(struct e1000_hw *hw); ++void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + + #endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.c b/drivers/net/ethernet/intel/igb/e1000_manage.c +--- a/drivers/net/ethernet/intel/igb/e1000_manage.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_manage.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,552 @@ 
++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++/** ++ * e1000_calculate_checksum - Calculate checksum for buffer ++ * @buffer: pointer to EEPROM ++ * @length: size of EEPROM to calculate a checksum for ++ * ++ * Calculates the checksum for some buffer on a specified length. The ++ * checksum calculated is returned. ++ **/ ++u8 e1000_calculate_checksum(u8 *buffer, u32 length) ++{ ++ u32 i; ++ u8 sum = 0; ++ ++ DEBUGFUNC("e1000_calculate_checksum"); ++ ++ if (!buffer) ++ return 0; ++ ++ for (i = 0; i < length; i++) ++ sum += buffer[i]; ++ ++ return (u8) (0 - sum); ++} ++ ++/** ++ * e1000_mng_enable_host_if_generic - Checks host interface is enabled ++ * @hw: pointer to the HW structure ++ * ++ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND ++ * ++ * This function checks whether the HOST IF is enabled for command operation ++ * and also checks whether the previous command is completed. It busy waits ++ * in case of previous command is not completed. ++ **/ ++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) ++{ ++ u32 hicr; ++ u8 i; ++ ++ DEBUGFUNC("e1000_mng_enable_host_if_generic"); ++ ++ if (!hw->mac.arc_subsystem_valid) { ++ DEBUGOUT("ARC subsystem not valid.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Check that the host interface is enabled. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_EN)) { ++ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ /* check the previous command is completed */ ++ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_C)) ++ break; ++ msec_delay_irq(1); ++ } ++ ++ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { ++ DEBUGOUT("Previous command timeout failed .\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_check_mng_mode_generic - Generic check management mode ++ * @hw: pointer to the HW structure ++ * ++ * Reads the firmware semaphore register and returns true (>0) if ++ * manageability is enabled, else false (0). 
++ **/ ++bool e1000_check_mng_mode_generic(struct e1000_hw *hw) ++{ ++ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); ++ ++ DEBUGFUNC("e1000_check_mng_mode_generic"); ++ ++ ++ return (fwsm & E1000_FWSM_MODE_MASK) == ++ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); ++} ++ ++/** ++ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx ++ * @hw: pointer to the HW structure ++ * ++ * Enables packet filtering on transmit packets if manageability is enabled ++ * and host interface is enabled. ++ **/ ++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) ++{ ++ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; ++ u32 *buffer = (u32 *)&hw->mng_cookie; ++ u32 offset; ++ s32 ret_val, hdr_csum, csum; ++ u8 i, len; ++ ++ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); ++ ++ hw->mac.tx_pkt_filtering = true; ++ ++ /* No manageability, no filtering */ ++ if (!hw->mac.ops.check_mng_mode(hw)) { ++ hw->mac.tx_pkt_filtering = false; ++ return hw->mac.tx_pkt_filtering; ++ } ++ ++ /* If we can't read from the host interface for whatever ++ * reason, disable filtering. ++ */ ++ ret_val = e1000_mng_enable_host_if_generic(hw); ++ if (ret_val != E1000_SUCCESS) { ++ hw->mac.tx_pkt_filtering = false; ++ return hw->mac.tx_pkt_filtering; ++ } ++ ++ /* Read in the header. Length and offset are in dwords. */ ++ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; ++ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; ++ for (i = 0; i < len; i++) ++ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, ++ offset + i); ++ hdr_csum = hdr->checksum; ++ hdr->checksum = 0; ++ csum = e1000_calculate_checksum((u8 *)hdr, ++ E1000_MNG_DHCP_COOKIE_LENGTH); ++ /* If either the checksums or signature don't match, then ++ * the cookie area isn't considered valid, in which case we ++ * take the safe route of assuming Tx filtering is enabled. ++ */ ++ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { ++ hw->mac.tx_pkt_filtering = true; ++ return hw->mac.tx_pkt_filtering; ++ } ++ ++ /* Cookie area is valid, make the final check for filtering. */ ++ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) ++ hw->mac.tx_pkt_filtering = false; ++ ++ return hw->mac.tx_pkt_filtering; ++} ++ ++/** ++ * e1000_mng_write_cmd_header_generic - Writes manageability command header ++ * @hw: pointer to the HW structure ++ * @hdr: pointer to the host interface command header ++ * ++ * Writes the command header after does the checksum calculation. ++ **/ ++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr) ++{ ++ u16 i, length = sizeof(struct e1000_host_mng_command_header); ++ ++ DEBUGFUNC("e1000_mng_write_cmd_header_generic"); ++ ++ /* Write the whole command header structure with new checksum. */ ++ ++ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); ++ ++ length >>= 2; ++ /* Write the relevant command block into the ram area. */ ++ for (i = 0; i < length; i++) { ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, ++ *((u32 *) hdr + i)); ++ E1000_WRITE_FLUSH(hw); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_mng_host_if_write_generic - Write to the manageability host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface buffer ++ * @length: size of the buffer ++ * @offset: location in the buffer to write to ++ * @sum: sum of the data (not checksum) ++ * ++ * This function writes the buffer content at the offset given on the host if. 
++ * It also does alignment considerations to do the writes in most efficient ++ * way. Also fills up the sum of the buffer in *buffer parameter. ++ **/ ++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, ++ u16 length, u16 offset, u8 *sum) ++{ ++ u8 *tmp; ++ u8 *bufptr = buffer; ++ u32 data = 0; ++ u16 remaining, i, j, prev_bytes; ++ ++ DEBUGFUNC("e1000_mng_host_if_write_generic"); ++ ++ /* sum = only sum of the data and it is not checksum */ ++ ++ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) ++ return -E1000_ERR_PARAM; ++ ++ tmp = (u8 *)&data; ++ prev_bytes = offset & 0x3; ++ offset >>= 2; ++ ++ if (prev_bytes) { ++ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); ++ for (j = prev_bytes; j < sizeof(u32); j++) { ++ *(tmp + j) = *bufptr++; ++ *sum += *(tmp + j); ++ } ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); ++ length -= j - prev_bytes; ++ offset++; ++ } ++ ++ remaining = length & 0x3; ++ length -= remaining; ++ ++ /* Calculate length in DWORDs */ ++ length >>= 2; ++ ++ /* The device driver writes the relevant command block into the ++ * ram area. ++ */ ++ for (i = 0; i < length; i++) { ++ for (j = 0; j < sizeof(u32); j++) { ++ *(tmp + j) = *bufptr++; ++ *sum += *(tmp + j); ++ } ++ ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, ++ data); ++ } ++ if (remaining) { ++ for (j = 0; j < sizeof(u32); j++) { ++ if (j < remaining) ++ *(tmp + j) = *bufptr++; ++ else ++ *(tmp + j) = 0; ++ ++ *sum += *(tmp + j); ++ } ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, ++ data); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface ++ * @length: size of the buffer ++ * ++ * Writes the DHCP information to the host interface. ++ **/ ++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, ++ u16 length) ++{ ++ struct e1000_host_mng_command_header hdr; ++ s32 ret_val; ++ u32 hicr; ++ ++ DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); ++ ++ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; ++ hdr.command_length = length; ++ hdr.reserved1 = 0; ++ hdr.reserved2 = 0; ++ hdr.checksum = 0; ++ ++ /* Enable the host interface */ ++ ret_val = e1000_mng_enable_host_if_generic(hw); ++ if (ret_val) ++ return ret_val; ++ ++ /* Populate the host interface with the contents of "buffer". */ ++ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, ++ sizeof(hdr), &(hdr.checksum)); ++ if (ret_val) ++ return ret_val; ++ ++ /* Write the manageability command header */ ++ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); ++ if (ret_val) ++ return ret_val; ++ ++ /* Tell the ARC a new command is pending. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_enable_mng_pass_thru - Check if management passthrough is needed ++ * @hw: pointer to the HW structure ++ * ++ * Verifies the hardware needs to leave interface enabled so that frames can ++ * be directed to and from the management interface. 
++ **/ ++/* Changed name, duplicated with e1000 */ ++bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw) ++{ ++ u32 manc; ++ u32 fwsm, factps; ++ ++ DEBUGFUNC("igb_e1000_enable_mng_pass_thru"); ++ ++ if (!hw->mac.asf_firmware_present) ++ return false; ++ ++ manc = E1000_READ_REG(hw, E1000_MANC); ++ ++ if (!(manc & E1000_MANC_RCV_TCO_EN)) ++ return false; ++ ++ if (hw->mac.has_fwsm) { ++ fwsm = E1000_READ_REG(hw, E1000_FWSM); ++ factps = E1000_READ_REG(hw, E1000_FACTPS); ++ ++ if (!(factps & E1000_FACTPS_MNGCG) && ++ ((fwsm & E1000_FWSM_MODE_MASK) == ++ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) ++ return true; ++ } else if ((manc & E1000_MANC_SMBUS_EN) && ++ !(manc & E1000_MANC_ASF_EN)) { ++ return true; ++ } ++ ++ return false; ++} ++ ++/** ++ * e1000_host_interface_command - Writes buffer to host interface ++ * @hw: pointer to the HW structure ++ * @buffer: contains a command to write ++ * @length: the byte length of the buffer, must be multiple of 4 bytes ++ * ++ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS ++ * else returns E1000_ERR_HOST_INTERFACE_COMMAND. ++ **/ ++s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) ++{ ++ u32 hicr, i; ++ ++ DEBUGFUNC("e1000_host_interface_command"); ++ ++ if (!(hw->mac.arc_subsystem_valid)) { ++ DEBUGOUT("Hardware doesn't support host interface command.\n"); ++ return E1000_SUCCESS; ++ } ++ ++ if (!hw->mac.asf_firmware_present) { ++ DEBUGOUT("Firmware is not present.\n"); ++ return E1000_SUCCESS; ++ } ++ ++ if (length == 0 || length & 0x3 || ++ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { ++ DEBUGOUT("Buffer length failure.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Check that the host interface is enabled. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_EN)) { ++ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Calculate length in DWORDs */ ++ length >>= 2; ++ ++ /* The device driver writes the relevant command block ++ * into the ram area. ++ */ ++ for (i = 0; i < length; i++) ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, ++ *((u32 *)buffer + i)); ++ ++ /* Setting this bit tells the ARC that a new command is pending. */ ++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); ++ ++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_C)) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check command successful completion. */ ++ if (i == E1000_HI_COMMAND_TIMEOUT || ++ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { ++ DEBUGOUT("Command has failed with no status valid.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ for (i = 0; i < length; i++) ++ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, ++ E1000_HOST_IF, ++ i); ++ ++ return E1000_SUCCESS; ++} ++/** ++ * e1000_load_firmware - Writes proxy FW code buffer to host interface ++ * and execute. ++ * @hw: pointer to the HW structure ++ * @buffer: contains a firmware to write ++ * @length: the byte length of the buffer, must be multiple of 4 bytes ++ * ++ * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled ++ * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. 
++ **/ ++s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) ++{ ++ u32 hicr, hibba, fwsm, icr, i; ++ ++ DEBUGFUNC("e1000_load_firmware"); ++ ++ if (hw->mac.type < e1000_i210) { ++ DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ ++ /* Check that the host interface is enabled. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_EN)) { ++ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { ++ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ ++ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { ++ DEBUGOUT("Buffer length failure.\n"); ++ return -E1000_ERR_INVALID_ARGUMENT; ++ } ++ ++ /* Clear notification from ROM-FW by reading ICR register */ ++ icr = E1000_READ_REG(hw, E1000_ICR_V2); ++ ++ /* Reset ROM-FW */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ hicr |= E1000_HICR_FW_RESET_ENABLE; ++ E1000_WRITE_REG(hw, E1000_HICR, hicr); ++ hicr |= E1000_HICR_FW_RESET; ++ E1000_WRITE_REG(hw, E1000_HICR, hicr); ++ E1000_WRITE_FLUSH(hw); ++ ++ /* Wait till MAC notifies about its readiness after ROM-FW reset */ ++ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { ++ icr = E1000_READ_REG(hw, E1000_ICR_V2); ++ if (icr & E1000_ICR_MNG) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check for timeout */ ++ if (i == E1000_HI_COMMAND_TIMEOUT) { ++ DEBUGOUT("FW reset failed.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Wait till MAC is ready to accept new FW code */ ++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { ++ fwsm = E1000_READ_REG(hw, E1000_FWSM); ++ if ((fwsm & E1000_FWSM_FW_VALID) && ++ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == ++ E1000_FWSM_HI_EN_ONLY_MODE)) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check for timeout */ ++ if (i == E1000_HI_COMMAND_TIMEOUT) { ++ DEBUGOUT("FW reset failed.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Calculate length in DWORDs */ ++ length >>= 2; ++ ++ /* The device driver writes the relevant FW code block ++ * into the ram area in DWORDs via 1kB ram addressing window. ++ */ ++ for (i = 0; i < length; i++) { ++ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { ++ /* Point to correct 1kB ram window */ ++ hibba = E1000_HI_FW_BASE_ADDRESS + ++ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * ++ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); ++ ++ E1000_WRITE_REG(hw, E1000_HIBBA, hibba); ++ } ++ ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, ++ i % E1000_HI_FW_BLOCK_DWORD_LENGTH, ++ *((u32 *)buffer + i)); ++ } ++ ++ /* Setting this bit tells the ARC that a new FW is ready to execute. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); ++ ++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_C)) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check for successful FW start. 
*/ ++ if (i == E1000_HI_COMMAND_TIMEOUT) { ++ DEBUGOUT("New FW did not start within timeout period.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ return E1000_SUCCESS; ++} ++ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.h b/drivers/net/ethernet/intel/igb/e1000_manage.h +--- a/drivers/net/ethernet/intel/igb/e1000_manage.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_manage.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,86 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _E1000_MANAGE_H_ ++#define _E1000_MANAGE_H_ ++ ++bool e1000_check_mng_mode_generic(struct e1000_hw *hw); ++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); ++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); ++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, ++ u16 length, u16 offset, u8 *sum); ++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr); ++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, ++ u8 *buffer, u16 length); ++bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw); ++u8 e1000_calculate_checksum(u8 *buffer, u32 length); ++s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); ++s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); ++ ++enum e1000_mng_mode { ++ e1000_mng_mode_none = 0, ++ e1000_mng_mode_asf, ++ e1000_mng_mode_pt, ++ e1000_mng_mode_ipmi, ++ e1000_mng_mode_host_if_only ++}; ++ ++#define E1000_FACTPS_MNGCG 0x20000000 ++ ++#define E1000_FWSM_MODE_MASK 0xE ++#define E1000_FWSM_MODE_SHIFT 1 ++#define E1000_FWSM_FW_VALID 0x00008000 ++#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 ++ ++#define E1000_MNG_IAMT_MODE 0x3 ++#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 ++#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 ++#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 ++#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 ++#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 ++#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 ++ ++#define E1000_VFTA_ENTRY_SHIFT 5 ++#define E1000_VFTA_ENTRY_MASK 0x7F ++#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F ++ ++#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ ++#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ ++#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ ++#define E1000_HI_FW_BASE_ADDRESS 0x10000 ++#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ ++#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ ++#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ ++#define 
E1000_HICR_EN 0x01 /* Enable bit - RO */ ++/* Driver sets this bit when done to put command in RAM */ ++#define E1000_HICR_C 0x02 ++#define E1000_HICR_SV 0x04 /* Status Validity */ ++#define E1000_HICR_FW_RESET_ENABLE 0x40 ++#define E1000_HICR_FW_RESET 0x80 ++ ++/* Intel(R) Active Management Technology signature */ ++#define E1000_IAMT_SIGNATURE 0x544D4149 ++ ++#endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c +--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,42 +1,71 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #include "e1000_mbx.h" + + /** +- * igb_read_mbx - Reads a message from the mailbox ++ * e1000_null_mbx_check_for_flag - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG mbx_id) ++{ ++ DEBUGFUNC("e1000_null_mbx_check_flag"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_mbx_transact - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG *msg, ++ u16 E1000_UNUSEDARG size, ++ u16 E1000_UNUSEDARG mbx_id) ++{ ++ DEBUGFUNC("e1000_null_mbx_rw_msg"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * +- * returns SUCCESS if it successfully read message from buffer ++ * returns SUCCESS if it successfuly read message from buffer + **/ +-s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_read_mbx"); ++ + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; +@@ -48,7 +77,7 @@ + } + + /** +- * igb_write_mbx - Write a message to the mailbox ++ * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -56,10 +85,12 @@ + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +-s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; +@@ -71,17 +102,19 @@ + } + + /** +- * igb_check_for_msg - checks to see if someone sent us mail ++ * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +-s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) ++s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_msg"); ++ + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + +@@ -89,17 +122,19 @@ + } + + /** +- * igb_check_for_ack - checks to see if someone sent us ACK ++ * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +-s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) ++s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_ack"); ++ + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + +@@ -107,17 +142,19 @@ + } + + /** +- * igb_check_for_rst - checks to see if other side has reset ++ * 
e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +-s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) ++s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_rst"); ++ + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + +@@ -125,17 +162,19 @@ + } + + /** +- * igb_poll_for_msg - Wait for message notification ++ * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +-static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) ++static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + ++ DEBUGFUNC("e1000_poll_for_msg"); ++ + if (!countdown || !mbx->ops.check_for_msg) + goto out; + +@@ -143,28 +182,30 @@ + countdown--; + if (!countdown) + break; +- udelay(mbx->usec_delay); ++ usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + out: +- return countdown ? 0 : -E1000_ERR_MBX; ++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; + } + + /** +- * igb_poll_for_ack - Wait for message acknowledgement ++ * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +-static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) ++static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + ++ DEBUGFUNC("e1000_poll_for_ack"); ++ + if (!countdown || !mbx->ops.check_for_ack) + goto out; + +@@ -172,18 +213,18 @@ + countdown--; + if (!countdown) + break; +- udelay(mbx->usec_delay); ++ usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + out: +- return countdown ? 0 : -E1000_ERR_MBX; ++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; + } + + /** +- * igb_read_posted_mbx - Wait for message notification and receive message ++ * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -192,17 +233,19 @@ + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
+ **/ +-static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 mbx_id) ++s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_read_posted_mbx"); ++ + if (!mbx->ops.read) + goto out; + +- ret_val = igb_poll_for_msg(hw, mbx_id); ++ ret_val = e1000_poll_for_msg(hw, mbx_id); + ++ /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + out: +@@ -210,7 +253,7 @@ + } + + /** +- * igb_write_posted_mbx - Write a message to the mailbox, wait for ack ++ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -219,12 +262,13 @@ + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +-static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 mbx_id) ++s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_write_posted_mbx"); ++ + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; +@@ -234,37 +278,58 @@ + + /* if msg sent wait until we receive an ack */ + if (!ret_val) +- ret_val = igb_poll_for_ack(hw, mbx_id); ++ ret_val = e1000_poll_for_ack(hw, mbx_id); + out: + return ret_val; + } + +-static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) ++/** ++ * e1000_init_mbx_ops_generic - Initialize mbx function pointers ++ * @hw: pointer to the HW structure ++ * ++ * Sets the function pointers to no-op functions ++ **/ ++void e1000_init_mbx_ops_generic(struct e1000_hw *hw) + { +- u32 mbvficr = rd32(E1000_MBVFICR); ++ struct e1000_mbx_info *mbx = &hw->mbx; ++ mbx->ops.init_params = e1000_null_ops_generic; ++ mbx->ops.read = e1000_null_mbx_transact; ++ mbx->ops.write = e1000_null_mbx_transact; ++ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; ++ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; ++ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; ++ mbx->ops.read_posted = e1000_read_posted_mbx; ++ mbx->ops.write_posted = e1000_write_posted_mbx; ++} ++ ++static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) ++{ ++ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { +- ret_val = 0; +- wr32(E1000_MBVFICR, mask); ++ ret_val = E1000_SUCCESS; ++ E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; + } + + /** +- * igb_check_for_msg_pf - checks to see if the VF has sent mail ++ * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) + { + s32 ret_val = -E1000_ERR_MBX; + +- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { +- ret_val = 0; ++ DEBUGFUNC("e1000_check_for_msg_pf"); ++ ++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { ++ ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + +@@ -272,18 +337,20 @@ + } + + /** +- * igb_check_for_ack_pf - 
checks to see if the VF has ACKed ++ * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) + { + s32 ret_val = -E1000_ERR_MBX; + +- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { +- ret_val = 0; ++ DEBUGFUNC("e1000_check_for_ack_pf"); ++ ++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { ++ ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + +@@ -291,20 +358,22 @@ + } + + /** +- * igb_check_for_rst_pf - checks to see if the VF has reset ++ * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) + { +- u32 vflre = rd32(E1000_VFLRE); ++ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_rst_pf"); ++ + if (vflre & (1 << vf_number)) { +- ret_val = 0; +- wr32(E1000_VFLRE, (1 << vf_number)); ++ ret_val = E1000_SUCCESS; ++ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + +@@ -312,30 +381,40 @@ + } + + /** +- * igb_obtain_mbx_lock_pf - obtain mailbox lock ++ * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +-static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) + { + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; ++ int count = 10; + +- /* Take ownership of the buffer */ +- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); ++ DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + +- /* reserve mailbox for vf use */ +- p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); +- if (p2v_mailbox & E1000_P2VMAILBOX_PFU) +- ret_val = 0; ++ do { ++ /* Take ownership of the buffer */ ++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), ++ E1000_P2VMAILBOX_PFU); ++ ++ /* reserve mailbox for pf use */ ++ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); ++ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { ++ ret_val = E1000_SUCCESS; ++ break; ++ } ++ usec_delay(1000); ++ } while (count-- > 0); + + return ret_val; ++ + } + + /** +- * igb_write_mbx_pf - Places a message in the mailbox ++ * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -343,27 +422,29 @@ + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +-static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 vf_number) ++static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, ++ u16 vf_number) + { + s32 ret_val; + u16 i; + ++ DEBUGFUNC("e1000_write_mbx_pf"); ++ + /* lock the mailbox to prevent pf/vf race condition */ +- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); ++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ +- igb_check_for_msg_pf(hw, vf_number); +- 
igb_check_for_ack_pf(hw, vf_number); ++ e1000_check_for_msg_pf(hw, vf_number); ++ e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) +- array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); ++ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ +- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); ++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; +@@ -374,7 +455,7 @@ + } + + /** +- * igb_read_mbx_pf - Read a message from the mailbox ++ * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -384,23 +465,25 @@ + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +-static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 vf_number) ++static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, ++ u16 vf_number) + { + s32 ret_val; + u16 i; + ++ DEBUGFUNC("e1000_read_mbx_pf"); ++ + /* lock the mailbox to prevent pf/vf race condition */ +- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); ++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) +- msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); ++ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ +- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); ++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; +@@ -415,29 +498,34 @@ + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +-s32 igb_init_mbx_params_pf(struct e1000_hw *hw) ++s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) + { + struct e1000_mbx_info *mbx = &hw->mbx; + +- mbx->timeout = 0; +- mbx->usec_delay = 0; +- +- mbx->size = E1000_VFMAILBOX_SIZE; +- +- mbx->ops.read = igb_read_mbx_pf; +- mbx->ops.write = igb_write_mbx_pf; +- mbx->ops.read_posted = igb_read_posted_mbx; +- mbx->ops.write_posted = igb_write_posted_mbx; +- mbx->ops.check_for_msg = igb_check_for_msg_pf; +- mbx->ops.check_for_ack = igb_check_for_ack_pf; +- mbx->ops.check_for_rst = igb_check_for_rst_pf; ++ switch (hw->mac.type) { ++ case e1000_82576: ++ case e1000_i350: ++ case e1000_i354: ++ mbx->timeout = 0; ++ mbx->usec_delay = 0; + +- mbx->stats.msgs_tx = 0; +- mbx->stats.msgs_rx = 0; +- mbx->stats.reqs = 0; +- mbx->stats.acks = 0; +- mbx->stats.rsts = 0; ++ mbx->size = E1000_VFMAILBOX_SIZE; + +- return 0; ++ mbx->ops.read = e1000_read_mbx_pf; ++ mbx->ops.write = e1000_write_mbx_pf; ++ mbx->ops.read_posted = e1000_read_posted_mbx; ++ mbx->ops.write_posted = e1000_write_posted_mbx; ++ mbx->ops.check_for_msg = e1000_check_for_msg_pf; ++ mbx->ops.check_for_ack = e1000_check_for_ack_pf; ++ mbx->ops.check_for_rst = e1000_check_for_rst_pf; ++ ++ mbx->stats.msgs_tx = 0; ++ mbx->stats.msgs_rx = 0; ++ mbx->stats.reqs = 0; ++ mbx->stats.acks = 0; ++ mbx->stats.rsts = 0; ++ default: ++ return E1000_SUCCESS; ++ } + } + +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h +--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-13 
09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,30 +1,31 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_MBX_H_ + #define _E1000_MBX_H_ + +-#include "e1000_hw.h" ++#include "e1000_api.h" + + #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ + #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +@@ -32,10 +33,10 @@ + #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ + #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +-#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +-#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ ++#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ ++#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ ++#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ ++#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +@@ -43,31 +44,41 @@ + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +-/* Messages below or'd with this are the ACK */ ++/* Msgs below or'd with this are the ACK */ + #define E1000_VT_MSGTYPE_ACK 0x80000000 +-/* Messages below or'd with this are the NACK */ ++/* Msgs below or'd with this are the NACK */ + #define E1000_VT_MSGTYPE_NACK 0x40000000 + /* Indicates that VF is still clear to send requests */ + #define E1000_VT_MSGTYPE_CTS 0x20000000 + #define E1000_VT_MSGINFO_SHIFT 16 +-/* bits 23:16 are used for exra info for certain messages */ ++/* bits 23:16 are used for extra info for certain messages */ + #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +-#define E1000_VF_RESET 0x01 /* VF requests reset */ +-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +-#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +-#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +-#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ ++#define E1000_VF_RESET 0x01 /* VF requests reset */ ++#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ ++#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ ++#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) ++#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) ++#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ ++#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) ++#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ ++#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ ++#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) + #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ ++#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ ++ ++#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ ++#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +-s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); +-s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); +-s32 igb_check_for_msg(struct e1000_hw *, u16); +-s32 igb_check_for_ack(struct e1000_hw *, u16); +-s32 igb_check_for_rst(struct e1000_hw *, u16); +-s32 igb_init_mbx_params_pf(struct e1000_hw *); ++s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_check_for_msg(struct e1000_hw *, u16); ++s32 e1000_check_for_ack(struct e1000_hw *, u16); ++s32 e1000_check_for_rst(struct e1000_hw *, u16); ++void e1000_init_mbx_ops_generic(struct e1000_hw *hw); ++s32 e1000_init_mbx_params_pf(struct e1000_hw *); + + #endif /* _E1000_MBX_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c +--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,63 +1,131 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. 
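A note on the e1000_mbx.h constants above: an entire VF request fits in one 32-bit mailbox word — the low bits carry the command (E1000_VF_RESET, E1000_VF_SET_MULTICAST, ...), bits 23:16 carry the per-command extra info, and, per the header comment, the PF's ACK/NACK reply is the request value or'd with the E1000_VT_MSGTYPE_* bits. A standalone sketch of that encoding follows; the constant values are copied from the header above, while the helper names (vf_build_set_multicast, pf_reply_is_ack) are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* values as defined in e1000_mbx.h above */
#define E1000_VT_MSGTYPE_ACK    0x80000000u
#define E1000_VT_MSGTYPE_NACK   0x40000000u
#define E1000_VT_MSGINFO_SHIFT  16
#define E1000_VF_SET_MULTICAST  0x03u

/* First mailbox word of a "set multicast" request; the address count
 * rides in the MSGINFO field (bits 23:16). */
static uint32_t vf_build_set_multicast(uint32_t count)
{
        return E1000_VF_SET_MULTICAST | (count << E1000_VT_MSGINFO_SHIFT);
}

/* Nonzero if the PF's reply word acknowledges the request that was sent. */
static int pf_reply_is_ack(uint32_t reply, uint32_t request)
{
        if (reply & E1000_VT_MSGTYPE_NACK)
                return 0;
        return (reply & E1000_VT_MSGTYPE_ACK) &&
               ((reply & 0xFFFFu) == (request & 0xFFFFu));
}

int main(void)
{
        uint32_t req = vf_build_set_multicast(4);
        uint32_t ack = req | E1000_VT_MSGTYPE_ACK;      /* what a PF would send back */

        printf("req=0x%08x ack? %d\n", (unsigned int)req, pf_reply_is_ack(ack, req));
        return 0;
}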
+- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++static void e1000_reload_nvm_generic(struct e1000_hw *hw); ++ ++/** ++ * e1000_init_nvm_ops_generic - Initialize NVM function pointers ++ * @hw: pointer to the HW structure + * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++ * Setups up the function pointers to no-op functions ++ **/ ++void e1000_init_nvm_ops_generic(struct e1000_hw *hw) ++{ ++ struct e1000_nvm_info *nvm = &hw->nvm; ++ DEBUGFUNC("e1000_init_nvm_ops_generic"); ++ ++ /* Initialize function pointers */ ++ nvm->ops.init_params = e1000_null_ops_generic; ++ nvm->ops.acquire = e1000_null_ops_generic; ++ nvm->ops.read = e1000_null_read_nvm; ++ nvm->ops.release = e1000_null_nvm_generic; ++ nvm->ops.reload = e1000_reload_nvm_generic; ++ nvm->ops.update = e1000_null_ops_generic; ++ nvm->ops.valid_led_default = e1000_null_led_default; ++ nvm->ops.validate = e1000_null_ops_generic; ++ nvm->ops.write = e1000_null_write_nvm; ++} + +-#include +-#include ++/** ++ * e1000_null_nvm_read - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, ++ u16 E1000_UNUSEDARG *c) ++{ ++ DEBUGFUNC("e1000_null_read_nvm"); ++ return E1000_SUCCESS; ++} + +-#include "e1000_mac.h" +-#include "e1000_nvm.h" ++/** ++ * e1000_null_nvm_generic - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_nvm_generic"); ++ return; ++} + + /** +- * igb_raise_eec_clk - Raise EEPROM clock ++ * e1000_null_led_default - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG *data) ++{ ++ DEBUGFUNC("e1000_null_led_default"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_write_nvm - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 
e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, ++ u16 E1000_UNUSEDARG *c) ++{ ++ DEBUGFUNC("e1000_null_write_nvm"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +-static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) ++static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) + { + *eecd = *eecd | E1000_EECD_SK; +- wr32(E1000_EECD, *eecd); +- wrfl(); +- udelay(hw->nvm.delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, *eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(hw->nvm.delay_usec); + } + + /** +- * igb_lower_eec_clk - Lower EEPROM clock ++ * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +-static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) ++static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) + { + *eecd = *eecd & ~E1000_EECD_SK; +- wr32(E1000_EECD, *eecd); +- wrfl(); +- udelay(hw->nvm.delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, *eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(hw->nvm.delay_usec); + } + + /** +- * igb_shift_out_eec_bits - Shift data bits our to the EEPROM ++ * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out +@@ -66,12 +134,14 @@ + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +-static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) ++static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + ++ DEBUGFUNC("e1000_shift_out_eec_bits"); ++ + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; +@@ -82,23 +152,23 @@ + if (data & mask) + eecd |= E1000_EECD_DI; + +- wr32(E1000_EECD, eecd); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); + +- udelay(nvm->delay_usec); ++ usec_delay(nvm->delay_usec); + +- igb_raise_eec_clk(hw, &eecd); +- igb_lower_eec_clk(hw, &eecd); ++ e1000_raise_eec_clk(hw, &eecd); ++ e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; +- wr32(E1000_EECD, eecd); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); + } + + /** +- * igb_shift_in_eec_bits - Shift data bits in from the EEPROM ++ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * +@@ -108,121 +178,124 @@ + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
+ **/ +-static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) ++static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) + { + u32 eecd; + u32 i; + u16 data; + +- eecd = rd32(E1000_EECD); ++ DEBUGFUNC("e1000_shift_in_eec_bits"); ++ ++ eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; +- igb_raise_eec_clk(hw, &eecd); ++ e1000_raise_eec_clk(hw, &eecd); + +- eecd = rd32(E1000_EECD); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + +- igb_lower_eec_clk(hw, &eecd); ++ e1000_lower_eec_clk(hw, &eecd); + } + + return data; + } + + /** +- * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion ++ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +-static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) ++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) + { + u32 attempts = 100000; + u32 i, reg = 0; +- s32 ret_val = -E1000_ERR_NVM; ++ ++ DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) +- reg = rd32(E1000_EERD); ++ reg = E1000_READ_REG(hw, E1000_EERD); + else +- reg = rd32(E1000_EEWR); ++ reg = E1000_READ_REG(hw, E1000_EEWR); + +- if (reg & E1000_NVM_RW_REG_DONE) { +- ret_val = 0; +- break; +- } ++ if (reg & E1000_NVM_RW_REG_DONE) ++ return E1000_SUCCESS; + +- udelay(5); ++ usec_delay(5); + } + +- return ret_val; ++ return -E1000_ERR_NVM; + } + + /** +- * igb_acquire_nvm - Generic request for access to EEPROM ++ * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +-s32 igb_acquire_nvm(struct e1000_hw *hw) ++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) + { +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; +- s32 ret_val = 0; + ++ DEBUGFUNC("e1000_acquire_nvm_generic"); + +- wr32(E1000_EECD, eecd | E1000_EECD_REQ); +- eecd = rd32(E1000_EECD); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; +- udelay(5); +- eecd = rd32(E1000_EECD); ++ usec_delay(5); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; +- wr32(E1000_EECD, eecd); +- hw_dbg("Could not acquire NVM grant\n"); +- ret_val = -E1000_ERR_NVM; ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ DEBUGOUT("Could not acquire NVM grant\n"); ++ return -E1000_ERR_NVM; + } + +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_standby_nvm - Return EEPROM to standby state ++ * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
+ **/ +-static void igb_standby_nvm(struct e1000_hw *hw) ++static void e1000_standby_nvm(struct e1000_hw *hw) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); ++ ++ DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; +- wr32(E1000_EECD, eecd); +- wrfl(); +- udelay(nvm->delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; +- wr32(E1000_EECD, eecd); +- wrfl(); +- udelay(nvm->delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(nvm->delay_usec); + } + } + +@@ -236,53 +309,57 @@ + { + u32 eecd; + +- eecd = rd32(E1000_EECD); ++ DEBUGFUNC("e1000_stop_nvm"); ++ ++ eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; +- igb_lower_eec_clk(hw, &eecd); ++ e1000_lower_eec_clk(hw, &eecd); + } + } + + /** +- * igb_release_nvm - Release exclusive access to EEPROM ++ * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +-void igb_release_nvm(struct e1000_hw *hw) ++void e1000_release_nvm_generic(struct e1000_hw *hw) + { + u32 eecd; + ++ DEBUGFUNC("e1000_release_nvm_generic"); ++ + e1000_stop_nvm(hw); + +- eecd = rd32(E1000_EECD); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; +- wr32(E1000_EECD, eecd); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); + } + + /** +- * igb_ready_nvm_eeprom - Prepares EEPROM for read/write ++ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +-static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) ++static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); +- s32 ret_val = 0; +- u16 timeout = 0; ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u8 spi_stat_reg; + ++ DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_spi) { ++ u16 timeout = NVM_MAX_RETRY_SPI; ++ + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); +- wr32(E1000_EECD, eecd); +- wrfl(); +- udelay(1); +- timeout = NVM_MAX_RETRY_SPI; ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed +@@ -290,30 +367,28 @@ + * not cleared within 'timeout', then error out. 
+ */ + while (timeout) { +- igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, +- hw->nvm.opcode_bits); +- spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); ++ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, ++ hw->nvm.opcode_bits); ++ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + +- udelay(5); +- igb_standby_nvm(hw); ++ usec_delay(5); ++ e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { +- hw_dbg("SPI NVM Status error\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("SPI NVM Status error\n"); ++ return -E1000_ERR_NVM; + } + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_nvm_spi - Read EEPROM's using SPI ++ * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read +@@ -321,7 +396,7 @@ + * + * Reads a 16 bit word from the EEPROM. + **/ +-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; +@@ -329,51 +404,51 @@ + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + ++ DEBUGFUNC("e1000_read_nvm_spi"); ++ + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); ++ return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- ret_val = igb_ready_nvm_eeprom(hw); ++ ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ +- igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); +- igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); ++ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); ++ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { +- word_in = igb_shift_in_eec_bits(hw, 16); ++ word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + release: + nvm->ops.release(hw); + +-out: + return ret_val; + } + + /** +- * igb_read_nvm_eerd - Reads EEPROM using EERD register ++ * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read +@@ -381,41 +456,44 @@ + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words, +- * and not enough words. ++ * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); ++ return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + +- E1000_NVM_RW_REG_START; ++ E1000_NVM_RW_REG_START; + +- wr32(E1000_EERD, eerd); +- ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); ++ E1000_WRITE_REG(hw, E1000_EERD, eerd); ++ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + +- data[i] = (rd32(E1000_EERD) >> +- E1000_NVM_RW_REG_DATA); ++ data[i] = (E1000_READ_REG(hw, E1000_EERD) >> ++ E1000_NVM_RW_REG_DATA); + } + +-out: ++ if (ret_val) ++ DEBUGOUT1("NVM read error: %d\n", ret_val); ++ + return ret_val; + } + + /** +- * igb_write_nvm_spi - Write to EEPROM using SPI ++ * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write +@@ -424,21 +502,23 @@ + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the +- * EEPROM will most likley contain an invalid checksum. ++ * EEPROM will most likely contain an invalid checksum. + **/ +-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + ++ DEBUGFUNC("e1000_write_nvm_spi"); ++ + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); +- return ret_val; ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); ++ return -E1000_ERR_NVM; + } + + while (widx < words) { +@@ -448,19 +528,19 @@ + if (ret_val) + return ret_val; + +- ret_val = igb_ready_nvm_eeprom(hw); ++ ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ +- igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, ++ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode +@@ -469,24 +549,23 @@ + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ +- igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); +- igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), ++ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); ++ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; +- + word_out = (word_out >> 8) | (word_out << 8); +- igb_shift_out_eec_bits(hw, word_out, 16); ++ e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + break; + } + } +- usleep_range(1000, 2000); ++ msec_delay(10); + nvm->ops.release(hw); + } + +@@ -494,132 +573,199 @@ + } + + /** +- * igb_read_part_string - Read device part number ++ * 
igb_e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure +- * @part_num: pointer to device part number +- * @part_num_size: size of part number buffer ++ * @pba_num: pointer to device part number ++ * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores +- * the value in part_num. ++ * the value in pba_num. + **/ +-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, ++ u32 pba_num_size) + { + s32 ret_val; + u16 nvm_data; +- u16 pointer; ++ u16 pba_ptr; + u16 offset; + u16 length; + +- if (part_num == NULL) { +- hw_dbg("PBA string buffer was null\n"); +- ret_val = E1000_ERR_INVALID_ARGUMENT; +- goto out; ++ DEBUGFUNC("igb_e1000_read_pba_string_generic"); ++ ++ if ((hw->mac.type >= e1000_i210) && ++ !e1000_get_flash_presence_i210(hw)) { ++ DEBUGOUT("Flashless no PBA string\n"); ++ return -E1000_ERR_NVM_PBA_SECTION; ++ } ++ ++ if (pba_num == NULL) { ++ DEBUGOUT("PBA string buffer was null\n"); ++ return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + +- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); ++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which +- * means pointer is actually our second data word for the PBA number ++ * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { +- hw_dbg("NVM PBA number is not stored as string\n"); ++ DEBUGOUT("NVM PBA number is not stored as string\n"); + +- /* we will need 11 characters to store the PBA */ +- if (part_num_size < 11) { +- hw_dbg("PBA string buffer too small\n"); ++ /* make sure callers buffer is big enough to store the PBA */ ++ if (pba_num_size < E1000_PBANUM_LENGTH) { ++ DEBUGOUT("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + +- /* extract hex string from data and pointer */ +- part_num[0] = (nvm_data >> 12) & 0xF; +- part_num[1] = (nvm_data >> 8) & 0xF; +- part_num[2] = (nvm_data >> 4) & 0xF; +- part_num[3] = nvm_data & 0xF; +- part_num[4] = (pointer >> 12) & 0xF; +- part_num[5] = (pointer >> 8) & 0xF; +- part_num[6] = '-'; +- part_num[7] = 0; +- part_num[8] = (pointer >> 4) & 0xF; +- part_num[9] = pointer & 0xF; ++ /* extract hex string from data and pba_ptr */ ++ pba_num[0] = (nvm_data >> 12) & 0xF; ++ pba_num[1] = (nvm_data >> 8) & 0xF; ++ pba_num[2] = (nvm_data >> 4) & 0xF; ++ pba_num[3] = nvm_data & 0xF; ++ pba_num[4] = (pba_ptr >> 12) & 0xF; ++ pba_num[5] = (pba_ptr >> 8) & 0xF; ++ pba_num[6] = '-'; ++ pba_num[7] = 0; ++ pba_num[8] = (pba_ptr >> 4) & 0xF; ++ pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ +- part_num[10] = '\0'; ++ pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { +- if (part_num[offset] < 0xA) +- part_num[offset] += '0'; +- else if (part_num[offset] < 0x10) +- part_num[offset] += 'A' - 0xA; ++ if (pba_num[offset] < 0xA) ++ pba_num[offset] += '0'; ++ else 
if (pba_num[offset] < 0x10) ++ pba_num[offset] += 'A' - 0xA; + } + +- goto out; ++ return E1000_SUCCESS; + } + +- ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); ++ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + if (length == 0xFFFF || length == 0) { +- hw_dbg("NVM PBA number section invalid length\n"); +- ret_val = E1000_ERR_NVM_PBA_SECTION; +- goto out; +- } +- /* check if part_num buffer is big enough */ +- if (part_num_size < (((u32)length * 2) - 1)) { +- hw_dbg("PBA string buffer too small\n"); +- ret_val = E1000_ERR_NO_SPACE; +- goto out; ++ DEBUGOUT("NVM PBA number section invalid length\n"); ++ return -E1000_ERR_NVM_PBA_SECTION; ++ } ++ /* check if pba_num buffer is big enough */ ++ if (pba_num_size < (((u32)length * 2) - 1)) { ++ DEBUGOUT("PBA string buffer too small\n"); ++ return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ +- pointer++; ++ pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { +- ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); ++ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } +- part_num[offset * 2] = (u8)(nvm_data >> 8); +- part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); ++ pba_num[offset * 2] = (u8)(nvm_data >> 8); ++ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } +- part_num[offset * 2] = '\0'; ++ pba_num[offset * 2] = '\0'; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_mac_addr - Read device MAC address ++ * e1000_read_pba_length_generic - Read device part number length ++ * @hw: pointer to the HW structure ++ * @pba_num_size: size of part number buffer ++ * ++ * Reads the product board assembly (PBA) number length from the EEPROM and ++ * stores the value in pba_num_size. ++ **/ ++s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) ++{ ++ s32 ret_val; ++ u16 nvm_data; ++ u16 pba_ptr; ++ u16 length; ++ ++ DEBUGFUNC("e1000_read_pba_length_generic"); ++ ++ if (pba_num_size == NULL) { ++ DEBUGOUT("PBA buffer size was null\n"); ++ return -E1000_ERR_INVALID_ARGUMENT; ++ } ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ /* if data is not ptr guard the PBA must be in legacy format */ ++ if (nvm_data != NVM_PBA_PTR_GUARD) { ++ *pba_num_size = E1000_PBANUM_LENGTH; ++ return E1000_SUCCESS; ++ } ++ ++ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ if (length == 0xFFFF || length == 0) { ++ DEBUGOUT("NVM PBA number section invalid length\n"); ++ return -E1000_ERR_NVM_PBA_SECTION; ++ } ++ ++ /* Convert from length in u16 values to u8 chars, add 1 for NULL, ++ * and subtract 2 because length field is included in length. ++ */ ++ *pba_num_size = ((u32)length * 2) - 1; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. 
+ * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +-s32 igb_read_mac_addr(struct e1000_hw *hw) ++ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw) + { + u32 rar_high; + u32 rar_low; + u16 i; + +- rar_high = rd32(E1000_RAH(0)); +- rar_low = rd32(E1000_RAL(0)); ++ rar_high = E1000_READ_REG(hw, E1000_RAH(0)); ++ rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); +@@ -627,83 +773,104 @@ + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + +- for (i = 0; i < ETH_ALEN; i++) ++ for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_validate_nvm_checksum - Validate EEPROM checksum ++ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +-s32 igb_validate_nvm_checksum(struct e1000_hw *hw) ++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + ++ DEBUGFUNC("e1000_validate_nvm_checksum_generic"); ++ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { +- hw_dbg("NVM Checksum Invalid\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("NVM Checksum Invalid\n"); ++ return -E1000_ERR_NVM; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_update_nvm_checksum - Update EEPROM checksum ++ * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +-s32 igb_update_nvm_checksum(struct e1000_hw *hw) ++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) + { +- s32 ret_val; ++ s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + ++ DEBUGFUNC("e1000_update_nvm_checksum"); ++ + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error while updating checksum.\n"); +- goto out; ++ DEBUGOUT("NVM Read Error while updating checksum.\n"); ++ return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) +- hw_dbg("NVM Write Error while updating checksum.\n"); ++ DEBUGOUT("NVM Write Error while updating checksum.\n"); + +-out: + return ret_val; + } + + /** +- * igb_get_fw_version - Get firmware version information ++ * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure +- * @fw_vers: pointer to output structure + * +- * unsupported MAC types will return all 0 version structure ++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the ++ * extended control register. 
+ **/ +-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) ++static void e1000_reload_nvm_generic(struct e1000_hw *hw) ++{ ++ u32 ctrl_ext; ++ ++ DEBUGFUNC("e1000_reload_nvm_generic"); ++ ++ usec_delay(10); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ctrl_ext |= E1000_CTRL_EXT_EE_RST; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_FLUSH(hw); ++} ++ ++/** ++ * e1000_get_fw_version - Get firmware version information ++ * @hw: pointer to the HW structure ++ * @fw_vers: pointer to output version structure ++ * ++ * unsupported/not present features return 0 in version structure ++ **/ ++void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) + { + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; +@@ -711,17 +878,18 @@ + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + +- /* basic eeprom version numbers and bits used vary by part and by tool +- * used to create the nvm images. Check which data format we have. +- */ +- hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); ++ /* basic eeprom version numbers, bits used vary by part and by tool ++ * used to create the nvm images */ ++ /* Check which data format we have */ + switch (hw->mac.type) { + case e1000_i211: +- igb_read_invm_version(hw, fw_vers); ++ e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: ++ case e1000_i354: ++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ +@@ -736,12 +904,13 @@ + } + break; + case e1000_i210: +- if (!(igb_get_flash_presence_i210(hw))) { +- igb_read_invm_version(hw, fw_vers); ++ if (!(e1000_get_flash_presence_i210(hw))) { ++ e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: ++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && +@@ -769,6 +938,7 @@ + } + break; + default: ++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); +@@ -797,5 +967,11 @@ + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; ++ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { ++ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); ++ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); ++ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | ++ eeprom_verl; + } + } ++ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h +--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,41 +1,30 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. 
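The legacy-format branch of igb_e1000_read_pba_string_generic earlier in e1000_nvm.c turns two NVM words into a short hex string with a '-' separator. A standalone sketch of just that conversion is below; the function name and the sample word values are made up for illustration, while the nibble-to-ASCII logic mirrors the driver code above.

#include <stdint.h>
#include <stdio.h>

/* Decode a legacy-format PBA number: two NVM words become an 11-byte
 * string ("XXXXXX-XXX" plus NUL), matching the layout built above. */
static void pba_decode_legacy(uint16_t nvm_data, uint16_t pba_ptr, char pba[11])
{
        int i;

        /* extract hex nibbles from the two words */
        pba[0] = (nvm_data >> 12) & 0xF;
        pba[1] = (nvm_data >> 8) & 0xF;
        pba[2] = (nvm_data >> 4) & 0xF;
        pba[3] = nvm_data & 0xF;
        pba[4] = (pba_ptr >> 12) & 0xF;
        pba[5] = (pba_ptr >> 8) & 0xF;
        pba[6] = '-';
        pba[7] = 0;
        pba[8] = (pba_ptr >> 4) & 0xF;
        pba[9] = pba_ptr & 0xF;
        pba[10] = '\0';

        /* switch all the data but the '-' to hex characters */
        for (i = 0; i < 10; i++) {
                if (pba[i] < 0xA)
                        pba[i] += '0';
                else if (pba[i] < 0x10)
                        pba[i] += 'A' - 0xA;
        }
}

int main(void)
{
        char pba[11];

        pba_decode_legacy(0x1234, 0xAB5D, pba); /* example words, not real NVM data */
        printf("PBA: %s\n", pba);               /* prints "PBA: 1234AB-05D" */
        return 0;
}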
+- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_NVM_H_ + #define _E1000_NVM_H_ + +-s32 igb_acquire_nvm(struct e1000_hw *hw); +-void igb_release_nvm(struct e1000_hw *hw); +-s32 igb_read_mac_addr(struct e1000_hw *hw); +-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); +-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, +- u32 part_num_size); +-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +-s32 igb_validate_nvm_checksum(struct e1000_hw *hw); +-s32 igb_update_nvm_checksum(struct e1000_hw *hw); +- + struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; +@@ -51,6 +40,31 @@ + u16 or_build; + u16 or_patch; + }; +-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers); ++ ++void e1000_init_nvm_ops_generic(struct e1000_hw *hw); ++s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); ++void e1000_null_nvm_generic(struct e1000_hw *hw); ++s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); ++s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); ++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); ++ ++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); ++s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw); ++s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, ++ u32 pba_num_size); ++s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); ++s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); ++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data); ++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); ++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); ++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data); ++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); ++void e1000_release_nvm_generic(struct e1000_hw *hw); ++void e1000_get_fw_version(struct e1000_hw *hw, ++ struct e1000_fw_version *fw_vers); ++ ++#define E1000_STM_OPCODE 0xDB00 + + #endif +diff -Nu 
a/drivers/net/ethernet/intel/igb/e1000_osdep.h b/drivers/net/ethernet/intel/igb/e1000_osdep.h +--- a/drivers/net/ethernet/intel/igb/e1000_osdep.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_osdep.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,141 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* glue for the OS independent part of e1000 ++ * includes register access macros ++ */ ++ ++#ifndef _E1000_OSDEP_H_ ++#define _E1000_OSDEP_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "kcompat.h" ++ ++#define usec_delay(x) udelay(x) ++#define usec_delay_irq(x) udelay(x) ++#ifndef msec_delay ++#define msec_delay(x) do { \ ++ /* Don't mdelay in interrupt context! */ \ ++ if (in_interrupt()) \ ++ BUG(); \ ++ else \ ++ msleep(x); \ ++} while (0) ++ ++/* Some workarounds require millisecond delays and are run during interrupt ++ * context. Most notably, when establishing link, the phy may need tweaking ++ * but cannot process phy register reads/writes faster than millisecond ++ * intervals...and we establish link due to a "link status change" interrupt. ++ */ ++#define msec_delay_irq(x) mdelay(x) ++ ++#define E1000_READ_REG(x, y) e1000_read_reg(x, y) ++#endif ++ ++#define PCI_COMMAND_REGISTER PCI_COMMAND ++#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE ++#define ETH_ADDR_LEN ETH_ALEN ++ ++#ifdef __BIG_ENDIAN ++#define E1000_BIG_ENDIAN __BIG_ENDIAN ++#endif ++ ++#ifdef DEBUG ++#define DEBUGOUT(S) pr_debug(S) ++#define DEBUGOUT1(S, A...) pr_debug(S, ## A) ++#else ++#define DEBUGOUT(S) ++#define DEBUGOUT1(S, A...) 
++#endif ++ ++#ifdef DEBUG_FUNC ++#define DEBUGFUNC(F) DEBUGOUT(F "\n") ++#else ++#define DEBUGFUNC(F) ++#endif ++#define DEBUGOUT2 DEBUGOUT1 ++#define DEBUGOUT3 DEBUGOUT2 ++#define DEBUGOUT7 DEBUGOUT3 ++ ++#define E1000_REGISTER(a, reg) reg ++ ++/* forward declaration */ ++struct e1000_hw; ++ ++/* write operations, indexed using DWORDS */ ++#define E1000_WRITE_REG(hw, reg, val) \ ++do { \ ++ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ ++ if (!E1000_REMOVED(hw_addr)) \ ++ writel((val), &hw_addr[(reg)]); \ ++} while (0) ++ ++u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); ++ ++#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \ ++ E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val)) ++ ++#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \ ++ e1000_read_reg((hw), (reg) + ((idx) << 2))) ++ ++#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY ++#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY ++ ++#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ ++ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \ ++ ((offset) << 1)))) ++ ++#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ ++ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) ++ ++#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ ++ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) ++ ++#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ ++ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) ++ ++#define E1000_WRITE_REG_IO(a, reg, offset) do { \ ++ outl(reg, ((a)->io_base)); \ ++ outl(offset, ((a)->io_base + 4)); \ ++ } while (0) ++ ++#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) ++ ++#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ ++ writel((value), ((a)->flash_address + reg))) ++ ++#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ ++ writew((value), ((a)->flash_address + reg))) ++ ++#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) ++ ++#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg)) ++ ++#define E1000_REMOVED(h) unlikely(!(h)) ++ ++#endif /* _E1000_OSDEP_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c +--- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,147 +1,271 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ +- +-#include +-#include +- +-#include "e1000_mac.h" +-#include "e1000_phy.h" +- +-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); +-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, +- u16 *phy_ctrl); +-static s32 igb_wait_autoneg(struct e1000_hw *hw); +-static s32 igb_set_master_slave_mode(struct e1000_hw *hw); ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++static s32 e1000_wait_autoneg(struct e1000_hw *hw); + /* Cable length tables */ + static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ +- (sizeof(e1000_m88_cable_length_table) / \ +- sizeof(e1000_m88_cable_length_table[0])) ++ (sizeof(e1000_m88_cable_length_table) / \ ++ sizeof(e1000_m88_cable_length_table[0])) + + static const u16 e1000_igp_2_cable_length_table[] = { +- 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, +- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, +- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, +- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, +- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, +- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, +- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, +- 104, 109, 114, 118, 121, 124}; ++ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, ++ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, ++ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, ++ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, ++ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, ++ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, ++ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, ++ 124}; + #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ +- (sizeof(e1000_igp_2_cable_length_table) / \ +- sizeof(e1000_igp_2_cable_length_table[0])) ++ (sizeof(e1000_igp_2_cable_length_table) / \ ++ sizeof(e1000_igp_2_cable_length_table[0])) ++ ++/** ++ * e1000_init_phy_ops_generic - Initialize PHY function pointers ++ * @hw: pointer to the HW structure ++ * ++ * Setups up the function pointers to no-op functions ++ **/ ++void e1000_init_phy_ops_generic(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ DEBUGFUNC("e1000_init_phy_ops_generic"); ++ ++ /* Initialize function pointers */ ++ phy->ops.init_params = e1000_null_ops_generic; ++ phy->ops.acquire = e1000_null_ops_generic; ++ phy->ops.check_polarity = 
e1000_null_ops_generic; ++ phy->ops.check_reset_block = e1000_null_ops_generic; ++ phy->ops.commit = e1000_null_ops_generic; ++ phy->ops.force_speed_duplex = e1000_null_ops_generic; ++ phy->ops.get_cfg_done = e1000_null_ops_generic; ++ phy->ops.get_cable_length = e1000_null_ops_generic; ++ phy->ops.get_info = e1000_null_ops_generic; ++ phy->ops.set_page = e1000_null_set_page; ++ phy->ops.read_reg = e1000_null_read_reg; ++ phy->ops.read_reg_locked = e1000_null_read_reg; ++ phy->ops.read_reg_page = e1000_null_read_reg; ++ phy->ops.release = e1000_null_phy_generic; ++ phy->ops.reset = e1000_null_ops_generic; ++ phy->ops.set_d0_lplu_state = e1000_null_lplu_state; ++ phy->ops.set_d3_lplu_state = e1000_null_lplu_state; ++ phy->ops.write_reg = e1000_null_write_reg; ++ phy->ops.write_reg_locked = e1000_null_write_reg; ++ phy->ops.write_reg_page = e1000_null_write_reg; ++ phy->ops.power_up = e1000_null_phy_generic; ++ phy->ops.power_down = e1000_null_phy_generic; ++ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; ++ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; ++} ++ ++/** ++ * e1000_null_set_page - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG data) ++{ ++ DEBUGFUNC("e1000_null_set_page"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_read_reg - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) ++{ ++ DEBUGFUNC("e1000_null_read_reg"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_phy_generic - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_phy_generic"); ++ return; ++} ++ ++/** ++ * e1000_null_lplu_state - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, ++ bool E1000_UNUSEDARG active) ++{ ++ DEBUGFUNC("e1000_null_lplu_state"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_write_reg - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) ++{ ++ DEBUGFUNC("e1000_null_write_reg"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_i2c_byte_null - No-op function, return 0 ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: device address ++ * @data: data value read ++ * ++ **/ ++s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG byte_offset, ++ u8 E1000_UNUSEDARG dev_addr, ++ u8 E1000_UNUSEDARG *data) ++{ ++ DEBUGFUNC("e1000_read_i2c_byte_null"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_i2c_byte_null - No-op function, return 0 ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: device address ++ * @data: data value to write ++ * ++ **/ ++s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG byte_offset, ++ u8 E1000_UNUSEDARG dev_addr, ++ u8 E1000_UNUSEDARG data) ++{ ++ DEBUGFUNC("e1000_write_i2c_byte_null"); ++ return E1000_SUCCESS; ++} + + /** +- * igb_check_reset_block - Check if PHY reset is blocked ++ * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + 
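The block above relies on one idiom worth spelling out: e1000_init_phy_ops_generic() pre-fills every phy->ops slot with a no-op stub that simply returns E1000_SUCCESS (or returns void), so a partially initialised ops table is always safe to call through until device-specific setup overrides the entries it actually implements. A minimal sketch of that pattern follows; the names are illustrative only, and it assumes the driver's own types (struct e1000_hw, s32, E1000_SUCCESS) from its headers.

/* Sketch only: the "pre-fill with harmless no-ops" idiom used by
 * e1000_init_phy_ops_generic() above.  Nothing here is part of the patch. */
struct phy_ops_sketch {
	s32 (*read_reg)(struct e1000_hw *hw, u32 offset, u16 *data);
	void (*release)(struct e1000_hw *hw);
};

static s32 sketch_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return E1000_SUCCESS;		/* safe default; touches no hardware */
}

static void sketch_null_release(struct e1000_hw *hw)
{
}

static void sketch_init_ops(struct phy_ops_sketch *ops)
{
	/* every slot gets a stub, so callers never need a NULL check;
	 * device-specific init later replaces the ones it supports */
	ops->read_reg = sketch_null_read_reg;
	ops->release  = sketch_null_release;
}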
* + * Read the PHY management control register and check whether a PHY reset +- * is blocked. If a reset is not blocked return 0, otherwise ++ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +-s32 igb_check_reset_block(struct e1000_hw *hw) ++s32 e1000_check_reset_block_generic(struct e1000_hw *hw) + { + u32 manc; + +- manc = rd32(E1000_MANC); ++ DEBUGFUNC("e1000_check_reset_block"); ++ ++ manc = E1000_READ_REG(hw, E1000_MANC); + +- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; ++ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? ++ E1000_BLK_PHY_RESET : E1000_SUCCESS; + } + + /** +- * igb_get_phy_id - Retrieve the PHY ID and revision ++ * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +-s32 igb_get_phy_id(struct e1000_hw *hw) ++s32 e1000_get_phy_id(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 phy_id; + ++ DEBUGFUNC("e1000_get_phy_id"); ++ ++ if (!phy->ops.read_reg) ++ return E1000_SUCCESS; ++ + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) +- goto out; ++ return ret_val; + + phy->id = (u32)(phy_id << 16); +- udelay(20); ++ usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) +- goto out; ++ return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_phy_reset_dsp - Reset PHY DSP ++ * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +-static s32 igb_phy_reset_dsp(struct e1000_hw *hw) ++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + +- if (!(hw->phy.ops.write_reg)) +- goto out; ++ DEBUGFUNC("e1000_phy_reset_dsp_generic"); ++ ++ if (!hw->phy.ops.write_reg) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) +- goto out; +- +- ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); ++ return ret_val; + +-out: +- return ret_val; ++ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + } + + /** +- * igb_read_phy_reg_mdic - Read MDI control register ++ * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * +- * Reads the MDI control regsiter in the PHY at offset and stores the ++ * Reads the MDI control register in the PHY at offset and stores the + * information read to data. 
+ **/ +-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { +- hw_dbg("PHY Address %d is out of range\n", offset); +- ret_val = -E1000_ERR_PARAM; +- goto out; ++ DEBUGOUT1("PHY Address %d is out of range\n", offset); ++ return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI +@@ -152,52 +276,55 @@ + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + +- wr32(E1000_MDIC, mdic); ++ E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { +- udelay(50); +- mdic = rd32(E1000_MDIC); ++ usec_delay_irq(50); ++ mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { +- hw_dbg("MDI Read did not complete\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Read did not complete\n"); ++ return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { +- hw_dbg("MDI Error\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Error\n"); ++ return -E1000_ERR_PHY; ++ } ++ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { ++ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", ++ offset, ++ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); ++ return -E1000_ERR_PHY; + } + *data = (u16) mdic; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_phy_reg_mdic - Write MDI control register ++ * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
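One detail that distinguishes these MDIC helpers from the igb versions being replaced above: besides polling READY and checking ERROR, the completed transaction is also verified to have targeted the requested register, and a mismatch is reported as a PHY error. A condensed sketch of that extra check, reusing only the masks and shifts visible in this hunk (the helper name itself is illustrative, not from the patch):

/* Illustrative helper, not part of the patch: true when an MDIC completion
 * word refers to the register offset that was actually requested. */
static bool sketch_mdic_offset_ok(u32 mdic, u32 offset)
{
	u32 completed = (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT;

	return completed == offset;	/* a mismatch is treated as -E1000_ERR_PHY */
}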
+ **/ +-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { +- hw_dbg("PHY Address %d is out of range\n", offset); +- ret_val = -E1000_ERR_PARAM; +- goto out; ++ DEBUGOUT1("PHY Address %d is out of range\n", offset); ++ return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI +@@ -209,35 +336,38 @@ + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + +- wr32(E1000_MDIC, mdic); ++ E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { +- udelay(50); +- mdic = rd32(E1000_MDIC); ++ usec_delay_irq(50); ++ mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { +- hw_dbg("MDI Write did not complete\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Write did not complete\n"); ++ return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { +- hw_dbg("MDI Error\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Error\n"); ++ return -E1000_ERR_PHY; ++ } ++ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { ++ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", ++ offset, ++ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); ++ return -E1000_ERR_PHY; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_phy_reg_i2c - Read PHY register using i2c ++ * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data +@@ -245,11 +375,13 @@ + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + ++ DEBUGFUNC("e1000_read_phy_reg_i2c"); ++ + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. +@@ -258,47 +390,49 @@ + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + +- wr32(E1000_I2CCMD, i2ccmd); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { +- udelay(50); +- i2ccmd = rd32(E1000_I2CCMD); ++ usec_delay(50); ++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { +- hw_dbg("I2CCMD Read did not complete\n"); ++ DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { +- hw_dbg("I2CCMD Error bit set\n"); ++ DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. 
*/ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_phy_reg_i2c - Write PHY register using i2c ++ * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + ++ DEBUGFUNC("e1000_write_phy_reg_i2c"); ++ + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { +- hw_dbg("PHY I2C Address %d is out of range.\n", ++ DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } +@@ -315,29 +449,29 @@ + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + +- wr32(E1000_I2CCMD, i2ccmd); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { +- udelay(50); +- i2ccmd = rd32(E1000_I2CCMD); ++ usec_delay(50); ++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { +- hw_dbg("I2CCMD Write did not complete\n"); ++ DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { +- hw_dbg("I2CCMD Error bit set\n"); ++ DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_sfp_data_byte - Reads SFP module data. ++ * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer +@@ -349,14 +483,16 @@ + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +-s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) ++s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) + { + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + ++ DEBUGFUNC("e1000_read_sfp_data_byte"); ++ + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { +- hw_dbg("I2CCMD command address exceeds upper limit\n"); ++ DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + +@@ -367,30 +503,103 @@ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + +- wr32(E1000_I2CCMD, i2ccmd); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { +- udelay(50); +- data_local = rd32(E1000_I2CCMD); ++ usec_delay(50); ++ data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { +- hw_dbg("I2CCMD Read did not complete\n"); ++ DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { +- hw_dbg("I2CCMD Error bit set\n"); ++ DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_phy_reg_igp - Read igp PHY register ++ * e1000_write_sfp_data_byte - Writes SFP module data. 
++ * @hw: pointer to the HW structure ++ * @offset: byte location offset to write to ++ * @data: data to write ++ * ++ * Writes one byte to SFP module data stored ++ * in SFP resided EEPROM memory or SFP diagnostic area. ++ * Function should be called with ++ * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access ++ * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters ++ * access ++ **/ ++s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) ++{ ++ u32 i = 0; ++ u32 i2ccmd = 0; ++ u32 data_local = 0; ++ ++ DEBUGFUNC("e1000_write_sfp_data_byte"); ++ ++ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { ++ DEBUGOUT("I2CCMD command address exceeds upper limit\n"); ++ return -E1000_ERR_PHY; ++ } ++ /* The programming interface is 16 bits wide ++ * so we need to read the whole word first ++ * then update appropriate byte lane and write ++ * the updated word back. ++ */ ++ /* Set up Op-code, EEPROM Address,in the I2CCMD ++ * register. The MAC will take care of interfacing ++ * with an EEPROM to write the data given. ++ */ ++ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | ++ E1000_I2CCMD_OPCODE_READ); ++ /* Set a command to read single word */ ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); ++ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { ++ usec_delay(50); ++ /* Poll the ready bit to see if lastly ++ * launched I2C operation completed ++ */ ++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); ++ if (i2ccmd & E1000_I2CCMD_READY) { ++ /* Check if this is READ or WRITE phase */ ++ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == ++ E1000_I2CCMD_OPCODE_READ) { ++ /* Write the selected byte ++ * lane and update whole word ++ */ ++ data_local = i2ccmd & 0xFF00; ++ data_local |= data; ++ i2ccmd = ((offset << ++ E1000_I2CCMD_REG_ADDR_SHIFT) | ++ E1000_I2CCMD_OPCODE_WRITE | data_local); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); ++ } else { ++ break; ++ } ++ } ++ } ++ if (!(i2ccmd & E1000_I2CCMD_READY)) { ++ DEBUGOUT("I2CCMD Write did not complete\n"); ++ return -E1000_ERR_PHY; ++ } ++ if (i2ccmd & E1000_I2CCMD_ERROR) { ++ DEBUGOUT("I2CCMD Error bit set\n"); ++ return -E1000_ERR_PHY; ++ } ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data +@@ -399,38 +608,29 @@ + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
+ **/ +-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) + { +- s32 ret_val = 0; ++ s32 ret_val; + +- if (!(hw->phy.ops.acquire)) +- goto out; ++ DEBUGFUNC("e1000_read_phy_reg_m88"); ++ ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) +- goto out; +- +- if (offset > MAX_PHY_MULTI_PAGE_REG) { +- ret_val = igb_write_phy_reg_mdic(hw, +- IGP01E1000_PHY_PAGE_SELECT, +- (u16)offset); +- if (ret_val) { +- hw->phy.ops.release(hw); +- goto out; +- } +- } ++ return ret_val; + +- ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, +- data); ++ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, ++ data); + + hw->phy.ops.release(hw); + +-out: + return ret_val; + } + + /** +- * igb_write_phy_reg_igp - Write igp PHY register ++ * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset +@@ -438,80 +638,415 @@ + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) + { +- s32 ret_val = 0; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_write_phy_reg_m88"); + +- if (!(hw->phy.ops.acquire)) +- goto out; ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- if (offset > MAX_PHY_MULTI_PAGE_REG) { +- ret_val = igb_write_phy_reg_mdic(hw, +- IGP01E1000_PHY_PAGE_SELECT, +- (u16)offset); +- if (ret_val) { +- hw->phy.ops.release(hw); +- goto out; +- } ++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, ++ data); ++ ++ hw->phy.ops.release(hw); ++ ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_set_page_igp - Set page as on IGP-like PHY(s) ++ * @hw: pointer to the HW structure ++ * @page: page to set (shifted left when necessary) ++ * ++ * Sets PHY page required for PHY register access. Assumes semaphore is ++ * already acquired. Note, this function sets phy.addr to 1 so the caller ++ * must set it appropriately (if necessary) after this function returns. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page) ++{ ++ DEBUGFUNC("igb_e1000_set_page_igp"); ++ ++ DEBUGOUT1("Setting page 0x%x\n", page); ++ ++ hw->phy.addr = 1; ++ ++ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); ++} ++ ++/** ++ * __e1000_read_phy_reg_igp - Read igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary, then reads the PHY register at offset ++ * and stores the retrieved information in data. Release any acquired ++ * semaphores before exiting. 
++ **/ ++static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, ++ bool locked) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("__e1000_read_phy_reg_igp"); ++ ++ if (!locked) { ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; + } + +- ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, +- data); ++ if (offset > MAX_PHY_MULTI_PAGE_REG) ++ ret_val = e1000_write_phy_reg_mdic(hw, ++ IGP01E1000_PHY_PAGE_SELECT, ++ (u16)offset); ++ if (!ret_val) ++ ret_val = e1000_read_phy_reg_mdic(hw, ++ MAX_PHY_REG_ADDRESS & offset, ++ data); ++ if (!locked) ++ hw->phy.ops.release(hw); + +- hw->phy.ops.release(hw); ++ return ret_val; ++} ++ ++/** ++ * e1000_read_phy_reg_igp - Read igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Acquires semaphore then reads the PHY register at offset and stores the ++ * retrieved information in data. ++ * Release the acquired semaphore before exiting. ++ **/ ++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_phy_reg_igp(hw, offset, data, false); ++} ++ ++/** ++ * e1000_read_phy_reg_igp_locked - Read igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Reads the PHY register at offset and stores the retrieved information ++ * in data. Assumes semaphore already acquired. ++ **/ ++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_phy_reg_igp(hw, offset, data, true); ++} ++ ++/** ++ * e1000_write_phy_reg_igp - Write igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary, then writes the data to PHY register ++ * at the offset. Release any acquired semaphores before exiting. ++ **/ ++static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, ++ bool locked) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_write_phy_reg_igp"); ++ ++ if (!locked) { ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ if (offset > MAX_PHY_MULTI_PAGE_REG) ++ ret_val = e1000_write_phy_reg_mdic(hw, ++ IGP01E1000_PHY_PAGE_SELECT, ++ (u16)offset); ++ if (!ret_val) ++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & ++ offset, ++ data); ++ if (!locked) ++ hw->phy.ops.release(hw); + +-out: + return ret_val; + } + + /** +- * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link ++ * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset + * +- * Sets up Carrier-sense on Transmit and downshift values. ++ * Acquires semaphore then writes the data to PHY register ++ * at the offset. Release any acquired semaphores before exiting. 
+ **/ +-s32 igb_copper_link_setup_82580(struct e1000_hw *hw) ++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_phy_reg_igp(hw, offset, data, false); ++} ++ ++/** ++ * e1000_write_phy_reg_igp_locked - Write igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * ++ * Writes the data to PHY register at the offset. ++ * Assumes semaphore already acquired. ++ **/ ++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_phy_reg_igp(hw, offset, data, true); ++} ++ ++/** ++ * __e1000_read_kmrn_reg - Read kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary. Then reads the PHY register at offset ++ * using the kumeran interface. The information retrieved is stored in data. ++ * Release any acquired semaphores before exiting. ++ **/ ++static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, ++ bool locked) ++{ ++ u32 kmrnctrlsta; ++ ++ DEBUGFUNC("__e1000_read_kmrn_reg"); ++ ++ if (!locked) { ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & ++ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); ++ E1000_WRITE_FLUSH(hw); ++ ++ usec_delay(2); ++ ++ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); ++ *data = (u16)kmrnctrlsta; ++ ++ if (!locked) ++ hw->phy.ops.release(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_kmrn_reg_generic - Read kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Acquires semaphore then reads the PHY register at offset using the ++ * kumeran interface. The information retrieved is stored in data. ++ * Release the acquired semaphore before exiting. ++ **/ ++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_kmrn_reg(hw, offset, data, false); ++} ++ ++/** ++ * e1000_read_kmrn_reg_locked - Read kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Reads the PHY register at offset using the kumeran interface. The ++ * information retrieved is stored in data. ++ * Assumes semaphore already acquired. ++ **/ ++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_kmrn_reg(hw, offset, data, true); ++} ++ ++/** ++ * __e1000_write_kmrn_reg - Write kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary. Then write the data to PHY register ++ * at the offset using the kumeran interface. Release any acquired semaphores ++ * before exiting. 
++ **/ ++static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, ++ bool locked) ++{ ++ u32 kmrnctrlsta; ++ ++ DEBUGFUNC("e1000_write_kmrn_reg_generic"); ++ ++ if (!locked) { ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & ++ E1000_KMRNCTRLSTA_OFFSET) | data; ++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); ++ E1000_WRITE_FLUSH(hw); ++ ++ usec_delay(2); ++ ++ if (!locked) ++ hw->phy.ops.release(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_kmrn_reg_generic - Write kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * ++ * Acquires semaphore then writes the data to the PHY register at the offset ++ * using the kumeran interface. Release the acquired semaphore before exiting. ++ **/ ++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_kmrn_reg(hw, offset, data, false); ++} ++ ++/** ++ * e1000_write_kmrn_reg_locked - Write kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * ++ * Write the data to PHY register at the offset using the kumeran interface. ++ * Assumes semaphore already acquired. ++ **/ ++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_kmrn_reg(hw, offset, data, true); ++} ++ ++/** ++ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode ++ * @hw: pointer to the HW structure ++ * ++ * Sets up Master/slave mode ++ **/ ++static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) + { +- struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + +- if (phy->reset_disable) { +- ret_val = 0; +- goto out; ++ /* Resolve Master/Slave mode */ ++ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); ++ if (ret_val) ++ return ret_val; ++ ++ /* load defaults for future use */ ++ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? ++ ((phy_data & CR_1000T_MS_VALUE) ? ++ e1000_ms_force_master : ++ e1000_ms_force_slave) : e1000_ms_auto; ++ ++ switch (hw->phy.ms_type) { ++ case e1000_ms_force_master: ++ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); ++ break; ++ case e1000_ms_force_slave: ++ phy_data |= CR_1000T_MS_ENABLE; ++ phy_data &= ~(CR_1000T_MS_VALUE); ++ break; ++ case e1000_ms_auto: ++ phy_data &= ~CR_1000T_MS_ENABLE; ++ /* fall-through */ ++ default: ++ break; + } + +- if (phy->type == e1000_phy_82580) { ++ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); ++} ++ ++/** ++ * igb_e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link ++ * @hw: pointer to the HW structure ++ * ++ * Sets up Carrier-sense on Transmit and downshift values. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 phy_data; ++ ++ DEBUGFUNC("igb_e1000_copper_link_setup_82577"); ++ ++ if (hw->phy.reset_disable) ++ return E1000_SUCCESS; ++ ++ if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { +- hw_dbg("Error resetting the PHY.\n"); +- goto out; ++ DEBUGOUT("Error resetting the PHY.\n"); ++ return ret_val; + } + } + +- /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ +- ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); ++ /* Enable CRS on Tx. This must be set for half-duplex operation. */ ++ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; ++ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ +- phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; ++ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + +- ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); ++ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Set MDI/MDIX mode */ +- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); ++ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) +- goto out; +- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; ++ return ret_val; ++ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode +@@ -521,41 +1056,42 @@ + case 1: + break; + case 2: +- phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; ++ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: +- phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; ++ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } +- ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); ++ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); ++ if (ret_val) ++ return ret_val; + +-out: +- return ret_val; ++ return e1000_set_master_slave_mode(hw); + } + + /** +- * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link ++ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +-s32 igb_copper_link_setup_m88(struct e1000_hw *hw) ++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + +- if (phy->reset_disable) { +- ret_val = 0; +- goto out; +- } ++ DEBUGFUNC("e1000_copper_link_setup_m88"); ++ ++ if (phy->reset_disable) ++ return E1000_SUCCESS; + +- /* Enable CRS on TX. This must be set for half-duplex operation. */ ++ /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + +@@ -591,12 +1127,12 @@ + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; +- if (phy->disable_polarity_correction == 1) ++ if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register +@@ -605,7 +1141,7 @@ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + +@@ -617,42 +1153,43 @@ + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | +- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); ++ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) +- goto out; ++ return ret_val; + } + + /* Commit the changes. */ +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = phy->ops.commit(hw); + if (ret_val) { +- hw_dbg("Error committing the PHY changes\n"); +- goto out; ++ DEBUGOUT("Error committing the PHY changes\n"); ++ return ret_val; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link ++ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) ++s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + ++ DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); ++ + if (phy->reset_disable) +- return 0; ++ return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); +@@ -694,7 +1231,7 @@ + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; +- if (phy->disable_polarity_correction == 1) ++ if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ +@@ -705,9 +1242,9 @@ + if (ret_val) + return ret_val; + +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = phy->ops.commit(hw); + if (ret_val) { +- hw_dbg("Error committing the PHY changes\n"); ++ DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } +@@ -721,70 +1258,60 @@ + return ret_val; + + /* Commit the changes. 
*/ +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = phy->ops.commit(hw); + if (ret_val) { +- hw_dbg("Error committing the PHY changes\n"); ++ DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } +- ret_val = igb_set_master_slave_mode(hw); ++ ++ ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_copper_link_setup_igp - Setup igp PHY's for copper link ++ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +-s32 igb_copper_link_setup_igp(struct e1000_hw *hw) ++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + +- if (phy->reset_disable) { +- ret_val = 0; +- goto out; +- } ++ DEBUGFUNC("e1000_copper_link_setup_igp"); ++ ++ if (phy->reset_disable) ++ return E1000_SUCCESS; + +- ret_val = phy->ops.reset(hw); ++ ret_val = hw->phy.ops.reset(hw); + if (ret_val) { +- hw_dbg("Error resetting the PHY.\n"); +- goto out; ++ DEBUGOUT("Error resetting the PHY.\n"); ++ return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ +- msleep(100); ++ msec_delay(100); + +- /* The NVM settings will configure LPLU in D3 for +- * non-IGP1 PHYs. +- */ +- if (phy->type == e1000_phy_igp) { +- /* disable lplu d3 during driver init */ +- if (phy->ops.set_d3_lplu_state) +- ret_val = phy->ops.set_d3_lplu_state(hw, false); ++ /* disable lplu d0 during driver init */ ++ if (hw->phy.ops.set_d0_lplu_state) { ++ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { +- hw_dbg("Error Disabling LPLU D3\n"); +- goto out; ++ DEBUGOUT("Error Disabling LPLU D0\n"); ++ return ret_val; + } + } +- +- /* disable lplu d0 during driver init */ +- ret_val = phy->ops.set_d0_lplu_state(hw, false); +- if (ret_val) { +- hw_dbg("Error Disabling LPLU D0\n"); +- goto out; +- } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + +@@ -802,7 +1329,7 @@ + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) +- goto out; ++ return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { +@@ -816,124 +1343,34 @@ + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) +- goto out; +- } +- +- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); +- if (ret_val) +- goto out; +- +- /* load defaults for future use */ +- phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? +- ((data & CR_1000T_MS_VALUE) ? 
+- e1000_ms_force_master : +- e1000_ms_force_slave) : +- e1000_ms_auto; +- +- switch (phy->ms_type) { +- case e1000_ms_force_master: +- data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); +- break; +- case e1000_ms_force_slave: +- data |= CR_1000T_MS_ENABLE; +- data &= ~(CR_1000T_MS_VALUE); +- break; +- case e1000_ms_auto: +- data &= ~CR_1000T_MS_ENABLE; +- default: +- break; ++ return ret_val; + } +- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); +- if (ret_val) +- goto out; +- } +- +-out: +- return ret_val; +-} +- +-/** +- * igb_copper_link_autoneg - Setup/Enable autoneg for copper link +- * @hw: pointer to the HW structure +- * +- * Performs initial bounds checking on autoneg advertisement parameter, then +- * configure to advertise the full capability. Setup the PHY to autoneg +- * and restart the negotiation process between the link partner. If +- * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. +- **/ +-static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +-{ +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; +- u16 phy_ctrl; +- +- /* Perform some bounds checking on the autoneg advertisement +- * parameter. +- */ +- phy->autoneg_advertised &= phy->autoneg_mask; +- +- /* If autoneg_advertised is zero, we assume it was not defaulted +- * by the calling code so we set to advertise full capability. +- */ +- if (phy->autoneg_advertised == 0) +- phy->autoneg_advertised = phy->autoneg_mask; +- +- hw_dbg("Reconfiguring auto-neg advertisement params\n"); +- ret_val = igb_phy_setup_autoneg(hw); +- if (ret_val) { +- hw_dbg("Error Setting up Auto-Negotiation\n"); +- goto out; +- } +- hw_dbg("Restarting Auto-Neg\n"); +- +- /* Restart auto-negotiation by setting the Auto Neg Enable bit and +- * the Auto Neg Restart bit in the PHY control register. +- */ +- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); +- if (ret_val) +- goto out; + +- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); +- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); +- if (ret_val) +- goto out; +- +- /* Does the user want to wait for Auto-Neg to complete here, or +- * check at a later time (for example, callback routine). +- */ +- if (phy->autoneg_wait_to_complete) { +- ret_val = igb_wait_autoneg(hw); +- if (ret_val) { +- hw_dbg("Error while waiting for autoneg to complete\n"); +- goto out; +- } ++ ret_val = e1000_set_master_slave_mode(hw); + } + +- hw->mac.get_link_status = true; +- +-out: + return ret_val; + } + + /** +- * igb_phy_setup_autoneg - Configure PHY for auto-negotiation ++ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control +@@ -941,26 +1378,28 @@ + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) ++static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + ++ DEBUGFUNC("e1000_phy_setup_autoneg"); ++ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) +- goto out; ++ return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). 
*/ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) +- goto out; ++ return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up +@@ -980,39 +1419,39 @@ + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + +- hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); ++ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { +- hw_dbg("Advertise 10mb Half duplex\n"); ++ DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { +- hw_dbg("Advertise 10mb Full duplex\n"); ++ DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { +- hw_dbg("Advertise 100mb Half duplex\n"); ++ DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { +- hw_dbg("Advertise 100mb Full duplex\n"); ++ DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) +- hw_dbg("Advertise 1000mb Half duplex request denied!\n"); ++ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { +- hw_dbg("Advertise 1000mb Full duplex\n"); ++ DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + +@@ -1029,68 +1468,126 @@ + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). +- * 3: Both Rx and TX flow control (symmetric) are enabled. ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: +- /* Flow control (RX & TX) is completely disabled by a ++ /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: +- /* RX Flow control is enabled, and TX Flow control is ++ /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are +- * capable of RX Pause ONLY, we will advertise that we +- * support both symmetric and asymmetric RX PAUSE. Later ++ * capable of Rx Pause ONLY, we will advertise that we ++ * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: +- /* TX Flow control is enabled, and RX Flow control is ++ /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. 
+ */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: +- /* Flow control (both RX and TX) is enabled by a software ++ /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: +- hw_dbg("Flow control param set incorrectly\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Flow control param set incorrectly\n"); ++ return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) +- goto out; ++ return ret_val; + +- hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); ++ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + +- if (phy->autoneg_mask & ADVERTISE_1000_FULL) { +- ret_val = phy->ops.write_reg(hw, +- PHY_1000T_CTRL, ++ if (phy->autoneg_mask & ADVERTISE_1000_FULL) ++ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); +- if (ret_val) +- goto out; ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link ++ * @hw: pointer to the HW structure ++ * ++ * Performs initial bounds checking on autoneg advertisement parameter, then ++ * configure to advertise the full capability. Setup the PHY to autoneg ++ * and restart the negotiation process between the link partner. If ++ * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. ++ **/ ++static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 phy_ctrl; ++ ++ DEBUGFUNC("e1000_copper_link_autoneg"); ++ ++ /* Perform some bounds checking on the autoneg advertisement ++ * parameter. ++ */ ++ phy->autoneg_advertised &= phy->autoneg_mask; ++ ++ /* If autoneg_advertised is zero, we assume it was not defaulted ++ * by the calling code so we set to advertise full capability. ++ */ ++ if (!phy->autoneg_advertised) ++ phy->autoneg_advertised = phy->autoneg_mask; ++ ++ DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); ++ ret_val = e1000_phy_setup_autoneg(hw); ++ if (ret_val) { ++ DEBUGOUT("Error Setting up Auto-Negotiation\n"); ++ return ret_val; ++ } ++ DEBUGOUT("Restarting Auto-Neg\n"); ++ ++ /* Restart auto-negotiation by setting the Auto Neg Enable bit and ++ * the Auto Neg Restart bit in the PHY control register. ++ */ ++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); ++ if (ret_val) ++ return ret_val; ++ ++ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); ++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); ++ if (ret_val) ++ return ret_val; ++ ++ /* Does the user want to wait for Auto-Neg to complete here, or ++ * check at a later time (for example, callback routine). ++ */ ++ if (phy->autoneg_wait_to_complete) { ++ ret_val = e1000_wait_autoneg(hw); ++ if (ret_val) { ++ DEBUGOUT("Error while waiting for autoneg to complete\n"); ++ return ret_val; ++ } + } + +-out: ++ hw->mac.get_link_status = true; ++ + return ret_val; + } + + /** +- * igb_setup_copper_link - Configure copper link settings ++ * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced +@@ -1098,129 +1595,134 @@ + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). 
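Looking back at the flow-control switch in e1000_phy_setup_autoneg() above, the four fc.current_mode settings collapse into a simple mapping onto the two 802.3x advertisement bits: NWAY_AR_PAUSE for symmetric pause and NWAY_AR_ASM_DIR for asymmetric pause. A compact, illustrative restatement of that mapping, using only identifiers that appear in this hunk (the function name is made up for the sketch):

/* Sketch only: which pause bits each flow-control mode advertises.
 * rx_pause deliberately advertises both bits; the transmit side is then
 * disabled later, after link-up, as the comment above explains. */
static u16 sketch_fc_adv_bits(struct e1000_hw *hw)
{
	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		return 0;
	case e1000_fc_rx_pause:
		return NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
	case e1000_fc_tx_pause:
		return NWAY_AR_ASM_DIR;
	case e1000_fc_full:
		return NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
	default:
		return 0;	/* flagged as -E1000_ERR_CONFIG in the code above */
	}
}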
+ **/ +-s32 igb_setup_copper_link(struct e1000_hw *hw) ++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) + { + s32 ret_val; + bool link; + ++ DEBUGFUNC("e1000_setup_copper_link_generic"); ++ + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ +- ret_val = igb_copper_link_autoneg(hw); ++ ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) +- goto out; ++ return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ +- hw_dbg("Forcing Speed and Duplex\n"); ++ DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { +- hw_dbg("Error Forcing Speed and Duplex\n"); +- goto out; ++ DEBUGOUT("Error Forcing Speed and Duplex\n"); ++ return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ +- ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); ++ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, ++ &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (link) { +- hw_dbg("Valid link established!!!\n"); +- igb_config_collision_dist(hw); +- ret_val = igb_config_fc_after_link_up(hw); ++ DEBUGOUT("Valid link established!!!\n"); ++ hw->mac.ops.config_collision_dist(hw); ++ ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { +- hw_dbg("Unable to establish link!!!\n"); ++ DEBUGOUT("Unable to establish link!!!\n"); + } + +-out: + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY ++ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) ++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); ++ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- igb_phy_force_speed_duplex_setup(hw, &phy_data); ++ e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- hw_dbg("IGP PSCR: %X\n", phy_data); ++ DEBUGOUT1("IGP PSCR: %X\n", phy_data); + +- udelay(1); ++ usec_delay(1); + + if (phy->autoneg_wait_to_complete) { +- hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); ++ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) +- hw_dbg("Link taking longer than expected.\n"); ++ DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); +- if (ret_val) +- goto out; ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + } + +-out: + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY ++ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. +- * After reset, TX_CLK and CRS on TX must be set. Return successful upon ++ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) ++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); ++ + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 +@@ -1229,45 +1731,49 @@ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- hw_dbg("M88E1000 PSCR: %X\n", phy_data); ++ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- igb_phy_force_speed_duplex_setup(hw, &phy_data); ++ e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Reset the phy to commit changes. 
*/ +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = hw->phy.ops.commit(hw); + if (ret_val) +- goto out; ++ return ret_val; + + if (phy->autoneg_wait_to_complete) { +- hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); ++ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: ++ case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; +@@ -1276,9 +1782,10 @@ + reset_dsp = false; + break; + } +- if (!reset_dsp) +- hw_dbg("Link taking longer than expected.\n"); +- else { ++ ++ if (!reset_dsp) { ++ DEBUGOUT("Link taking longer than expected.\n"); ++ } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ +@@ -1286,29 +1793,35 @@ + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) +- goto out; +- ret_val = igb_phy_reset_dsp(hw); ++ return ret_val; ++ ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) +- goto out; ++ return ret_val; + } + } + + /* Try once more */ +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, +- 100000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + } + +- if (hw->phy.type != e1000_phy_m88 || +- hw->phy.id == I347AT4_E_PHY_ID || +- hw->phy.id == M88E1112_E_PHY_ID || +- hw->phy.id == I210_I_PHY_ID) +- goto out; ++ if (hw->phy.type != e1000_phy_m88) ++ return E1000_SUCCESS; + ++ if (hw->phy.id == I347AT4_E_PHY_ID || ++ hw->phy.id == M88E1340M_E_PHY_ID || ++ hw->phy.id == M88E1112_E_PHY_ID) ++ return E1000_SUCCESS; ++ if (hw->phy.id == I210_I_PHY_ID) ++ return E1000_SUCCESS; ++ if ((hw->phy.id == M88E1543_E_PHY_ID) || ++ (hw->phy.id == M88E1512_E_PHY_ID)) ++ return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from +@@ -1317,24 +1830,88 @@ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +-out: + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex ++ * igb_e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex ++ * @hw: pointer to the HW structure ++ * ++ * Forces the speed and duplex settings of the PHY. ++ * This is a function pointer entry point only called by ++ * PHY setup routines. 
++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data; ++ bool link; ++ ++ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_ife"); ++ ++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ ++ e1000_phy_force_speed_duplex_setup(hw, &data); ++ ++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); ++ if (ret_val) ++ return ret_val; ++ ++ /* Disable MDI-X support for 10/100 */ ++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ ++ data &= ~IFE_PMC_AUTO_MDIX; ++ data &= ~IFE_PMC_FORCE_MDIX; ++ ++ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); ++ if (ret_val) ++ return ret_val; ++ ++ DEBUGOUT1("IFE PMC: %X\n", data); ++ ++ usec_delay(1); ++ ++ if (phy->autoneg_wait_to_complete) { ++ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); ++ if (ret_val) ++ return ret_val; ++ ++ if (!link) ++ DEBUGOUT("Link taking longer than expected.\n"); ++ ++ /* Try once more */ ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * +@@ -1345,17 +1922,18 @@ + * caller must write to the PHY_CONTROL register for these settings to + * take affect. + **/ +-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, +- u16 *phy_ctrl) ++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) + { + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + ++ DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); ++ + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + +@@ -1369,33 +1947,32 @@ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; +- hw_dbg("Half Duplex\n"); ++ DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; +- hw_dbg("Full Duplex\n"); ++ DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? 
*/ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; +- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); +- hw_dbg("Forcing 100mb\n"); ++ *phy_ctrl &= ~MII_CR_SPEED_1000; ++ DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); +- *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); +- hw_dbg("Forcing 10mb\n"); ++ DEBUGOUT("Forcing 10mb\n"); + } + +- igb_config_collision_dist(hw); ++ hw->mac.ops.config_collision_dist(hw); + +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + + /** +- * igb_set_d3_lplu_state - Sets low power link up state for D3 ++ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * +@@ -1408,25 +1985,27 @@ + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) ++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val; + u16 data; + +- if (!(hw->phy.ops.read_reg)) +- goto out; ++ DEBUGFUNC("e1000_set_d3_lplu_state_generic"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) +- goto out; ++ return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) +- goto out; ++ return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable +@@ -1437,176 +2016,219 @@ + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) +- goto out; ++ return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) +- goto out; ++ return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, +- &data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) +- goto out; ++ return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, +- data); ++ data); + if (ret_val) +- goto out; ++ return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +-out: + return ret_val; + } + + /** +- * igb_check_downshift - Checks whether a downshift in speed occurred ++ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
+ **/ +-s32 igb_check_downshift(struct e1000_hw *hw) ++s32 e1000_check_downshift_generic(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + ++ DEBUGFUNC("e1000_check_downshift_generic"); ++ + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: +- offset = M88E1000_PHY_SPEC_STATUS; +- mask = M88E1000_PSSR_DOWNSHIFT; ++ offset = M88E1000_PHY_SPEC_STATUS; ++ mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: +- case e1000_phy_igp: + case e1000_phy_igp_3: +- offset = IGP01E1000_PHY_LINK_HEALTH; +- mask = IGP01E1000_PLHR_SS_DOWNGRADE; ++ offset = IGP01E1000_PHY_LINK_HEALTH; ++ mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; +- ret_val = 0; +- goto out; ++ return E1000_SUCCESS; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) +- phy->speed_downgraded = (phy_data & mask) ? true : false; ++ phy->speed_downgraded = !!(phy_data & mask); + +-out: + return ret_val; + } + + /** +- * igb_check_polarity_m88 - Checks the polarity. ++ * igb_e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +-s32 igb_check_polarity_m88(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + ++ DEBUGFUNC("igb_e1000_check_polarity_m88"); ++ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) +- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) +- ? e1000_rev_polarity_reversed +- : e1000_rev_polarity_normal; ++ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); ++ ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_check_polarity_igp - Checks the polarity. ++ * @hw: pointer to the HW structure ++ * ++ * Success returns 0, Failure returns -E1000_ERR_PHY (-2) ++ * ++ * Polarity is determined based on the PHY port status register, and the ++ * current speed (since there is no polarity at 100Mbps). ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data, offset, mask; ++ ++ DEBUGFUNC("igb_e1000_check_polarity_igp"); ++ ++ /* Polarity is determined based on the speed of ++ * our connection. ++ */ ++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); ++ if (ret_val) ++ return ret_val; ++ ++ if ((data & IGP01E1000_PSSR_SPEED_MASK) == ++ IGP01E1000_PSSR_SPEED_1000MBPS) { ++ offset = IGP01E1000_PHY_PCS_INIT_REG; ++ mask = IGP01E1000_PHY_POLARITY_MASK; ++ } else { ++ /* This really only applies to 10Mbps since ++ * there is no polarity for 100Mbps (always 0). ++ */ ++ offset = IGP01E1000_PHY_PORT_STATUS; ++ mask = IGP01E1000_PSSR_POLARITY_REVERSED; ++ } ++ ++ ret_val = phy->ops.read_reg(hw, offset, &data); ++ ++ if (!ret_val) ++ phy->cable_polarity = ((data & mask) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); + + return ret_val; + } + + /** +- * igb_check_polarity_igp - Checks the polarity. 
++ * igb_e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * +- * Success returns 0, Failure returns -E1000_ERR_PHY (-2) +- * +- * Polarity is determined based on the PHY port status register, and the +- * current speed (since there is no polarity at 100Mbps). ++ * Polarity is determined on the polarity reversal feature being enabled. + **/ +-static s32 igb_check_polarity_igp(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; +- u16 data, offset, mask; ++ u16 phy_data, offset, mask; + +- /* Polarity is determined based on the speed of +- * our connection. +- */ +- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); +- if (ret_val) +- goto out; ++ DEBUGFUNC("igb_e1000_check_polarity_ife"); + +- if ((data & IGP01E1000_PSSR_SPEED_MASK) == +- IGP01E1000_PSSR_SPEED_1000MBPS) { +- offset = IGP01E1000_PHY_PCS_INIT_REG; +- mask = IGP01E1000_PHY_POLARITY_MASK; ++ /* Polarity is determined based on the reversal feature being enabled. ++ */ ++ if (phy->polarity_correction) { ++ offset = IFE_PHY_EXTENDED_STATUS_CONTROL; ++ mask = IFE_PESC_POLARITY_REVERSED; + } else { +- /* This really only applies to 10Mbps since +- * there is no polarity for 100Mbps (always 0). +- */ +- offset = IGP01E1000_PHY_PORT_STATUS; +- mask = IGP01E1000_PSSR_POLARITY_REVERSED; ++ offset = IFE_PHY_SPECIAL_CONTROL; ++ mask = IFE_PSC_FORCE_POLARITY; + } + +- ret_val = phy->ops.read_reg(hw, offset, &data); ++ ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) +- phy->cable_polarity = (data & mask) +- ? e1000_rev_polarity_reversed +- : e1000_rev_polarity_normal; ++ phy->cable_polarity = ((phy_data & mask) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); + +-out: + return ret_val; + } + + /** +- * igb_wait_autoneg - Wait for auto-neg completion ++ * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +-static s32 igb_wait_autoneg(struct e1000_hw *hw) ++static s32 e1000_wait_autoneg(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + ++ DEBUGFUNC("e1000_wait_autoneg"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; ++ + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); +@@ -1617,7 +2239,7 @@ + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; +- msleep(100); ++ msec_delay(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation +@@ -1627,7 +2249,7 @@ + } + + /** +- * igb_phy_has_link - Polls PHY for link ++ * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts +@@ -1635,27 +2257,32 @@ + * + * Polls the PHY status register for link, 'iterations' number of times. 
+ **/ +-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, +- u32 usec_interval, bool *success) ++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ++ u32 usec_interval, bool *success) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + ++ DEBUGFUNC("e1000_phy_has_link_generic"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; ++ + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); +- if (ret_val && usec_interval > 0) { ++ if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) +- mdelay(usec_interval/1000); ++ msec_delay(usec_interval/1000); + else +- udelay(usec_interval); ++ usec_delay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) +@@ -1663,18 +2290,18 @@ + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) +- mdelay(usec_interval/1000); ++ msec_delay(usec_interval/1000); + else +- udelay(usec_interval); ++ usec_delay(usec_interval); + } + +- *success = (i < iterations) ? true : false; ++ *success = (i < iterations); + + return ret_val; + } + + /** +- * igb_get_cable_length_m88 - Determine cable length for m88 PHY ++ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length +@@ -1688,37 +2315,40 @@ + * 3 110 - 140 meters + * 4 > 140 meters + **/ +-s32 igb_get_cable_length_m88(struct e1000_hw *hw) ++s32 e1000_get_cable_length_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + ++ DEBUGFUNC("e1000_get_cable_length_m88"); ++ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> +- M88E1000_PSSR_CABLE_LENGTH_SHIFT; +- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { +- ret_val = -E1000_ERR_PHY; +- goto out; +- } ++ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> ++ M88E1000_PSSR_CABLE_LENGTH_SHIFT); ++ ++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) ++ return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + +-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) ++s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; +- u16 phy_data, phy_data2, index, default_page, is_cm; ++ u16 phy_data, phy_data2, is_cm; ++ u16 index, default_page; ++ ++ DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: +@@ -1743,27 +2373,29 @@ + phy->cable_length = phy_data / (is_cm ? 
100 : 1); + break; + case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) +- goto out; ++ return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) +- goto out; ++ return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + +@@ -1772,34 +2404,34 @@ + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + +- /* Reset the page selec to its original value */ ++ /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) +- goto out; ++ return ret_val; + break; ++ + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; +- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { +- ret_val = -E1000_ERR_PHY; +- goto out; +- } ++ ++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) ++ return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; +@@ -1811,20 +2443,18 @@ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) +- goto out; ++ return ret_val; + + break; + default: +- ret_val = -E1000_ERR_PHY; +- goto out; ++ return -E1000_ERR_PHY; + } + +-out: + return ret_val; + } + + /** +- * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY ++ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the +@@ -1834,10 +2464,10 @@ + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) ++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; +@@ -1848,26 +2478,26 @@ + IGP02E1000_PHY_AGC_D + }; + ++ DEBUGFUNC("e1000_get_cable_length_igp_2"); ++ + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. 
+ */ +- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & +- IGP02E1000_AGC_LENGTH_MASK; ++ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & ++ IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || +- (cur_agc_index == 0)) { +- ret_val = -E1000_ERR_PHY; +- goto out; +- } ++ (cur_agc_index == 0)) ++ return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > +@@ -1885,18 +2515,17 @@ + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ +- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? +- (agc_value - IGP02E1000_AGC_RANGE) : 0; ++ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? ++ (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_phy_info_m88 - Retrieve PHY information ++ * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) +@@ -1905,54 +2534,54 @@ + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +-s32 igb_get_phy_info_m88(struct e1000_hw *hw) ++s32 e1000_get_phy_info_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("e1000_get_phy_info_m88"); ++ + if (phy->media_type != e1000_media_type_copper) { +- hw_dbg("Phy info is only valid for copper media\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid for copper media\n"); ++ return -E1000_ERR_CONFIG; + } + +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { +- hw_dbg("Phy info is only valid if link is up\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) +- ? true : false; ++ phy->polarity_correction = !!(phy_data & ++ M88E1000_PSCR_POLARITY_REVERSAL); + +- ret_val = igb_check_polarity_m88(hw); ++ ret_val = igb_e1000_check_polarity_m88(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; ++ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { +- ret_val = phy->ops.get_cable_length(hw); ++ ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? 
e1000_1000t_rx_status_ok +@@ -1968,12 +2597,11 @@ + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +-out: + return ret_val; + } + + /** +- * igb_get_phy_info_igp - Retrieve igp PHY information ++ * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then +@@ -1981,44 +2609,45 @@ + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +-s32 igb_get_phy_info_igp(struct e1000_hw *hw) ++s32 e1000_get_phy_info_igp(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ DEBUGFUNC("e1000_get_phy_info_igp"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { +- hw_dbg("Phy info is only valid if link is up\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + +- ret_val = igb_check_polarity_igp(hw); ++ ret_val = igb_e1000_check_polarity_igp(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; ++ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) +- goto out; ++ return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok +@@ -2033,42 +2662,97 @@ + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +-out: + return ret_val; + } + + /** +- * igb_phy_sw_reset - PHY software reset ++ * igb_e1000_get_phy_info_ife - Retrieves various IFE PHY states ++ * @hw: pointer to the HW structure ++ * ++ * Populates "phy" structure with various feature states. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data; ++ bool link; ++ ++ DEBUGFUNC("igb_e1000_get_phy_info_ife"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); ++ if (ret_val) ++ return ret_val; ++ ++ if (!link) { ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ ++ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); ++ ++ if (phy->polarity_correction) { ++ ret_val = igb_e1000_check_polarity_ife(hw); ++ if (ret_val) ++ return ret_val; ++ } else { ++ /* Polarity is forced */ ++ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); ++ } ++ ++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ ++ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); ++ ++ /* The following parameters are undefined for 10/100 operation. 
*/ ++ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; ++ phy->local_rx = e1000_1000t_rx_status_undefined; ++ phy->remote_rx = e1000_1000t_rx_status_undefined; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +-s32 igb_phy_sw_reset(struct e1000_hw *hw) ++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + u16 phy_ctrl; + +- if (!(hw->phy.ops.read_reg)) +- goto out; ++ DEBUGFUNC("e1000_phy_sw_reset_generic"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) +- goto out; ++ return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) +- goto out; ++ return ret_val; + +- udelay(1); ++ usec_delay(1); + +-out: + return ret_val; + } + + /** +- * igb_phy_hw_reset - PHY hardware reset ++ * e1000_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire +@@ -2076,50 +2760,65 @@ + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +-s32 igb_phy_hw_reset(struct e1000_hw *hw) ++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; ++ s32 ret_val; + u32 ctrl; + +- ret_val = igb_check_reset_block(hw); +- if (ret_val) { +- ret_val = 0; +- goto out; ++ DEBUGFUNC("e1000_phy_hw_reset_generic"); ++ ++ if (phy->ops.check_reset_block) { ++ ret_val = phy->ops.check_reset_block(hw); ++ if (ret_val) ++ return E1000_SUCCESS; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- ctrl = rd32(E1000_CTRL); +- wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); +- wrfl(); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); ++ E1000_WRITE_FLUSH(hw); + +- udelay(phy->reset_delay_us); ++ usec_delay(phy->reset_delay_us); + +- wr32(E1000_CTRL, ctrl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ E1000_WRITE_FLUSH(hw); + +- udelay(150); ++ usec_delay(150); + + phy->ops.release(hw); + +- ret_val = phy->ops.get_cfg_done(hw); ++ return phy->ops.get_cfg_done(hw); ++} + +-out: +- return ret_val; ++/** ++ * e1000_get_cfg_done_generic - Generic configuration done ++ * @hw: pointer to the HW structure ++ * ++ * Generic function to wait 10 milli-seconds for configuration to complete ++ * and return success. ++ **/ ++s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_get_cfg_done_generic"); ++ ++ msec_delay_irq(10); ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_phy_init_script_igp3 - Inits the IGP3 PHY ++ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +-s32 igb_phy_init_script_igp3(struct e1000_hw *hw) ++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) + { +- hw_dbg("Running IGP 3 PHY init script\n"); ++ DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ +@@ -2130,7 +2829,7 @@ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); +- /* Add 4% to TX amplitude in Giga mode */ ++ /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); +@@ -2191,17 +2890,106 @@ + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + +- return 0; ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_get_phy_type_from_id - Get PHY type from id ++ * @phy_id: phy_id read from the phy ++ * ++ * Returns the phy type from the id. ++ **/ ++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) ++{ ++ enum e1000_phy_type phy_type = e1000_phy_unknown; ++ ++ switch (phy_id) { ++ case M88E1000_I_PHY_ID: ++ case M88E1000_E_PHY_ID: ++ case M88E1111_I_PHY_ID: ++ case M88E1011_I_PHY_ID: ++ case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: ++ case I347AT4_E_PHY_ID: ++ case M88E1112_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: ++ phy_type = e1000_phy_m88; ++ break; ++ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ ++ phy_type = e1000_phy_igp_2; ++ break; ++ case GG82563_E_PHY_ID: ++ phy_type = e1000_phy_gg82563; ++ break; ++ case IGP03E1000_E_PHY_ID: ++ phy_type = e1000_phy_igp_3; ++ break; ++ case IFE_E_PHY_ID: ++ case IFE_PLUS_E_PHY_ID: ++ case IFE_C_E_PHY_ID: ++ phy_type = e1000_phy_ife; ++ break; ++ case I82580_I_PHY_ID: ++ phy_type = e1000_phy_82580; ++ break; ++ case I210_I_PHY_ID: ++ phy_type = e1000_phy_i210; ++ break; ++ default: ++ phy_type = e1000_phy_unknown; ++ break; ++ } ++ return phy_type; ++} ++ ++/** ++ * e1000_determine_phy_address - Determines PHY address. ++ * @hw: pointer to the HW structure ++ * ++ * This uses a trial and error method to loop through possible PHY ++ * addresses. It tests each by reading the PHY ID registers and ++ * checking for a match. ++ **/ ++s32 e1000_determine_phy_address(struct e1000_hw *hw) ++{ ++ u32 phy_addr = 0; ++ u32 i; ++ enum e1000_phy_type phy_type = e1000_phy_unknown; ++ ++ hw->phy.id = phy_type; ++ ++ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { ++ hw->phy.addr = phy_addr; ++ i = 0; ++ ++ do { ++ e1000_get_phy_id(hw); ++ phy_type = e1000_get_phy_type_from_id(hw->phy.id); ++ ++ /* If phy_type is valid, break - we found our ++ * PHY address ++ */ ++ if (phy_type != e1000_phy_unknown) ++ return E1000_SUCCESS; ++ ++ msec_delay(1); ++ i++; ++ } while (i < 10); ++ } ++ ++ return -E1000_ERR_PHY_TYPE; + } + + /** +- * igb_power_up_phy_copper - Restore copper link in case of PHY power down ++ * igb_e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a +- * driver unload, restore the link to previous settings. ++ * driver unload, or wake on lan is not enabled, restore the link to previous ++ * settings. 
+ **/ +-void igb_power_up_phy_copper(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_power_up_phy_copper(struct e1000_hw *hw) + { + u16 mii_reg = 0; + +@@ -2212,13 +3000,15 @@ + } + + /** +- * igb_power_down_phy_copper - Power down copper PHY ++ * igb_e1000_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * +- * Power down PHY to save power when interface is down and wake on lan +- * is not enabled. ++ * In the case of a PHY power down to save power, or to turn off link during a ++ * driver unload, or wake on lan is not enabled, restore the link to previous ++ * settings. + **/ +-void igb_power_down_phy_copper(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_power_down_phy_copper(struct e1000_hw *hw) + { + u16 mii_reg = 0; + +@@ -2226,98 +3016,85 @@ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +- usleep_range(1000, 2000); ++ msec_delay(1); + } + + /** +- * igb_check_polarity_82580 - Checks the polarity. ++ * igb_e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +-static s32 igb_check_polarity_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + ++ DEBUGFUNC("igb_e1000_check_polarity_82577"); + +- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); ++ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) +- phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) +- ? e1000_rev_polarity_reversed +- : e1000_rev_polarity_normal; ++ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); + + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY ++ * igb_e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * +- * Calls the PHY setup function to force speed and duplex. Clears the +- * auto-crossover to force MDI manually. Waits for link and returns +- * successful if link up is successful, else -E1000_ERR_PHY (-2). ++ * Calls the PHY setup function to force speed and duplex. + **/ +-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_82577"); ++ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- igb_phy_force_speed_duplex_setup(hw, &phy_data); ++ e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) +- goto out; +- +- /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI +- * forced whenever speed and duplex are forced. 
+- */ +- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); +- if (ret_val) +- goto out; +- +- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; +- +- ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); +- if (ret_val) +- goto out; +- +- hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); ++ return ret_val; + +- udelay(1); ++ usec_delay(1); + + if (phy->autoneg_wait_to_complete) { +- hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); ++ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) +- hw_dbg("Link taking longer than expected.\n"); ++ DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); +- if (ret_val) +- goto out; ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + } + +-out: + return ret_val; + } + + /** +- * igb_get_phy_info_82580 - Retrieve I82580 PHY information ++ * igb_e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then +@@ -2325,44 +3102,46 @@ + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +-s32 igb_get_phy_info_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ DEBUGFUNC("igb_e1000_get_phy_info_82577"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { +- hw_dbg("Phy info is only valid if link is up\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + +- ret_val = igb_check_polarity_82580(hw); ++ ret_val = igb_e1000_check_polarity_82577(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); ++ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; ++ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + +- if ((data & I82580_PHY_STATUS2_SPEED_MASK) == +- I82580_PHY_STATUS2_SPEED_1000MBPS) { ++ if ((data & I82577_PHY_STATUS2_SPEED_MASK) == ++ I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) +- goto out; ++ return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok +@@ -2377,63 +3156,65 @@ + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_cable_length_82580 - Determine cable length for 82580 PHY ++ * igb_e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
+ **/ +-s32 igb_get_cable_length_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + +- ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); ++ DEBUGFUNC("igb_e1000_get_cable_length_82577"); ++ ++ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> +- I82580_DSTATUS_CABLE_LENGTH_SHIFT; ++ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> ++ I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) +- ret_val = -E1000_ERR_PHY; ++ return -E1000_ERR_PHY; + + phy->cable_length = length; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_phy_reg_gs40g - Write GS40G PHY register ++ * e1000_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure +- * @offset: lower half is register offset to write to +- * upper half is page to use. ++ * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) + { + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + ++ DEBUGFUNC("e1000_write_phy_reg_gs40g"); ++ + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + +- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); ++ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; +- ret_val = igb_write_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + release: + hw->phy.ops.release(hw); +@@ -2441,7 +3222,7 @@ + } + + /** +- * igb_read_phy_reg_gs40g - Read GS40G PHY register ++ * e1000_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. +@@ -2450,20 +3231,22 @@ + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) + { + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + ++ DEBUGFUNC("e1000_read_phy_reg_gs40g"); ++ + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + +- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); ++ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; +- ret_val = igb_read_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + release: + hw->phy.ops.release(hw); +@@ -2471,41 +3254,156 @@ + } + + /** +- * igb_set_master_slave_mode - Setup PHY for Master/slave mode ++ * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure ++ * @address: address to be read ++ * @data: pointer to the read data + * +- * Sets up Master/slave mode ++ * Reads the mPHY control register in the PHY at offset and stores the ++ * information read to data. + **/ +-static s32 igb_set_master_slave_mode(struct e1000_hw *hw) ++s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) + { +- s32 ret_val; +- u16 phy_data; ++ u32 mphy_ctrl = 0; ++ bool locked = false; ++ bool ready; + +- /* Resolve Master/Slave mode */ +- ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); +- if (ret_val) +- return ret_val; ++ DEBUGFUNC("e1000_read_phy_reg_mphy"); + +- /* load defaults for future use */ +- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? +- ((phy_data & CR_1000T_MS_VALUE) ? +- e1000_ms_force_master : +- e1000_ms_force_slave) : e1000_ms_auto; ++ /* Check if mPHY is ready to read/write operations */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; + +- switch (hw->phy.ms_type) { +- case e1000_ms_force_master: +- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); +- break; +- case e1000_ms_force_slave: +- phy_data |= CR_1000T_MS_ENABLE; +- phy_data &= ~(CR_1000T_MS_VALUE); +- break; +- case e1000_ms_auto: +- phy_data &= ~CR_1000T_MS_ENABLE; +- /* fall-through */ +- default: ++ /* Check if mPHY access is disabled and enable it if so */ ++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); ++ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { ++ locked = true; ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ mphy_ctrl |= E1000_MPHY_ENA_ACCESS; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ } ++ ++ /* Set the address that we want to read */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ ++ /* We mask address, because we want to use only current lane */ ++ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & ++ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | ++ (address & E1000_MPHY_ADDRESS_MASK); ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ ++ /* Read data from the address */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ *data = E1000_READ_REG(hw, E1000_MPHY_DATA); ++ ++ /* Disable access to mPHY if it was originally disabled */ ++ if (locked) ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, ++ E1000_MPHY_DIS_ACCESS); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_phy_reg_mphy - Write mPHY control register ++ * @hw: pointer to the HW structure ++ * @address: address to write to ++ * @data: data to write to register at offset 
++ * @line_override: used when we want to use different line than default one ++ * ++ * Writes data to mPHY control register. ++ **/ ++s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, ++ bool line_override) ++{ ++ u32 mphy_ctrl = 0; ++ bool locked = false; ++ bool ready; ++ ++ DEBUGFUNC("e1000_write_phy_reg_mphy"); ++ ++ /* Check if mPHY is ready to read/write operations */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ ++ /* Check if mPHY access is disabled and enable it if so */ ++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); ++ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { ++ locked = true; ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ mphy_ctrl |= E1000_MPHY_ENA_ACCESS; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ } ++ ++ /* Set the address that we want to read */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ ++ /* We mask address, because we want to use only current lane */ ++ if (line_override) ++ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; ++ else ++ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; ++ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | ++ (address & E1000_MPHY_ADDRESS_MASK); ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ ++ /* Read data from the address */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); ++ ++ /* Disable access to mPHY if it was originally disabled */ ++ if (locked) ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, ++ E1000_MPHY_DIS_ACCESS); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_is_mphy_ready - Check if mPHY control register is not busy ++ * @hw: pointer to the HW structure ++ * ++ * Returns mPHY control register status. ++ **/ ++bool e1000_is_mphy_ready(struct e1000_hw *hw) ++{ ++ u16 retry_count = 0; ++ u32 mphy_ctrl = 0; ++ bool ready = false; ++ ++ while (retry_count < 2) { ++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); ++ if (mphy_ctrl & E1000_MPHY_BUSY) { ++ usec_delay(20); ++ retry_count++; ++ continue; ++ } ++ ready = true; + break; + } + +- return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); ++ if (!ready) ++ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); ++ ++ return ready; + } +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h +--- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,146 +1,115 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". 
+- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* + +-#ifndef _E1000_PHY_H_ +-#define _E1000_PHY_H_ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. + +-enum e1000_ms_type { +- e1000_ms_hw_default = 0, +- e1000_ms_force_master, +- e1000_ms_force_slave, +- e1000_ms_auto +-}; ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-enum e1000_smart_speed { +- e1000_smart_speed_default = 0, +- e1000_smart_speed_on, +- e1000_smart_speed_off +-}; ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. + +-s32 igb_check_downshift(struct e1000_hw *hw); +-s32 igb_check_reset_block(struct e1000_hw *hw); +-s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +-s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +-s32 igb_get_cable_length_m88(struct e1000_hw *hw); +-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +-s32 igb_get_phy_id(struct e1000_hw *hw); +-s32 igb_get_phy_info_igp(struct e1000_hw *hw); +-s32 igb_get_phy_info_m88(struct e1000_hw *hw); +-s32 igb_phy_sw_reset(struct e1000_hw *hw); +-s32 igb_phy_hw_reset(struct e1000_hw *hw); +-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +-s32 igb_setup_copper_link(struct e1000_hw *hw); +-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, +- u32 usec_interval, bool *success); +-void igb_power_up_phy_copper(struct e1000_hw *hw); +-void igb_power_down_phy_copper(struct e1000_hw *hw); +-s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +-s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +-s32 igb_get_phy_info_82580(struct e1000_hw *hw); +-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +-s32 igb_get_cable_length_82580(struct e1000_hw *hw); +-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_check_polarity_m88(struct e1000_hw *hw); ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". 
+ +-/* IGP01E1000 Specific Registers */ +-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +-#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +- +-#define I82580_ADDR_REG 16 +-#define I82580_CFG_REG 22 +-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) +-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +-#define I82580_CTRL_REG 23 +-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) +- +-/* 82580 specific PHY registers */ +-#define I82580_PHY_CTRL_2 18 +-#define I82580_PHY_LBK_CTRL 19 +-#define I82580_PHY_STATUS_2 26 +-#define I82580_PHY_DIAG_STATUS 31 +- +-/* I82580 PHY Status 2 */ +-#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +-#define I82580_PHY_STATUS2_MDIX 0x0800 +-#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +-#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +-#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 +- +-/* I82580 PHY Control 2 */ +-#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +-#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +-#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 +- +-/* I82580 PHY Diagnostics Status */ +-#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +-#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +-/* 82580 PHY Power Management */ +-#define E1000_82580_PHY_POWER_MGMT 0xE14 +-#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +-#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +-#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +-#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ ++*******************************************************************************/ ++ ++#ifndef _E1000_PHY_H_ ++#define _E1000_PHY_H_ ++ ++void e1000_init_phy_ops_generic(struct e1000_hw *hw); ++s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); ++void e1000_null_phy_generic(struct e1000_hw *hw); ++s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); ++s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); ++s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++s32 e1000_check_downshift_generic(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw); ++s32 e1000_check_reset_block_generic(struct e1000_hw *hw); ++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); ++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); ++s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); ++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); ++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); ++s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); ++s32 e1000_get_cable_length_m88(struct e1000_hw *hw); ++s32 
e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); ++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); ++s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); ++s32 e1000_get_phy_id(struct e1000_hw *hw); ++s32 e1000_get_phy_info_igp(struct e1000_hw *hw); ++s32 e1000_get_phy_info_m88(struct e1000_hw *hw); ++s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw); ++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); ++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); ++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); ++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); ++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page); ++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); ++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); ++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ++ u32 usec_interval, bool *success); ++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); ++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); ++s32 e1000_determine_phy_address(struct e1000_hw *hw); ++s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); ++s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); ++void igb_e1000_power_up_phy_copper(struct e1000_hw *hw); ++void igb_e1000_power_down_phy_copper(struct e1000_hw *hw); ++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); ++s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); ++s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw); ++s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw); ++s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); ++s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw); ++s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); ++s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, ++ bool line_override); ++bool e1000_is_mphy_ready(struct e1000_hw *hw); + +-/* Enable flexible speed on link-up */ +-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +-#define 
IGP01E1000_PSSR_MDIX 0x0800 +-#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +-#define IGP02E1000_PHY_CHANNEL_NUM 4 +-#define IGP02E1000_PHY_AGC_A 0x11B1 +-#define IGP02E1000_PHY_AGC_B 0x12B1 +-#define IGP02E1000_PHY_AGC_C 0x14B1 +-#define IGP02E1000_PHY_AGC_D 0x18B1 +-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +-#define IGP02E1000_AGC_LENGTH_MASK 0x7F +-#define IGP02E1000_AGC_RANGE 15 ++#define E1000_MAX_PHY_ADDR 8 + +-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF ++/* IGP01E1000 Specific Registers */ ++#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ ++#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ ++#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ ++#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ ++#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ ++#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ ++#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ ++#define IGP_PAGE_SHIFT 5 ++#define PHY_REG_MASK 0x1F + + /* GS40G - I210 PHY defines */ + #define GS40G_PAGE_SELECT 0x16 +@@ -151,7 +120,110 @@ + #define GS40G_MAC_LB 0x4140 + #define GS40G_MAC_SPEED_1G 0X0006 + #define GS40G_COPPER_SPEC 0x0010 +-#define GS40G_LINE_LB 0x4000 ++ ++#define HV_INTC_FC_PAGE_START 768 ++#define I82578_ADDR_REG 29 ++#define I82577_ADDR_REG 16 ++#define I82577_CFG_REG 22 ++#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) ++#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ ++#define I82577_CTRL_REG 23 ++ ++/* 82577 specific PHY registers */ ++#define I82577_PHY_CTRL_2 18 ++#define I82577_PHY_LBK_CTRL 19 ++#define I82577_PHY_STATUS_2 26 ++#define I82577_PHY_DIAG_STATUS 31 ++ ++/* I82577 PHY Status 2 */ ++#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 ++#define I82577_PHY_STATUS2_MDIX 0x0800 ++#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 ++#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 ++ ++/* I82577 PHY Control 2 */ ++#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 ++#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 ++#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 ++ ++/* I82577 PHY Diagnostics Status */ ++#define I82577_DSTATUS_CABLE_LENGTH 0x03FC ++#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 ++ ++/* 82580 PHY Power Management */ ++#define E1000_82580_PHY_POWER_MGMT 0xE14 ++#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ ++#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ ++#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ ++#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ ++ ++#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ ++#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ ++#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ ++#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ ++#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ ++ ++#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 ++#define IGP01E1000_PHY_POLARITY_MASK 0x0078 ++ ++#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 ++#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ ++ ++#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 ++ ++#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ ++#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ ++#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ ++ ++#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 ++ ++#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 ++#define IGP01E1000_PSSR_MDIX 0x0800 ++#define IGP01E1000_PSSR_SPEED_MASK 0xC000 ++#define 
IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 ++ ++#define IGP02E1000_PHY_CHANNEL_NUM 4 ++#define IGP02E1000_PHY_AGC_A 0x11B1 ++#define IGP02E1000_PHY_AGC_B 0x12B1 ++#define IGP02E1000_PHY_AGC_C 0x14B1 ++#define IGP02E1000_PHY_AGC_D 0x18B1 ++ ++#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ ++#define IGP02E1000_AGC_LENGTH_MASK 0x7F ++#define IGP02E1000_AGC_RANGE 15 ++ ++#define E1000_CABLE_LENGTH_UNDEFINED 0xFF ++ ++#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 ++#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 ++#define E1000_KMRNCTRLSTA_REN 0x00200000 ++#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ ++#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ ++#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ ++#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ ++#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ ++ ++#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 ++#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ ++#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ ++#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ ++ ++/* IFE PHY Extended Status Control */ ++#define IFE_PESC_POLARITY_REVERSED 0x0100 ++ ++/* IFE PHY Special Control */ ++#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 ++#define IFE_PSC_FORCE_POLARITY 0x0020 ++ ++/* IFE PHY Special Control and LED Control */ ++#define IFE_PSCL_PROBE_MODE 0x0020 ++#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ ++#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ ++ ++/* IFE PHY MDIX Control */ ++#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ ++#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ ++#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + + /* SFP modules ID memory locations */ + #define E1000_SFF_IDENTIFIER_OFFSET 0x00 +@@ -160,7 +232,7 @@ + + #define E1000_SFF_ETH_FLAGS_OFFSET 0x06 + /* Flags for SFP modules compatible with ETH up to 1Gb */ +-struct e1000_sfp_flags { ++struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; +@@ -171,4 +243,10 @@ + u8 e10_base_px:1; + }; + ++/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ ++#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 ++#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 ++#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 ++#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 ++ + #endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h +--- a/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,154 +1,196 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . 
+- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_REGS_H_ + #define _E1000_REGS_H_ + +-#define E1000_CTRL 0x00000 /* Device Control - RW */ +-#define E1000_STATUS 0x00008 /* Device Status - RO */ +-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +-#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +-#define E1000_MDIC 0x00020 /* MDI Control - RW */ +-#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +-#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +-#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +-#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +-#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +-#define E1000_RCTL 0x00100 /* RX Control - RW */ +-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +-#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +-#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +-#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +-#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +-#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +-#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +-#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +-#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +-#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +-#define E1000_TCTL 0x00400 /* TX Control - RW */ +-#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +-#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +-#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +-#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +-#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +-#define E1000_PBS 0x01008 /* Packet Buffer Size */ +-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +-#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +-#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +-#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +-#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +-#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +-#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +-#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +-#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +-#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +-#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +-#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +-#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +-#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +-#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ ++#define E1000_CTRL 0x00000 /* Device Control - RW */ ++#define E1000_STATUS 0x00008 /* Device Status - RO */ ++#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ ++#define E1000_EERD 0x00014 /* EEPROM Read - RW */ ++#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ ++#define E1000_FLA 0x0001C /* Flash Access - RW */ ++#define E1000_MDIC 0x00020 /* MDI Control - RW */ ++#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ ++#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ ++#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ ++#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ ++#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ ++#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ ++#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ + #define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ + #define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ + #define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ ++#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ ++#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ ++#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ ++#define E1000_SCTL 0x00024 /* SerDes Control - RW */ ++#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ ++#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ ++#define E1000_FCT 0x00030 /* Flow Control Type - RW */ ++#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ ++#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ ++#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ ++#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ ++#define E1000_ICS 
0x000C8 /* Interrupt Cause Set - WO */ ++#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ ++#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ ++#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ ++#define E1000_RCTL 0x00100 /* Rx Control - RW */ ++#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ ++#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ ++#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ ++#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ ++#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) ++#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ ++#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ ++#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ ++#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ ++#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ ++#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ ++#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ ++#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ ++#define E1000_TCTL 0x00400 /* Tx Control - RW */ ++#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ ++#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ ++#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ ++#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ ++#define E1000_LEDMUX 0x08130 /* LED MUX Control */ ++#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ ++#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ ++#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ ++#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ ++#define E1000_PBS 0x01008 /* Packet Buffer Size */ ++#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ ++#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ ++#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ ++#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ ++#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ ++#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ ++#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ ++#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ ++#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ ++#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ ++#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ ++#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ ++#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ ++#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ ++#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ ++#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ ++#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ ++#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ ++#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ ++#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ ++#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ ++#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ ++#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ ++#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ ++#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ ++#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ ++#define E1000_ERT 
0x02008 /* Early Rx Threshold - RW */ ++#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ ++#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ ++#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ ++#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ ++#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ ++#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ ++#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ ++#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ ++#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ ++#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ ++/* Split and Replication Rx Control - RW */ ++#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ ++#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ ++#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ ++#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ ++#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ ++#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ ++#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ ++#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ ++#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ ++#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ ++#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ ++#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ ++#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ ++#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ ++#define E1000_I210_FLMNGCTL 0x12038 ++#define E1000_I210_FLMNGDATA 0x1203C ++#define E1000_I210_FLMNGCNT 0x12040 + +-/* IEEE 1588 TIMESYNCH */ +-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +-#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +-#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +-#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +-#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +-#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +-#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +-#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +-#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +-#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +-#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +-#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +-#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +-#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +-#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +-#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +-#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +-#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +-#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +-#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +-#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +-#define 
E1000_TSIM 0x0B674 /* Interrupt Mask Register */ ++#define E1000_I210_FLSWCTL 0x12048 ++#define E1000_I210_FLSWDATA 0x1204C ++#define E1000_I210_FLSWCNT 0x12050 + +-/* Filtering Registers */ +-#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +-#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +-#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +-#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +-#define E1000_SAQF0 E1000_SAQF(0) +-#define E1000_DAQF0 E1000_DAQF(0) +-#define E1000_SPQF0 E1000_SPQF(0) +-#define E1000_FTQF0 E1000_FTQF(0) +-#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +-#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ ++#define E1000_I210_FLA 0x1201C + +-#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) ++#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) ++#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +-/* DMA Coalescing registers */ +-#define E1000_DMACR 0x02508 /* Control Register */ +-#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +-#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +-#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +-#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +-#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ ++/* QAV Tx mode control register */ ++#define E1000_I210_TQAVCTRL 0x3570 + +-/* TX Rate Limit Registers */ +-#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +-#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +-#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ ++/* QAV Tx mode control register bitfields masks */ ++/* QAV enable */ ++#define E1000_TQAVCTRL_MODE (1 << 0) ++/* Fetching arbitration type */ ++#define E1000_TQAVCTRL_FETCH_ARB (1 << 4) ++/* Fetching timer enable */ ++#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) ++/* Launch arbitration type */ ++#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) ++/* Launch timer enable */ ++#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) ++/* SP waits for SR enable */ ++#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) ++/* Fetching timer correction */ ++#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 ++#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ ++ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) ++ ++/* High credit registers where _n can be 0 or 1. */ ++#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) ++ ++/* Queues fetch arbitration priority control register */ ++#define E1000_I210_TQAVARBCTRL 0x3574 ++/* Queues priority masks where _n and _p can be 0-3. */ ++#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) ++/* QAV Tx mode control registers where _n can be 0 or 1. */ ++#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) ++ ++/* QAV Tx mode control register bitfields masks */ ++#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ ++#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ ++#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. 
SR Tx mode */ + +-/* Split and Replication RX Control - RW */ +-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ ++/* Good transmitted packets counter registers */ ++#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +-/* Thermal sensor configuration and status registers */ +-#define E1000_THMJT 0x08100 /* Junction Temperature */ +-#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +-#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +-#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +-#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ ++/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ ++#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) ++ ++#define E1000_MMDAC 13 /* MMD Access Control */ ++#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + + /* Convenience macros + * +@@ -157,269 +199,442 @@ + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +-#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ +- : (0x0C000 + ((_n) * 0x40))) +-#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ +- : (0x0C004 + ((_n) * 0x40))) +-#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ +- : (0x0C008 + ((_n) * 0x40))) +-#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ +- : (0x0C00C + ((_n) * 0x40))) +-#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ +- : (0x0C010 + ((_n) * 0x40))) +-#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ +- : (0x0C018 + ((_n) * 0x40))) +-#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ +- : (0x0C028 + ((_n) * 0x40))) +-#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ +- : (0x0E000 + ((_n) * 0x40))) +-#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ +- : (0x0E004 + ((_n) * 0x40))) +-#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ +- : (0x0E008 + ((_n) * 0x40))) +-#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ +- : (0x0E010 + ((_n) * 0x40))) +-#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ +- : (0x0E018 + ((_n) * 0x40))) +-#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ +- : (0x0E028 + ((_n) * 0x40))) +-#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ +- (0x0C014 + ((_n) * 0x40))) ++#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ ++ (0x0C000 + ((_n) * 0x40))) ++#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ ++ (0x0C004 + ((_n) * 0x40))) ++#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ ++ (0x0C008 + ((_n) * 0x40))) ++#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ ++ (0x0C00C + ((_n) * 0x40))) ++#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ ++ (0x0C010 + ((_n) * 0x40))) ++#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ ++ (0x0C014 + ((_n) * 0x40))) + #define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +-#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ +- (0x0E014 + ((_n) * 0x40))) ++#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ ++ (0x0C018 + ((_n) * 0x40))) ++#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ ++ (0x0C028 + ((_n) * 0x40))) ++#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ ++ (0x0C030 + ((_n) * 0x40))) ++#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ ++ (0x0E000 + ((_n) * 0x40))) ++#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ ++ (0x0E004 + ((_n) * 0x40))) ++#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ ++ (0x0E008 + ((_n) * 0x40))) ++#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ ++ (0x0E010 + ((_n) * 0x40))) ++#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ ++ (0x0E014 + ((_n) * 0x40))) + #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +-#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ +- : (0x0E038 + ((_n) * 0x40))) +-#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ +- : (0x0E03C + ((_n) * 0x40))) +- +-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +-#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ ++#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ ++ (0x0E018 + ((_n) * 0x40))) ++#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ ++ (0x0E028 + ((_n) * 0x40))) ++#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ ++ (0x0E038 + ((_n) * 0x40))) ++#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ ++ (0x0E03C + ((_n) * 0x40))) ++#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) ++#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ ++#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ ++#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ ++#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) ++#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ ++ (0x054E0 + ((_i - 16) * 8))) ++#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ ++ (0x054E4 + ((_i - 16) * 8))) ++#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) ++#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) ++#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) ++#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) ++#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) ++#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) ++#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) ++#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) ++#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ ++#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ ++#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ ++/* Same as TXPBS, renamed for newer Si - RW */ ++#define E1000_ITPBS 0x03404 ++#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ ++#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ ++#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ ++#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ ++#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ ++#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ ++#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ ++#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ ++#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ ++#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ ++#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ ++#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ ++#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ ++/* DMA Tx Max Total Allow Size Reqs - RW */ ++#define E1000_DTXMXSZRQ 0x03540 ++#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ ++#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ ++#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ ++#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ ++#define E1000_SYMERRS 
0x04008 /* Symbol Error Count - R/clr */ ++#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ ++#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ ++#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ ++#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ ++#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ ++#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ ++#define E1000_COLC 0x04028 /* Collision Count - R/clr */ ++#define E1000_DC 0x04030 /* Defer Count - R/clr */ ++#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ ++#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ ++#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ ++#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ ++#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ ++#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ ++#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ ++#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ ++#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ ++#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ ++#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ ++#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ ++#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ ++#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ ++#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ ++#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ ++#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ ++#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ ++#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ ++#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ ++#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ ++#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ ++#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ ++#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ ++#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ ++#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ ++#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ ++#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ ++#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ ++#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ ++#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ ++#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ ++#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ ++#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ ++#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ ++#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ ++#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ ++#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ ++#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ ++#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ ++#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ ++#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ ++#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ ++#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ ++#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx 
Count - R/clr */ ++#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ ++#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ ++#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ ++#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ ++#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ ++#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ ++#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ ++#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ ++#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ ++#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ ++#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ ++ ++/* Virtualization statistical counters */ ++#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) ++#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) ++#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) ++#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) ++#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) ++#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) ++#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) ++#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) ++#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) ++ ++/* LinkSec */ ++#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ ++#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ ++#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ ++#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ ++#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ ++#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ ++#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ ++#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ ++#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ ++#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ ++#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ ++#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ ++#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ ++#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ ++#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ ++#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ ++#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ ++#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ ++#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ ++#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ ++#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ ++#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ ++#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ ++#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ ++#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ ++#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ ++#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ ++#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ ++#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ ++#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ ++/* LinkSec Tx 128-bit Key 0 - WO */ ++#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) ++/* LinkSec Tx 128-bit Key 1 - WO */ ++#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) ++#define 
E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ ++#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ ++/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit ++ * key - RW. ++ */ ++#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +-#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +-#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +-#define E1000_DC 0x04030 /* Defer Count - R/clr */ +-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +-#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +-#define E1000_TOTL 
0x040C8 /* Total Octets TX Low - R/clr */ +-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +-/* Interrupt Cause Rx Packet Timer Expire Count */ +-#define E1000_ICRXPTC 0x04104 +-/* Interrupt Cause Rx Absolute Timer Expire Count */ +-#define E1000_ICRXATC 0x04108 +-/* Interrupt Cause Tx Packet Timer Expire Count */ +-#define E1000_ICTXPTC 0x0410C +-/* Interrupt Cause Tx Absolute Timer Expire Count */ +-#define E1000_ICTXATC 0x04110 +-/* Interrupt Cause Tx Queue Empty Count */ +-#define E1000_ICTXQEC 0x04118 +-/* Interrupt Cause Tx Queue Minimum Threshold Count */ +-#define E1000_ICTXQMTC 0x0411C +-/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +-#define E1000_ICRXDMTC 0x04120 +-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +-#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +-#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +-#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +-#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +-#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +-#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +-#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +-#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +-#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +-#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +-#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +-#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +-#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +-#define E1000_LENERRS 0x04138 /* Length Errors Count */ +-#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +-#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +-#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +-#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +-#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +-#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +-#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +-#define E1000_RA 0x05400 /* Receive Address - RW Array */ +-#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +-#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +-#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ +- (0x054E0 + ((_i - 16) * 8))) +-#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ +- (0x054E4 + ((_i - 16) * 8))) +-#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +-#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +-#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +-#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +-#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +-#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +-#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +-#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +-#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +-#define E1000_MANC 0x05820 /* Management Control - RW */ +-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +- +-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +-#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +-#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +-#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +-#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +-#define E1000_SWSM 0x05B50 /* SW Semaphore */ +-#define E1000_FWSM 0x05B54 /* FW Semaphore */ +-#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ ++#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ ++#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ ++#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ ++#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ ++/* IPSec Rx IPv4/v6 Address - RW */ ++#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) ++/* IPSec Rx 128-bit Key - RW */ ++#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) ++#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ ++#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ ++/* IPSec Tx 128-bit Key - RW */ ++#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) ++#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ ++#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ ++#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ ++#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ ++#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ ++#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ ++#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ ++#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ ++#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ ++#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ ++#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ ++#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ ++#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ ++#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ ++#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ ++#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ ++#define E1000_LENERRS 0x04138 /* Length Errors Count */ ++#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ ++#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ ++#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ ++#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ ++#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ ++#define E1000_PCS_LPABNP 0x04224 /* 
Link Partner Ability Next Pg - RW */ ++#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ ++#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ ++#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ ++#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ ++#define E1000_RA 0x05400 /* Receive Address - RW Array */ ++#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ ++#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ ++#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ ++#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ ++#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ ++#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ ++#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ ++#define E1000_WUC 0x05800 /* Wakeup Control - RW */ ++#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ ++#define E1000_WUS 0x05810 /* Wakeup Status - RO */ ++#define E1000_MANC 0x05820 /* Management Control - RW */ ++#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ ++#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ ++#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ ++#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ ++#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ ++#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ ++#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ ++#define E1000_HOST_IF 0x08800 /* Host Interface */ ++#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ ++/* Flexible Host Filter Table */ ++#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) ++/* Ext Flexible Host Filter Table */ ++#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) ++ ++#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ ++#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ ++/* Management Decision Filters */ ++#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) ++#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ ++#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ ++#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ ++#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ ++#define E1000_GCR 0x05B00 /* PCI-Ex Control */ ++#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ ++#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ ++#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ ++#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ ++#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ ++#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ ++#define E1000_SWSM 0x05B50 /* SW Semaphore */ ++#define E1000_FWSM 0x05B54 /* FW Semaphore */ ++/* Driver-only SW semaphore (not used by BOOT agents) */ ++#define E1000_SWSM2 0x05B58 ++#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ ++#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ ++#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ ++#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ ++#define E1000_HICR 0x08F00 /* Host Interface Control */ ++#define E1000_FWSTS 0x08F0C /* FW Status */ + + /* RSS registers */ +-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +-#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +-#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +-#define E1000_IMIRVP 0x05AC0 /* Immediate 
Interrupt RX VLAN Priority - RW */ +-/* MSI-X Allocation Register (_i) - RW */ +-#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +-/* Redirection Table - RW Array */ +-#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +-#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ +- ++#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ ++#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ ++#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ ++#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ ++#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ ++#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ ++#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ ++#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ ++#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ ++#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ + /* VT Registers */ +-#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +-#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +-#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +-#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +-#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +-#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +-#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +-#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +-#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +-#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +-#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +-#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ ++#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ ++#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ ++#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ ++#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ ++#define E1000_VFRE 0x00C8C /* VF Receive Enables */ ++#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ ++#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ ++#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ ++#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ ++#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ ++#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ ++#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ ++#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ ++#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ ++#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ ++#define E1000_MDFB 0x03558 /* Malicious Driver free block */ ++#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ ++#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ ++#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ ++#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ ++#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ + /* These act per VF so an array friendly macro is used */ +-#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +-#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +-#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +-#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +-#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +- +-struct e1000_hw; +- +-u32 igb_rd32(struct e1000_hw *hw, u32 reg); +- +-/* write operations, indexed using DWORDS */ +-#define wr32(reg, val) \ +-do { \ +- u8 __iomem 
*hw_addr = ACCESS_ONCE((hw)->hw_addr); \ +- if (!E1000_REMOVED(hw_addr)) \ +- writel((val), &hw_addr[(reg)]); \ +-} while (0) +- +-#define rd32(reg) (igb_rd32(hw, reg)) ++#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) ++#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) ++#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) ++#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) ++#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) ++/* VLAN Virtual Machine Filter - RW */ ++#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) ++#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) ++#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ ++#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ ++#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ ++#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ ++#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ ++#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ ++#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ ++#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ ++#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ ++#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ ++#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ ++#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ ++#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ ++#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ ++#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ ++#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ ++#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ ++#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ ++#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ ++#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +-#define wrfl() ((void)rd32(E1000_STATUS)) +- +-#define array_wr32(reg, offset, value) \ +- wr32((reg) + ((offset) << 2), (value)) +- +-#define array_rd32(reg, offset) \ +- (readl(hw->hw_addr + reg + ((offset) << 2))) ++/* Filtering Registers */ ++#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ ++#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ ++#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ ++#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ ++#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ ++#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ ++#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ ++ ++#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ ++#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ ++#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ ++#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ ++#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ ++/* Tx Desc plane TC Rate-scheduler config */ ++#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) ++/* Tx Packet plane TC Rate-Scheduler Config */ ++#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) ++/* Rx Packet plane TC Rate-Scheduler Config */ ++#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) ++/* Tx Desc Plane TC Rate-Scheduler Status */ ++#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) ++/* Tx Desc Plane TC Rate-Scheduler MMW 
*/ ++#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) ++/* Tx Packet plane TC Rate-Scheduler Status */ ++#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) ++/* Tx Packet plane TC Rate-scheduler MMW */ ++#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) ++/* Rx Packet plane TC Rate-Scheduler Status */ ++#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) ++/* Rx Packet plane TC Rate-Scheduler MMW */ ++#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) ++/* Tx Desc plane VM Rate-Scheduler MMW*/ ++#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) ++/* Tx BCN Rate-Scheduler MMW */ ++#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) ++#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ ++#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ ++#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ ++#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ ++#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ ++#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ ++#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ ++#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ ++#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ ++#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ ++#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ ++#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ ++#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ ++#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + + /* DMA Coalescing registers */ ++#define E1000_DMACR 0x02508 /* Control Register */ ++#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ ++#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ ++#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ ++#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ ++#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ + #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +-/* Energy Efficient Ethernet "EEE" register */ +-#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +-#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +-#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +-#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +-#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +-#define E1000_MMDAC 13 /* MMD Access Control */ +-#define E1000_MMDAAD 14 /* MMD Access Address/Data */ ++/* PCIe Parity Status Register */ ++#define E1000_PCIEERRSTS 0x05BA8 + +-/* Thermal Sensor Register */ ++#define E1000_PROXYS 0x5F64 /* Proxying Status */ ++#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ ++/* Thermal sensor configuration and status registers */ ++#define E1000_THMJT 0x08100 /* Junction Temperature */ ++#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ ++#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ ++#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ + #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + ++/* Energy Efficient Ethernet "EEE" registers */ ++#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ ++#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ ++#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ ++#define E1000_EEE_SU 0x0E34 /* EEE Setup */ ++#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ ++#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ ++ + /* OS2BMC Registers */ + #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ + #define E1000_B2OGPRC 
0x04158 /* BMC2OS packets received by host */ + #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ + #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +-#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +-#define E1000_I210_FLMNGCTL 0x12038 +-#define E1000_I210_FLMNGDATA 0x1203C +-#define E1000_I210_FLMNGCNT 0x12040 +- +-#define E1000_I210_FLSWCTL 0x12048 +-#define E1000_I210_FLSWDATA 0x1204C +-#define E1000_I210_FLSWCNT 0x12050 +- +-#define E1000_I210_FLA 0x1201C +- +-#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +-#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ +- +-#define E1000_REMOVED(h) unlikely(!(h)) +- + #endif +diff -Nu a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h +--- a/drivers/net/ethernet/intel/igb/igb.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,107 +1,149 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + /* Linux PRO/1000 Ethernet Driver main header file */ + + #ifndef _IGB_H_ + #define _IGB_H_ + +-#include "e1000_mac.h" ++#include ++ ++#ifndef IGB_NO_LRO ++#include ++#endif ++ ++#include ++#include ++#include ++ ++#ifdef SIOCETHTOOL ++#include ++#endif ++ ++struct igb_adapter; ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++#define IGB_DCA ++#endif ++#ifdef IGB_DCA ++#include ++#endif ++ ++#include "kcompat.h" ++ ++#ifdef HAVE_SCTP ++#include ++#endif ++ ++#include "e1000_api.h" + #include "e1000_82575.h" ++#include "e1000_manage.h" ++#include "e1000_mbx.h" ++ ++#define IGB_ERR(args...) 
pr_err(KERN_ERR "igb: " args) + ++#define PFX "igb: " ++#define DPRINTK(nlevel, klevel, fmt, args...) \ ++ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ ++ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ ++ __func__ , ## args)) ++ ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#include ++#else + #include ++#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ + #include + #include +-#include +-#include ++#endif /* HAVE_PTP_1588_CLOCK */ ++ ++#ifdef HAVE_I2C_SUPPORT + #include + #include +-#include +-#include +- +-struct igb_adapter; +- +-#define E1000_PCS_CFG_IGN_SD 1 ++#endif /* HAVE_I2C_SUPPORT */ + + /* Interrupt defines */ +-#define IGB_START_ITR 648 /* ~6000 ints/sec */ +-#define IGB_4K_ITR 980 +-#define IGB_20K_ITR 196 +-#define IGB_70K_ITR 56 ++#define IGB_START_ITR 648 /* ~6000 ints/sec */ ++#define IGB_4K_ITR 980 ++#define IGB_20K_ITR 196 ++#define IGB_70K_ITR 56 ++ ++/* Interrupt modes, as used by the IntMode paramter */ ++#define IGB_INT_MODE_LEGACY 0 ++#define IGB_INT_MODE_MSI 1 ++#define IGB_INT_MODE_MSIX 2 + + /* TX/RX descriptor defines */ +-#define IGB_DEFAULT_TXD 256 +-#define IGB_DEFAULT_TX_WORK 128 +-#define IGB_MIN_TXD 80 +-#define IGB_MAX_TXD 4096 +- +-#define IGB_DEFAULT_RXD 256 +-#define IGB_MIN_RXD 80 +-#define IGB_MAX_RXD 4096 +- +-#define IGB_DEFAULT_ITR 3 /* dynamic */ +-#define IGB_MAX_ITR_USECS 10000 +-#define IGB_MIN_ITR_USECS 10 +-#define NON_Q_VECTORS 1 +-#define MAX_Q_VECTORS 8 +-#define MAX_MSIX_ENTRIES 10 ++#define IGB_DEFAULT_TXD 256 ++#define IGB_DEFAULT_TX_WORK 128 ++#define IGB_MIN_TXD 80 ++#define IGB_MAX_TXD 4096 ++ ++#define IGB_DEFAULT_RXD 256 ++#define IGB_MIN_RXD 80 ++#define IGB_MAX_RXD 4096 ++ ++#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */ ++#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */ ++ ++#define NON_Q_VECTORS 1 ++#define MAX_Q_VECTORS 10 + + /* Transmit and receive queues */ +-#define IGB_MAX_RX_QUEUES 8 +-#define IGB_MAX_RX_QUEUES_82575 4 +-#define IGB_MAX_RX_QUEUES_I211 2 +-#define IGB_MAX_TX_QUEUES 8 +-#define IGB_MAX_VF_MC_ENTRIES 30 +-#define IGB_MAX_VF_FUNCTIONS 8 +-#define IGB_MAX_VFTA_ENTRIES 128 +-#define IGB_82576_VF_DEV_ID 0x10CA +-#define IGB_I350_VF_DEV_ID 0x1520 +- +-/* NVM version defines */ +-#define IGB_MAJOR_MASK 0xF000 +-#define IGB_MINOR_MASK 0x0FF0 +-#define IGB_BUILD_MASK 0x000F +-#define IGB_COMB_VER_MASK 0x00FF +-#define IGB_MAJOR_SHIFT 12 +-#define IGB_MINOR_SHIFT 4 +-#define IGB_COMB_VER_SHFT 8 +-#define IGB_NVM_VER_INVALID 0xFFFF +-#define IGB_ETRACK_SHIFT 16 +-#define NVM_ETRACK_WORD 0x0042 +-#define NVM_COMB_VER_OFF 0x0083 +-#define NVM_COMB_VER_PTR 0x003d ++#define IGB_MAX_RX_QUEUES 16 ++#define IGB_MAX_RX_QUEUES_82575 4 ++#define IGB_MAX_RX_QUEUES_I211 2 ++#define IGB_MAX_TX_QUEUES 16 ++ ++#define IGB_MAX_VF_MC_ENTRIES 30 ++#define IGB_MAX_VF_FUNCTIONS 8 ++#define IGB_82576_VF_DEV_ID 0x10CA ++#define IGB_I350_VF_DEV_ID 0x1520 ++#define IGB_MAX_UTA_ENTRIES 128 ++#define MAX_EMULATION_MAC_ADDRS 16 ++#define OUI_LEN 3 ++#define IGB_MAX_VMDQ_QUEUES 8 + + struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; ++ u16 default_vf_vlan_id; + u16 vlans_enabled; ++ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN]; ++ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES]; + u32 flags; + unsigned long last_nack; ++#ifdef IFLA_VF_MAX + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 tx_rate; ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + bool spoofchk_enabled; ++#endif ++#endif + }; + + #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +@@ -125,31 +167,97 @@ + #define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) + #define IGB_TX_HTHRESH 1 + #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ +- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ +- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) ++ adapter->msix_entries) ? 1 : 4) + + /* this is the size past which hardware will drop packets when setting LPE=0 */ + #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + ++/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we ++ * reserve 2 more, and skb_shared_info adds an additional 384 more, ++ * this adds roughly 448 bytes of extra data meaning the smallest ++ * allocation we could have is 1K. ++ * i.e. RXBUFFER_512 --> size-1024 slab ++ */ + /* Supported Rx Buffer Sizes */ +-#define IGB_RXBUFFER_256 256 +-#define IGB_RXBUFFER_2048 2048 +-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 ++#define IGB_RXBUFFER_256 256 ++#define IGB_RXBUFFER_2048 2048 ++#define IGB_RXBUFFER_16384 16384 ++#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 ++#if MAX_SKB_FRAGS < 8 ++#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024) ++#else ++#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 ++#endif ++ ++ ++/* Packet Buffer allocations */ ++#define IGB_PBA_BYTES_SHIFT 0xA ++#define IGB_TX_HEAD_ADDR_SHIFT 7 ++#define IGB_PBA_TX_MASK 0xFFFF0000 ++ ++#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */ + + /* How many Rx Buffers do we bundle into one write to the hardware ? */ +-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ ++#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +-#define AUTO_ALL_MODES 0 +-#define IGB_EEPROM_APME 0x0400 ++#define IGB_EEPROM_APME 0x0400 ++#define AUTO_ALL_MODES 0 + + #ifndef IGB_MASTER_SLAVE + /* Switch to override PHY master/slave setting */ + #define IGB_MASTER_SLAVE e1000_ms_hw_default + #endif + +-#define IGB_MNG_VLAN_NONE -1 ++#define IGB_MNG_VLAN_NONE -1 ++ ++#ifndef IGB_NO_LRO ++#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/ ++struct igb_lro_stats { ++ u32 flushed; ++ u32 coal; ++}; ++ ++/* ++ * igb_lro_header - header format to be aggregated by LRO ++ * @iph: IP header without options ++ * @tcp: TCP header ++ * @ts: Optional TCP timestamp data in TCP options ++ * ++ * This structure relies on the check above that verifies that the header ++ * is IPv4 and does not contain any options. 
++ */ ++struct igb_lrohdr { ++ struct iphdr iph; ++ struct tcphdr th; ++ __be32 ts[0]; ++}; ++ ++struct igb_lro_list { ++ struct sk_buff_head active; ++ struct igb_lro_stats stats; ++}; ++ ++#endif /* IGB_NO_LRO */ ++struct igb_cb { ++#ifndef IGB_NO_LRO ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ union { /* Union defining head/tail partner */ ++ struct sk_buff *head; ++ struct sk_buff *tail; ++ }; ++#endif ++ __be32 tsecr; /* timestamp echo response */ ++ u32 tsval; /* timestamp value in host order */ ++ u32 next_seq; /* next expected sequence number */ ++ u16 free; /* 65521 minus total size */ ++ u16 mss; /* size of data portion of packet */ ++ u16 append_cnt; /* number of skb's appended */ ++#endif /* IGB_NO_LRO */ ++#ifdef HAVE_VLAN_RX_REGISTER ++ u16 vid; /* VLAN tag */ ++#endif ++}; ++#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb) + + enum igb_tx_flags { + /* cmd_type flags */ +@@ -163,30 +271,28 @@ + }; + + /* VLAN info */ +-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +-#define IGB_TX_FLAGS_VLAN_SHIFT 16 ++#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 ++#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +-/* The largest size we can write to the descriptor is 65535. In order to ++/* ++ * The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. + */ +-#define IGB_MAX_TXD_PWR 15 ++#define IGB_MAX_TXD_PWR 15 + #define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + + /* Tx Descriptors needed, worst case */ +-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +-#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +- +-/* EEPROM byte offsets */ +-#define IGB_SFF_8472_SWAP 0x5C +-#define IGB_SFF_8472_COMP 0x5E +- +-/* Bitmasks */ +-#define IGB_SFF_ADDRESSING_MODE 0x4 +-#define IGB_SFF_8472_UNSUP 0x00 ++#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) ++#ifndef MAX_SKB_FRAGS ++#define DESC_NEEDED 4 ++#elif (MAX_SKB_FRAGS < 16) ++#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) ++#else ++#define DESC_NEEDED (MAX_SKB_FRAGS + 4) ++#endif + + /* wrapper around a pointer to a socket buffer, +- * so a DMA handle can be stored along with the buffer +- */ ++ * so a DMA handle can be stored along with the buffer */ + struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; +@@ -202,15 +308,18 @@ + + struct igb_rx_buffer { + dma_addr_t dma; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ struct sk_buff *skb; ++#else + struct page *page; +- unsigned int page_offset; ++ u32 page_offset; ++#endif + }; + + struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; +- u64 restart_queue2; + }; + + struct igb_rx_queue_stats { +@@ -221,6 +330,18 @@ + u64 alloc_failed; + }; + ++struct igb_rx_packet_stats { ++ u64 ipv4_packets; /* IPv4 headers processed */ ++ u64 ipv4e_packets; /* IPv4E headers with extensions processed */ ++ u64 ipv6_packets; /* IPv6 headers processed */ ++ u64 ipv6e_packets; /* IPv6E headers with extensions processed */ ++ u64 tcp_packets; /* TCP headers processed */ ++ u64 udp_packets; /* UDP headers processed */ ++ u64 sctp_packets; /* SCTP headers processed */ ++ u64 nfs_packets; /* NFS headers processe */ ++ u64 other_packets; ++}; ++ + struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ +@@ -231,22 +352,22 @@ + }; + + struct igb_ring { +- struct igb_q_vector *q_vector; /* backlink to q_vector */ +- struct net_device *netdev; /* back 
pointer to net_device */ +- struct device *dev; /* device pointer for dma mapping */ ++ struct igb_q_vector *q_vector; /* backlink to q_vector */ ++ struct net_device *netdev; /* back pointer to net_device */ ++ struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; +- void *desc; /* descriptor ring memory */ +- unsigned long flags; /* ring specific flags */ +- void __iomem *tail; /* pointer to ring tail register */ ++ void *desc; /* descriptor ring memory */ ++ unsigned long flags; /* ring specific flags */ ++ void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ +- unsigned int size; /* length of desc. ring in bytes */ ++ unsigned int size; /* length of desc. ring in bytes */ + +- u16 count; /* number of desc. in the ring */ +- u8 queue_index; /* logical index of the ring*/ +- u8 reg_idx; /* physical index of the ring */ ++ u16 count; /* number of desc. in the ring */ ++ u8 queue_index; /* logical index of the ring*/ ++ u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; +@@ -257,16 +378,22 @@ + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; +- struct u64_stats_sync tx_syncp; +- struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { +- struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; +- struct u64_stats_sync rx_syncp; ++ struct igb_rx_packet_stats pkt_stats; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ u16 rx_buffer_len; ++#else ++ struct sk_buff *skb; ++#endif + }; + }; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ struct net_device *vmdq_netdev; ++ int vqueue_index; /* queue index for virtual netdev */ ++#endif + } ____cacheline_internodealigned_in_smp; + + struct igb_q_vector { +@@ -281,29 +408,57 @@ + struct igb_ring_container rx, tx; + + struct napi_struct napi; ++#ifndef IGB_NO_LRO ++ struct igb_lro_list lrolist; /* LRO list for queue vector*/ ++#endif + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; ++#ifndef HAVE_NETDEV_NAPI_LIST ++ struct net_device poll_dev; ++#endif + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; + }; + + enum e1000_ring_flags_t { ++#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES) ++ IGB_RING_FLAG_RX_CSUM, ++#endif + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, +- IGB_RING_FLAG_TX_DETECT_HANG ++ IGB_RING_FLAG_TX_DETECT_HANG, + }; + ++struct igb_mac_addr { ++ u8 addr[ETH_ALEN]; ++ u16 queue; ++ u16 state; /* bitmask */ ++}; ++#define IGB_MAC_STATE_DEFAULT 0x1 ++#define IGB_MAC_STATE_MODIFIED 0x2 ++#define IGB_MAC_STATE_IN_USE 0x4 ++ + #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +-#define IGB_RX_DESC(R, i) \ ++#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +-#define IGB_TX_DESC(R, i) \ ++#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +-#define IGB_TX_CTXTDESC(R, i) \ ++#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++#define netdev_ring(ring) \ ++ ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev)) ++#define ring_queue_index(ring) \ ++ ((ring->vmdq_netdev ? 
ring->vqueue_index : ring->queue_index)) ++#else ++#define netdev_ring(ring) (ring->netdev) ++#define ring_queue_index(ring) (ring->queue_index) ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ + /* igb_test_staterr - tests bits within Rx descriptor status and error fields */ + static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +@@ -312,16 +467,27 @@ + } + + /* igb_desc_unused - calculate if we have unused descriptors */ +-static inline int igb_desc_unused(struct igb_ring *ring) ++static inline u16 igb_desc_unused(const struct igb_ring *ring) + { +- if (ring->next_to_clean > ring->next_to_use) +- return ring->next_to_clean - ring->next_to_use - 1; ++ u16 ntc = ring->next_to_clean; ++ u16 ntu = ring->next_to_use; + +- return ring->count + ring->next_to_clean - ring->next_to_use - 1; ++ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; + } + +-#ifdef CONFIG_IGB_HWMON ++#ifdef CONFIG_BQL ++static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) ++{ ++ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); ++} ++#endif /* CONFIG_BQL */ + ++struct igb_therm_proc_data { ++ struct e1000_hw *hw; ++ struct e1000_thermal_diode_data *sensor_data; ++}; ++ ++#ifdef IGB_HWMON + #define IGB_HWMON_TYPE_LOC 0 + #define IGB_HWMON_TYPE_TEMP 1 + #define IGB_HWMON_TYPE_CAUTION 2 +@@ -335,69 +501,79 @@ + }; + + struct hwmon_buff { +- struct attribute_group group; +- const struct attribute_group *groups[2]; +- struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; +- struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; ++ struct device *device; ++ struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; + }; +-#endif +- ++#endif /* IGB_HWMON */ ++#ifdef ETHTOOL_GRXFHINDIR + #define IGB_RETA_SIZE 128 ++#endif /* ETHTOOL_GRXFHINDIR */ + + /* board specific private data structure */ + struct igb_adapter { ++#ifdef HAVE_VLAN_RX_REGISTER ++ /* vlgrp must be first member of structure */ ++ struct vlan_group *vlgrp; ++#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +- ++#endif + struct net_device *netdev; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; +- struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; ++ struct msix_entry *msix_entries; + +- /* Interrupt Throttle Rate */ +- u32 rx_itr_setting; +- u32 tx_itr_setting; +- u16 tx_itr; +- u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; +- struct igb_ring *tx_ring[16]; ++ struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; +- struct igb_ring *rx_ring[16]; +- +- u32 max_frame_size; +- u32 min_frame_size; ++ struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; ++ struct timer_list dma_err_timer; + struct timer_list phy_info_timer; +- + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; ++ u8 port_num; ++ ++ u8 __iomem *io_addr; /* for iounmap */ ++ ++ /* Interrupt Throttle Rate */ ++ u32 rx_itr_setting; ++ u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; ++ struct work_struct dma_err_task; + bool fc_autoneg; + u8 tx_timeout_factor; +- struct timer_list blink_timer; +- unsigned long led_status; ++ ++#ifdef DEBUG ++ bool tx_hang_detected; ++ bool disable_hw_reset; ++#endif ++ u32 max_frame_size; + + /* OS defined structs */ + struct pci_dev *pdev; +- +- spinlock_t stats64_lock; +- struct rtnl_link_stats64 stats64; ++#ifndef HAVE_NETDEV_STATS_IN_NETDEV ++ struct 
net_device_stats net_stats; ++#endif ++#ifndef IGB_NO_LRO ++ struct igb_lro_stats lro_stats; ++#endif + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; +@@ -405,9 +581,11 @@ + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + ++#ifdef ETHTOOL_TEST + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; ++#endif + + int msg_enable; + +@@ -416,15 +594,48 @@ + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ ++ u32 *config_space; + u16 tx_ring_count; + u16 rx_ring_count; +- unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; ++#ifdef IFLA_VF_MAX + int vf_rate_link_speed; ++#endif ++ u32 lli_port; ++ u32 lli_size; ++ unsigned int vfs_allocated_count; ++ /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */ ++ bool mdd; ++ int int_mode; + u32 rss_queues; ++ u32 tss_queues; ++ u32 vmdq_pools; ++ char fw_version[32]; + u32 wvbr; ++ struct igb_mac_addr *mac_table; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES]; ++#endif ++ int vferr_refcount; ++ int dmac; + u32 *shadow_vfta; + ++ /* External Thermal Sensor support flag */ ++ bool ets; ++#ifdef IGB_HWMON ++ struct hwmon_buff igb_hwmon_buff; ++#else /* IGB_HWMON */ ++#ifdef IGB_PROCFS ++ struct proc_dir_entry *eth_dir; ++ struct proc_dir_entry *info_dir; ++ struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS]; ++ struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS]; ++ bool old_lsc; ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ ++ u32 etrack_id; ++ ++#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; +@@ -439,39 +650,57 @@ + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; ++#endif /* HAVE_PTP_1588_CLOCK */ + +- char fw_version[32]; +-#ifdef CONFIG_IGB_HWMON +- struct hwmon_buff *igb_hwmon_buff; +- bool ets; +-#endif ++#ifdef HAVE_I2C_SUPPORT + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; +- u32 rss_indir_tbl_init; +- u8 rss_indir_tbl[IGB_RETA_SIZE]; +- ++#endif /* HAVE_I2C_SUPPORT */ + unsigned long link_check_timeout; ++ ++ int devrc; ++ + int copper_tries; +- struct e1000_info ei; + u16 eee_advert; ++#ifdef ETHTOOL_GRXFHINDIR ++ u32 rss_indir_tbl_init; ++ u8 rss_indir_tbl[IGB_RETA_SIZE]; ++#endif ++}; ++ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++struct igb_vmdq_adapter { ++#ifdef HAVE_VLAN_RX_REGISTER ++ /* vlgrp must be first member of structure */ ++ struct vlan_group *vlgrp; ++#else ++ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; ++#endif ++ struct igb_adapter *real_adapter; ++ struct net_device *vnetdev; ++ struct net_device_stats net_stats; ++ struct igb_ring *tx_ring; ++ struct igb_ring *rx_ring; + }; ++#endif + + #define IGB_FLAG_HAS_MSI (1 << 0) + #define IGB_FLAG_DCA_ENABLED (1 << 1) +-#define IGB_FLAG_QUAD_PORT_A (1 << 2) +-#define IGB_FLAG_QUEUE_PAIRS (1 << 3) +-#define IGB_FLAG_DMAC (1 << 4) +-#define IGB_FLAG_PTP (1 << 5) +-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) +-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) +-#define IGB_FLAG_WOL_SUPPORTED (1 << 8) +-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) +-#define IGB_FLAG_MEDIA_RESET (1 << 10) +-#define IGB_FLAG_MAS_CAPABLE (1 << 11) +-#define IGB_FLAG_MAS_ENABLE (1 << 12) +-#define IGB_FLAG_HAS_MSIX (1 << 13) +-#define IGB_FLAG_EEE (1 << 14) ++#define IGB_FLAG_LLI_PUSH (1 << 2) ++#define IGB_FLAG_QUAD_PORT_A (1 << 3) ++#define IGB_FLAG_QUEUE_PAIRS (1 
<< 4) ++#define IGB_FLAG_EEE (1 << 5) ++#define IGB_FLAG_DMAC (1 << 6) ++#define IGB_FLAG_DETECT_BAD_DMA (1 << 7) ++#define IGB_FLAG_PTP (1 << 8) ++#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9) ++#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10) ++#define IGB_FLAG_WOL_SUPPORTED (1 << 11) ++#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12) ++#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13) ++#define IGB_FLAG_MEDIA_RESET (1 << 14) ++#define IGB_FLAG_MAS_ENABLE (1 << 15) + + /* Media Auto Sense */ + #define IGB_MAS_ENABLE_0 0X0001 +@@ -479,13 +708,63 @@ + #define IGB_MAS_ENABLE_2 0X0004 + #define IGB_MAS_ENABLE_3 0X0008 + ++#define IGB_MIN_TXPBSIZE 20408 ++#define IGB_TX_BUF_4096 4096 ++ ++#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ ++ + /* DMA Coalescing defines */ +-#define IGB_MIN_TXPBSIZE 20408 +-#define IGB_TX_BUF_4096 4096 +-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ ++#define IGB_DMAC_DISABLE 0 ++#define IGB_DMAC_MIN 250 ++#define IGB_DMAC_500 500 ++#define IGB_DMAC_EN_DEFAULT 1000 ++#define IGB_DMAC_2000 2000 ++#define IGB_DMAC_3000 3000 ++#define IGB_DMAC_4000 4000 ++#define IGB_DMAC_5000 5000 ++#define IGB_DMAC_6000 6000 ++#define IGB_DMAC_7000 7000 ++#define IGB_DMAC_8000 8000 ++#define IGB_DMAC_9000 9000 ++#define IGB_DMAC_MAX 10000 ++ ++#define IGB_82576_TSYNC_SHIFT 19 ++#define IGB_82580_TSYNC_SHIFT 24 ++#define IGB_TS_HDR_LEN 16 ++ ++/* CEM Support */ ++#define FW_HDR_LEN 0x4 ++#define FW_CMD_DRV_INFO 0xDD ++#define FW_CMD_DRV_INFO_LEN 0x5 ++#define FW_CMD_RESERVED 0X0 ++#define FW_RESP_SUCCESS 0x1 ++#define FW_UNUSED_VER 0x0 ++#define FW_MAX_RETRIES 3 ++#define FW_STATUS_SUCCESS 0x1 ++#define FW_FAMILY_DRV_VER 0Xffffffff ++ ++#define IGB_MAX_LINK_TRIES 20 ++ ++struct e1000_fw_hdr { ++ u8 cmd; ++ u8 buf_len; ++ union { ++ u8 cmd_resv; ++ u8 ret_status; ++ } cmd_or_resp; ++ u8 checksum; ++}; ++ ++#pragma pack(push, 1) ++struct e1000_fw_drv_info { ++ struct e1000_fw_hdr hdr; ++ u8 port_num; ++ u32 drv_version; ++ u16 pad; /* end spacing to ensure length is mult. of dword */ ++ u8 pad2; /* end spacing to ensure length is mult. 
of dword2 */ ++}; ++#pragma pack(pop) + +-#define IGB_82576_TSYNC_SHIFT 19 +-#define IGB_TS_HDR_LEN 16 + enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, +@@ -493,85 +772,82 @@ + __IGB_PTP_TX_IN_PROGRESS, + }; + +-enum igb_boards { +- board_82575, +-}; +- + extern char igb_driver_name[]; + extern char igb_driver_version[]; + +-int igb_up(struct igb_adapter *); +-void igb_down(struct igb_adapter *); +-void igb_reinit_locked(struct igb_adapter *); +-void igb_reset(struct igb_adapter *); +-int igb_reinit_queues(struct igb_adapter *); +-void igb_write_rss_indir_tbl(struct igb_adapter *); +-int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +-int igb_setup_tx_resources(struct igb_ring *); +-int igb_setup_rx_resources(struct igb_ring *); +-void igb_free_tx_resources(struct igb_ring *); +-void igb_free_rx_resources(struct igb_ring *); +-void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +-void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +-void igb_setup_tctl(struct igb_adapter *); +-void igb_setup_rctl(struct igb_adapter *); +-netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); +-void igb_alloc_rx_buffers(struct igb_ring *, u16); +-void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); +-bool igb_has_link(struct igb_adapter *adapter); +-void igb_set_ethtool_ops(struct net_device *); +-void igb_power_up_link(struct igb_adapter *); +-void igb_set_fw_version(struct igb_adapter *); +-void igb_ptp_init(struct igb_adapter *adapter); +-void igb_ptp_stop(struct igb_adapter *adapter); +-void igb_ptp_reset(struct igb_adapter *adapter); +-void igb_ptp_rx_hang(struct igb_adapter *adapter); +-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, +- struct sk_buff *skb); ++extern int igb_open(struct net_device *netdev); ++extern int igb_close(struct net_device *netdev); ++extern int igb_up(struct igb_adapter *); ++extern void igb_down(struct igb_adapter *); ++extern void igb_reinit_locked(struct igb_adapter *); ++extern void igb_reset(struct igb_adapter *); ++extern int igb_reinit_queues(struct igb_adapter *); ++#ifdef ETHTOOL_SRXFHINDIR ++extern void igb_write_rss_indir_tbl(struct igb_adapter *); ++#endif ++extern int igb_set_spd_dplx(struct igb_adapter *, u16); ++extern int igb_setup_tx_resources(struct igb_ring *); ++extern int igb_setup_rx_resources(struct igb_ring *); ++extern void igb_free_tx_resources(struct igb_ring *); ++extern void igb_free_rx_resources(struct igb_ring *); ++extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); ++extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); ++extern void igb_setup_tctl(struct igb_adapter *); ++extern void igb_setup_rctl(struct igb_adapter *); ++extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); ++extern void igb_unmap_and_free_tx_resource(struct igb_ring *, ++ struct igb_tx_buffer *); ++extern void igb_alloc_rx_buffers(struct igb_ring *, u16); ++extern void igb_clean_rx_ring(struct igb_ring *); ++extern int igb_setup_queues(struct igb_adapter *adapter); ++extern void igb_update_stats(struct igb_adapter *); ++extern bool igb_has_link(struct igb_adapter *adapter); ++extern void igb_set_ethtool_ops(struct net_device *); ++extern void igb_check_options(struct igb_adapter *); ++extern void igb_power_up_link(struct igb_adapter *); 
++#ifdef HAVE_PTP_1588_CLOCK
++extern void igb_ptp_init(struct igb_adapter *adapter);
++extern void igb_ptp_stop(struct igb_adapter *adapter);
++extern void igb_ptp_reset(struct igb_adapter *adapter);
++extern void igb_ptp_tx_work(struct work_struct *work);
++extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
++extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
++extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
++				struct sk_buff *skb);
++extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
++				unsigned char *va,
++				struct sk_buff *skb);
++extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
++				  struct ifreq *ifr, int cmd);
++#endif /* HAVE_PTP_1588_CLOCK */
++#ifdef ETHTOOL_OPS_COMPAT
++extern int ethtool_ioctl(struct ifreq *);
++#endif
++extern int igb_write_mc_addr_list(struct net_device *netdev);
++extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
++extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
++extern int igb_available_rars(struct igb_adapter *adapter);
++extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
++extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
++extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
++#ifndef HAVE_VLAN_RX_REGISTER
++extern void igb_vlan_mode(struct net_device *, u32);
++#endif
++
++#define E1000_PCS_CFG_IGN_SD 1
++
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+-void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+-#ifdef CONFIG_IGB_HWMON
++#ifdef IGB_HWMON
+ void igb_sysfs_exit(struct igb_adapter *adapter);
+ int igb_sysfs_init(struct igb_adapter *adapter);
+-#endif
+-static inline s32 igb_reset_phy(struct e1000_hw *hw)
+-{
+-	if (hw->phy.ops.reset)
+-		return hw->phy.ops.reset(hw);
+-
+-	return 0;
+-}
+-
+-static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+-{
+-	if (hw->phy.ops.read_reg)
+-		return hw->phy.ops.read_reg(hw, offset, data);
+-
+-	return 0;
+-}
+-
+-static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+-{
+-	if (hw->phy.ops.write_reg)
+-		return hw->phy.ops.write_reg(hw, offset, data);
+-
+-	return 0;
+-}
+-
+-static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+-{
+-	if (hw->phy.ops.get_phy_info)
+-		return hw->phy.ops.get_phy_info(hw);
+-
+-	return 0;
+-}
+-
+-static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+-{
+-	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+-}
++#else
++#ifdef IGB_PROCFS
++int igb_procfs_init(struct igb_adapter *adapter);
++void igb_procfs_exit(struct igb_adapter *adapter);
++int igb_procfs_topdir_init(void);
++void igb_procfs_topdir_exit(void);
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
+
+ #endif /* _IGB_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_debugfs.c b/drivers/net/ethernet/intel/igb/igb_debugfs.c
+--- a/drivers/net/ethernet/intel/igb/igb_debugfs.c	1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_debugfs.c	2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,26 @@
++/*******************************************************************************
++
++  Intel(R) Gigabit Ethernet Linux driver
++  Copyright(c) 2007-2015 Intel Corporation.
++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "igb.h" ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,43 +1,50 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + /* ethtool support for igb */ + +-#include + #include +-#include +-#include +-#include +-#include ++#include ++ ++#ifdef SIOCETHTOOL + #include +-#include +-#include ++#ifdef CONFIG_PM_RUNTIME + #include ++#endif /* CONFIG_PM_RUNTIME */ + #include +-#include + + #include "igb.h" ++#include "igb_regtest.h" ++#include ++#ifdef ETHTOOL_GEEE ++#include ++#endif + ++#ifdef ETHTOOL_OPS_COMPAT ++#include "kcompat_ethtool.c" ++#endif ++#ifdef ETHTOOL_GSTATS + struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; +@@ -49,6 +56,7 @@ + .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ + } ++ + static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), +@@ -82,6 +90,10 @@ + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), ++#ifndef IGB_NO_LRO ++ IGB_STAT("lro_aggregated", lro_stats.coal), ++ IGB_STAT("lro_flushed", lro_stats.flushed), ++#endif /* IGB_LRO */ + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), +@@ -89,15 +101,18 @@ + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), ++#ifdef HAVE_PTP_1588_CLOCK + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), ++#endif /* HAVE_PTP_1588_CLOCK */ + }; + + #define IGB_NETDEV_STAT(_net_stat) { \ +- .stat_string = __stringify(_net_stat), \ +- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ +- .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ ++ .stat_string = #_net_stat, \ ++ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ ++ .stat_offset = offsetof(struct net_device_stats, _net_stat) \ + } ++ + static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), +@@ -110,15 +125,12 @@ + IGB_NETDEV_STAT(tx_heartbeat_errors) + }; + +-#define IGB_GLOBAL_STATS_LEN \ +- (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +-#define IGB_NETDEV_STATS_LEN \ +- (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) ++#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats) ++#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats) + #define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) +- +-#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +- ++#define IGB_TX_QUEUE_STATS_LEN \ ++ (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) + #define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ +@@ -127,23 +139,23 @@ + #define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + ++#endif /* ETHTOOL_GSTATS */ ++#ifdef ETHTOOL_TEST + static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" + }; ++ + #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) ++#endif /* ETHTOOL_TEST */ + + static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) + 
{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; +- u32 speed; + +- status = rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | +@@ -165,80 +177,85 @@ + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; ++ + } else { +- ecmd->supported = (SUPPORTED_FIBRE | +- SUPPORTED_1000baseKX_Full | ++ ecmd->supported = (SUPPORTED_1000baseT_Full | ++ SUPPORTED_100baseT_Full | ++ SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause); +- ecmd->advertising = (ADVERTISED_FIBRE | +- ADVERTISED_1000baseKX_Full); +- if (hw->mac.type == e1000_i354) { +- if ((hw->device_id == +- E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && +- !(status & E1000_STATUS_2P5_SKU_OVER)) { +- ecmd->supported |= SUPPORTED_2500baseX_Full; +- ecmd->supported &= +- ~SUPPORTED_1000baseKX_Full; +- ecmd->advertising |= ADVERTISED_2500baseX_Full; +- ecmd->advertising &= +- ~ADVERTISED_1000baseKX_Full; +- } +- } +- if (eth_flags->e100_base_fx) { +- ecmd->supported |= SUPPORTED_100baseT_Full; +- ecmd->advertising |= ADVERTISED_100baseT_Full; ++ if (hw->mac.type == e1000_i354) ++ ecmd->supported |= (SUPPORTED_2500baseX_Full); ++ ++ ecmd->advertising = ADVERTISED_FIBRE; ++ ++ switch (adapter->link_speed) { ++ case SPEED_2500: ++ ecmd->advertising = ADVERTISED_2500baseX_Full; ++ break; ++ case SPEED_1000: ++ ecmd->advertising = ADVERTISED_1000baseT_Full; ++ break; ++ case SPEED_100: ++ ecmd->advertising = ADVERTISED_100baseT_Full; ++ break; ++ default: ++ break; + } ++ + if (hw->mac.autoneg == 1) + ecmd->advertising |= ADVERTISED_Autoneg; + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } ++ + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + +- switch (hw->fc.requested_mode) { +- case e1000_fc_full: ++ if (hw->fc.requested_mode == e1000_fc_full) + ecmd->advertising |= ADVERTISED_Pause; +- break; +- case e1000_fc_rx_pause: ++ else if (hw->fc.requested_mode == e1000_fc_rx_pause) + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); +- break; +- case e1000_fc_tx_pause: ++ else if (hw->fc.requested_mode == e1000_fc_tx_pause) + ecmd->advertising |= ADVERTISED_Asym_Pause; +- break; +- default: ++ else + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); +- } ++ ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ + if (status & E1000_STATUS_LU) { +- if ((status & E1000_STATUS_2P5_SKU) && +- !(status & E1000_STATUS_2P5_SKU_OVER)) { +- speed = SPEED_2500; +- } else if (status & E1000_STATUS_SPEED_1000) { +- speed = SPEED_1000; +- } else if (status & E1000_STATUS_SPEED_100) { +- speed = SPEED_100; +- } else { +- speed = SPEED_10; +- } ++ if ((hw->mac.type == e1000_i354) && ++ (status & E1000_STATUS_2P5_SKU) && ++ !(status & E1000_STATUS_2P5_SKU_OVER)) ++ ethtool_cmd_speed_set(ecmd, SPEED_2500); ++ else if (status & E1000_STATUS_SPEED_1000) ++ ethtool_cmd_speed_set(ecmd, SPEED_1000); ++ else if (status & E1000_STATUS_SPEED_100) ++ ethtool_cmd_speed_set(ecmd, SPEED_100); ++ else ++ ethtool_cmd_speed_set(ecmd, SPEED_10); ++ + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; ++ + } else { +- speed = SPEED_UNKNOWN; +- ecmd->duplex = DUPLEX_UNKNOWN; ++ 
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); ++ ecmd->duplex = -1; + } +- ethtool_cmd_speed_set(ecmd, speed); ++ + if ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; ++#ifdef ETH_TP_MDI_X + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) +@@ -247,11 +264,14 @@ + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + ++#ifdef ETH_TP_MDI_AUTO + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + ++#endif ++#endif /* ETH_TP_MDI_X */ + return 0; + } + +@@ -260,16 +280,26 @@ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + ++ if (ecmd->duplex == DUPLEX_HALF) { ++ if (!hw->dev_spec._82575.eee_disable) ++ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n"); ++ hw->dev_spec._82575.eee_disable = true; ++ } else { ++ if (hw->dev_spec._82575.eee_disable) ++ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n"); ++ hw->dev_spec._82575.eee_disable = false; ++ } ++ + /* When SoL/IDER sessions are active, autoneg/speed/duplex +- * cannot be changed +- */ +- if (igb_check_reset_block(hw)) { +- dev_err(&adapter->pdev->dev, +- "Cannot change link characteristics when SoL/IDER is active.\n"); ++ * cannot be changed */ ++ if (e1000_check_reset_block(hw)) { ++ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + +- /* MDI setting is only allowed when autoneg enabled because ++#ifdef ETH_TP_MDI_AUTO ++ /* ++ * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ +@@ -284,6 +314,7 @@ + } + } + ++#endif /* ETH_TP_MDI_AUTO */ + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + +@@ -318,14 +349,13 @@ + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { +- u32 speed = ethtool_cmd_speed(ecmd); +- /* calling this overrides forced MDI setting */ +- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { ++ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + ++#ifdef ETH_TP_MDI_AUTO + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped +@@ -337,6 +367,7 @@ + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + ++#endif /* ETH_TP_MDI_AUTO */ + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); +@@ -353,7 +384,8 @@ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + +- /* If the link is not reported up to netdev, interrupts are disabled, ++ /* ++ * If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. 
Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly +@@ -391,10 +423,6 @@ + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + +- /* 100basefx does not support setting link flow control */ +- if (hw->dev_spec._82575.eth_flags.e100_base_fx) +- return -EINVAL; +- + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) +@@ -420,10 +448,18 @@ + + hw->fc.current_mode = hw->fc.requested_mode; + +- retval = ((hw->phy.media_type == e1000_media_type_copper) ? +- igb_force_mac_fc(hw) : igb_setup_link(hw)); ++ if (hw->phy.media_type == e1000_media_type_fiber) { ++ retval = hw->mac.ops.setup_link(hw); ++ /* implicit goto out */ ++ } else { ++ retval = igb_e1000_force_mac_fc(hw); ++ if (retval) ++ goto out; ++ e1000_set_fc_watermarks_generic(hw); ++ } + } + ++out: + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; + } +@@ -442,7 +478,7 @@ + + static int igb_get_regs_len(struct net_device *netdev) + { +-#define IGB_REGS_LEN 739 ++#define IGB_REGS_LEN 555 + return IGB_REGS_LEN * sizeof(u32); + } + +@@ -459,80 +495,78 @@ + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ +- regs_buff[0] = rd32(E1000_CTRL); +- regs_buff[1] = rd32(E1000_STATUS); +- regs_buff[2] = rd32(E1000_CTRL_EXT); +- regs_buff[3] = rd32(E1000_MDIC); +- regs_buff[4] = rd32(E1000_SCTL); +- regs_buff[5] = rd32(E1000_CONNSW); +- regs_buff[6] = rd32(E1000_VET); +- regs_buff[7] = rd32(E1000_LEDCTL); +- regs_buff[8] = rd32(E1000_PBA); +- regs_buff[9] = rd32(E1000_PBS); +- regs_buff[10] = rd32(E1000_FRTIMER); +- regs_buff[11] = rd32(E1000_TCPTIMER); ++ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL); ++ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS); ++ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC); ++ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL); ++ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW); ++ regs_buff[6] = E1000_READ_REG(hw, E1000_VET); ++ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL); ++ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA); ++ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS); ++ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER); ++ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER); + + /* NVM Register */ +- regs_buff[12] = rd32(E1000_EECD); ++ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the +- * same but EICS does not clear on read +- */ +- regs_buff[13] = rd32(E1000_EICS); +- regs_buff[14] = rd32(E1000_EICS); +- regs_buff[15] = rd32(E1000_EIMS); +- regs_buff[16] = rd32(E1000_EIMC); +- regs_buff[17] = rd32(E1000_EIAC); +- regs_buff[18] = rd32(E1000_EIAM); ++ * same but EICS does not clear on read */ ++ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS); ++ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS); ++ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS); ++ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC); ++ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC); ++ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM); + /* Reading ICS for ICR because they read the +- * same but ICS does not clear on read +- */ +- regs_buff[19] = rd32(E1000_ICS); +- regs_buff[20] = rd32(E1000_ICS); +- regs_buff[21] = rd32(E1000_IMS); +- regs_buff[22] = rd32(E1000_IMC); +- regs_buff[23] = rd32(E1000_IAC); +- regs_buff[24] = rd32(E1000_IAM); +- regs_buff[25] = rd32(E1000_IMIRVP); ++ * same but ICS does not clear on read */ ++ regs_buff[19] = E1000_READ_REG(hw, 
E1000_ICS); ++ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS); ++ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS); ++ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC); ++ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC); ++ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM); ++ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP); + + /* Flow Control */ +- regs_buff[26] = rd32(E1000_FCAL); +- regs_buff[27] = rd32(E1000_FCAH); +- regs_buff[28] = rd32(E1000_FCTTV); +- regs_buff[29] = rd32(E1000_FCRTL); +- regs_buff[30] = rd32(E1000_FCRTH); +- regs_buff[31] = rd32(E1000_FCRTV); ++ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL); ++ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH); ++ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV); ++ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL); ++ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH); ++ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV); + + /* Receive */ +- regs_buff[32] = rd32(E1000_RCTL); +- regs_buff[33] = rd32(E1000_RXCSUM); +- regs_buff[34] = rd32(E1000_RLPML); +- regs_buff[35] = rd32(E1000_RFCTL); +- regs_buff[36] = rd32(E1000_MRQC); +- regs_buff[37] = rd32(E1000_VT_CTL); ++ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL); ++ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM); ++ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML); ++ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL); ++ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC); ++ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL); + + /* Transmit */ +- regs_buff[38] = rd32(E1000_TCTL); +- regs_buff[39] = rd32(E1000_TCTL_EXT); +- regs_buff[40] = rd32(E1000_TIPG); +- regs_buff[41] = rd32(E1000_DTXCTL); ++ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL); ++ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT); ++ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG); ++ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL); + + /* Wake Up */ +- regs_buff[42] = rd32(E1000_WUC); +- regs_buff[43] = rd32(E1000_WUFC); +- regs_buff[44] = rd32(E1000_WUS); +- regs_buff[45] = rd32(E1000_IPAV); +- regs_buff[46] = rd32(E1000_WUPL); ++ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC); ++ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC); ++ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS); ++ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV); ++ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL); + + /* MAC */ +- regs_buff[47] = rd32(E1000_PCS_CFG0); +- regs_buff[48] = rd32(E1000_PCS_LCTL); +- regs_buff[49] = rd32(E1000_PCS_LSTAT); +- regs_buff[50] = rd32(E1000_PCS_ANADV); +- regs_buff[51] = rd32(E1000_PCS_LPAB); +- regs_buff[52] = rd32(E1000_PCS_NPTX); +- regs_buff[53] = rd32(E1000_PCS_LPABNP); ++ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0); ++ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL); ++ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT); ++ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV); ++ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB); ++ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX); ++ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; +@@ -598,112 +632,75 @@ + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) +- regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); ++ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) +- regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); ++ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) +- regs_buff[129 + i] = rd32(E1000_RDBAL(i)); ++ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i)); + for (i = 0; i < 4; i++) +- regs_buff[133 + i] = 
rd32(E1000_RDBAH(i)); ++ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i)); + for (i = 0; i < 4; i++) +- regs_buff[137 + i] = rd32(E1000_RDLEN(i)); ++ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i)); + for (i = 0; i < 4; i++) +- regs_buff[141 + i] = rd32(E1000_RDH(i)); ++ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i)); + for (i = 0; i < 4; i++) +- regs_buff[145 + i] = rd32(E1000_RDT(i)); ++ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i)); + for (i = 0; i < 4; i++) +- regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); ++ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) +- regs_buff[153 + i] = rd32(E1000_EITR(i)); ++ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i)); + for (i = 0; i < 8; i++) +- regs_buff[163 + i] = rd32(E1000_IMIR(i)); ++ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i)); + for (i = 0; i < 8; i++) +- regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); ++ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) +- regs_buff[179 + i] = rd32(E1000_RAL(i)); ++ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i)); + for (i = 0; i < 16; i++) +- regs_buff[195 + i] = rd32(E1000_RAH(i)); ++ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i)); + + for (i = 0; i < 4; i++) +- regs_buff[211 + i] = rd32(E1000_TDBAL(i)); ++ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i)); + for (i = 0; i < 4; i++) +- regs_buff[215 + i] = rd32(E1000_TDBAH(i)); ++ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i)); + for (i = 0; i < 4; i++) +- regs_buff[219 + i] = rd32(E1000_TDLEN(i)); ++ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i)); + for (i = 0; i < 4; i++) +- regs_buff[223 + i] = rd32(E1000_TDH(i)); ++ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i)); + for (i = 0; i < 4; i++) +- regs_buff[227 + i] = rd32(E1000_TDT(i)); ++ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i)); + for (i = 0; i < 4; i++) +- regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); ++ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) +- regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); ++ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) +- regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); ++ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) +- regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); ++ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) +- regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); ++ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) +- regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); ++ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) +- regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); ++ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) +- regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); ++ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) +- regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); ++ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) +- regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); +- +- regs_buff[547] = rd32(E1000_TDFH); +- regs_buff[548] = rd32(E1000_TDFT); +- regs_buff[549] = rd32(E1000_TDFHS); +- regs_buff[550] = rd32(E1000_TDFPC); ++ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i)); + ++ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH); ++ regs_buff[548] = E1000_READ_REG(hw, 
E1000_TDFT); ++ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS); ++ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC); + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } +- +- if (hw->mac.type != e1000_82576) +- return; +- for (i = 0; i < 12; i++) +- regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); +- for (i = 0; i < 4; i++) +- regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); +- +- for (i = 0; i < 12; i++) +- regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + static int igb_get_eeprom_len(struct net_device *netdev) +@@ -736,13 +733,13 @@ + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) +- ret_val = hw->nvm.ops.read(hw, first_word, +- last_word - first_word + 1, +- eeprom_buff); ++ ret_val = e1000_read_nvm(hw, first_word, ++ last_word - first_word + 1, ++ eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { +- ret_val = hw->nvm.ops.read(hw, first_word + i, 1, +- &eeprom_buff[i]); ++ ret_val = e1000_read_nvm(hw, first_word + i, 1, ++ &eeprom_buff[i]); + if (ret_val) + break; + } +@@ -750,7 +747,7 @@ + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) +- le16_to_cpus(&eeprom_buff[i]); ++ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); +@@ -772,11 +769,6 @@ + if (eeprom->len == 0) + return -EOPNOTSUPP; + +- if ((hw->mac.type >= e1000_i210) && +- !igb_get_flash_presence_i210(hw)) { +- return -EOPNOTSUPP; +- } +- + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + +@@ -791,19 +783,17 @@ + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { +- /* need read/modify/write of first changed EEPROM word +- * only the second byte of the word is being modified +- */ +- ret_val = hw->nvm.ops.read(hw, first_word, 1, ++ /* need read/modify/write of first changed EEPROM word */ ++ /* only the second byte of the word is being modified */ ++ ret_val = e1000_read_nvm(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { +- /* need read/modify/write of last changed EEPROM word +- * only the first byte of the word is being modified +- */ +- ret_val = hw->nvm.ops.read(hw, last_word, 1, +- &eeprom_buff[last_word - first_word]); ++ /* need read/modify/write of last changed EEPROM word */ ++ /* 
only the first byte of the word is being modified */ ++ ret_val = e1000_read_nvm(hw, last_word, 1, ++ &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ +@@ -813,16 +803,16 @@ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) +- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); ++ cpu_to_le16s(&eeprom_buff[i]); + +- ret_val = hw->nvm.ops.write(hw, first_word, +- last_word - first_word + 1, eeprom_buff); ++ ret_val = e1000_write_nvm(hw, first_word, ++ last_word - first_word + 1, eeprom_buff); + +- /* Update the checksum if nvm write succeeded */ ++ /* Update the checksum if write succeeded. ++ * and flush shadow RAM for 82573 controllers */ + if (ret_val == 0) +- hw->nvm.ops.update(hw); ++ e1000_update_nvm_checksum(hw); + +- igb_set_fw_version(adapter); + kfree(eeprom_buff); + return ret_val; + } +@@ -832,16 +822,14 @@ + { + struct igb_adapter *adapter = netdev_priv(netdev); + +- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); +- strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); +- +- /* EEPROM image version # is reported as firmware version # for +- * 82575 controllers +- */ +- strlcpy(drvinfo->fw_version, adapter->fw_version, +- sizeof(drvinfo->fw_version)); +- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), +- sizeof(drvinfo->bus_info)); ++ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); ++ strncpy(drvinfo->version, igb_driver_version, ++ sizeof(drvinfo->version) - 1); ++ ++ strncpy(drvinfo->fw_version, adapter->fw_version, ++ sizeof(drvinfo->fw_version) - 1); ++ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), ++ sizeof(drvinfo->bus_info) - 1); + drvinfo->n_stats = IGB_STATS_LEN; + drvinfo->testinfo_len = IGB_TEST_LEN; + drvinfo->regdump_len = igb_get_regs_len(netdev); +@@ -855,8 +843,12 @@ + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; ++ ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; + } + + static int igb_set_ringparam(struct net_device *netdev, +@@ -870,12 +862,12 @@ + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + +- new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); +- new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); ++ new_rx_count = min_t(u16, ring->rx_pending, (u32)IGB_MAX_RXD); ++ new_rx_count = max_t(u16, new_rx_count, (u16)IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + +- new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); +- new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); ++ new_tx_count = min_t(u16, ring->tx_pending, (u32)IGB_MAX_TXD); ++ new_tx_count = max_t(u16, new_tx_count, (u16)IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && +@@ -898,11 +890,11 @@ + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) +- temp_ring = vmalloc(adapter->num_tx_queues * +- sizeof(struct igb_ring)); ++ temp_ring = vmalloc(adapter->num_tx_queues ++ * sizeof(struct igb_ring)); + else +- temp_ring = vmalloc(adapter->num_rx_queues * +- sizeof(struct igb_ring)); ++ temp_ring = vmalloc(adapter->num_rx_queues ++ * sizeof(struct igb_ring)); + + if (!temp_ring) { + err = -ENOMEM; +@@ -911,9 +903,10 @@ + + igb_down(adapter); + +- /* We can't just free everything 
and then setup again, ++ /* ++ * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers +- * to the Tx and Rx ring structs. ++ * to the tx and rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { +@@ -975,224 +968,6 @@ + return err; + } + +-/* ethtool register test data */ +-struct igb_reg_test { +- u16 reg; +- u16 reg_offset; +- u16 array_len; +- u16 test_type; +- u32 mask; +- u32 write; +-}; +- +-/* In the hardware, registers are laid out either singly, in arrays +- * spaced 0x100 bytes apart, or in contiguous tables. We assume +- * most tests take place on arrays or single registers (handled +- * as a single-element array) and special-case the tables. +- * Table tests are always pattern tests. +- * +- * We also make provision for some required setup steps by specifying +- * registers to be written without any read-back testing. +- */ +- +-#define PATTERN_TEST 1 +-#define SET_READ_TEST 2 +-#define WRITE_NO_TEST 3 +-#define TABLE32_TEST 4 +-#define TABLE64_TEST_LO 5 +-#define TABLE64_TEST_HI 6 +- +-/* i210 reg test */ +-static struct igb_reg_test reg_test_i210[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- /* RDH is read-only for i210, only test RDT. */ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, +- 0x900FFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0, 0 } +-}; +- +-/* i350 reg test */ +-static struct igb_reg_test reg_test_i350[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- /* RDH is read-only for i350, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, +- 0xC3FFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 16, TABLE64_TEST_HI, +- 0xC3FFFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- +-/* 82580 reg test */ +-static struct igb_reg_test reg_test_82580[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- /* RDH is read-only for 82580, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, +- 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_HI, +- 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- +-/* 82576 reg test */ +-static struct igb_reg_test reg_test_82576[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- /* Enable all RX queues before testing. */ +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, +- E1000_RXDCTL_QUEUE_ENABLE }, +- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, +- E1000_RXDCTL_QUEUE_ENABLE }, +- /* RDH is read-only for 82576, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, +- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- +-/* 82575 register test */ +-static struct igb_reg_test reg_test_82575[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- /* Enable all four RX queues before testing. */ +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, +- E1000_RXDCTL_QUEUE_ENABLE }, +- /* RDH is read-only for 82575, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- + static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) + { +@@ -1201,13 +976,14 @@ + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { +- wr32(reg, (_test[pat] & write)); +- val = rd32(reg) & mask; ++ E1000_WRITE_REG(hw, reg, (_test[pat] & write)); ++ val = E1000_READ_REG(hw, reg) & mask; + if (val != (_test[pat] & write & mask)) { +- dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", +- reg, val, (_test[pat] & write & mask)); +- *data = reg; ++ E1000_REGISTER(hw, reg), val, (_test[pat] ++ & write & mask)); ++ *data = E1000_REGISTER(hw, reg); + return true; + } + } +@@ -1220,14 +996,13 @@ + { + struct e1000_hw *hw = &adapter->hw; + u32 val; +- +- wr32(reg, write & mask); +- val = rd32(reg); ++ E1000_WRITE_REG(hw, reg, write & mask); ++ val = E1000_READ_REG(hw, reg); + if ((write & mask) != (val & mask)) { +- dev_err(&adapter->pdev->dev, +- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "set/check reg %04X test failed:got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); +- *data = reg; ++ *data = E1000_REGISTER(hw, reg); + return true; + } + +@@ -1283,19 +1058,19 @@ + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. + */ +- before = rd32(E1000_STATUS); +- value = (rd32(E1000_STATUS) & toggle); +- wr32(E1000_STATUS, toggle); +- after = rd32(E1000_STATUS) & toggle; ++ before = E1000_READ_REG(hw, E1000_STATUS); ++ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle); ++ E1000_WRITE_REG(hw, E1000_STATUS, toggle); ++ after = E1000_READ_REG(hw, E1000_STATUS) & toggle; + if (value != after) { +- dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ +- wr32(E1000_STATUS, before); ++ E1000_WRITE_REG(hw, E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+@@ -1317,7 +1092,7 @@ + break; + case WRITE_NO_TEST: + writel(test->write, +- (adapter->hw.hw_addr + test->reg) ++ (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: +@@ -1346,24 +1121,11 @@ + + static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) + { +- struct e1000_hw *hw = &adapter->hw; +- + *data = 0; + +- /* Validate eeprom on all parts but flashless */ +- switch (hw->mac.type) { +- case e1000_i210: +- case e1000_i211: +- if (igb_get_flash_presence_i210(hw)) { +- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) +- *data = 2; +- } +- break; +- default: +- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) +- *data = 2; +- break; +- } ++ /* Validate NVM checksum */ ++ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) ++ *data = 2; + + return *data; + } +@@ -1373,7 +1135,7 @@ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + +- adapter->test_icr |= rd32(E1000_ICR); ++ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR); + + return IRQ_HANDLED; + } +@@ -1382,20 +1144,20 @@ + { + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; +- u32 mask, ics_mask, i = 0, shared_int = true; ++ u32 mask, ics_mask, i = 0, shared_int = TRUE; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + if (request_irq(adapter->msix_entries[0].vector, +- igb_test_intr, 0, netdev->name, adapter)) { ++ &igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { +- shared_int = false; ++ shared_int = FALSE; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; +@@ -1403,19 +1165,19 @@ + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { +- shared_int = false; +- } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, ++ shared_int = FALSE; ++ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } +- dev_info(&adapter->pdev->dev, "testing %s interrupt\n", +- (shared_int ? "shared" : "unshared")); ++ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n", ++ (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ +- wr32(E1000_IMC, ~0); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, ~0); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { +@@ -1430,9 +1192,11 @@ + break; + case e1000_i350: + case e1000_i354: ++ ics_mask = 0x77DCFED5; ++ break; + case e1000_i210: + case e1000_i211: +- ics_mask = 0x77DCFED5; ++ ics_mask = 0x774CFED5; + break; + default: + ics_mask = 0x7FFFFFFF; +@@ -1457,12 +1221,12 @@ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ +- wr32(E1000_ICR, ~0); ++ E1000_WRITE_REG(hw, E1000_ICR, ~0); + +- wr32(E1000_IMC, mask); +- wr32(E1000_ICS, mask); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, mask); ++ E1000_WRITE_REG(hw, E1000_ICS, mask); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; +@@ -1479,12 +1243,12 @@ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ +- wr32(E1000_ICR, ~0); ++ E1000_WRITE_REG(hw, E1000_ICR, ~0); + +- wr32(E1000_IMS, mask); +- wr32(E1000_ICS, mask); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMS, mask); ++ E1000_WRITE_REG(hw, E1000_ICS, mask); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; +@@ -1501,12 +1265,12 @@ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ +- wr32(E1000_ICR, ~0); ++ E1000_WRITE_REG(hw, E1000_ICR, ~0); + +- wr32(E1000_IMC, ~mask); +- wr32(E1000_ICS, ~mask); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, ~mask); ++ E1000_WRITE_REG(hw, E1000_ICS, ~mask); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 5; +@@ -1516,12 +1280,12 @@ + } + + /* Disable all the interrupts */ +- wr32(E1000_IMC, ~0); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, ~0); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) ++ if (adapter->msix_entries) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); +@@ -1544,7 +1308,7 @@ + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; +- tx_ring->dev = &adapter->pdev->dev; ++ tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + +@@ -1558,17 +1322,20 @@ + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; +- rx_ring->dev = &adapter->pdev->dev; ++ rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN; ++#endif + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { +- ret_val = 3; ++ ret_val = 2; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ +- wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); ++ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); +@@ -1588,10 +1355,10 @@ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. 
*/ +- igb_write_phy_reg(hw, 29, 0x001F); +- igb_write_phy_reg(hw, 30, 0x8FFC); +- igb_write_phy_reg(hw, 29, 0x001A); +- igb_write_phy_reg(hw, 30, 0x8FF0); ++ igb_e1000_write_phy_reg(hw, 29, 0x001F); ++ igb_e1000_write_phy_reg(hw, 30, 0x8FFC); ++ igb_e1000_write_phy_reg(hw, 29, 0x001A); ++ igb_e1000_write_phy_reg(hw, 30, 0x8FF0); + } + + static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +@@ -1599,34 +1366,32 @@ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + +- hw->mac.autoneg = false; ++ hw->mac.autoneg = FALSE; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ +- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); ++ igb_e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ +- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ +- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ +- igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); +- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); ++ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } +- } else if (hw->phy.type == e1000_phy_82580) { ++ } else { + /* enable MII loopback */ +- igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); ++ if (hw->phy.type == e1000_phy_82580) ++ igb_e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041); + } + +- /* add small delay to avoid loopback test failure */ +- msleep(50); +- +- /* force 1000, set loopback */ +- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); ++ /* force 1000, set loopback */ ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ +- ctrl_reg = rd32(E1000_CTRL); ++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ +@@ -1637,7 +1402,7 @@ + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + +- wr32(E1000_CTRL, ctrl_reg); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+@@ -1659,64 +1424,64 @@ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + +- reg = rd32(E1000_CTRL_EXT); ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || +- (hw->device_id == E1000_DEV_ID_I354_SGMII) || +- (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || ++ (hw->device_id == E1000_DEV_ID_I354_SGMII) || ++ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ +- reg = rd32(E1000_MPHY_ADDR_CTL); ++ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | +- E1000_MPHY_PCS_CLK_REG_OFFSET; +- wr32(E1000_MPHY_ADDR_CTL, reg); ++ E1000_MPHY_PCS_CLK_REG_OFFSET; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); + +- reg = rd32(E1000_MPHY_DATA); ++ reg = E1000_READ_REG(hw, E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; +- wr32(E1000_MPHY_DATA, reg); ++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); + } + +- reg = rd32(E1000_RCTL); ++ reg = E1000_READ_REG(hw, E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; +- wr32(E1000_RCTL, reg); ++ E1000_WRITE_REG(hw, E1000_RCTL, reg); + +- wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); ++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + +- reg = rd32(E1000_CTRL); ++ reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; +- wr32(E1000_CTRL, reg); ++ E1000_WRITE_REG(hw, E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ +- reg = rd32(E1000_CONNSW); ++ reg = E1000_READ_REG(hw, E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; +- wr32(E1000_CONNSW, reg); ++ E1000_WRITE_REG(hw, E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on +- * 82580 and newer devices. 
++ * 82580 and newer devices + */ + if (hw->mac.type >= e1000_82580) { +- reg = rd32(E1000_PCS_CFG0); ++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; +- wr32(E1000_PCS_CFG0, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ +- reg = rd32(E1000_PCS_LCTL); ++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ +- wr32(E1000_PCS_LCTL, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + return 0; + } +@@ -1731,36 +1496,37 @@ + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || +- (hw->device_id == E1000_DEV_ID_I354_SGMII)) { ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || ++ (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ +- reg = rd32(E1000_MPHY_ADDR_CTL); ++ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | +- E1000_MPHY_PCS_CLK_REG_OFFSET; +- wr32(E1000_MPHY_ADDR_CTL, reg); ++ E1000_MPHY_PCS_CLK_REG_OFFSET; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); + +- reg = rd32(E1000_MPHY_DATA); ++ reg = E1000_READ_REG(hw, E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; +- wr32(E1000_MPHY_DATA, reg); ++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); + } + +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + +- hw->mac.autoneg = true; +- igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); ++ hw->mac.autoneg = TRUE; ++ igb_e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; +- igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); +- igb_phy_sw_reset(hw); ++ if (hw->phy.type == I210_I_PHY_ID) ++ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg); ++ e1000_phy_commit(hw); + } + } +- + static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) + { +@@ -1779,19 +1545,25 @@ + + frame_size >>= 1; + ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ data = rx_buffer->skb->data; ++#else + data = kmap(rx_buffer->page); ++#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + ++#endif + return match; + } + +-static int igb_clean_test_rings(struct igb_ring *rx_ring, ++static u16 igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) + { +@@ -1806,13 +1578,17 @@ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { +- /* check Rx buffer */ ++ /* check rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ 
IGB_RX_HDR_LEN, ++#else + IGB_RX_BUFSZ, ++#endif + DMA_FROM_DEVICE); + + /* verify contents of skb */ +@@ -1822,14 +1598,18 @@ + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ IGB_RX_HDR_LEN, ++#else + IGB_RX_BUFSZ, ++#endif + DMA_FROM_DEVICE); + +- /* unmap buffer on Tx side */ ++ /* unmap buffer on tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + +- /* increment Rx/Tx next to clean counters */ ++ /* increment rx/tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; +@@ -1841,8 +1621,6 @@ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + +- netdev_tx_reset_queue(txring_txq(tx_ring)); +- + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; +@@ -1870,7 +1648,8 @@ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + +- /* Calculate the loop count based on the largest descriptor ring ++ /* ++ * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ +@@ -1897,7 +1676,7 @@ + break; + } + +- /* allow 200 milliseconds for packets to go from Tx to Rx */ ++ /* allow 200 milliseconds for packets to go from tx to rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); +@@ -1916,21 +1695,14 @@ + static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) + { + /* PHY loopback cannot be performed if SoL/IDER +- * sessions are active +- */ +- if (igb_check_reset_block(&adapter->hw)) { +- dev_err(&adapter->pdev->dev, ++ * sessions are active */ ++ if (e1000_check_reset_block(&adapter->hw)) { ++ dev_err(pci_dev_to_dev(adapter->pdev), + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + +- if (adapter->hw.mac.type == e1000_i354) { +- dev_info(&adapter->pdev->dev, +- "Loopback test not supported on i354.\n"); +- *data = 0; +- goto out; +- } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; +@@ -1938,6 +1710,7 @@ + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); ++ + igb_loopback_cleanup(adapter); + + err_loopback: +@@ -1948,32 +1721,39 @@ + + static int igb_link_test(struct igb_adapter *adapter, u64 *data) + { +- struct e1000_hw *hw = &adapter->hw; ++ u32 link; ++ int i, time; ++ + *data = 0; +- if (hw->phy.media_type == e1000_media_type_internal_serdes) { ++ time = 0; ++ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; +- +- hw->mac.serdes_has_link = false; ++ adapter->hw.mac.serdes_has_link = FALSE; + + /* On some blade server designs, link establishment +- * could take as long as 2-3 minutes +- */ ++ * could take as long as 2-3 minutes */ + do { +- hw->mac.ops.check_for_link(&adapter->hw); +- if (hw->mac.serdes_has_link) +- return *data; ++ igb_e1000_check_for_link(&adapter->hw); ++ if (adapter->hw.mac.serdes_has_link) ++ goto out; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { +- hw->mac.ops.check_for_link(&adapter->hw); +- if (hw->mac.autoneg) +- msleep(5000); +- +- if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ++ for (i = 0; i < IGB_MAX_LINK_TRIES; i++) { ++ link = igb_has_link(adapter); ++ if (link) { ++ goto out; ++ } else { ++ time++; ++ msleep(1000); ++ } ++ } ++ if (!link) + *data = 1; + } ++out: + return *data; + } + +@@ 
-1986,10 +1766,6 @@ + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); +- +- /* can't do offline tests on media switching devices */ +- if (adapter->hw.dev_spec._82575.mas_capable) +- eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + +@@ -1998,20 +1774,19 @@ + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + +- dev_info(&adapter->pdev->dev, "offline testing starting\n"); ++ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't +- * interfere with test result +- */ ++ * interfere with test result */ + if (igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ +- dev_close(netdev); ++ igb_close(netdev); + else + igb_reset(adapter); + +@@ -2027,8 +1802,10 @@ + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); ++ + /* power up link for loopback test */ + igb_power_up_link(adapter); ++ + if (igb_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +@@ -2038,15 +1815,15 @@ + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ +- adapter->hw.phy.autoneg_wait_to_complete = true; ++ adapter->hw.phy.autoneg_wait_to_complete = TRUE; + igb_reset(adapter); +- adapter->hw.phy.autoneg_wait_to_complete = false; ++ adapter->hw.phy.autoneg_wait_to_complete = FALSE; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) +- dev_open(netdev); ++ igb_open(netdev); + } else { +- dev_info(&adapter->pdev->dev, "online testing starting\n"); ++ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[4])) +@@ -2125,8 +1902,7 @@ + } + + /* bit defines for adapter->led_status */ +-#define IGB_LED_ON 0 +- ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID + static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) + { +@@ -2135,23 +1911,47 @@ + + switch (state) { + case ETHTOOL_ID_ACTIVE: +- igb_blink_led(hw); ++ e1000_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: +- igb_blink_led(hw); ++ igb_e1000_led_on(hw); + break; + case ETHTOOL_ID_OFF: +- igb_led_off(hw); ++ igb_e1000_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: +- igb_led_off(hw); +- clear_bit(IGB_LED_ON, &adapter->led_status); +- igb_cleanup_led(hw); ++ igb_e1000_led_off(hw); ++ igb_e1000_cleanup_led(hw); + break; + } + + return 0; + } ++#else ++static int igb_phys_id(struct net_device *netdev, u32 data) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; ++ unsigned long timeout; ++ ++ timeout = data * 1000; ++ ++ /* ++ * msleep_interruptable only accepts unsigned int so we are limited ++ * in how long a duration we can wait ++ */ ++ if (!timeout || timeout > UINT_MAX) ++ timeout = UINT_MAX; ++ ++ e1000_blink_led(hw); ++ msleep_interruptible(timeout); ++ ++ igb_e1000_led_off(hw); ++ igb_e1000_cleanup_led(hw); ++ ++ return 0; ++} ++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + + static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +@@ -2159,11 +1959,36 @@ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + ++ if (ec->rx_max_coalesced_frames || ++ ec->rx_coalesce_usecs_irq || 
++ ec->rx_max_coalesced_frames_irq || ++ ec->tx_max_coalesced_frames || ++ ec->tx_coalesce_usecs_irq || ++ ec->stats_block_coalesce_usecs || ++ ec->use_adaptive_rx_coalesce || ++ ec->use_adaptive_tx_coalesce || ++ ec->pkt_rate_low || ++ ec->rx_coalesce_usecs_low || ++ ec->rx_max_coalesced_frames_low || ++ ec->tx_coalesce_usecs_low || ++ ec->tx_max_coalesced_frames_low || ++ ec->pkt_rate_high || ++ ec->rx_coalesce_usecs_high || ++ ec->rx_max_coalesced_frames_high || ++ ec->tx_coalesce_usecs_high || ++ ec->tx_max_coalesced_frames_high || ++ ec->rate_sample_interval) { ++ netdev_err(netdev, "set_coalesce: invalid parameter"); ++ return -ENOTSUPP; ++ } ++ + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || +- (ec->rx_coalesce_usecs == 2)) ++ (ec->rx_coalesce_usecs == 2)) { ++ netdev_err(netdev, "set_coalesce: invalid setting"); + return -EINVAL; ++ } + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && +@@ -2174,11 +1999,12 @@ + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + ++ if (ec->tx_max_coalesced_frames_irq) ++ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; ++ + /* If ITR is disabled, disable DMAC */ +- if (ec->rx_coalesce_usecs == 0) { +- if (adapter->flags & IGB_FLAG_DMAC) +- adapter->flags &= ~IGB_FLAG_DMAC; +- } ++ if (ec->rx_coalesce_usecs == 0) ++ adapter->dmac = IGB_DMAC_DISABLE; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) +@@ -2219,6 +2045,8 @@ + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + ++ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; ++ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; +@@ -2237,6 +2065,7 @@ + return 0; + } + ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + static int igb_get_sset_count(struct net_device *netdev, int sset) + { + switch (sset) { +@@ -2248,19 +2077,32 @@ + return -ENOTSUPP; + } + } ++#else ++static int igb_get_stats_count(struct net_device *netdev) ++{ ++ return IGB_STATS_LEN; ++} ++ ++static int igb_diag_test_count(struct net_device *netdev) ++{ ++ return IGB_TEST_LEN; ++} ++#endif + + static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) + { + struct igb_adapter *adapter = netdev_priv(netdev); +- struct rtnl_link_stats64 *net_stats = &adapter->stats64; +- unsigned int start; +- struct igb_ring *ring; +- int i, j; ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats *net_stats = &netdev->stats; ++#else ++ struct net_device_stats *net_stats = &adapter->net_stats; ++#endif ++ u64 *queue_stat; ++ int i, j, k; + char *p; + +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, net_stats); ++ igb_update_stats(adapter); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; +@@ -2273,36 +2115,15 @@ + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { +- u64 restart2; +- +- ring = adapter->tx_ring[j]; +- do { +- start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +- data[i] = ring->tx_stats.packets; +- data[i+1] = ring->tx_stats.bytes; +- data[i+2] = ring->tx_stats.restart_queue; +- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +- do { +- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); +- restart2 = ring->tx_stats.restart_queue2; +- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); +- data[i+2] += restart2; +- +- i += IGB_TX_QUEUE_STATS_LEN; ++ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; ++ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) ++ data[i] = queue_stat[k]; + } + for (j = 0; j < adapter->num_rx_queues; j++) { +- ring = adapter->rx_ring[j]; +- do { +- start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +- data[i] = ring->rx_stats.packets; +- data[i+1] = ring->rx_stats.bytes; +- data[i+2] = ring->rx_stats.drops; +- data[i+3] = ring->rx_stats.csum_err; +- data[i+4] = ring->rx_stats.alloc_failed; +- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +- i += IGB_RX_QUEUE_STATS_LEN; ++ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; ++ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) ++ data[i] = queue_stat[k]; + } +- spin_unlock(&adapter->stats64_lock); + } + + static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +@@ -2347,22 +2168,19 @@ + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } +- /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ ++/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } + } + ++#ifdef HAVE_ETHTOOL_GET_TS_INFO + static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) + { + struct igb_adapter *adapter = netdev_priv(dev); + +- if (adapter->ptp_clock) +- info->phc_index = ptp_clock_index(adapter->ptp_clock); +- else +- info->phc_index = -1; +- + switch (adapter->hw.mac.type) { ++#ifdef HAVE_PTP_1588_CLOCK + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | +@@ -2383,6 +2201,11 @@ + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + ++ if (adapter->ptp_clock) ++ info->phc_index = ptp_clock_index(adapter->ptp_clock); ++ else ++ info->phc_index = -1; ++ + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); +@@ -2396,201 +2219,217 @@ + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; ++#endif /* HAVE_PTP_1588_CLOCK */ + default: + return -EOPNOTSUPP; + } + } ++#endif /* HAVE_ETHTOOL_GET_TS_INFO */ + +-static int igb_get_rss_hash_opts(struct igb_adapter *adapter, +- struct ethtool_rxnfc *cmd) ++#ifdef CONFIG_PM_RUNTIME ++static int igb_ethtool_begin(struct net_device *netdev) + { +- cmd->data = 0; ++ struct igb_adapter *adapter = netdev_priv(netdev); + +- /* Report default options for RSS on igb */ +- switch (cmd->flow_type) { +- case TCP_V4_FLOW: +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case UDP_V4_FLOW: +- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case SCTP_V4_FLOW: +- case AH_ESP_V4_FLOW: +- case AH_V4_FLOW: +- case ESP_V4_FLOW: +- 
case IPV4_FLOW: +- cmd->data |= RXH_IP_SRC | RXH_IP_DST; +- break; +- case TCP_V6_FLOW: +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case UDP_V6_FLOW: +- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case SCTP_V6_FLOW: +- case AH_ESP_V6_FLOW: +- case AH_V6_FLOW: +- case ESP_V6_FLOW: +- case IPV6_FLOW: +- cmd->data |= RXH_IP_SRC | RXH_IP_DST; +- break; +- default: +- return -EINVAL; +- } ++ pm_runtime_get_sync(&adapter->pdev->dev); + + return 0; + } + +-static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +- u32 *rule_locs) ++static void igb_ethtool_complete(struct net_device *netdev) + { +- struct igb_adapter *adapter = netdev_priv(dev); +- int ret = -EOPNOTSUPP; ++ struct igb_adapter *adapter = netdev_priv(netdev); + +- switch (cmd->cmd) { +- case ETHTOOL_GRXRINGS: +- cmd->data = adapter->num_rx_queues; +- ret = 0; +- break; +- case ETHTOOL_GRXFH: +- ret = igb_get_rss_hash_opts(adapter, cmd); +- break; +- default: +- break; +- } ++ pm_runtime_put(&adapter->pdev->dev); ++} ++#endif /* CONFIG_PM_RUNTIME */ + +- return ret; ++#ifndef HAVE_NDO_SET_FEATURES ++static u32 igb_get_rx_csum(struct net_device *netdev) ++{ ++ return !!(netdev->features & NETIF_F_RXCSUM); + } + +-#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ +- IGB_FLAG_RSS_FIELD_IPV6_UDP) +-static int igb_set_rss_hash_opt(struct igb_adapter *adapter, +- struct ethtool_rxnfc *nfc) ++static int igb_set_rx_csum(struct net_device *netdev, u32 data) + { +- u32 flags = adapter->flags; ++ const u32 feature_list = NETIF_F_RXCSUM; + +- /* RSS does not support anything other than hashing +- * to queues on src and dst IPs and ports ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++ return 0; ++} ++ ++static int igb_set_tx_csum(struct net_device *netdev, u32 data) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++#ifdef NETIF_F_IPV6_CSUM ++ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#else ++ u32 feature_list = NETIF_F_IP_CSUM; ++#endif ++ ++ if (adapter->hw.mac.type >= e1000_82576) ++ feature_list |= NETIF_F_SCTP_CSUM; ++ ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++ return 0; ++} ++ ++#ifdef NETIF_F_TSO ++static int igb_set_tso(struct net_device *netdev, u32 data) ++{ ++#ifdef NETIF_F_TSO6 ++ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; ++#else ++ const u32 feature_list = NETIF_F_TSO; ++#endif ++ ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ if (!data) { ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct net_device *v_netdev; ++ int i; ++ ++ /* disable TSO on all VLANs if they're present */ ++ if (!adapter->vlgrp) ++ goto tso_out; ++ ++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { ++ v_netdev = vlan_group_get_device(adapter->vlgrp, i); ++ if (!v_netdev) ++ continue; ++ ++ v_netdev->features &= ~feature_list; ++ vlan_group_set_device(adapter->vlgrp, i, v_netdev); ++ } ++ } ++ ++tso_out: ++ ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ return 0; ++} ++ ++#endif /* NETIF_F_TSO */ ++#ifdef ETHTOOL_GFLAGS ++static int igb_set_flags(struct net_device *netdev, u32 data) ++{ ++ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ++ ETH_FLAG_RXHASH; ++#ifndef HAVE_VLAN_RX_REGISTER ++ u32 changed = netdev->features ^ data; ++#endif ++ int rc; ++#ifndef IGB_NO_LRO ++ ++ 
supported_flags |= ETH_FLAG_LRO; ++#endif ++ /* ++ * Since there is no support for separate tx vlan accel ++ * enabled make sure tx flag is cleared if rx is. + */ +- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | +- RXH_L4_B_0_1 | RXH_L4_B_2_3)) +- return -EINVAL; ++ if (!(data & ETH_FLAG_RXVLAN)) ++ data &= ~ETH_FLAG_TXVLAN; + +- switch (nfc->flow_type) { +- case TCP_V4_FLOW: +- case TCP_V6_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST) || +- !(nfc->data & RXH_L4_B_0_1) || +- !(nfc->data & RXH_L4_B_2_3)) +- return -EINVAL; +- break; +- case UDP_V4_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST)) +- return -EINVAL; +- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { +- case 0: +- flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; +- break; +- case (RXH_L4_B_0_1 | RXH_L4_B_2_3): +- flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; +- break; +- default: +- return -EINVAL; +- } +- break; +- case UDP_V6_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST)) +- return -EINVAL; +- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { +- case 0: +- flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; +- break; +- case (RXH_L4_B_0_1 | RXH_L4_B_2_3): +- flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; +- break; +- default: +- return -EINVAL; +- } +- break; +- case AH_ESP_V4_FLOW: +- case AH_V4_FLOW: +- case ESP_V4_FLOW: +- case SCTP_V4_FLOW: +- case AH_ESP_V6_FLOW: +- case AH_V6_FLOW: +- case ESP_V6_FLOW: +- case SCTP_V6_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST) || +- (nfc->data & RXH_L4_B_0_1) || +- (nfc->data & RXH_L4_B_2_3)) +- return -EINVAL; +- break; +- default: +- return -EINVAL; +- } +- +- /* if we changed something we need to update flags */ +- if (flags != adapter->flags) { +- struct e1000_hw *hw = &adapter->hw; +- u32 mrqc = rd32(E1000_MRQC); +- +- if ((flags & UDP_RSS_FLAGS) && +- !(adapter->flags & UDP_RSS_FLAGS)) +- dev_err(&adapter->pdev->dev, +- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); +- +- adapter->flags = flags; +- +- /* Perform hash on these packet types */ +- mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | +- E1000_MRQC_RSS_FIELD_IPV4_TCP | +- E1000_MRQC_RSS_FIELD_IPV6 | +- E1000_MRQC_RSS_FIELD_IPV6_TCP; +- +- mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | +- E1000_MRQC_RSS_FIELD_IPV6_UDP); +- +- if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) +- mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; +- +- if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) +- mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; +- +- wr32(E1000_MRQC, mrqc); +- } ++ rc = ethtool_op_set_flags(netdev, data, supported_flags); ++ if (rc) ++ return rc; ++#ifndef HAVE_VLAN_RX_REGISTER ++ ++ if (changed & ETH_FLAG_RXVLAN) ++ igb_vlan_mode(netdev, data); ++#endif + + return 0; + } + +-static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) ++#endif /* ETHTOOL_GFLAGS */ ++#endif /* HAVE_NDO_SET_FEATURES */ ++#ifdef ETHTOOL_SADV_COAL ++static int igb_set_adv_coal(struct net_device *netdev, ++ struct ethtool_value *edata) + { +- struct igb_adapter *adapter = netdev_priv(dev); +- int ret = -EOPNOTSUPP; ++ struct igb_adapter *adapter = netdev_priv(netdev); + +- switch (cmd->cmd) { +- case ETHTOOL_SRXFH: +- ret = igb_set_rss_hash_opt(adapter, cmd); ++ switch (edata->data) { ++ case IGB_DMAC_DISABLE: ++ adapter->dmac = edata->data; + break; +- default: ++ case IGB_DMAC_MIN: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_500: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_EN_DEFAULT: ++ adapter->dmac = edata->data; ++ break; ++ case 
IGB_DMAC_2000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_3000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_4000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_5000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_6000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_7000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_8000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_9000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_MAX: ++ adapter->dmac = edata->data; + break; ++ default: ++ adapter->dmac = IGB_DMAC_DISABLE; ++ netdev_info(netdev, ++ "set_dmac: invalid setting, setting DMAC to %d\n", ++ adapter->dmac); + } ++ netdev_info(netdev, "%s: setting DMAC to %d\n", ++ netdev->name, adapter->dmac); ++ return 0; ++} + +- return ret; ++#endif /* ETHTOOL_SADV_COAL */ ++#ifdef ETHTOOL_GADV_COAL ++static void igb_get_dmac(struct net_device *netdev, ++ struct ethtool_value *edata) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ edata->data = adapter->dmac; ++ ++ return; + } ++#endif + ++#ifdef ETHTOOL_GEEE + static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) + { + struct igb_adapter *adapter = netdev_priv(netdev); +@@ -2604,17 +2443,18 @@ + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); ++ + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not supported on I354. */ + if (hw->mac.type == e1000_i354) { +- igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); ++ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + +- eeer = rd32(E1000_EEER); ++ eeer = E1000_READ_REG(hw, E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) +@@ -2627,19 +2467,20 @@ + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: +- ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, +- &phy_data); ++ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, ++ &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); ++ + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: +- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, +- E1000_EEE_LP_ADV_DEV_I210, +- &phy_data); ++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, ++ E1000_EEE_LP_ADV_DEV_I210, ++ &phy_data); + if (ret_val) + return -ENODATA; + +@@ -2656,7 +2497,8 @@ + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + +- /* Report correct negotiated EEE status for devices that ++ /* ++ * report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { +@@ -2668,60 +2510,59 @@ + + return 0; + } ++#endif + ++#ifdef ETHTOOL_SEEE + static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; ++ bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + +- memset(&eee_curr, 0, sizeof(struct ethtool_eee)); +- + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { +- 
dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + +- /* Tx LPI timer is not implemented currently */ ++ /* Tx LPI time is not implemented currently */ + if (edata->tx_lpi_timer) { +- dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + +- if (edata->advertised & +- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { +- dev_err(&adapter->pdev->dev, +- "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); ++ if (!edata->advertised || (edata->advertised & ++ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n"); + return -EINVAL; + } ++ adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); ++ adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { +- dev_err(&adapter->pdev->dev, +- "Setting EEE options are not supported with EEE disabled\n"); ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "Setting EEE options is not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); ++ + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; +- adapter->flags |= IGB_FLAG_EEE; +- if (hw->mac.type == e1000_i350) +- igb_set_eee_i350(hw); +- else +- igb_set_eee_i354(hw); + + /* reset link */ + if (netif_running(netdev)) +@@ -2730,109 +2571,232 @@ + igb_reset(adapter); + } + ++ if (hw->mac.type == e1000_i354) ++ ret_val = e1000_set_eee_i354(hw, adv1g_eee, adv100m_eee); ++ else ++ ret_val = e1000_set_eee_i350(hw, adv1g_eee, adv100m_eee); ++ ++ if (ret_val) { ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "Problem setting EEE advertisement options\n"); ++ return -EINVAL; ++ } ++ + return 0; + } ++#endif /* ETHTOOL_SEEE */ ++#ifdef ETHTOOL_GRXFH ++#ifdef ETHTOOL_GRXFHINDIR + +-static int igb_get_module_info(struct net_device *netdev, +- struct ethtool_modinfo *modinfo) ++static int igb_get_rss_hash_opts(struct igb_adapter *adapter, ++ struct ethtool_rxnfc *cmd) + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- u32 status = 0; +- u16 sff8472_rev, addr_mode; +- bool page_swap = false; +- +- if ((hw->phy.media_type == e1000_media_type_copper) || +- (hw->phy.media_type == e1000_media_type_unknown)) +- return -EOPNOTSUPP; ++ cmd->data = 0; + +- /* Check whether we support SFF-8472 or not */ +- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); +- if (status) +- return -EIO; +- +- /* addressing mode is not supported */ +- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); +- if (status) +- return -EIO; +- +- /* addressing mode is not supported */ +- if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { +- hw_dbg("Address change required to access page 0xA2, but not supported. 
Please report the module type to the driver maintainers.\n"); +- page_swap = true; +- } +- +- if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { +- /* We have an SFP, but it does not support SFF-8472 */ +- modinfo->type = ETH_MODULE_SFF_8079; +- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; +- } else { +- /* We have an SFP which supports a revision of SFF-8472 */ +- modinfo->type = ETH_MODULE_SFF_8472; +- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; ++ /* Report default options for RSS on igb */ ++ switch (cmd->flow_type) { ++ case TCP_V4_FLOW: ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case UDP_V4_FLOW: ++ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case SCTP_V4_FLOW: ++ case AH_ESP_V4_FLOW: ++ case AH_V4_FLOW: ++ case ESP_V4_FLOW: ++ case IPV4_FLOW: ++ cmd->data |= RXH_IP_SRC | RXH_IP_DST; ++ break; ++ case TCP_V6_FLOW: ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case UDP_V6_FLOW: ++ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case SCTP_V6_FLOW: ++ case AH_ESP_V6_FLOW: ++ case AH_V6_FLOW: ++ case ESP_V6_FLOW: ++ case IPV6_FLOW: ++ cmd->data |= RXH_IP_SRC | RXH_IP_DST; ++ break; ++ default: ++ return -EINVAL; + } + + return 0; + } + +-static int igb_get_module_eeprom(struct net_device *netdev, +- struct ethtool_eeprom *ee, u8 *data) ++#endif /* ETHTOOL_GRXFHINDIR */ ++static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ++#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS ++ void *rule_locs) ++#else ++ u32 *rule_locs) ++#endif + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- u32 status = 0; +- u16 *dataword; +- u16 first_word, last_word; +- int i = 0; ++ struct igb_adapter *adapter = netdev_priv(dev); ++ int ret = -EOPNOTSUPP; + +- if (ee->len == 0) +- return -EINVAL; ++ switch (cmd->cmd) { ++ case ETHTOOL_GRXRINGS: ++ cmd->data = adapter->num_rx_queues; ++ ret = 0; ++ break; ++#ifdef ETHTOOL_GRXFHINDIR ++ case ETHTOOL_GRXFHINDIR: ++ ret = igb_get_rss_hash_opts(adapter, cmd); ++ break; ++#endif /* ETHTOOL_GRXFHINDIR */ ++ default: ++ break; ++ } + +- first_word = ee->offset >> 1; +- last_word = (ee->offset + ee->len - 1) >> 1; ++ return ret; ++} + +- dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1), +- GFP_KERNEL); +- if (!dataword) +- return -ENOMEM; ++#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ ++ IGB_FLAG_RSS_FIELD_IPV6_UDP) ++static int igb_set_rss_hash_opt(struct igb_adapter *adapter, ++ struct ethtool_rxnfc *nfc) ++{ ++ u32 flags = adapter->flags; + +- /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ +- for (i = 0; i < last_word - first_word + 1; i++) { +- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); +- if (status) { +- /* Error occurred while reading module */ +- kfree(dataword); +- return -EIO; +- } ++ /* ++ * RSS does not support anything other than hashing ++ * to queues on src and dst IPs and ports ++ */ ++ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | ++ RXH_L4_B_0_1 | RXH_L4_B_2_3)) ++ return -EINVAL; + +- be16_to_cpus(&dataword[i]); ++ switch (nfc->flow_type) { ++ case TCP_V4_FLOW: ++ case TCP_V6_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & RXH_IP_DST) || ++ !(nfc->data & RXH_L4_B_0_1) || ++ !(nfc->data & RXH_L4_B_2_3)) ++ return -EINVAL; ++ break; ++ case UDP_V4_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & 
RXH_IP_DST)) ++ return -EINVAL; ++ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { ++ case 0: ++ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; ++ break; ++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3): ++ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; ++ break; ++ default: ++ return -EINVAL; ++ } ++ break; ++ case UDP_V6_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & RXH_IP_DST)) ++ return -EINVAL; ++ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { ++ case 0: ++ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; ++ break; ++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3): ++ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; ++ break; ++ default: ++ return -EINVAL; ++ } ++ break; ++ case AH_ESP_V4_FLOW: ++ case AH_V4_FLOW: ++ case ESP_V4_FLOW: ++ case SCTP_V4_FLOW: ++ case AH_ESP_V6_FLOW: ++ case AH_V6_FLOW: ++ case ESP_V6_FLOW: ++ case SCTP_V6_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & RXH_IP_DST) || ++ (nfc->data & RXH_L4_B_0_1) || ++ (nfc->data & RXH_L4_B_2_3)) ++ return -EINVAL; ++ break; ++ default: ++ return -EINVAL; + } + +- memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); +- kfree(dataword); ++ /* if we changed something we need to update flags */ ++ if (flags != adapter->flags) { ++ struct e1000_hw *hw = &adapter->hw; ++ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC); + +- return 0; +-} ++ if ((flags & UDP_RSS_FLAGS) && ++ !(adapter->flags & UDP_RSS_FLAGS)) ++ DPRINTK(DRV, WARNING, ++ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); ++ ++ adapter->flags = flags; ++ ++ /* Perform hash on these packet types */ ++ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | ++ E1000_MRQC_RSS_FIELD_IPV4_TCP | ++ E1000_MRQC_RSS_FIELD_IPV6 | ++ E1000_MRQC_RSS_FIELD_IPV6_TCP; ++ ++ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | ++ E1000_MRQC_RSS_FIELD_IPV6_UDP); ++ ++ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) ++ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; ++ ++ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) ++ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; ++ ++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc); ++ } + +-static int igb_ethtool_begin(struct net_device *netdev) +-{ +- struct igb_adapter *adapter = netdev_priv(netdev); +- pm_runtime_get_sync(&adapter->pdev->dev); + return 0; + } + +-static void igb_ethtool_complete(struct net_device *netdev) ++static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- pm_runtime_put(&adapter->pdev->dev); ++ struct igb_adapter *adapter = netdev_priv(dev); ++ int ret = -EOPNOTSUPP; ++ ++ switch (cmd->cmd) { ++ case ETHTOOL_SRXFH: ++ ret = igb_set_rss_hash_opt(adapter, cmd); ++ break; ++ default: ++ break; ++ } ++ ++ return ret; + } + ++#endif /* ETHTOOL_GRXFH */ ++#ifdef ETHTOOL_GRXFHINDIR ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE + static u32 igb_get_rxfh_indir_size(struct net_device *netdev) + { + return IGB_RETA_SIZE; + } + ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) ++#ifdef HAVE_RXFH_HASHFUNC ++static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, ++ u8 *hfunc) ++#else + static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) ++#endif /* HAVE_RXFH_HASHFUNC */ ++#else ++static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir) ++#endif /* HAVE_ETHTOOL_GSRSSH */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + int i; +@@ -2843,6 +2807,22 @@ + return 0; + } + ++#else ++static int igb_get_rxfh_indir(struct net_device *netdev, ++ struct ethtool_rxfh_indir *indir) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ size_t copy_size = 
++ min_t(size_t, indir->size, ARRAY_SIZE(adapter->rss_indir_tbl)); ++ ++ indir->size = ARRAY_SIZE(adapter->rss_indir_tbl); ++ memcpy(indir->ring_index, adapter->rss_indir_tbl, ++ copy_size * sizeof(indir->ring_index[0])); ++ return 0; ++} ++#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ ++#endif /* ETHTOOL_GRXFHINDIR */ ++#ifdef ETHTOOL_SRXFHINDIR + void igb_write_rss_indir_tbl(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +@@ -2872,14 +2852,24 @@ + val |= adapter->rss_indir_tbl[i + j]; + } + +- wr32(reg, val << shift); ++ E1000_WRITE_REG(hw, reg, val << shift); + reg += 4; + i += 4; + } + } + ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) ++#ifdef HAVE_RXFH_HASHFUNC + static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, +- const u8 *key) ++ const u8 *key, const u8 hfunc) ++#else ++static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, ++ const u8 *key) ++#endif /* HAVE_RXFH_HASHFUNC */ ++#else ++static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) ++#endif /* HAVE_ETHTOOL_GSRSSH */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +@@ -2911,135 +2901,314 @@ + + return 0; + } ++#else ++static int igb_set_rxfh_indir(struct net_device *netdev, ++ const struct ethtool_rxfh_indir *indir) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ size_t i; ++ ++ if (indir->size != ARRAY_SIZE(adapter->rss_indir_tbl)) ++ return -EINVAL; ++ for (i = 0; i < ARRAY_SIZE(adapter->rss_indir_tbl); i++) ++ if (indir->ring_index[i] >= adapter->rss_queues) ++ return -EINVAL; + +-static unsigned int igb_max_channels(struct igb_adapter *adapter) ++ memcpy(adapter->rss_indir_tbl, indir->ring_index, ++ sizeof(adapter->rss_indir_tbl)); ++ igb_write_rss_indir_tbl(adapter); ++ return 0; ++} ++#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ ++#endif /* ETHTOOL_SRXFHINDIR */ ++#ifdef ETHTOOL_GCHANNELS ++ ++static unsigned int igb_max_rss_queues(struct igb_adapter *adapter) + { +- struct e1000_hw *hw = &adapter->hw; +- unsigned int max_combined = 0; ++ unsigned int max_rss_queues; + +- switch (hw->mac.type) { ++ /* Determine the maximum number of RSS queues supported. 
*/ ++ switch (adapter->hw.mac.type) { + case e1000_i211: +- max_combined = IGB_MAX_RX_QUEUES_I211; ++ max_rss_queues = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: +- max_combined = IGB_MAX_RX_QUEUES_82575; ++ max_rss_queues = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: +- if (!!adapter->vfs_allocated_count) { +- max_combined = 1; ++ /* I350 cannot do RSS and SR-IOV at the same time */ ++ if (adapter->vfs_allocated_count) { ++ max_rss_queues = 1; + break; + } + /* fall through */ + case e1000_82576: +- if (!!adapter->vfs_allocated_count) { +- max_combined = 2; ++ if (adapter->vfs_allocated_count) { ++ max_rss_queues = 2; + break; + } + /* fall through */ + case e1000_82580: +- case e1000_i354: + default: +- max_combined = IGB_MAX_RX_QUEUES; ++ max_rss_queues = IGB_MAX_RX_QUEUES; + break; + } + +- return max_combined; ++ return max_rss_queues; + } + +-static void igb_get_channels(struct net_device *netdev, ++static void igb_get_channels(struct net_device *dev, + struct ethtool_channels *ch) + { +- struct igb_adapter *adapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = netdev_priv(dev); + +- /* Report maximum channels */ +- ch->max_combined = igb_max_channels(adapter); ++ /* report maximum channels */ ++ ch->max_combined = igb_max_rss_queues(adapter); ++ ch->max_rx = ch->max_combined; ++ if (adapter->vfs_allocated_count) ++ ch->max_tx = 1; ++ else ++ ch->max_tx = ch->max_combined; + +- /* Report info for other vector */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ /* report info for other vector */ ++ if (adapter->msix_entries) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + +- ch->combined_count = adapter->rss_queues; ++ /* record RSS/TSS queues */ ++ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) { ++ if (adapter->num_rx_queues > adapter->num_tx_queues) { ++ ch->combined_count = adapter->num_tx_queues; ++ ch->rx_count = adapter->num_rx_queues - ++ adapter->num_tx_queues; ++ } else if (adapter->num_rx_queues < adapter->num_tx_queues) { ++ ch->combined_count = adapter->num_rx_queues; ++ ch->tx_count = adapter->num_tx_queues - ++ adapter->num_rx_queues; ++ } else { ++ ch->combined_count = adapter->num_rx_queues; ++ } ++ } else { ++ ch->rx_count = adapter->num_rx_queues; ++ ch->tx_count = adapter->num_tx_queues; ++ } + } ++#endif /* ETHTOOL_GCHANNELS */ ++#ifdef ETHTOOL_SCHANNELS + +-static int igb_set_channels(struct net_device *netdev, +- struct ethtool_channels *ch) ++static int igb_set_channels(struct net_device *dev, ++ struct ethtool_channels *ch) + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- unsigned int count = ch->combined_count; +- unsigned int max_combined = 0; ++ struct igb_adapter *adapter = netdev_priv(dev); ++ unsigned int max_rss_queues; + +- /* Verify they are not requesting separate vectors */ +- if (!count || ch->rx_count || ch->tx_count) ++ /* we cannot support combined, Rx, and Tx vectors simultaneously */ ++ if (ch->combined_count && ch->rx_count && ch->tx_count) + return -EINVAL; + +- /* Verify other_count is valid and has not been changed */ +- if (ch->other_count != NON_Q_VECTORS) ++ /* ignore other_count since it is not changeable */ ++ ++ /* verify we have at least one channel in each direction */ ++ if (!ch->combined_count && (!ch->rx_count || !ch->tx_count)) + return -EINVAL; + +- /* Verify the number of channels doesn't exceed hw limits */ +- max_combined = igb_max_channels(adapter); +- if (count > max_combined) ++ /* verify number of Tx queues does not exceed 1 if SR-IOV is 
enabled */ ++ if (adapter->vfs_allocated_count && ++ ((ch->combined_count + ch->tx_count) > 1)) + return -EINVAL; + +- if (count != adapter->rss_queues) { +- adapter->rss_queues = count; +- igb_set_flag_queue_pairs(adapter, max_combined); ++ /* verify the number of channels does not exceed hardware limits */ ++ max_rss_queues = igb_max_rss_queues(adapter); ++ if (((ch->combined_count + ch->rx_count) > max_rss_queues) || ++ ((ch->combined_count + ch->tx_count) > max_rss_queues)) ++ return -EINVAL; + +- /* Hardware has to reinitialize queues and interrupts to +- * match the new configuration. ++ /* Determine if we need to pair queues. */ ++ switch (adapter->hw.mac.type) { ++ case e1000_82575: ++ case e1000_i211: ++ /* Device supports enough interrupts without queue pairing. */ ++ break; ++ case e1000_i350: ++ /* The PF has 3 interrupts and 1 queue pair w/ SR-IOV */ ++ if (adapter->vfs_allocated_count) ++ break; ++ case e1000_82576: ++ /* ++ * The PF has access to 6 interrupt vectors if the number of ++ * VFs is less than 7. If that is the case we don't have ++ * to pair up the queues. + */ +- return igb_reinit_queues(adapter); ++ if ((adapter->vfs_allocated_count > 0) && ++ (adapter->vfs_allocated_count < 7)) ++ break; ++ /* fall through */ ++ case e1000_82580: ++ case e1000_i210: ++ default: ++ /* verify we can support as many queues as requested */ ++ if ((ch->combined_count + ++ ch->rx_count + ch->tx_count) > MAX_Q_VECTORS) ++ return -EINVAL; ++ break; + } + +- return 0; ++ /* update configuration values */ ++ adapter->rss_queues = ch->combined_count + ch->rx_count; ++ if (ch->rx_count == ch->tx_count || adapter->vfs_allocated_count) ++ adapter->tss_queues = 0; ++ else ++ adapter->tss_queues = ch->combined_count + ch->tx_count; ++ ++ if (ch->combined_count) ++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS; ++ else ++ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; ++ ++ /* update queue configuration for adapter */ ++ return igb_setup_queues(adapter); + } + ++#endif /* ETHTOOL_SCHANNELS */ + static const struct ethtool_ops igb_ethtool_ops = { +- .get_settings = igb_get_settings, +- .set_settings = igb_set_settings, +- .get_drvinfo = igb_get_drvinfo, +- .get_regs_len = igb_get_regs_len, +- .get_regs = igb_get_regs, +- .get_wol = igb_get_wol, +- .set_wol = igb_set_wol, +- .get_msglevel = igb_get_msglevel, +- .set_msglevel = igb_set_msglevel, +- .nway_reset = igb_nway_reset, +- .get_link = igb_get_link, +- .get_eeprom_len = igb_get_eeprom_len, +- .get_eeprom = igb_get_eeprom, +- .set_eeprom = igb_set_eeprom, +- .get_ringparam = igb_get_ringparam, +- .set_ringparam = igb_set_ringparam, +- .get_pauseparam = igb_get_pauseparam, +- .set_pauseparam = igb_set_pauseparam, +- .self_test = igb_diag_test, +- .get_strings = igb_get_strings, +- .set_phys_id = igb_set_phys_id, +- .get_sset_count = igb_get_sset_count, +- .get_ethtool_stats = igb_get_ethtool_stats, +- .get_coalesce = igb_get_coalesce, +- .set_coalesce = igb_set_coalesce, +- .get_ts_info = igb_get_ts_info, +- .get_rxnfc = igb_get_rxnfc, +- .set_rxnfc = igb_set_rxnfc, ++ .get_settings = igb_get_settings, ++ .set_settings = igb_set_settings, ++ .get_drvinfo = igb_get_drvinfo, ++ .get_regs_len = igb_get_regs_len, ++ .get_regs = igb_get_regs, ++ .get_wol = igb_get_wol, ++ .set_wol = igb_set_wol, ++ .get_msglevel = igb_get_msglevel, ++ .set_msglevel = igb_set_msglevel, ++ .nway_reset = igb_nway_reset, ++ .get_link = igb_get_link, ++ .get_eeprom_len = igb_get_eeprom_len, ++ .get_eeprom = igb_get_eeprom, ++ .set_eeprom = igb_set_eeprom, ++ .get_ringparam = 
igb_get_ringparam, ++ .set_ringparam = igb_set_ringparam, ++ .get_pauseparam = igb_get_pauseparam, ++ .set_pauseparam = igb_set_pauseparam, ++ .self_test = igb_diag_test, ++ .get_strings = igb_get_strings, ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID ++ .set_phys_id = igb_set_phys_id, ++#else ++ .phys_id = igb_phys_id, ++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT ++ .get_sset_count = igb_get_sset_count, ++#else ++ .get_stats_count = igb_get_stats_count, ++ .self_test_count = igb_diag_test_count, ++#endif ++ .get_ethtool_stats = igb_get_ethtool_stats, ++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR ++ .get_perm_addr = ethtool_op_get_perm_addr, ++#endif ++ .get_coalesce = igb_get_coalesce, ++ .set_coalesce = igb_set_coalesce, ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef HAVE_ETHTOOL_GET_TS_INFO ++ .get_ts_info = igb_get_ts_info, ++#endif /* HAVE_ETHTOOL_GET_TS_INFO */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifdef CONFIG_PM_RUNTIME ++ .begin = igb_ethtool_begin, ++ .complete = igb_ethtool_complete, ++#endif /* CONFIG_PM_RUNTIME */ ++#ifndef HAVE_NDO_SET_FEATURES ++ .get_rx_csum = igb_get_rx_csum, ++ .set_rx_csum = igb_set_rx_csum, ++ .get_tx_csum = ethtool_op_get_tx_csum, ++ .set_tx_csum = igb_set_tx_csum, ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++#ifdef NETIF_F_TSO ++ .get_tso = ethtool_op_get_tso, ++ .set_tso = igb_set_tso, ++#endif ++#ifdef ETHTOOL_GFLAGS ++ .get_flags = ethtool_op_get_flags, ++ .set_flags = igb_set_flags, ++#endif /* ETHTOOL_GFLAGS */ ++#endif /* HAVE_NDO_SET_FEATURES */ ++#ifdef ETHTOOL_GADV_COAL ++ .get_advcoal = igb_get_adv_coal, ++ .set_advcoal = igb_set_dmac_coal, ++#endif /* ETHTOOL_GADV_COAL */ ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef ETHTOOL_GEEE + .get_eee = igb_get_eee, ++#endif ++#ifdef ETHTOOL_SEEE + .set_eee = igb_set_eee, +- .get_module_info = igb_get_module_info, +- .get_module_eeprom = igb_get_module_eeprom, ++#endif ++#ifdef ETHTOOL_GRXFHINDIR ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE + .get_rxfh_indir_size = igb_get_rxfh_indir_size, ++#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) + .get_rxfh = igb_get_rxfh, ++#else ++ .get_rxfh_indir = igb_get_rxfh_indir, ++#endif /* HAVE_ETHTOOL_GSRSSH */ ++#endif /* ETHTOOL_GRXFHINDIR */ ++#ifdef ETHTOOL_SRXFHINDIR ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) + .set_rxfh = igb_set_rxfh, +- .get_channels = igb_get_channels, +- .set_channels = igb_set_channels, +- .begin = igb_ethtool_begin, +- .complete = igb_ethtool_complete, ++#else ++ .set_rxfh_indir = igb_set_rxfh_indir, ++#endif /* HAVE_ETHTOOL_GSRSSH */ ++#endif /* ETHTOOL_SRXFHINDIR */ ++#ifdef ETHTOOL_GCHANNELS ++ .get_channels = igb_get_channels, ++#endif /* ETHTOOL_GCHANNELS */ ++#ifdef ETHTOOL_SCHANNELS ++ .set_channels = igb_set_channels, ++#endif /* ETHTOOL_SCHANNELS */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifdef ETHTOOL_GRXFH ++ .get_rxnfc = igb_get_rxnfc, ++ .set_rxnfc = igb_set_rxnfc, ++#endif ++}; ++ ++#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++static const struct ethtool_ops_ext igb_ethtool_ops_ext = { ++ .size = sizeof(struct ethtool_ops_ext), ++ .get_ts_info = igb_get_ts_info, ++ .set_phys_id = igb_set_phys_id, ++ .get_eee = igb_get_eee, ++ .set_eee = igb_set_eee, ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE ++ .get_rxfh_indir_size = igb_get_rxfh_indir_size, ++#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ ++ 
.get_rxfh_indir = igb_get_rxfh_indir, ++ .set_rxfh_indir = igb_set_rxfh_indir, ++ .get_channels = igb_get_channels, ++ .set_channels = igb_set_channels, + }; + + void igb_set_ethtool_ops(struct net_device *netdev) + { +- netdev->ethtool_ops = &igb_ethtool_ops; ++ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); ++ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext); + } ++#else ++void igb_set_ethtool_ops(struct net_device *netdev) ++{ ++ /* have to "undeclare" const on this struct to remove warnings */ ++#ifndef ETHTOOL_OPS_COMPAT ++ netdev->ethtool_ops = (struct ethtool_ops *)&igb_ethtool_ops; ++#else ++ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops); ++#endif /* SET_ETHTOOL_OPS */ ++} ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#endif /* SIOCETHTOOL */ ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c +--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,30 +1,31 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #include "igb.h" + #include "e1000_82575.h" + #include "e1000_hw.h" +- ++#ifdef IGB_HWMON + #include + #include + #include +@@ -34,28 +35,29 @@ + #include + #include + +-#ifdef CONFIG_IGB_HWMON ++#ifdef HAVE_I2C_SUPPORT + static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), + }; ++#endif /* HAVE_I2C_SUPPORT */ + + /* hwmon callback functions */ + static ssize_t igb_hwmon_show_location(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); + } + + static ssize_t igb_hwmon_show_temp(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + unsigned int value; + + /* reset the temp field */ +@@ -70,11 +72,11 @@ + } + + static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ +@@ -84,11 +86,11 @@ + } + + static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ +@@ -107,35 +109,34 @@ + * the data structures we need to get the data to display. 
+ */ + static int igb_add_hwmon_attr(struct igb_adapter *adapter, +- unsigned int offset, int type) +-{ ++ unsigned int offset, int type) { + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + +- n_attr = adapter->igb_hwmon_buff->n_hwmon; +- igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; ++ n_attr = adapter->igb_hwmon_buff.n_hwmon; ++ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_label", offset + 1); ++ "temp%u_label", offset); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_input", offset + 1); ++ "temp%u_input", offset); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_max", offset + 1); ++ "temp%u_max", offset); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_crit", offset + 1); ++ "temp%u_crit", offset); + break; + default: + rc = -EPERM; +@@ -150,16 +151,30 @@ + igb_attr->dev_attr.attr.mode = S_IRUGO; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); ++ rc = device_create_file(&adapter->pdev->dev, ++ &igb_attr->dev_attr); ++ if (rc == 0) ++ ++adapter->igb_hwmon_buff.n_hwmon; + +- adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; +- +- ++adapter->igb_hwmon_buff->n_hwmon; +- +- return 0; ++ return rc; + } + + static void igb_sysfs_del_adapter(struct igb_adapter *adapter) + { ++ int i; ++ ++ if (adapter == NULL) ++ return; ++ ++ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) { ++ device_remove_file(&adapter->pdev->dev, ++ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr); ++ } ++ ++ kfree(adapter->igb_hwmon_buff.hwmon_list); ++ ++ if (adapter->igb_hwmon_buff.device) ++ hwmon_device_unregister(adapter->igb_hwmon_buff.device); + } + + /* called from igb_main.c */ +@@ -171,11 +186,13 @@ + /* called from igb_main.c */ + int igb_sysfs_init(struct igb_adapter *adapter) + { +- struct hwmon_buff *igb_hwmon; +- struct i2c_client *client; +- struct device *hwmon_dev; ++ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff; + unsigned int i; ++ int n_attrs; + int rc = 0; ++#ifdef HAVE_I2C_SUPPORT ++ struct i2c_client *client = NULL; ++#endif /* HAVE_I2C_SUPPORT */ + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) +@@ -183,16 +200,35 @@ + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); +- if (rc) ++ if (rc) ++ goto exit; ++#ifdef HAVE_I2C_SUPPORT ++ /* init i2c_client */ ++ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); ++ if (client == NULL) { ++ dev_info(&adapter->pdev->dev, ++ "Failed to create new i2c device..\n"); + goto exit; ++ } ++ adapter->i2c_client = client; ++#endif /* HAVE_I2C_SUPPORT */ + +- igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), +- GFP_KERNEL); +- if (!igb_hwmon) { ++ /* Allocation space for max attributes ++ * max num sensors * values (loc, temp, max, caution) ++ */ ++ n_attrs = E1000_MAX_SENSORS * 4; ++ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), ++ GFP_KERNEL); ++ if 
(!igb_hwmon->hwmon_list) { + rc = -ENOMEM; +- goto exit; ++ goto err; ++ } ++ ++ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev); ++ if (IS_ERR(igb_hwmon->device)) { ++ rc = PTR_ERR(igb_hwmon->device); ++ goto err; + } +- adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + +@@ -204,39 +240,11 @@ + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); ++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); ++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); ++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) +- goto exit; +- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); +- if (rc) +- goto exit; +- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); +- if (rc) +- goto exit; +- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); +- if (rc) +- goto exit; +- } +- +- /* init i2c_client */ +- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); +- if (client == NULL) { +- dev_info(&adapter->pdev->dev, +- "Failed to create new i2c device.\n"); +- rc = -ENODEV; +- goto exit; +- } +- adapter->i2c_client = client; +- +- igb_hwmon->groups[0] = &igb_hwmon->group; +- igb_hwmon->group.attrs = igb_hwmon->attrs; +- +- hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, +- client->name, +- igb_hwmon, +- igb_hwmon->groups); +- if (IS_ERR(hwmon_dev)) { +- rc = PTR_ERR(hwmon_dev); +- goto err; ++ goto err; + } + + goto exit; +@@ -246,4 +254,4 @@ + exit: + return rc; + } +-#endif ++#endif /* IGB_HWMON */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +--- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,113 +1,114 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #include + #include + #include +-#include + #include + #include + #include +-#include +-#include ++#include ++#ifdef NETIF_F_TSO + #include ++#ifdef NETIF_F_TSO6 ++#include + #include +-#include ++#endif ++#endif ++#ifdef SIOCGMIIPHY + #include ++#endif ++#ifdef SIOCETHTOOL + #include +-#include ++#endif + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#ifdef CONFIG_PM_RUNTIME + #include +-#ifdef CONFIG_IGB_DCA +-#include +-#endif +-#include ++#endif /* CONFIG_PM_RUNTIME */ ++ ++#include + #include "igb.h" ++#include "igb_vmdq.h" ++ ++#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) \ ++ || defined(DEBUG_ITR) ++#define DRV_DEBUG "_debug" ++#else ++#define DRV_DEBUG ++#endif ++#define DRV_HW_PERF ++#define VERSION_SUFFIX + + #define MAJ 5 +-#define MIN 0 +-#define BUILD 5 +-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ +-__stringify(BUILD) "-k" ++#define MIN 3 ++#define BUILD 5.4 ++#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "."\ ++ __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF ++ + char igb_driver_name[] = "igb"; + char igb_driver_version[] = DRV_VERSION; + static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver"; + static const char igb_copyright[] = +- "Copyright (c) 2007-2014 Intel Corporation."; +- +-static const struct e1000_info *igb_info_tbl[] = { +- [board_82575] = &e1000_82575_info, +-}; ++ "Copyright (c) 2007-2015 Intel Corporation."; + + static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, +- { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_82576), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) }, + /* required last entry */ + {0, } + }; +@@ -122,84 +123,114 @@ + static int igb_probe(struct pci_dev *, const struct pci_device_id *); + static void igb_remove(struct pci_dev *pdev); + static int igb_sw_init(struct igb_adapter *); +-static int igb_open(struct net_device *); +-static int igb_close(struct net_device *); + static void igb_configure(struct igb_adapter *); + static void igb_configure_tx(struct igb_adapter *); + static void igb_configure_rx(struct igb_adapter *); + static void igb_clean_all_tx_rings(struct igb_adapter *); + static void igb_clean_all_rx_rings(struct igb_adapter *); + static void igb_clean_tx_ring(struct igb_ring *); +-static void igb_clean_rx_ring(struct igb_ring *); + static void igb_set_rx_mode(struct net_device *); + static void igb_update_phy_info(unsigned long); + static void igb_watchdog(unsigned long); + static void igb_watchdog_task(struct work_struct *); ++static void igb_dma_err_task(struct work_struct *); ++static void igb_dma_err_timer(unsigned long data); + static netdev_tx_t 
igb_xmit_frame(struct sk_buff *skb, struct net_device *); +-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats); ++static struct net_device_stats *igb_get_stats(struct net_device *); + static int igb_change_mtu(struct net_device *, int); ++/* void igb_full_sync_mac_table(struct igb_adapter *adapter); */ + static int igb_set_mac(struct net_device *, void *); + static void igb_set_uta(struct igb_adapter *adapter); + static irqreturn_t igb_intr(int irq, void *); + static irqreturn_t igb_intr_msi(int irq, void *); + static irqreturn_t igb_msix_other(int irq, void *); ++static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8); + static irqreturn_t igb_msix_ring(int irq, void *); +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + static void igb_update_dca(struct igb_q_vector *); + static void igb_setup_dca(struct igb_adapter *); +-#endif /* CONFIG_IGB_DCA */ ++#endif /* IGB_DCA */ + static int igb_poll(struct napi_struct *, int); + static bool igb_clean_tx_irq(struct igb_q_vector *); + static bool igb_clean_rx_irq(struct igb_q_vector *, int); + static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); + static void igb_tx_timeout(struct net_device *); + static void igb_reset_task(struct work_struct *); +-static void igb_vlan_mode(struct net_device *netdev, +- netdev_features_t features); +-static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +-static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); ++#ifdef HAVE_VLAN_RX_REGISTER ++static void igb_vlan_mode(struct net_device *, struct vlan_group *); ++#endif ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++static int igb_vlan_rx_add_vid(struct net_device *, ++ __always_unused __be16 proto, u16); ++static int igb_vlan_rx_kill_vid(struct net_device *, ++ __always_unused __be16 proto, u16); ++#else ++static int igb_vlan_rx_add_vid(struct net_device *, u16); ++static int igb_vlan_rx_kill_vid(struct net_device *, u16); ++#endif ++#else ++static void igb_vlan_rx_add_vid(struct net_device *, u16); ++static void igb_vlan_rx_kill_vid(struct net_device *, u16); ++#endif + static void igb_restore_vlan(struct igb_adapter *); +-static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); + static void igb_ping_all_vfs(struct igb_adapter *); + static void igb_msg_task(struct igb_adapter *); + static void igb_vmm_control(struct igb_adapter *); + static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); + static void igb_restore_vf_multicasts(struct igb_adapter *adapter); ++static void igb_process_mdd_event(struct igb_adapter *); ++#ifdef IFLA_VF_MAX + static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); + static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos); +-static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, +- bool setting); ++ bool setting); ++#endif ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, ++ int min_tx_rate, int tx_rate); ++#else ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); + static void igb_check_vf_rate_limit(struct igb_adapter *); +- +-#ifdef CONFIG_PCI_IOV +-static int igb_vf_configure(struct 
igb_adapter *adapter, int vf); +-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); + #endif +- ++static int igb_vf_configure(struct igb_adapter *adapter, int vf); + #ifdef CONFIG_PM +-#ifdef CONFIG_PM_SLEEP +-static int igb_suspend(struct device *); +-#endif +-static int igb_resume(struct device *); ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS ++static int igb_suspend(struct device *dev); ++static int igb_resume(struct device *dev); + #ifdef CONFIG_PM_RUNTIME + static int igb_runtime_suspend(struct device *dev); + static int igb_runtime_resume(struct device *dev); + static int igb_runtime_idle(struct device *dev); +-#endif ++#endif /* CONFIG_PM_RUNTIME */ + static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) ++#ifdef CONFIG_PM_RUNTIME + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) ++#endif /* CONFIG_PM_RUNTIME */ + }; +-#endif ++#else ++static int igb_suspend(struct pci_dev *pdev, pm_message_t state); ++static int igb_resume(struct pci_dev *pdev); ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ ++#endif /* CONFIG_PM */ ++#ifndef USE_REBOOT_NOTIFIER + static void igb_shutdown(struct pci_dev *); +-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +-#ifdef CONFIG_IGB_DCA ++#else ++static int igb_notify_reboot(struct notifier_block *, unsigned long, void *); ++static struct notifier_block igb_notifier_reboot = { ++ .notifier_call = igb_notify_reboot, ++ .next = NULL, ++ .priority = 0 ++}; ++#endif ++#ifdef IGB_DCA + static int igb_notify_dca(struct notifier_block *, unsigned long, void *); + static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, +@@ -211,462 +242,87 @@ + /* for netdump / net console */ + static void igb_netpoll(struct net_device *); + #endif +-#ifdef CONFIG_PCI_IOV +-static unsigned int max_vfs; +-module_param(max_vfs, uint, 0); +-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +-#endif /* CONFIG_PCI_IOV */ + ++#ifdef HAVE_PCI_ERS + static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); + static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); + static void igb_io_resume(struct pci_dev *); + +-static const struct pci_error_handlers igb_err_handler = { ++static struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, + }; ++#endif + ++static void igb_init_fw(struct igb_adapter *adapter); + static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + + static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, +- .remove = igb_remove, ++ .remove = __devexit_p(igb_remove), + #ifdef CONFIG_PM ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + .driver.pm = &igb_pm_ops, +-#endif ++#else ++ .suspend = igb_suspend, ++ .resume = igb_resume, ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ ++#endif /* CONFIG_PM */ ++#ifndef USE_REBOOT_NOTIFIER + .shutdown = igb_shutdown, +- .sriov_configure = igb_pci_sriov_configure, ++#endif ++#ifdef HAVE_PCI_ERS + .err_handler = &igb_err_handler ++#endif + }; + ++/* u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); */ ++ + MODULE_AUTHOR("Intel Corporation, "); + MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + +-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +-static int debug = -1; +-module_param(debug, int, 0); 
+-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +- +-struct igb_reg_info { +- u32 ofs; +- char *name; +-}; +- +-static const struct igb_reg_info igb_reg_info_tbl[] = { +- +- /* General Registers */ +- {E1000_CTRL, "CTRL"}, +- {E1000_STATUS, "STATUS"}, +- {E1000_CTRL_EXT, "CTRL_EXT"}, +- +- /* Interrupt Registers */ +- {E1000_ICR, "ICR"}, +- +- /* RX Registers */ +- {E1000_RCTL, "RCTL"}, +- {E1000_RDLEN(0), "RDLEN"}, +- {E1000_RDH(0), "RDH"}, +- {E1000_RDT(0), "RDT"}, +- {E1000_RXDCTL(0), "RXDCTL"}, +- {E1000_RDBAL(0), "RDBAL"}, +- {E1000_RDBAH(0), "RDBAH"}, +- +- /* TX Registers */ +- {E1000_TCTL, "TCTL"}, +- {E1000_TDBAL(0), "TDBAL"}, +- {E1000_TDBAH(0), "TDBAH"}, +- {E1000_TDLEN(0), "TDLEN"}, +- {E1000_TDH(0), "TDH"}, +- {E1000_TDT(0), "TDT"}, +- {E1000_TXDCTL(0), "TXDCTL"}, +- {E1000_TDFH, "TDFH"}, +- {E1000_TDFT, "TDFT"}, +- {E1000_TDFHS, "TDFHS"}, +- {E1000_TDFPC, "TDFPC"}, +- +- /* List Terminator */ +- {} +-}; +- +-/* igb_regdump - register printout routine */ +-static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +-{ +- int n = 0; +- char rname[16]; +- u32 regs[8]; +- +- switch (reginfo->ofs) { +- case E1000_RDLEN(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDLEN(n)); +- break; +- case E1000_RDH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDH(n)); +- break; +- case E1000_RDT(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDT(n)); +- break; +- case E1000_RXDCTL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RXDCTL(n)); +- break; +- case E1000_RDBAL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDBAL(n)); +- break; +- case E1000_RDBAH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDBAH(n)); +- break; +- case E1000_TDBAL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDBAL(n)); +- break; +- case E1000_TDBAH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDBAH(n)); +- break; +- case E1000_TDLEN(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDLEN(n)); +- break; +- case E1000_TDH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDH(n)); +- break; +- case E1000_TDT(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDT(n)); +- break; +- case E1000_TXDCTL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TXDCTL(n)); +- break; +- default: +- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); +- return; +- } +- +- snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); +- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], +- regs[2], regs[3]); +-} +- +-/* igb_dump - Print registers, Tx-rings and Rx-rings */ +-static void igb_dump(struct igb_adapter *adapter) +-{ +- struct net_device *netdev = adapter->netdev; +- struct e1000_hw *hw = &adapter->hw; +- struct igb_reg_info *reginfo; +- struct igb_ring *tx_ring; +- union e1000_adv_tx_desc *tx_desc; +- struct my_u0 { u64 a; u64 b; } *u0; +- struct igb_ring *rx_ring; +- union e1000_adv_rx_desc *rx_desc; +- u32 staterr; +- u16 i, n; +- +- if (!netif_msg_hw(adapter)) +- return; +- +- /* Print netdevice Info */ +- if (netdev) { +- dev_info(&adapter->pdev->dev, "Net device Info\n"); +- pr_info("Device Name state trans_start last_rx\n"); +- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, +- netdev->state, netdev->trans_start, netdev->last_rx); +- } +- +- /* Print Registers */ +- dev_info(&adapter->pdev->dev, "Register Dump\n"); +- pr_info(" Register Name Value\n"); +- for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; +- reginfo->name; reginfo++) { +- igb_regdump(hw, reginfo); +- } +- +- /* 
Print TX Ring Summary */ +- if (!netdev || !netif_running(netdev)) +- goto exit; +- +- dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); +- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); +- for (n = 0; n < adapter->num_tx_queues; n++) { +- struct igb_tx_buffer *buffer_info; +- tx_ring = adapter->tx_ring[n]; +- buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; +- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", +- n, tx_ring->next_to_use, tx_ring->next_to_clean, +- (u64)dma_unmap_addr(buffer_info, dma), +- dma_unmap_len(buffer_info, len), +- buffer_info->next_to_watch, +- (u64)buffer_info->time_stamp); +- } +- +- /* Print TX Rings */ +- if (!netif_msg_tx_done(adapter)) +- goto rx_ring_summary; +- +- dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); +- +- /* Transmit Descriptor Formats +- * +- * Advanced Transmit Descriptor +- * +--------------------------------------------------------------+ +- * 0 | Buffer Address [63:0] | +- * +--------------------------------------------------------------+ +- * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | +- * +--------------------------------------------------------------+ +- * 63 46 45 40 39 38 36 35 32 31 24 15 0 +- */ +- +- for (n = 0; n < adapter->num_tx_queues; n++) { +- tx_ring = adapter->tx_ring[n]; +- pr_info("------------------------------------\n"); +- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); +- pr_info("------------------------------------\n"); +- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); +- +- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { +- const char *next_desc; +- struct igb_tx_buffer *buffer_info; +- tx_desc = IGB_TX_DESC(tx_ring, i); +- buffer_info = &tx_ring->tx_buffer_info[i]; +- u0 = (struct my_u0 *)tx_desc; +- if (i == tx_ring->next_to_use && +- i == tx_ring->next_to_clean) +- next_desc = " NTC/U"; +- else if (i == tx_ring->next_to_use) +- next_desc = " NTU"; +- else if (i == tx_ring->next_to_clean) +- next_desc = " NTC"; +- else +- next_desc = ""; +- +- pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", +- i, le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- (u64)dma_unmap_addr(buffer_info, dma), +- dma_unmap_len(buffer_info, len), +- buffer_info->next_to_watch, +- (u64)buffer_info->time_stamp, +- buffer_info->skb, next_desc); +- +- if (netif_msg_pktdata(adapter) && buffer_info->skb) +- print_hex_dump(KERN_INFO, "", +- DUMP_PREFIX_ADDRESS, +- 16, 1, buffer_info->skb->data, +- dma_unmap_len(buffer_info, len), +- true); +- } +- } +- +- /* Print RX Rings Summary */ +-rx_ring_summary: +- dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); +- pr_info("Queue [NTU] [NTC]\n"); +- for (n = 0; n < adapter->num_rx_queues; n++) { +- rx_ring = adapter->rx_ring[n]; +- pr_info(" %5d %5X %5X\n", +- n, rx_ring->next_to_use, rx_ring->next_to_clean); +- } +- +- /* Print RX Rings */ +- if (!netif_msg_rx_status(adapter)) +- goto exit; +- +- dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); +- +- /* Advanced Receive Descriptor (Read) Format +- * 63 1 0 +- * +-----------------------------------------------------+ +- * 0 | Packet Buffer Address [63:1] |A0/NSE| +- * +----------------------------------------------+------+ +- * 8 | Header Buffer Address [63:1] | DD | +- * +-----------------------------------------------------+ +- * +- * +- * Advanced Receive Descriptor (Write-Back) Format +- * +- * 63 48 47 32 31 30 21 20 17 16 4 3 0 +- * +------------------------------------------------------+ +- * 0 | Packet 
IP |SPH| HDR_LEN | RSV|Packet| RSS | +- * | Checksum Ident | | | | Type | Type | +- * +------------------------------------------------------+ +- * 8 | VLAN Tag | Length | Extended Error | Extended Status | +- * +------------------------------------------------------+ +- * 63 48 47 32 31 20 19 0 +- */ +- +- for (n = 0; n < adapter->num_rx_queues; n++) { +- rx_ring = adapter->rx_ring[n]; +- pr_info("------------------------------------\n"); +- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); +- pr_info("------------------------------------\n"); +- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); +- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); +- +- for (i = 0; i < rx_ring->count; i++) { +- const char *next_desc; +- struct igb_rx_buffer *buffer_info; +- buffer_info = &rx_ring->rx_buffer_info[i]; +- rx_desc = IGB_RX_DESC(rx_ring, i); +- u0 = (struct my_u0 *)rx_desc; +- staterr = le32_to_cpu(rx_desc->wb.upper.status_error); +- +- if (i == rx_ring->next_to_use) +- next_desc = " NTU"; +- else if (i == rx_ring->next_to_clean) +- next_desc = " NTC"; +- else +- next_desc = ""; +- +- if (staterr & E1000_RXD_STAT_DD) { +- /* Descriptor Done */ +- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", +- "RWB", i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- next_desc); +- } else { +- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", +- "R ", i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- (u64)buffer_info->dma, +- next_desc); +- +- if (netif_msg_pktdata(adapter) && +- buffer_info->dma && buffer_info->page) { +- print_hex_dump(KERN_INFO, "", +- DUMP_PREFIX_ADDRESS, +- 16, 1, +- page_address(buffer_info->page) + +- buffer_info->page_offset, +- IGB_RX_BUFSZ, true); +- } +- } +- } +- } +- +-exit: +- return; +-} +- +-/** +- * igb_get_i2c_data - Reads the I2C SDA data bit +- * @hw: pointer to hardware structure +- * @i2cctl: Current value of I2CCTL register +- * +- * Returns the I2C data bit value +- **/ +-static int igb_get_i2c_data(void *data) ++static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add) + { +- struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); ++ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie; ++ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; ++ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); ++ u32 vfta; + +- return !!(i2cctl & E1000_I2C_DATA_IN); +-} ++ /* ++ * if this is the management vlan the only option is to add it in so ++ * that the management pass through will continue to work ++ */ ++ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && ++ (vid == mng_cookie->vlan_id)) ++ add = TRUE; + +-/** +- * igb_set_i2c_data - Sets the I2C data bit +- * @data: pointer to hardware structure +- * @state: I2C data value (0 or 1) to set +- * +- * Sets the I2C data bit +- **/ +-static void igb_set_i2c_data(void *data, int state) +-{ +- struct igb_adapter *adapter = (struct igb_adapter *)data; +- struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); ++ vfta = adapter->shadow_vfta[index]; + +- if (state) +- i2cctl |= E1000_I2C_DATA_OUT; ++ if (add) ++ vfta |= mask; + else +- i2cctl &= ~E1000_I2C_DATA_OUT; ++ vfta &= ~mask; + +- i2cctl &= ~E1000_I2C_DATA_OE_N; +- i2cctl |= E1000_I2C_CLK_OE_N; +- wr32(E1000_I2CPARAMS, i2cctl); +- wrfl(); +- +-} +- +-/** +- * igb_set_i2c_clk - Sets the I2C 
SCL clock +- * @data: pointer to hardware structure +- * @state: state to set clock +- * +- * Sets the I2C clock line to state +- **/ +-static void igb_set_i2c_clk(void *data, int state) +-{ +- struct igb_adapter *adapter = (struct igb_adapter *)data; +- struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); +- +- if (state) { +- i2cctl |= E1000_I2C_CLK_OUT; +- i2cctl &= ~E1000_I2C_CLK_OE_N; +- } else { +- i2cctl &= ~E1000_I2C_CLK_OUT; +- i2cctl &= ~E1000_I2C_CLK_OE_N; +- } +- wr32(E1000_I2CPARAMS, i2cctl); +- wrfl(); +-} +- +-/** +- * igb_get_i2c_clk - Gets the I2C SCL clock state +- * @data: pointer to hardware structure +- * +- * Gets the I2C clock state +- **/ +-static int igb_get_i2c_clk(void *data) +-{ +- struct igb_adapter *adapter = (struct igb_adapter *)data; +- struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); +- +- return !!(i2cctl & E1000_I2C_CLK_IN); ++ igb_e1000_write_vfta(hw, index, vfta); ++ adapter->shadow_vfta[index] = vfta; + } + +-static const struct i2c_algo_bit_data igb_i2c_algo = { +- .setsda = igb_set_i2c_data, +- .setscl = igb_set_i2c_clk, +- .getsda = igb_get_i2c_data, +- .getscl = igb_get_i2c_clk, +- .udelay = 5, +- .timeout = 20, +-}; +- +-/** +- * igb_get_hw_dev - return device +- * @hw: pointer to hardware structure +- * +- * used by hardware layer to print debugging information +- **/ +-struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +-{ +- struct igb_adapter *adapter = hw->back; +- return adapter->netdev; +-} ++static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; ++module_param(debug, int, 0); ++MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)"); + + /** +- * igb_init_module - Driver Registration Routine ++ * igb_init_module - Driver Registration Routine + * +- * igb_init_module is the first routine called when the driver is +- * loaded. All it does is register with the PCI subsystem. ++ * igb_init_module is the first routine called when the driver is ++ * loaded. All it does is register with the PCI subsystem. + **/ + static int __init igb_init_module(void) + { +@@ -674,76 +330,89 @@ + + pr_info("%s - version %s\n", + igb_driver_string, igb_driver_version); ++ + pr_info("%s\n", igb_copyright); ++#ifdef IGB_HWMON ++/* only use IGB_PROCFS if IGB_HWMON is not defined */ ++#else ++#ifdef IGB_PROCFS ++ if (igb_procfs_topdir_init()) ++ pr_info("Procfs failed to initialize topdir\n"); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + dca_register_notify(&dca_notifier); + #endif + ret = pci_register_driver(&igb_driver); ++#ifdef USE_REBOOT_NOTIFIER ++ if (ret >= 0) ++ register_reboot_notifier(&igb_notifier_reboot); ++#endif + return ret; + } + + module_init(igb_init_module); + + /** +- * igb_exit_module - Driver Exit Cleanup Routine ++ * igb_exit_module - Driver Exit Cleanup Routine + * +- * igb_exit_module is called just before the driver is removed +- * from memory. ++ * igb_exit_module is called just before the driver is removed ++ * from memory. 
+ **/ + static void __exit igb_exit_module(void) + { +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + dca_unregister_notify(&dca_notifier); + #endif ++#ifdef USE_REBOOT_NOTIFIER ++ unregister_reboot_notifier(&igb_notifier_reboot); ++#endif + pci_unregister_driver(&igb_driver); ++ ++#ifdef IGB_HWMON ++/* only compile IGB_PROCFS if IGB_HWMON is not defined */ ++#else ++#ifdef IGB_PROCFS ++ igb_procfs_topdir_exit(); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + } + + module_exit(igb_exit_module); + + #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) + /** +- * igb_cache_ring_register - Descriptor ring to register mapping +- * @adapter: board private structure to initialize ++ * igb_cache_ring_register - Descriptor ring to register mapping ++ * @adapter: board private structure to initialize + * +- * Once we know the feature-set enabled for the device, we'll cache +- * the register offset the descriptor ring is assigned to. ++ * Once we know the feature-set enabled for the device, we'll cache ++ * the register offset the descriptor ring is assigned to. + **/ + static void igb_cache_ring_register(struct igb_adapter *adapter) + { + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + +- switch (adapter->hw.mac.type) { +- case e1000_82576: ++ if (adapter->hw.mac.type == e1000_82576) { + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. + * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ +- if (adapter->vfs_allocated_count) { ++ if ((adapter->rss_queues > 1) && adapter->vmdq_pools) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + +- Q_IDX_82576(i); ++ Q_IDX_82576(i); + } +- /* Fall through */ +- case e1000_82575: +- case e1000_82580: +- case e1000_i350: +- case e1000_i354: +- case e1000_i210: +- case e1000_i211: +- /* Fall through */ +- default: +- for (; i < adapter->num_rx_queues; i++) +- adapter->rx_ring[i]->reg_idx = rbase_offset + i; +- for (; j < adapter->num_tx_queues; j++) +- adapter->tx_ring[j]->reg_idx = rbase_offset + j; +- break; + } ++ for (; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = rbase_offset + i; ++ for (; j < adapter->num_tx_queues; j++) ++ adapter->tx_ring[j]->reg_idx = rbase_offset + j; + } + +-u32 igb_rd32(struct e1000_hw *hw, u32 reg) ++u32 e1000_read_reg(struct e1000_hw *hw, u32 reg) + { + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); +@@ -757,6 +426,7 @@ + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; ++ + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); +@@ -765,6 +435,42 @@ + return value; + } + ++static void igb_configure_lli(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u16 port; ++ ++ /* LLI should only be enabled for MSI-X or MSI interrupts */ ++ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI)) ++ return; ++ ++ if (adapter->lli_port) { ++ /* use filter 0 for port */ ++ port = htons((u16)adapter->lli_port); ++ E1000_WRITE_REG(hw, E1000_IMIR(0), ++ (port | E1000_IMIR_PORT_IM_EN)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(0), ++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); ++ } ++ ++ if (adapter->flags & IGB_FLAG_LLI_PUSH) { ++ /* use filter 1 for push flag */ ++ 
E1000_WRITE_REG(hw, E1000_IMIR(1), ++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(1), ++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH)); ++ } ++ ++ if (adapter->lli_size) { ++ /* use filter 2 for size */ ++ E1000_WRITE_REG(hw, E1000_IMIR(2), ++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(2), ++ (adapter->lli_size | E1000_IMIREXT_CTRL_BP)); ++ } ++ ++} ++ + /** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure +@@ -780,7 +486,7 @@ + static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) + { +- u32 ivar = array_rd32(E1000_IVAR0, index); ++ u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); +@@ -788,7 +494,7 @@ + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + +- array_wr32(E1000_IVAR0, index, ivar); ++ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); + } + + #define IGB_N0_QUEUE -1 +@@ -816,13 +522,14 @@ + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; +- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) ++ if (!adapter->msix_entries && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; +- array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: +- /* 82576 uses a table that essentially consists of 2 columns ++ /* ++ * 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. +@@ -842,7 +549,8 @@ + case e1000_i354: + case e1000_i210: + case e1000_i211: +- /* On 82580 and newer adapters the scheme is similar to 82576 ++ /* ++ * On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the +@@ -871,11 +579,10 @@ + } + + /** +- * igb_configure_msix - Configure MSI-X hardware +- * @adapter: board private structure to initialize ++ * igb_configure_msix - Configure MSI-X hardware + * +- * igb_configure_msix sets up the hardware to properly +- * generate MSI-X interrupts. ++ * igb_configure_msix sets up the hardware to properly ++ * generate MSI-X interrupts. + **/ + static void igb_configure_msix(struct igb_adapter *adapter) + { +@@ -888,7 +595,7 @@ + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: +- tmp = rd32(E1000_CTRL_EXT); ++ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + +@@ -896,10 +603,11 @@ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + +- wr32(E1000_CTRL_EXT, tmp); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ +- array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++, ++ E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; +@@ -913,15 +621,15 @@ + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ +- wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | +- E1000_GPIE_PBA | E1000_GPIE_EIAME | +- E1000_GPIE_NSICR); ++ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | ++ E1000_GPIE_PBA | E1000_GPIE_EIAME | ++ E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = 1 << vector; + tmp = (vector++ | E1000_IVAR_VALID) << 8; + +- wr32(E1000_IVAR_MISC, tmp); ++ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ +@@ -933,24 +641,22 @@ + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + +- wrfl(); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_request_msix - Initialize MSI-X interrupts +- * @adapter: board private structure to initialize ++ * igb_request_msix - Initialize MSI-X interrupts + * +- * igb_request_msix allocates MSI-X vectors and requests interrupts from the +- * kernel. ++ * igb_request_msix allocates MSI-X vectors and requests interrupts from the ++ * kernel. + **/ + static int igb_request_msix(struct igb_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; +- struct e1000_hw *hw = &adapter->hw; + int i, err = 0, vector = 0, free_vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, +- igb_msix_other, 0, netdev->name, adapter); ++ &igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + +@@ -959,7 +665,7 @@ + + vector++; + +- q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); ++ q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, +@@ -997,11 +703,11 @@ + } + + /** +- * igb_free_q_vector - Free memory allocated for specific interrupt vector +- * @adapter: board private structure to initialize +- * @v_idx: Index of vector to be freed ++ * igb_free_q_vector - Free memory allocated for specific interrupt vector ++ * @adapter: board private structure to initialize ++ * @v_idx: Index of vector to be freed + * +- * This function frees the memory allocated to the q_vector. ++ * This function frees the memory allocated to the q_vector. + **/ + static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) + { +@@ -1013,6 +719,10 @@ + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); ++ ++#ifndef IGB_NO_LRO ++ __skb_queue_purge(&q_vector->lrolist.active); ++#endif + } + + /** +@@ -1027,8 +737,8 @@ + { + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + +- /* Coming from igb_set_interrupt_capability, the vectors are not yet +- * allocated. So, q_vector is NULL so we should stop here. 
++ /* if we're coming from igb_set_interrupt_capability, the vectors are ++ * not yet allocated + */ + if (!q_vector) + return; +@@ -1047,22 +757,25 @@ + { + int v_idx = adapter->num_q_vectors; + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) ++ if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); +- else if (adapter->flags & IGB_FLAG_HAS_MSI) ++ kfree(adapter->msix_entries); ++ adapter->msix_entries = NULL; ++ } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); ++ } + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); + } + + /** +- * igb_free_q_vectors - Free memory allocated for interrupt vectors +- * @adapter: board private structure to initialize ++ * igb_free_q_vectors - Free memory allocated for interrupt vectors ++ * @adapter: board private structure to initialize + * +- * This function frees the memory allocated to the q_vectors. In addition if +- * NAPI is enabled it will delete any references to the NAPI struct prior +- * to freeing the q_vector. ++ * This function frees the memory allocated to the q_vectors. In addition if ++ * NAPI is enabled it will delete any references to the NAPI struct prior ++ * to freeing the q_vector. + **/ + static void igb_free_q_vectors(struct igb_adapter *adapter) + { +@@ -1079,11 +792,10 @@ + } + + /** +- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts +- * @adapter: board private structure to initialize ++ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * +- * This function resets the device so that it has 0 Rx queues, Tx queues, and +- * MSI-X interrupts allocated. ++ * This function resets the device so that it has 0 rx queues, tx queues, and ++ * MSI-X interrupts allocated. + */ + static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) + { +@@ -1092,108 +804,306 @@ + } + + /** +- * igb_set_interrupt_capability - set MSI or MSI-X if supported +- * @adapter: board private structure to initialize +- * @msix: boolean value of MSIX capability ++ * igb_process_mdd_event ++ * @adapter - board private structure + * +- * Attempt to configure interrupts using the best available +- * capabilities of the hardware and kernel. +- **/ +-static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) ++ * Identify a malicious VF, disable the VF TX/RX queues and log a message. ++ */ ++static void igb_process_mdd_event(struct igb_adapter *adapter) + { +- int err; +- int numvecs, i; +- +- if (!msix) +- goto msi_only; +- adapter->flags |= IGB_FLAG_HAS_MSIX; +- +- /* Number of supported queues. 
*/ +- adapter->num_rx_queues = adapter->rss_queues; +- if (adapter->vfs_allocated_count) +- adapter->num_tx_queues = 1; +- else +- adapter->num_tx_queues = adapter->rss_queues; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 lvmmc, vfte, vfre, mdfb; ++ u8 vf_queue; + +- /* start with one vector for every Rx queue */ +- numvecs = adapter->num_rx_queues; ++ lvmmc = E1000_READ_REG(hw, E1000_LVMMC); ++ vf_queue = lvmmc >> 29; + +- /* if Tx handler is separate add 1 for every Tx queue */ +- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) +- numvecs += adapter->num_tx_queues; +- +- /* store the number of vectors reserved for queues */ +- adapter->num_q_vectors = numvecs; +- +- /* add 1 vector for link status interrupts */ +- numvecs++; +- for (i = 0; i < numvecs; i++) +- adapter->msix_entries[i].entry = i; +- +- err = pci_enable_msix_range(adapter->pdev, +- adapter->msix_entries, +- numvecs, +- numvecs); +- if (err > 0) ++ /* VF index cannot be bigger or equal to VFs allocated */ ++ if (vf_queue >= adapter->vfs_allocated_count) + return; + +- igb_reset_interrupt_capability(adapter); ++ netdev_info(adapter->netdev, ++ "VF %d misbehaved. VF queues are disabled. VM misbehavior code is 0x%x\n", ++ vf_queue, lvmmc); + +- /* If we can't do MSI-X, try MSI */ +-msi_only: +- adapter->flags &= ~IGB_FLAG_HAS_MSIX; +-#ifdef CONFIG_PCI_IOV +- /* disable SR-IOV for non MSI-X configurations */ +- if (adapter->vf_data) { +- struct e1000_hw *hw = &adapter->hw; +- /* disable iov and allow time for transactions to clear */ +- pci_disable_sriov(adapter->pdev); +- msleep(500); ++ /* Disable VFTE and VFRE related bits */ ++ vfte = E1000_READ_REG(hw, E1000_VFTE); ++ vfte &= ~(1 << vf_queue); ++ E1000_WRITE_REG(hw, E1000_VFTE, vfte); + +- kfree(adapter->vf_data); +- adapter->vf_data = NULL; +- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); +- wrfl(); +- msleep(100); +- dev_info(&adapter->pdev->dev, "IOV Disabled\n"); +- } +-#endif +- adapter->vfs_allocated_count = 0; +- adapter->rss_queues = 1; +- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; +- adapter->num_rx_queues = 1; +- adapter->num_tx_queues = 1; +- adapter->num_q_vectors = 1; +- if (!pci_enable_msi(adapter->pdev)) +- adapter->flags |= IGB_FLAG_HAS_MSI; +-} ++ vfre = E1000_READ_REG(hw, E1000_VFRE); ++ vfre &= ~(1 << vf_queue); ++ E1000_WRITE_REG(hw, E1000_VFRE, vfre); + +-static void igb_add_ring(struct igb_ring *ring, +- struct igb_ring_container *head) +-{ +- head->ring = ring; +- head->count++; ++ /* Disable MDFB related bit. Clear on write */ ++ mdfb = E1000_READ_REG(hw, E1000_MDFB); ++ mdfb |= (1 << vf_queue); ++ E1000_WRITE_REG(hw, E1000_MDFB, mdfb); ++ ++ /* Reset the specific VF */ ++ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST); + } + + /** +- * igb_alloc_q_vector - Allocate memory for a single interrupt vector +- * @adapter: board private structure to initialize +- * @v_count: q_vectors allocated on adapter, used for ring interleaving +- * @v_idx: index of vector in adapter struct +- * @txr_count: total number of Tx rings to allocate +- * @txr_idx: index of first Tx ring to allocate +- * @rxr_count: total number of Rx rings to allocate +- * @rxr_idx: index of first Rx ring to allocate ++ * igb_disable_mdd ++ * @adapter - board private structure + * +- * We allocate one q_vector. If allocation fails we return -ENOMEM. 
++ * Disable MDD behavior in the HW + **/ +-static int igb_alloc_q_vector(struct igb_adapter *adapter, +- int v_count, int v_idx, +- int txr_count, int txr_idx, +- int rxr_count, int rxr_idx) ++static void igb_disable_mdd(struct igb_adapter *adapter) + { +- struct igb_q_vector *q_vector; +- struct igb_ring *ring; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 reg; ++ ++ if ((hw->mac.type != e1000_i350) && ++ (hw->mac.type != e1000_i354)) ++ return; ++ ++ reg = E1000_READ_REG(hw, E1000_DTXCTL); ++ reg &= (~E1000_DTXCTL_MDP_EN); ++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); ++} ++ ++/** ++ * igb_enable_mdd ++ * @adapter - board private structure ++ * ++ * Enable the HW to detect malicious driver and sends an interrupt to ++ * the driver. ++ **/ ++static void igb_enable_mdd(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u32 reg; ++ ++ /* Only available on i350 device */ ++ if (hw->mac.type != e1000_i350) ++ return; ++ ++ reg = E1000_READ_REG(hw, E1000_DTXCTL); ++ reg |= E1000_DTXCTL_MDP_EN; ++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); ++} ++ ++/** ++ * igb_reset_sriov_capability - disable SR-IOV if enabled ++ * ++ * Attempt to disable single root IO virtualization capabilites present in the ++ * kernel. ++ **/ ++static void igb_reset_sriov_capability(struct igb_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ struct e1000_hw *hw = &adapter->hw; ++ ++ /* reclaim resources allocated to VFs */ ++ if (adapter->vf_data) { ++ if (!pci_vfs_assigned(pdev)) { ++ /* ++ * disable iov and allow time for transactions to ++ * clear ++ */ ++ pci_disable_sriov(pdev); ++ msleep(500); ++ ++ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n"); ++ } else { ++ dev_info(pci_dev_to_dev(pdev), ++ "IOV Not Disabled\n VF(s) are assigned to guests!\n"); ++ } ++ /* Disable Malicious Driver Detection */ ++ igb_disable_mdd(adapter); ++ ++ /* free vf data storage */ ++ kfree(adapter->vf_data); ++ adapter->vf_data = NULL; ++ ++ /* switch rings back to PF ownership */ ++ E1000_WRITE_REG(hw, E1000_IOVCTL, ++ E1000_IOVCTL_REUSE_VFQ); ++ E1000_WRITE_FLUSH(hw); ++ msleep(100); ++ } ++ ++ adapter->vfs_allocated_count = 0; ++} ++ ++/** ++ * igb_set_sriov_capability - setup SR-IOV if supported ++ * ++ * Attempt to enable single root IO virtualization capabilites present in the ++ * kernel. 
++ **/ ++static void igb_set_sriov_capability(struct igb_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ int old_vfs = 0; ++ int i; ++ ++ old_vfs = pci_num_vf(pdev); ++ if (old_vfs) { ++ dev_info(pci_dev_to_dev(pdev), ++ "%d pre-allocated VFs found - override max_vfs setting of %d\n", ++ old_vfs, adapter->vfs_allocated_count); ++ adapter->vfs_allocated_count = old_vfs; ++ } ++ /* no VFs requested, do nothing */ ++ if (!adapter->vfs_allocated_count) ++ return; ++ ++ /* allocate vf data storage */ ++ adapter->vf_data = kcalloc(adapter->vfs_allocated_count, ++ sizeof(struct vf_data_storage), ++ GFP_KERNEL); ++ ++ if (adapter->vf_data) { ++ if (!old_vfs) { ++ if (pci_enable_sriov(pdev, ++ adapter->vfs_allocated_count)) ++ goto err_out; ++ dev_warn(pci_dev_to_dev(pdev), ++ "SR-IOV has been enabled: configure port VLANs to keep your VFs secure\n"); ++ } ++ for (i = 0; i < adapter->vfs_allocated_count; i++) ++ igb_vf_configure(adapter, i); ++ ++ switch (adapter->hw.mac.type) { ++ case e1000_82576: ++ case e1000_i350: ++ /* Enable VM to VM loopback by default */ ++ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; ++ break; ++ default: ++ /* Currently no other hardware supports loopback */ ++ break; ++ } ++ ++ /* DMA Coalescing is not supported in IOV mode. */ ++ if (adapter->hw.mac.type >= e1000_i350) ++ adapter->dmac = IGB_DMAC_DISABLE; ++ if (adapter->hw.mac.type < e1000_i350) ++ adapter->flags |= IGB_FLAG_DETECT_BAD_DMA; ++ return; ++ ++ } ++ ++err_out: ++ kfree(adapter->vf_data); ++ adapter->vf_data = NULL; ++ adapter->vfs_allocated_count = 0; ++ dev_warn(pci_dev_to_dev(pdev), ++ "Failed to initialize SR-IOV virtualization\n"); ++} ++ ++/** ++ * igb_set_interrupt_capability - set MSI or MSI-X if supported ++ * ++ * Attempt to configure interrupts using the best available ++ * capabilities of the hardware and kernel. ++ **/ ++static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ int err; ++ int numvecs, i; ++ ++ if (!msix) ++ adapter->int_mode = IGB_INT_MODE_MSI; ++ ++ /* Number of supported queues. */ ++ adapter->num_rx_queues = adapter->rss_queues; ++ ++ if (adapter->vmdq_pools > 1) ++ adapter->num_rx_queues += adapter->vmdq_pools - 1; ++ ++#ifdef HAVE_TX_MQ ++ if (adapter->vmdq_pools) ++ adapter->num_tx_queues = adapter->vmdq_pools; ++ else ++ adapter->num_tx_queues = adapter->num_rx_queues; ++#else ++ adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools); ++#endif ++ ++ switch (adapter->int_mode) { ++ case IGB_INT_MODE_MSIX: ++ /* start with one vector for every Tx/Rx queue */ ++ numvecs = max_t(int, adapter->num_tx_queues, ++ adapter->num_rx_queues); ++ ++ /* if tx handler is separate make it 1 for every queue */ ++ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) ++ numvecs = adapter->num_tx_queues + ++ adapter->num_rx_queues; ++ ++ /* store the number of vectors reserved for queues */ ++ adapter->num_q_vectors = numvecs; ++ ++ /* add 1 vector for link status interrupts */ ++ numvecs++; ++ adapter->msix_entries = kcalloc(numvecs, ++ sizeof(struct msix_entry), ++ GFP_KERNEL); ++ if (adapter->msix_entries) { ++ for (i = 0; i < numvecs; i++) ++ adapter->msix_entries[i].entry = i; ++ ++ err = pci_enable_msix(pdev, ++ adapter->msix_entries, numvecs); ++ if (err == 0) ++ break; ++ } ++ /* MSI-X failed, so fall through and try MSI */ ++ dev_warn(pci_dev_to_dev(pdev), ++ "Failed to initialize MSI-X interrupts. 
Falling back to MSI interrupts.\n"); ++ igb_reset_interrupt_capability(adapter); ++ case IGB_INT_MODE_MSI: ++ if (!pci_enable_msi(pdev)) ++ adapter->flags |= IGB_FLAG_HAS_MSI; ++ else ++ dev_warn(pci_dev_to_dev(pdev), ++ "Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); ++ /* Fall through */ ++ case IGB_INT_MODE_LEGACY: ++ /* disable advanced features and set number of queues to 1 */ ++ igb_reset_sriov_capability(adapter); ++ adapter->vmdq_pools = 0; ++ adapter->rss_queues = 1; ++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS; ++ adapter->num_rx_queues = 1; ++ adapter->num_tx_queues = 1; ++ adapter->num_q_vectors = 1; ++ /* Don't do anything; this is system default */ ++ break; ++ } ++} ++ ++static void igb_add_ring(struct igb_ring *ring, ++ struct igb_ring_container *head) ++{ ++ head->ring = ring; ++ head->count++; ++} ++ ++/** ++ * igb_alloc_q_vector - Allocate memory for a single interrupt vector ++ * @adapter: board private structure to initialize ++ * @v_count: q_vectors allocated on adapter, used for ring interleaving ++ * @v_idx: index of vector in adapter struct ++ * @txr_count: total number of Tx rings to allocate ++ * @txr_idx: index of first Tx ring to allocate ++ * @rxr_count: total number of Rx rings to allocate ++ * @rxr_idx: index of first Rx ring to allocate ++ * ++ * We allocate one q_vector. If allocation fails we return -ENOMEM. ++ **/ ++static int igb_alloc_q_vector(struct igb_adapter *adapter, ++ unsigned int v_count, unsigned int v_idx, ++ unsigned int txr_count, unsigned int txr_idx, ++ unsigned int rxr_count, unsigned int rxr_idx) ++{ ++ struct igb_q_vector *q_vector; ++ struct igb_ring *ring; + int ring_count, size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ +@@ -1206,17 +1116,18 @@ + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; +- if (!q_vector) { +- q_vector = kzalloc(size, GFP_KERNEL); +- } else if (size > ksize(q_vector)) { +- kfree_rcu(q_vector, rcu); ++ if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); +- } else { ++ else + memset(q_vector, 0, size); +- } + if (!q_vector) + return -ENOMEM; + ++#ifndef IGB_NO_LRO ++ /* initialize LRO */ ++ __skb_queue_head_init(&q_vector->lrolist.active); ++ ++#endif + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); +@@ -1229,7 +1140,7 @@ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ +- q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); ++ q_vector->itr_register = adapter->io_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ +@@ -1265,9 +1176,6 @@ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + +- u64_stats_init(&ring->tx_syncp); +- u64_stats_init(&ring->tx_syncp2); +- + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + +@@ -1286,22 +1194,23 @@ + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + ++#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES) ++ /* enable rx checksum */ ++ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); ++ ++#endif + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + +- /* On i350, i354, i210, and i211, loopback VLAN packets +- * have the tag byte-swapped. 
+- */ +- if (adapter->hw.mac.type >= e1000_i350) ++ if ((adapter->hw.mac.type == e1000_i350) || ++ (adapter->hw.mac.type == e1000_i354)) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + +- u64_stats_init(&ring->rx_syncp); +- + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } +@@ -1309,13 +1218,12 @@ + return 0; + } + +- + /** +- * igb_alloc_q_vectors - Allocate memory for interrupt vectors +- * @adapter: board private structure to initialize ++ * igb_alloc_q_vectors - Allocate memory for interrupt vectors ++ * @adapter: board private structure to initialize + * +- * We allocate one q_vector per queue interrupt. If allocation fails we +- * return -ENOMEM. ++ * We allocate one q_vector per queue interrupt. If allocation fails we ++ * return -ENOMEM. + **/ + static int igb_alloc_q_vectors(struct igb_adapter *adapter) + { +@@ -1370,11 +1278,9 @@ + } + + /** +- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors +- * @adapter: board private structure to initialize +- * @msix: boolean value of MSIX capability ++ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * +- * This function initializes the interrupts and allocates all of the queues. ++ * This function initializes the interrupts and allocates all of the queues. + **/ + static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) + { +@@ -1385,7 +1291,7 @@ + + err = igb_alloc_q_vectors(adapter); + if (err) { +- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); ++ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + +@@ -1399,11 +1305,10 @@ + } + + /** +- * igb_request_irq - initialize interrupts +- * @adapter: board private structure to initialize ++ * igb_request_irq - initialize interrupts + * +- * Attempts to configure interrupts using the best available +- * capabilities of the hardware and kernel. ++ * Attempts to configure interrupts using the best available ++ * capabilities of the hardware and kernel. 
+ **/ + static int igb_request_irq(struct igb_adapter *adapter) + { +@@ -1411,7 +1316,7 @@ + struct pci_dev *pdev = adapter->pdev; + int err = 0; + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; +@@ -1420,10 +1325,10 @@ + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); ++ igb_reset_sriov_capability(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; +- + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); +@@ -1432,7 +1337,7 @@ + igb_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGB_FLAG_HAS_MSI) { +- err = request_irq(pdev->irq, igb_intr_msi, 0, ++ err = request_irq(pdev->irq, &igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; +@@ -1442,11 +1347,11 @@ + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + +- err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, ++ err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) +- dev_err(&pdev->dev, "Error %d getting interrupt\n", ++ dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n", + err); + + request_done: +@@ -1455,7 +1360,7 @@ + + static void igb_free_irq(struct igb_adapter *adapter) + { +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); +@@ -1469,64 +1374,76 @@ + } + + /** +- * igb_irq_disable - Mask off interrupt generation on the NIC +- * @adapter: board private structure ++ * igb_irq_disable - Mask off interrupt generation on the NIC ++ * @adapter: board private structure + **/ + static void igb_irq_disable(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + +- /* we need to be careful when disabling interrupts. The VFs are also ++ /* ++ * we need to be careful when disabling interrupts. 
The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { +- u32 regval = rd32(E1000_EIAM); ++ if (adapter->msix_entries) { ++ u32 regval = E1000_READ_REG(hw, E1000_EIAM); + +- wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); +- wr32(E1000_EIMC, adapter->eims_enable_mask); +- regval = rd32(E1000_EIAC); +- wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); +- } ++ E1000_WRITE_REG(hw, E1000_EIAM, regval ++ & ~adapter->eims_enable_mask); ++ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask); ++ regval = E1000_READ_REG(hw, E1000_EIAC); ++ E1000_WRITE_REG(hw, E1000_EIAC, regval ++ & ~adapter->eims_enable_mask); ++ } ++ ++ E1000_WRITE_REG(hw, E1000_IAM, 0); ++ E1000_WRITE_REG(hw, E1000_IMC, ~0); ++ E1000_WRITE_FLUSH(hw); + +- wr32(E1000_IAM, 0); +- wr32(E1000_IMC, ~0); +- wrfl(); +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { +- int i; ++ if (adapter->msix_entries) { ++ int vector = 0, i; ++ ++ synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) +- synchronize_irq(adapter->msix_entries[i].vector); ++ synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } + } + + /** +- * igb_irq_enable - Enable default interrupt generation settings +- * @adapter: board private structure ++ * igb_irq_enable - Enable default interrupt generation settings ++ * @adapter: board private structure + **/ + static void igb_irq_enable(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; +- u32 regval = rd32(E1000_EIAC); ++ u32 regval = E1000_READ_REG(hw, E1000_EIAC); + +- wr32(E1000_EIAC, regval | adapter->eims_enable_mask); +- regval = rd32(E1000_EIAM); +- wr32(E1000_EIAM, regval | adapter->eims_enable_mask); +- wr32(E1000_EIMS, adapter->eims_enable_mask); ++ E1000_WRITE_REG(hw, E1000_EIAC, regval ++ | adapter->eims_enable_mask); ++ regval = E1000_READ_REG(hw, E1000_EIAM); ++ E1000_WRITE_REG(hw, E1000_EIAM, regval ++ | adapter->eims_enable_mask); ++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { +- wr32(E1000_MBVFIMR, 0xFF); ++ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; ++ if (adapter->mdd) ++ if ((adapter->hw.mac.type == e1000_i350) || ++ (adapter->hw.mac.type == e1000_i354)) ++ ims |= E1000_IMS_MDDET; + } +- wr32(E1000_IMS, ims); ++ E1000_WRITE_REG(hw, E1000_IMS, ims); + } else { +- wr32(E1000_IMS, IMS_ENABLE_MASK | ++ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); +- wr32(E1000_IAM, IMS_ENABLE_MASK | ++ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } + } +@@ -1539,7 +1456,7 @@ + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ +- igb_vfta_set(hw, vid, true); ++ igb_vfta_set(adapter, vid, TRUE); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; +@@ -1547,19 +1464,24 @@ + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && ++#ifdef HAVE_VLAN_RX_REGISTER ++ !vlan_group_get_device(adapter->vlgrp, old_vid)) { ++#else + !test_bit(old_vid, adapter->active_vlans)) { ++#endif + /* remove VID from filter table */ +- igb_vfta_set(hw, old_vid, false); ++ igb_vfta_set(adapter, old_vid, 
FALSE); + } + } + + /** +- * igb_release_hw_control - release control of the h/w to f/w +- * @adapter: address of board private structure ++ * igb_release_hw_control - release control of the h/w to f/w ++ * @adapter: address of board private structure ++ * ++ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. ++ * For ASF and Pass Through versions of f/w this means that the ++ * driver is no longer loaded. + * +- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. +- * For ASF and Pass Through versions of f/w this means that the +- * driver is no longer loaded. + **/ + static void igb_release_hw_control(struct igb_adapter *adapter) + { +@@ -1567,18 +1489,19 @@ + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- wr32(E1000_CTRL_EXT, ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } + + /** +- * igb_get_hw_control - get control of the h/w from f/w +- * @adapter: address of board private structure ++ * igb_get_hw_control - get control of the h/w from f/w ++ * @adapter: address of board private structure ++ * ++ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. ++ * For ASF and Pass Through versions of f/w this means that ++ * the driver is loaded. + * +- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. +- * For ASF and Pass Through versions of f/w this means that +- * the driver is loaded. + **/ + static void igb_get_hw_control(struct igb_adapter *adapter) + { +@@ -1586,14 +1509,14 @@ + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- wr32(E1000_CTRL_EXT, ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } + + /** +- * igb_configure - configure the hardware for RX and TX +- * @adapter: private board structure ++ * igb_configure - configure the hardware for RX and TX ++ * @adapter: private board structure + **/ + static void igb_configure(struct igb_adapter *adapter) + { +@@ -1612,7 +1535,13 @@ + igb_configure_tx(adapter); + igb_configure_rx(adapter); + +- igb_rx_fifo_flush_82575(&adapter->hw); ++ e1000_rx_fifo_flush_82575(&adapter->hw); ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++ if (adapter->num_tx_queues > 1) ++ netdev->features |= NETIF_F_MULTI_QUEUE; ++ else ++ netdev->features &= ~NETIF_F_MULTI_QUEUE; ++#endif + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure +@@ -1625,45 +1554,42 @@ + } + + /** +- * igb_power_up_link - Power up the phy/serdes link +- * @adapter: address of board private structure ++ * igb_power_up_link - Power up the phy/serdes link ++ * @adapter: address of board private structure + **/ + void igb_power_up_link(struct igb_adapter *adapter) + { +- igb_reset_phy(&adapter->hw); ++ igb_e1000_phy_hw_reset(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) +- igb_power_up_phy_copper(&adapter->hw); ++ igb_e1000_power_up_phy(&adapter->hw); + else +- igb_power_up_serdes_link_82575(&adapter->hw); +- +- igb_setup_link(&adapter->hw); ++ e1000_power_up_fiber_serdes_link(&adapter->hw); + } + + /** +- * igb_power_down_link - Power down the phy/serdes link +- * @adapter: address of board private structure ++ * igb_power_down_link - Power down the phy/serdes link ++ * @adapter: address of board private structure + */ + static void igb_power_down_link(struct igb_adapter *adapter) + { + if (adapter->hw.phy.media_type == e1000_media_type_copper) +- 
igb_power_down_phy_copper_82575(&adapter->hw); ++ e1000_power_down_phy(&adapter->hw); + else +- igb_shutdown_serdes_link_82575(&adapter->hw); ++ e1000_shutdown_fiber_serdes_link(&adapter->hw); + } + +-/** +- * Detect and switch function for Media Auto Sense +- * @adapter: address of the board private structure +- **/ ++/* Detect and switch function for Media Auto Sense */ + static void igb_check_swap_media(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; ++ bool link; + +- ctrl_ext = rd32(E1000_CTRL_EXT); +- connsw = rd32(E1000_CONNSW); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ connsw = E1000_READ_REG(hw, E1000_CONNSW); ++ link = igb_has_link(adapter); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. +@@ -1674,10 +1600,10 @@ + swap_now = true; + } else if (!(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ +- if (adapter->copper_tries < 4) { ++ if (adapter->copper_tries < 3) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; +- wr32(E1000_CONNSW, connsw); ++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; +@@ -1685,143 +1611,263 @@ + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; +- wr32(E1000_CONNSW, connsw); ++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); + } + } + } + +- if (!swap_now) +- return; +- +- switch (hw->phy.media_type) { +- case e1000_media_type_copper: +- netdev_info(adapter->netdev, +- "MAS: changing media to fiber/serdes\n"); +- ctrl_ext |= +- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; +- adapter->flags |= IGB_FLAG_MEDIA_RESET; +- adapter->copper_tries = 0; +- break; +- case e1000_media_type_internal_serdes: +- case e1000_media_type_fiber: +- netdev_info(adapter->netdev, +- "MAS: changing media to copper\n"); +- ctrl_ext &= +- ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; +- adapter->flags |= IGB_FLAG_MEDIA_RESET; +- break; +- default: +- /* shouldn't get here during regular operation */ +- netdev_err(adapter->netdev, +- "AMS: Invalid media type found, returning\n"); +- break; ++ if (swap_now) { ++ switch (hw->phy.media_type) { ++ case e1000_media_type_copper: ++ dev_info(pci_dev_to_dev(adapter->pdev), ++ "%s:MAS: changing media to fiber/serdes\n", ++ adapter->netdev->name); ++ ctrl_ext |= ++ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ++ adapter->flags |= IGB_FLAG_MEDIA_RESET; ++ adapter->copper_tries = 0; ++ break; ++ case e1000_media_type_internal_serdes: ++ case e1000_media_type_fiber: ++ dev_info(pci_dev_to_dev(adapter->pdev), ++ "%s:MAS: changing media to copper\n", ++ adapter->netdev->name); ++ ctrl_ext &= ++ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ++ adapter->flags |= IGB_FLAG_MEDIA_RESET; ++ break; ++ default: ++ /* shouldn't get here during regular operation */ ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "%s:AMS: Invalid media type found, returning\n", ++ adapter->netdev->name); ++ break; ++ } ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } +- wr32(E1000_CTRL_EXT, ctrl_ext); + } + +-/** +- * igb_up - Open the interface and prepare it to handle traffic +- * @adapter: board private structure +- **/ +-int igb_up(struct igb_adapter *adapter) ++#ifdef HAVE_I2C_SUPPORT ++/* igb_get_i2c_data - Reads the I2C SDA data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Returns the I2C data bit value ++ */ ++static int igb_get_i2c_data(void *data) + { ++ struct igb_adapter 
*adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; +- int i; +- +- /* hardware has been reset, we need to reload some things */ +- igb_configure(adapter); ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- clear_bit(__IGB_DOWN, &adapter->state); ++ return !!(i2cctl & E1000_I2C_DATA_IN); ++} + +- for (i = 0; i < adapter->num_q_vectors; i++) +- napi_enable(&(adapter->q_vector[i]->napi)); ++/* igb_set_i2c_data - Sets the I2C data bit ++ * @data: pointer to hardware structure ++ * @state: I2C data value (0 or 1) to set ++ * ++ * Sets the I2C data bit ++ */ ++static void igb_set_i2c_data(void *data, int state) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ struct e1000_hw *hw = &adapter->hw; ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) +- igb_configure_msix(adapter); ++ if (state) ++ i2cctl |= E1000_I2C_DATA_OUT; + else +- igb_assign_vector(adapter->q_vector[0], 0); +- +- /* Clear any pending interrupts. */ +- rd32(E1000_ICR); +- igb_irq_enable(adapter); +- +- /* notify VFs that reset has been completed */ +- if (adapter->vfs_allocated_count) { +- u32 reg_data = rd32(E1000_CTRL_EXT); ++ i2cctl &= ~E1000_I2C_DATA_OUT; + +- reg_data |= E1000_CTRL_EXT_PFRSTD; +- wr32(E1000_CTRL_EXT, reg_data); +- } ++ i2cctl &= ~E1000_I2C_DATA_OE_N; ++ i2cctl |= E1000_I2C_CLK_OE_N; + +- netif_tx_start_all_queues(adapter->netdev); ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); ++ E1000_WRITE_FLUSH(hw); + +- /* start the watchdog. */ +- hw->mac.get_link_status = 1; +- schedule_work(&adapter->watchdog_task); ++} + +- if ((adapter->flags & IGB_FLAG_EEE) && +- (!hw->dev_spec._82575.eee_disable)) +- adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; ++/* igb_set_i2c_clk - Sets the I2C SCL clock ++ * @data: pointer to hardware structure ++ * @state: state to set clock ++ * ++ * Sets the I2C clock line to state ++ */ ++static void igb_set_i2c_clk(void *data, int state) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ struct e1000_hw *hw = &adapter->hw; ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- return 0; ++ if (state) { ++ i2cctl |= E1000_I2C_CLK_OUT; ++ i2cctl &= ~E1000_I2C_CLK_OE_N; ++ } else { ++ i2cctl &= ~E1000_I2C_CLK_OUT; ++ i2cctl &= ~E1000_I2C_CLK_OE_N; ++ } ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); ++ E1000_WRITE_FLUSH(hw); + } + +-void igb_down(struct igb_adapter *adapter) ++/* igb_get_i2c_clk - Gets the I2C SCL clock state ++ * @data: pointer to hardware structure ++ * ++ * Gets the I2C clock state ++ */ ++static int igb_get_i2c_clk(void *data) + { +- struct net_device *netdev = adapter->netdev; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; +- u32 tctl, rctl; +- int i; ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- /* signal that we're down so the interrupt handler does not +- * reschedule our watchdog timer +- */ +- set_bit(__IGB_DOWN, &adapter->state); ++ return !!(i2cctl & E1000_I2C_CLK_IN); ++} ++ ++static const struct i2c_algo_bit_data igb_i2c_algo = { ++ .setsda = igb_set_i2c_data, ++ .setscl = igb_set_i2c_clk, ++ .getsda = igb_get_i2c_data, ++ .getscl = igb_get_i2c_clk, ++ .udelay = 5, ++ .timeout = 20, ++}; ++ ++/* igb_init_i2c - Init I2C interface ++ * @adapter: pointer to adapter structure ++ * ++ */ ++static s32 igb_init_i2c(struct igb_adapter *adapter) ++{ ++ s32 status = E1000_SUCCESS; ++ ++ /* I2C interface supported on i350 devices */ ++ if (adapter->hw.mac.type != e1000_i350) ++ 
return E1000_SUCCESS; ++ ++ /* Initialize the i2c bus which is controlled by the registers. ++ * This bus will use the i2c_algo_bit structue that implements ++ * the protocol through toggling of the 4 bits in the register. ++ */ ++ adapter->i2c_adap.owner = THIS_MODULE; ++ adapter->i2c_algo = igb_i2c_algo; ++ adapter->i2c_algo.data = adapter; ++ adapter->i2c_adap.algo_data = &adapter->i2c_algo; ++ adapter->i2c_adap.dev.parent = &adapter->pdev->dev; ++ strlcpy(adapter->i2c_adap.name, "igb BB", ++ sizeof(adapter->i2c_adap.name)); ++ status = i2c_bit_add_bus(&adapter->i2c_adap); ++ return status; ++} ++ ++#endif /* HAVE_I2C_SUPPORT */ ++/** ++ * igb_up - Open the interface and prepare it to handle traffic ++ * @adapter: board private structure ++ **/ ++int igb_up(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ int i; ++ ++ /* hardware has been reset, we need to reload some things */ ++ igb_configure(adapter); ++ ++ clear_bit(__IGB_DOWN, &adapter->state); ++ ++ for (i = 0; i < adapter->num_q_vectors; i++) ++ napi_enable(&(adapter->q_vector[i]->napi)); ++ ++ if (adapter->msix_entries) ++ igb_configure_msix(adapter); ++ else ++ igb_assign_vector(adapter->q_vector[0], 0); ++ ++ igb_configure_lli(adapter); ++ ++ /* Clear any pending interrupts. */ ++ E1000_READ_REG(hw, E1000_ICR); ++ igb_irq_enable(adapter); ++ ++ /* notify VFs that reset has been completed */ ++ if (adapter->vfs_allocated_count) { ++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ++ reg_data |= E1000_CTRL_EXT_PFRSTD; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); ++ } ++ ++ netif_tx_start_all_queues(adapter->netdev); ++ ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ schedule_work(&adapter->dma_err_task); ++ /* start the watchdog. */ ++ hw->mac.get_link_status = 1; ++ schedule_work(&adapter->watchdog_task); ++ ++ if ((adapter->flags & IGB_FLAG_EEE) && ++ (!hw->dev_spec._82575.eee_disable)) ++ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; ++ ++ return 0; ++} ++ ++void igb_down(struct igb_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 tctl, rctl; ++ int i; ++ ++ /* signal that we're down so the interrupt handler does not ++ * reschedule our watchdog timer ++ */ ++ set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ +- rctl = rd32(E1000_RCTL); +- wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + ++ netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ +- tctl = rd32(E1000_TCTL); ++ tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_EN; +- wr32(E1000_TCTL, tctl); ++ E1000_WRITE_REG(hw, E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); ++ ++ for (i = 0; i < adapter->num_q_vectors; i++) ++ napi_disable(&(adapter->q_vector[i]->napi)); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + +- for (i = 0; i < adapter->num_q_vectors; i++) { +- napi_synchronize(&(adapter->q_vector[i]->napi)); +- napi_disable(&(adapter->q_vector[i]->napi)); +- } +- +- + del_timer_sync(&adapter->watchdog_timer); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ del_timer_sync(&adapter->dma_err_timer); + del_timer_sync(&adapter->phy_info_timer); + +- netif_carrier_off(netdev); +- + 
/* record the stats before reset*/ +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, &adapter->stats64); +- spin_unlock(&adapter->stats64_lock); ++ igb_update_stats(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + ++#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); ++#else ++ igb_reset(adapter); ++#endif + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +-#ifdef CONFIG_IGB_DCA +- ++#ifdef IGB_DCA + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); + #endif +@@ -1837,35 +1883,26 @@ + clear_bit(__IGB_RESETTING, &adapter->state); + } + +-/** igb_enable_mas - Media Autosense re-enable after swap ++/** ++ * igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +-static s32 igb_enable_mas(struct igb_adapter *adapter) ++void igb_enable_mas(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 connsw; +- s32 ret_val = 0; + +- connsw = rd32(E1000_CONNSW); +- if (!(hw->phy.media_type == e1000_media_type_copper)) +- return ret_val; ++ connsw = E1000_READ_REG(hw, E1000_CONNSW); + + /* configure for SerDes media detect */ +- if (!(connsw & E1000_CONNSW_SERDESD)) { ++ if ((hw->phy.media_type == e1000_media_type_copper) && ++ (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; +- wr32(E1000_CONNSW, connsw); +- wrfl(); +- } else if (connsw & E1000_CONNSW_SERDESD) { +- /* already SerDes, no need to enable anything */ +- return ret_val; +- } else { +- netdev_info(adapter->netdev, +- "MAS: Unable to configure feature, disabling..\n"); +- adapter->flags &= ~IGB_FLAG_MAS_ENABLE; ++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); ++ E1000_WRITE_FLUSH(hw); + } +- return ret_val; + } + + void igb_reset(struct igb_adapter *adapter) +@@ -1881,13 +1918,13 @@ + */ + switch (mac->type) { + case e1000_i350: +- case e1000_i354: + case e1000_82580: +- pba = rd32(E1000_RXPBS); +- pba = igb_rxpbs_adjust_82580(pba); ++ case e1000_i354: ++ pba = E1000_READ_REG(hw, E1000_RXPBS); ++ pba = e1000_rxpbs_adjust_82580(pba); + break; + case e1000_82576: +- pba = rd32(E1000_RXPBS); ++ pba = E1000_READ_REG(hw, E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: +@@ -1901,7 +1938,7 @@ + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && + (mac->type < e1000_82576)) { + /* adjust PBA for jumbo frames */ +- wr32(E1000_PBA, pba); ++ E1000_WRITE_REG(hw, E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, +@@ -1910,12 +1947,12 @@ + * one full receive packet and is similarly rounded up and + * expressed in KB. 
+ */ +- pba = rd32(E1000_PBA); ++ pba = E1000_READ_REG(hw, E1000_PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; +- /* the Tx fifo also stores 16 bytes of information about the Tx ++ /* the tx fifo also stores 16 bytes of information about the tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + +@@ -1936,13 +1973,13 @@ + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + +- /* if short on Rx space, Rx wins and must trump Tx ++ /* if short on rx space, rx wins and must trump tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } +- wr32(E1000_PBA, pba); ++ E1000_WRITE_REG(hw, E1000_PBA, pba); + } + + /* flow control settings */ +@@ -1965,6 +2002,10 @@ + if (adapter->vfs_allocated_count) { + int i; + ++ /* ++ * Clear all flags except indication that the PF has set ++ * the VF MAC addresses administratively ++ */ + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + +@@ -1972,116 +2013,334 @@ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ +- wr32(E1000_VFRE, 0); +- wr32(E1000_VFTE, 0); ++ E1000_WRITE_REG(hw, E1000_VFRE, 0); ++ E1000_WRITE_REG(hw, E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ +- hw->mac.ops.reset_hw(hw); +- wr32(E1000_WUC, 0); ++ igb_e1000_reset_hw(hw); ++ E1000_WRITE_REG(hw, E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { +- /* need to resetup here after media swap */ +- adapter->ei.get_invariants(hw); ++ e1000_setup_init_funcs(hw, TRUE); ++ igb_check_options(adapter); ++ igb_e1000_get_bus_info(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } +- if (adapter->flags & IGB_FLAG_MAS_ENABLE) { +- if (igb_enable_mas(adapter)) +- dev_err(&pdev->dev, +- "Error enabling Media Auto Sense\n"); ++ if ((mac->type == e1000_82575) && ++ (adapter->flags & IGB_FLAG_MAS_ENABLE)) { ++ igb_enable_mas(adapter); + } +- if (hw->mac.ops.init_hw(hw)) +- dev_err(&pdev->dev, "Hardware Error\n"); ++ if (igb_e1000_init_hw(hw)) ++ dev_err(pci_dev_to_dev(pdev), "Hardware Error\n"); + +- /* Flow control settings reset on hardware reset, so guarantee flow ++ /* ++ * Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) +- igb_force_mac_fc(hw); ++ igb_e1000_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +-#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ +- if (!test_bit(__IGB_DOWN, &adapter->state)) { +- if (mac->type == e1000_i350 && hw->bus.func == 0) { +- /* If present, re-initialize the external thermal sensor +- * interface. +- */ +- if (adapter->ets) +- mac->ops.init_thermal_sensor_thresh(hw); +- } ++ if (mac->type == e1000_i350 && hw->bus.func == 0) { ++ /* ++ * If present, re-initialize the external thermal sensor ++ * interface. 
++ */ ++ if (adapter->ets) ++ e1000_set_i2c_bb(hw); ++ e1000_init_thermal_sensor_thresh(hw); + } +-#endif +- /* Re-establish EEE setting */ ++ ++ /*Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: +- igb_set_eee_i350(hw); ++ e1000_set_eee_i350(hw, true, true); + break; + case e1000_i354: +- igb_set_eee_i354(hw); ++ e1000_set_eee_i354(hw, true, true); + break; + default: + break; + } + } ++ + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ +- wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); ++ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + ++#ifdef HAVE_PTP_1588_CLOCK + /* Re-enable PTP, where applicable. */ + igb_ptp_reset(adapter); ++#endif /* HAVE_PTP_1588_CLOCK */ + +- igb_get_phy_info(hw); ++ e1000_get_phy_info(hw); ++ ++ adapter->devrc++; + } + ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++static u32 igb_fix_features(struct net_device *netdev, ++ u32 features) ++#else + static netdev_features_t igb_fix_features(struct net_device *netdev, +- netdev_features_t features) ++ netdev_features_t features) ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + { +- /* Since there is no support for separate Rx/Tx vlan accel +- * enable/disable make sure Tx flag is always in same state as Rx. ++ /* ++ * Since there is no support for separate tx vlan accel ++ * enabled make sure tx flag is cleared if rx is. + */ +- if (features & NETIF_F_HW_VLAN_CTAG_RX) +- features |= NETIF_F_HW_VLAN_CTAG_TX; +- else ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; ++#else ++ if (!(features & NETIF_F_HW_VLAN_RX)) ++ features &= ~NETIF_F_HW_VLAN_TX; ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++ ++#ifndef IGB_NO_LRO ++ /* If Rx checksum is disabled, then LRO should also be disabled */ ++ if (!(features & NETIF_F_RXCSUM)) ++ features &= ~NETIF_F_LRO; + ++#endif + return features; + } + + static int igb_set_features(struct net_device *netdev, +- netdev_features_t features) ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ u32 features) ++#else ++ netdev_features_t features) ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + { + netdev_features_t changed = netdev->features ^ features; ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + struct igb_adapter *adapter = netdev_priv(netdev); ++#endif + ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (changed & NETIF_F_HW_VLAN_CTAG_RX) ++#else ++ if (changed & NETIF_F_HW_VLAN_RX) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++ netdev->features = features; ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_vlan_mode(netdev, adapter->vlgrp); ++#else + igb_vlan_mode(netdev, features); ++#endif + +- if (!(changed & NETIF_F_RXALL)) ++ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + netdev->features = features; + +- if (netif_running(netdev)) +- igb_reinit_locked(adapter); +- else +- igb_reset(adapter); ++ return 0; ++} ++#endif /* HAVE_NDO_SET_FEATURES */ ++ ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr, ++#ifdef HAVE_NDO_FDB_ADD_VID ++ u16 vid, ++#endif /* HAVE_NDO_FDB_ADD_VID */ ++ u16 flags) ++#else /* USE_CONST_DEV_UC_CHAR */ ++static int igb_ndo_fdb_add(struct ndmsg *ndm, ++ struct net_device *dev, ++ unsigned char *addr, ++ u16 flags) ++#endif /* 
USE_CONST_DEV_UC_CHAR */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ struct e1000_hw *hw = &adapter->hw; ++ int err; ++ ++ if (!(adapter->vfs_allocated_count)) ++ return -EOPNOTSUPP; ++ ++ /* Hardware does not support aging addresses so if a ++ * ndm_state is given only allow permanent addresses ++ */ ++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", ++ igb_driver_name); ++ return -EINVAL; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { ++ u32 rar_uc_entries = hw->mac.rar_entry_count - ++ (adapter->vfs_allocated_count + 1); ++ ++ if (netdev_uc_count(dev) < rar_uc_entries) ++ err = dev_uc_add_excl(dev, addr); ++ else ++ err = -ENOMEM; ++ } else if (is_multicast_ether_addr(addr)) { ++ err = dev_mc_add_excl(dev, addr); ++ } else { ++ err = -EINVAL; ++ } ++ ++ /* Only return duplicate errors if NLM_F_EXCL is set */ ++ if (err == -EEXIST && !(flags & NLM_F_EXCL)) ++ err = 0; ++ ++ return err; ++} ++ ++#ifndef USE_DEFAULT_FDB_DEL_DUMP ++#ifdef USE_CONST_DEV_UC_CHAR ++static int igb_ndo_fdb_del(struct ndmsg *ndm, ++ struct net_device *dev, ++ const unsigned char *addr) ++#else ++static int igb_ndo_fdb_del(struct ndmsg *ndm, ++ struct net_device *dev, ++ unsigned char *addr) ++#endif /* USE_CONST_DEV_UC_CHAR */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ int err = -EOPNOTSUPP; ++ ++ if (ndm->ndm_state & NUD_PERMANENT) { ++ pr_info("%s: FDB only supports static addresses\n", ++ igb_driver_name); ++ return -EINVAL; ++ } ++ ++ if (adapter->vfs_allocated_count) { ++ if (is_unicast_ether_addr(addr)) ++ err = dev_uc_del(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_del(dev, addr); ++ else ++ err = -EINVAL; ++ } ++ ++ return err; ++} ++ ++static int igb_ndo_fdb_dump(struct sk_buff *skb, ++ struct netlink_callback *cb, ++ struct net_device *dev, ++ int idx) ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ ++ if (adapter->vfs_allocated_count) ++ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); ++ ++ return idx; ++} ++#endif /* USE_DEFAULT_FDB_DEL_DUMP */ ++#ifdef HAVE_BRIDGE_ATTRIBS ++#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++static int igb_ndo_bridge_setlink(struct net_device *dev, ++ struct nlmsghdr *nlh, ++ u16 flags) ++#else ++static int igb_ndo_bridge_setlink(struct net_device *dev, ++ struct nlmsghdr *nlh) ++#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ struct e1000_hw *hw = &adapter->hw; ++ struct nlattr *attr, *br_spec; ++ int rem; ++ ++ if (!(adapter->vfs_allocated_count)) ++ return -EOPNOTSUPP; ++ ++ switch (adapter->hw.mac.type) { ++ case e1000_82576: ++ case e1000_i350: ++ case e1000_i354: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); ++ ++ nla_for_each_nested(attr, br_spec, rem) { ++ __u16 mode; ++ ++ if (nla_type(attr) != IFLA_BRIDGE_MODE) ++ continue; ++ ++ mode = nla_get_u16(attr); ++ if (mode == BRIDGE_MODE_VEPA) { ++ e1000_vmdq_set_loopback_pf(hw, 0); ++ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE; ++ } else if (mode == BRIDGE_MODE_VEB) { ++ e1000_vmdq_set_loopback_pf(hw, 1); ++ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; ++ } else ++ return -EINVAL; ++ ++ netdev_info(adapter->netdev, "enabling bridge mode: %s\n", ++ mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); ++ } + + return 0; + } + ++#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev, u32 filter_mask, ++ int nlflags) ++#elif defined(HAVE_BRIDGE_FILTER) ++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev, u32 filter_mask) ++#else ++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev) ++#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ u16 mode; ++ ++ if (!(adapter->vfs_allocated_count)) ++ return -EOPNOTSUPP; ++ ++ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE) ++ mode = BRIDGE_MODE_VEB; ++ else ++ mode = BRIDGE_MODE_VEPA; ++#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, ++ filter_mask, NULL); ++#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); ++#elif defined(NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS) ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); ++#else ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); ++#endif /* NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS */ ++} ++#endif /* HAVE_BRIDGE_ATTRIBS */ ++#endif /* HAVE_FDB_OPS */ ++#ifdef HAVE_NET_DEVICE_OPS + static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, +- .ndo_get_stats64 = igb_get_stats64, ++ .ndo_get_stats = igb_get_stats, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, +@@ -2090,60 +2349,190 @@ + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, ++#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = igb_ndo_set_vf_bw, +- .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, ++#else ++ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, ++#endif /*HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + .ndo_get_vf_config = igb_ndo_get_vf_config, ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ++ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, ++#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ ++#endif /* IFLA_VF_MAX */ + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igb_netpoll, + #endif ++#ifdef HAVE_VLAN_RX_REGISTER ++ .ndo_vlan_rx_register = igb_vlan_mode, ++#endif ++#ifdef HAVE_FDB_OPS ++ .ndo_fdb_add = igb_ndo_fdb_add, ++#ifndef USE_DEFAULT_FDB_DEL_DUMP ++ .ndo_fdb_del = igb_ndo_fdb_del, ++ .ndo_fdb_dump = igb_ndo_fdb_dump, ++#endif ++#ifdef HAVE_BRIDGE_ATTRIBS ++ .ndo_bridge_setlink = igb_ndo_bridge_setlink, ++ .ndo_bridge_getlink = igb_ndo_bridge_getlink, ++#endif /* HAVE_BRIDGE_ATTRIBS */ ++#endif ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++}; ++ ++/* RHEL6 keeps these operations in a separate structure */ ++static const struct net_device_ops_ext igb_netdev_ops_ext = { ++ .size = sizeof(struct net_device_ops_ext), ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, ++#endif /* HAVE_NDO_SET_FEATURES */ + }; + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++static const struct net_device_ops igb_vmdq_ops = { ++ .ndo_open = &igb_vmdq_open, ++ .ndo_stop = &igb_vmdq_close, ++ .ndo_start_xmit = &igb_vmdq_xmit_frame, ++ .ndo_get_stats = 
&igb_vmdq_get_stats, ++ .ndo_set_rx_mode = &igb_vmdq_set_rx_mode, ++ .ndo_validate_addr = eth_validate_addr, ++ .ndo_set_mac_address = &igb_vmdq_set_mac, ++ .ndo_change_mtu = &igb_vmdq_change_mtu, ++ .ndo_tx_timeout = &igb_vmdq_tx_timeout, ++ .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register, ++ .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid, ++ .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid, ++}; ++ ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++#endif /* HAVE_NET_DEVICE_OPS */ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev) ++{ ++#ifdef HAVE_NET_DEVICE_OPS ++ vnetdev->netdev_ops = &igb_vmdq_ops; ++#else ++ dev->open = &igb_vmdq_open; ++ dev->stop = &igb_vmdq_close; ++ dev->hard_start_xmit = &igb_vmdq_xmit_frame; ++ dev->get_stats = &igb_vmdq_get_stats; ++#ifdef HAVE_SET_RX_MODE ++ dev->set_rx_mode = &igb_vmdq_set_rx_mode; ++#endif ++ dev->set_multicast_list = &igb_vmdq_set_rx_mode; ++ dev->set_mac_address = &igb_vmdq_set_mac; ++ dev->change_mtu = &igb_vmdq_change_mtu; ++#ifdef HAVE_TX_TIMEOUT ++ dev->tx_timeout = &igb_vmdq_tx_timeout; ++#endif ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++ dev->vlan_rx_register = &igb_vmdq_vlan_rx_register; ++ dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid; ++ dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid; ++#endif ++#endif /* HAVE_NET_DEVICE_OPS */ ++ igb_vmdq_set_ethtool_ops(vnetdev); ++ vnetdev->watchdog_timeo = 5 * HZ; ++ ++} ++ ++int igb_init_vmdq_netdevs(struct igb_adapter *adapter) ++{ ++ int pool, err = 0, base_queue; ++ struct net_device *vnetdev; ++ struct igb_vmdq_adapter *vmdq_adapter; ++ ++ for (pool = 1; pool < adapter->vmdq_pools; pool++) { ++ int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues); ++ ++ base_queue = pool * qpp; ++ vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter)); ++ if (!vnetdev) { ++ err = -ENOMEM; ++ break; ++ } ++ ++ vmdq_adapter = netdev_priv(vnetdev); ++ vmdq_adapter->vnetdev = vnetdev; ++ vmdq_adapter->real_adapter = adapter; ++ vmdq_adapter->rx_ring = adapter->rx_ring[base_queue]; ++ vmdq_adapter->tx_ring = adapter->tx_ring[base_queue]; ++ igb_assign_vmdq_netdev_ops(vnetdev); ++ snprintf(vnetdev->name, IFNAMSIZ, "%sv%d", ++ adapter->netdev->name, pool); ++ vnetdev->features = adapter->netdev->features; ++#ifdef HAVE_NETDEV_VLAN_FEATURES ++ vnetdev->vlan_features = adapter->netdev->vlan_features; ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ adapter->vmdq_netdev[pool-1] = vnetdev; ++ err = register_netdev(vnetdev); ++ if (err) ++ break; ++ } ++ return err; ++} ++ ++int igb_remove_vmdq_netdevs(struct igb_adapter *adapter) ++{ ++ int pool, err = 0; ++ ++ for (pool = 1; pool < adapter->vmdq_pools; pool++) { ++ unregister_netdev(adapter->vmdq_netdev[pool-1]); ++ free_netdev(adapter->vmdq_netdev[pool-1]); ++ adapter->vmdq_netdev[pool-1] = NULL; ++ } ++ return err; ++} ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ + /** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct ++ * + **/ +-void igb_set_fw_version(struct igb_adapter *adapter) ++static void igb_set_fw_version(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + +- igb_get_fw_version(hw, &fw); ++ e1000_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: +- if (!(igb_get_flash_presence_i210(hw))) { ++ if (!(e1000_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, +- sizeof(adapter->fw_version), +- "%2d.%2d-%d", +- fw.invm_major, fw.invm_minor, 
+- fw.invm_img_type); ++ sizeof(adapter->fw_version), ++ "%2d.%2d-%d", ++ fw.invm_major, fw.invm_minor, fw.invm_img_type); + break; + } + /* fall through */ + default: +- /* if option is rom valid, display its version too */ ++ /* if option rom is valid, display its version too*/ + if (fw.or_valid) { + snprintf(adapter->fw_version, +- sizeof(adapter->fw_version), +- "%d.%d, 0x%08x, %d.%d.%d", +- fw.eep_major, fw.eep_minor, fw.etrack_id, +- fw.or_major, fw.or_build, fw.or_patch); ++ sizeof(adapter->fw_version), ++ "%d.%d, 0x%08x, %d.%d.%d", ++ fw.eep_major, fw.eep_minor, fw.etrack_id, ++ fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ +- } else if (fw.etrack_id != 0X0000) { ++ } else { ++ if (fw.etrack_id != 0X0000) { ++ snprintf(adapter->fw_version, ++ sizeof(adapter->fw_version), ++ "%d.%d, 0x%08x", ++ fw.eep_major, fw.eep_minor, fw.etrack_id); ++ } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), +- "%d.%d, 0x%08x", +- fw.eep_major, fw.eep_minor, fw.etrack_id); +- } else { +- snprintf(adapter->fw_version, +- sizeof(adapter->fw_version), +- "%d.%d.%d", +- fw.eep_major, fw.eep_minor, fw.eep_build); ++ "%d.%d.%d", ++ fw.eep_major, fw.eep_minor, fw.eep_build); ++ } + } + break; + } +@@ -2159,126 +2548,130 @@ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + +- hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); ++ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: +- if (eeprom_data & IGB_MAS_ENABLE_0) { ++ if (eeprom_data & IGB_MAS_ENABLE_0) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + case E1000_FUNC_1: +- if (eeprom_data & IGB_MAS_ENABLE_1) { ++ if (eeprom_data & IGB_MAS_ENABLE_1) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + case E1000_FUNC_2: +- if (eeprom_data & IGB_MAS_ENABLE_2) { ++ if (eeprom_data & IGB_MAS_ENABLE_2) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + case E1000_FUNC_3: +- if (eeprom_data & IGB_MAS_ENABLE_3) { ++ if (eeprom_data & IGB_MAS_ENABLE_3) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + default: + /* Shouldn't get here */ +- netdev_err(adapter->netdev, +- "MAS: Invalid port configuration, returning\n"); ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "%s:AMS: Invalid port configuration, returning\n", ++ adapter->netdev->name); + break; + } + } + +-/** +- * igb_init_i2c - Init I2C interface +- * @adapter: pointer to adapter structure +- **/ +-static s32 igb_init_i2c(struct igb_adapter *adapter) ++void igb_rar_set(struct igb_adapter *adapter, u32 index) + { +- s32 status = 0; ++ u32 rar_low, rar_high; ++ struct e1000_hw *hw = &adapter->hw; ++ u8 *addr = adapter->mac_table[index].addr; ++ /* HW expects these in little endian so we reverse the byte order ++ * from network order (big endian) to little endian ++ */ ++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); ++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + +- /* I2C interface supported on i350 devices */ +- if (adapter->hw.mac.type != e1000_i350) +- return 0; ++ /* Indicate to hardware the Address is Valid. 
*/ ++ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) ++ rar_high |= E1000_RAH_AV; + +- /* Initialize the i2c bus which is controlled by the registers. +- * This bus will use the i2c_algo_bit structue that implements +- * the protocol through toggling of the 4 bits in the register. +- */ +- adapter->i2c_adap.owner = THIS_MODULE; +- adapter->i2c_algo = igb_i2c_algo; +- adapter->i2c_algo.data = adapter; +- adapter->i2c_adap.algo_data = &adapter->i2c_algo; +- adapter->i2c_adap.dev.parent = &adapter->pdev->dev; +- strlcpy(adapter->i2c_adap.name, "igb BB", +- sizeof(adapter->i2c_adap.name)); +- status = i2c_bit_add_bus(&adapter->i2c_adap); +- return status; ++ if (hw->mac.type == e1000_82575) ++ rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; ++ else ++ rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; ++ ++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); ++ E1000_WRITE_FLUSH(hw); ++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_probe - Device Initialization Routine +- * @pdev: PCI device information struct +- * @ent: entry in igb_pci_tbl ++ * igb_probe - Device Initialization Routine ++ * @pdev: PCI device information struct ++ * @ent: entry in igb_pci_tbl + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + * +- * igb_probe initializes an adapter identified by a pci_dev structure. +- * The OS initialization, configuring of the adapter private structure, +- * and a hardware reset occur. ++ * igb_probe initializes an adapter identified by a pci_dev structure. ++ * The OS initialization, configuring of the adapter private structure, ++ * and a hardware reset occur. + **/ +-static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++static int igb_probe(struct pci_dev *pdev, ++ const struct pci_device_id *ent) + { + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; ++ u8 pba_str[E1000_PBANUM_LENGTH]; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ +- const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + int err, pci_using_dac; +- u8 part_str[E1000_PBANUM_LENGTH]; +- +- /* Catch broken hardware that put the wrong VF device ID in +- * the PCIe SR-IOV capability. 
+- */ +- if (pdev->is_virtfn) { +- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", +- pci_name(pdev), pdev->vendor, pdev->device); +- return -EINVAL; +- } ++ static int cards_found; ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ u32 hw_features; ++#else ++ netdev_features_t hw_features; ++#endif ++#endif + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; +- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); + if (!err) { +- pci_using_dac = 1; ++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), ++ DMA_BIT_MASK(64)); ++ if (!err) ++ pci_using_dac = 1; + } else { +- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { +- dev_err(&pdev->dev, +- "No usable DMA configuration, aborting\n"); +- goto err_dma; ++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), ++ DMA_BIT_MASK(32)); ++ if (err) { ++ IGB_ERR( ++ "No usable DMA configuration, aborting\n"); ++ goto err_dma; ++ } + } + } + +- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, +- IORESOURCE_MEM), ++#ifndef HAVE_ASPM_QUIRKS ++ /* 82575 requires that the pci-e link partner disable the L0s state */ ++ switch (pdev->device) { ++ case E1000_DEV_ID_82575EB_COPPER: ++ case E1000_DEV_ID_82575EB_FIBER_SERDES: ++ case E1000_DEV_ID_82575GB_QUAD_COPPER: ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); ++ default: ++ break; ++ } ++ ++#endif /* HAVE_ASPM_QUIRKS */ ++ err = pci_request_selected_regions(pdev, ++ pci_select_bars(pdev, ++ IORESOURCE_MEM), + igb_driver_name); + if (err) + goto err_pci_reg; +@@ -2286,14 +2679,18 @@ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); +- pci_save_state(pdev); + + err = -ENOMEM; ++#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); ++#else ++ netdev = alloc_etherdev(sizeof(struct igb_adapter)); ++#endif /* HAVE_TX_MQ */ + if (!netdev) + goto err_alloc_etherdev; + ++ SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); +@@ -2302,158 +2699,225 @@ + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; +- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); ++ adapter->port_num = hw->bus.func; ++ adapter->msg_enable = (1 << debug) - 1; + ++#ifdef HAVE_PCI_ERS ++ err = pci_save_state(pdev); ++ if (err) ++ goto err_ioremap; ++#endif + err = -EIO; +- hw->hw_addr = pci_iomap(pdev, 0, 0); +- if (!hw->hw_addr) ++ adapter->io_addr = ioremap(pci_resource_start(pdev, 0), ++ pci_resource_len(pdev, 0)); ++ if (!adapter->io_addr) + goto err_ioremap; ++ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ ++ hw->hw_addr = adapter->io_addr; + ++#ifdef HAVE_NET_DEVICE_OPS + netdev->netdev_ops = &igb_netdev_ops; ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ set_netdev_ops_ext(netdev, &igb_netdev_ops_ext); ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++#else /* HAVE_NET_DEVICE_OPS */ ++ netdev->open = &igb_open; ++ netdev->stop = &igb_close; ++ netdev->get_stats = &igb_get_stats; ++#ifdef HAVE_SET_RX_MODE ++ netdev->set_rx_mode = &igb_set_rx_mode; ++#endif ++ netdev->set_multicast_list = &igb_set_rx_mode; ++ netdev->set_mac_address = &igb_set_mac; ++ netdev->change_mtu = &igb_change_mtu; ++ netdev->do_ioctl = &igb_ioctl; ++#ifdef HAVE_TX_TIMEOUT ++ netdev->tx_timeout = &igb_tx_timeout; ++#endif ++ netdev->vlan_rx_register = igb_vlan_mode; ++ 
netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid; ++ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = igb_netpoll; ++#endif ++ netdev->hard_start_xmit = &igb_xmit_frame; ++#endif /* HAVE_NET_DEVICE_OPS */ + igb_set_ethtool_ops(netdev); ++#ifdef HAVE_TX_TIMEOUT + netdev->watchdog_timeo = 5 * HZ; ++#endif + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + +- netdev->mem_start = pci_resource_start(pdev, 0); +- netdev->mem_end = pci_resource_end(pdev, 0); +- +- /* PCI config space info */ +- hw->vendor_id = pdev->vendor; +- hw->device_id = pdev->device; +- hw->revision_id = pdev->revision; +- hw->subsystem_vendor_id = pdev->subsystem_vendor; +- hw->subsystem_device_id = pdev->subsystem_device; +- +- /* Copy the default MAC, PHY and NVM function pointers */ +- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); +- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); +- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); +- /* Initialize skew-specific constants */ +- err = ei->get_invariants(hw); +- if (err) +- goto err_sw_init; ++ adapter->bd_number = cards_found; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + +- igb_get_bus_info_pcie(hw); ++ igb_e1000_get_bus_info(hw); + +- hw->phy.autoneg_wait_to_complete = false; ++ hw->phy.autoneg_wait_to_complete = FALSE; ++ hw->mac.adaptive_ifs = FALSE; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; +- hw->phy.disable_polarity_correction = false; ++ hw->phy.disable_polarity_correction = FALSE; + hw->phy.ms_type = e1000_ms_hw_default; + } + +- if (igb_check_reset_block(hw)) +- dev_info(&pdev->dev, ++ if (e1000_check_reset_block(hw)) ++ dev_info(pci_dev_to_dev(pdev), + "PHY reset is blocked due to SOL/IDER session.\n"); + +- /* features is initialized to 0 in allocation, it might have bits ++ /* ++ * features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. 
+ */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM | ++#ifdef NETIF_F_IPV6_CSUM + NETIF_F_IPV6_CSUM | ++#endif ++#ifdef NETIF_F_TSO + NETIF_F_TSO | ++#ifdef NETIF_F_TSO6 + NETIF_F_TSO6 | ++#endif ++#endif /* NETIF_F_TSO */ ++#ifdef NETIF_F_RXHASH + NETIF_F_RXHASH | ++#endif + NETIF_F_RXCSUM | ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; ++#else ++ NETIF_F_HW_VLAN_RX | ++ NETIF_F_HW_VLAN_TX; ++#endif ++ ++ if (hw->mac.type >= e1000_82576) ++ netdev->features |= NETIF_F_SCTP_CSUM; + ++#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +- netdev->hw_features |= netdev->features; +- netdev->hw_features |= NETIF_F_RXALL; ++#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ hw_features = netdev->hw_features; ++ ++ /* give us the option of enabling LRO later */ ++ hw_features |= NETIF_F_LRO; ++ ++#else ++ hw_features = get_netdev_hw_features(netdev); ++ ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++ hw_features |= netdev->features; ++ ++#else ++#ifdef NETIF_F_GRO ++ ++ /* this is only needed on kernels prior to 2.6.39 */ ++ netdev->features |= NETIF_F_GRO; ++#endif /* NETIF_F_GRO */ ++#endif /* HAVE_NDO_SET_FEATURES */ + + /* set this bit last since it cannot be part of hw_features */ ++#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ++#endif /* NETIF_F_HW_FLAN_CTAG_FILTER */ ++#ifdef NETIF_F_HW_VLAN_TX ++ netdev->features |= NETIF_F_HW_VLAN_FILTER; ++#endif /* NETIF_F_HW_VLAN_TX */ ++ ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ set_netdev_hw_features(netdev, hw_features); ++#else ++ netdev->hw_features = hw_features; ++#endif ++#endif + ++#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + +- netdev->priv_flags |= IFF_SUPP_NOFCS; +- +- if (pci_using_dac) { ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; +- netdev->vlan_features |= NETIF_F_HIGHDMA; +- } + +- if (hw->mac.type >= e1000_82576) { +- netdev->hw_features |= NETIF_F_SCTP_CSUM; +- netdev->features |= NETIF_F_SCTP_CSUM; +- } +- +- netdev->priv_flags |= IFF_UNICAST_FLT; +- +- adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); ++ adapter->en_mng_pt = igb_e1000_enable_mng_pass_thru(hw); ++#ifdef DEBUG ++ if (adapter->dmac != IGB_DMAC_DISABLE) ++ netdev_info(netdev, "%s: DMA Coalescing is enabled..\n", ++ netdev->name); ++#endif + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ +- hw->mac.ops.reset_hw(hw); ++ igb_e1000_reset_hw(hw); + +- /* make sure the NVM is good , i211/i210 parts can have special NVM +- * that doesn't contain a checksum +- */ +- switch (hw->mac.type) { +- case e1000_i210: +- case e1000_i211: +- if (igb_get_flash_presence_i210(hw)) { +- if (hw->nvm.ops.validate(hw) < 0) { +- dev_err(&pdev->dev, +- "The NVM Checksum Is Not Valid\n"); +- err = -EIO; +- goto err_eeprom; +- } +- } +- break; +- default: +- if (hw->nvm.ops.validate(hw) < 0) { +- dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); +- err = -EIO; +- goto err_eeprom; +- } +- break; ++ /* make sure the NVM is good */ ++ if (e1000_validate_nvm_checksum(hw) < 0) { ++ dev_err(pci_dev_to_dev(pdev), ++ "The NVM Checksum Is Not Valid\n"); ++ err = -EIO; ++ goto err_eeprom; + } + + /* copy the MAC address out of the NVM */ +- if (hw->mac.ops.read_mac_addr(hw)) +- dev_err(&pdev->dev, "NVM Read Error\n"); +- ++ if 
(igb_e1000_read_mac_addr(hw)) ++ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n"); + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); ++#ifdef ETHTOOL_GPERMADDR ++ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + ++ if (!is_valid_ether_addr(netdev->perm_addr)) { ++#else + if (!is_valid_ether_addr(netdev->dev_addr)) { +- dev_err(&pdev->dev, "Invalid MAC Address\n"); ++#endif ++ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + ++ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len); ++ adapter->mac_table[0].queue = adapter->vfs_allocated_count; ++ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT ++ | IGB_MAC_STATE_IN_USE); ++ igb_rar_set(adapter, 0); ++ + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { +- wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); +- wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); ++ E1000_WRITE_REG(hw, E1000_RXPBS, I210_RXPBSIZE_DEFAULT); ++ E1000_WRITE_REG(hw, E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + +- setup_timer(&adapter->watchdog_timer, igb_watchdog, ++ /* Check if Media Autosense is enabled */ ++ if (hw->mac.type == e1000_82580) ++ igb_init_mas(adapter); ++ setup_timer(&adapter->watchdog_timer, &igb_watchdog, + (unsigned long) adapter); +- setup_timer(&adapter->phy_info_timer, igb_update_phy_info, ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer, ++ (unsigned long) adapter); ++ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; +@@ -2463,19 +2927,19 @@ + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + +- igb_validate_mdi_setting(hw); ++ igb_e1000_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + +- /* Check the NVM for wake support on non-port A ports */ ++ /* Check the NVM for wake support for non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) +- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); ++ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; +@@ -2494,7 +2958,7 @@ + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ +- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) ++ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: +@@ -2509,9 +2973,7 @@ + global_quad_port_a = 0; + break; + default: +- /* If the device can't wake, don't set software support */ +- if (!device_can_wakeup(&adapter->pdev->dev)) +- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; ++ break; + } + + /* initialize the wol settings based on the eeprom settings */ +@@ -2525,145 +2987,185 @@ + adapter->wol = 0; + } + +- device_set_wakeup_enable(&adapter->pdev->dev, ++ /* Some vendors want the ability to Use the 
EEPROM setting as ++ * enable/disable only, and not for capability ++ */ ++ if (((hw->mac.type == e1000_i350) || ++ (hw->mac.type == e1000_i354)) && ++ (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { ++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; ++ adapter->wol = 0; ++ } ++ if (hw->mac.type == e1000_i350) { ++ if (((pdev->subsystem_device == 0x5001) || ++ (pdev->subsystem_device == 0x5002)) && ++ (hw->bus.func == 0)) { ++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; ++ adapter->wol = 0; ++ } ++ if (pdev->subsystem_device == 0x1F52) ++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; ++ } ++ ++ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); ++ adapter->devrc = 0; + ++#ifdef HAVE_I2C_SUPPORT + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } ++#endif /* HAVE_I2C_SUPPORT */ + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); + +- strcpy(netdev->name, "eth%d"); ++ strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ err = igb_init_vmdq_netdevs(adapter); ++ if (err) ++ goto err_register; ++#endif + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +-#ifdef CONFIG_IGB_DCA +- if (dca_add_requester(&pdev->dev) == 0) { ++#ifdef IGB_DCA ++ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; +- dev_info(&pdev->dev, "DCA enabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); + igb_setup_dca(adapter); + } + + #endif +-#ifdef CONFIG_IGB_HWMON ++#ifdef HAVE_PTP_1588_CLOCK ++ /* do hw tstamp init after resetting */ ++ igb_ptp_init(adapter); ++#endif /* HAVE_PTP_1588_CLOCK */ ++ ++ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n"); ++ /* print bus type/speed/width info */ ++ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ", ++ netdev->name, ++ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" : ++ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" : ++ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"), ++ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : ++ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : ++ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : ++ (hw->mac.type == e1000_i354) ? "integrated" : "unknown")); ++ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); ++ ++ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH); ++ if (ret_val) ++ strcpy(pba_str, "Unknown"); ++ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name, ++ pba_str); ++ + /* Initialize the thermal sensor on i350 devices. */ +- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { +- u16 ets_word; ++ if (hw->mac.type == e1000_i350) { ++ if (hw->bus.func == 0) { ++ u16 ets_word; + +- /* Read the NVM to determine if this i350 device supports an +- * external thermal sensor. 
+- */ +- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); +- if (ets_word != 0x0000 && ets_word != 0xFFFF) +- adapter->ets = true; +- else +- adapter->ets = false; +- if (igb_sysfs_init(adapter)) +- dev_err(&pdev->dev, +- "failed to allocate sysfs resources\n"); +- } else { +- adapter->ets = false; +- } +-#endif +- /* Check if Media Autosense is enabled */ +- adapter->ei = *ei; +- if (hw->dev_spec._82575.mas_capable) +- igb_init_mas(adapter); ++ /* ++ * Read the NVM to determine if this i350 device ++ * supports an external thermal sensor. ++ */ ++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word); ++ if (ets_word != 0x0000 && ets_word != 0xFFFF) ++ adapter->ets = true; ++ else ++ adapter->ets = false; ++ } ++#ifdef IGB_HWMON + +- /* do hw tstamp init after resetting */ +- igb_ptp_init(adapter); ++ igb_sysfs_init(adapter); ++#else ++#ifdef IGB_PROCFS + +- dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); +- /* print bus type/speed/width info, not applicable to i354 */ +- if (hw->mac.type != e1000_i354) { +- dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", +- netdev->name, +- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : +- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : +- "unknown"), +- ((hw->bus.width == e1000_bus_width_pcie_x4) ? +- "Width x4" : +- (hw->bus.width == e1000_bus_width_pcie_x2) ? +- "Width x2" : +- (hw->bus.width == e1000_bus_width_pcie_x1) ? +- "Width x1" : "unknown"), netdev->dev_addr); +- } +- +- if ((hw->mac.type >= e1000_i210 || +- igb_get_flash_presence_i210(hw))) { +- ret_val = igb_read_part_string(hw, part_str, +- E1000_PBANUM_LENGTH); ++ igb_procfs_init(adapter); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + } else { +- ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; ++ adapter->ets = false; + } + +- if (ret_val) +- strcpy(part_str, "Unknown"); +- dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); +- dev_info(&pdev->dev, +- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", +- (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : +- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", +- adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ +- err = igb_set_eee_i350(hw); ++ err = e1000_set_eee_i350(hw, true, true); + if ((!err) && +- (!hw->dev_spec._82575.eee_disable)) { ++ (adapter->flags & IGB_FLAG_EEE)) + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; +- adapter->flags |= IGB_FLAG_EEE; +- } + break; + case e1000_i354: +- if ((rd32(E1000_CTRL_EXT) & +- E1000_CTRL_EXT_LINK_MODE_SGMII)) { +- err = igb_set_eee_i354(hw); ++ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) & ++ (E1000_CTRL_EXT_LINK_MODE_SGMII)) { ++ err = e1000_set_eee_i354(hw, true, true); + if ((!err) && +- (!hw->dev_spec._82575.eee_disable)) { ++ (adapter->flags & IGB_FLAG_EEE)) + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; +- adapter->flags |= IGB_FLAG_EEE; +- } + } + break; + default: + break; + } + } ++ ++ /* send driver version info to firmware */ ++ if ((hw->mac.type >= e1000_i350) && ++ (e1000_get_flash_presence_i210(hw))) ++ igb_init_fw(adapter); ++ ++#ifndef IGB_NO_LRO ++ if (netdev->features & NETIF_F_LRO) ++ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled\n"); ++ else ++ dev_info(pci_dev_to_dev(pdev), "LRO is disabled\n"); ++#endif ++ dev_info(pci_dev_to_dev(pdev), ++ "Using %s interrupts. 
%d rx queue(s), %d tx queue(s)\n", ++ adapter->msix_entries ? "MSI-X" : ++ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", ++ adapter->num_rx_queues, adapter->num_tx_queues); ++ ++ cards_found++; ++ + pm_runtime_put_noidle(&pdev->dev); + return 0; + + err_register: + igb_release_hw_control(adapter); ++#ifdef HAVE_I2C_SUPPORT + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); ++#endif /* HAVE_I2C_SUPPORT */ + err_eeprom: +- if (!igb_check_reset_block(hw)) +- igb_reset_phy(hw); ++ if (!e1000_check_reset_block(hw)) ++ igb_e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + err_sw_init: ++ kfree(adapter->shadow_vfta); + igb_clear_interrupt_scheme(adapter); +- pci_iounmap(pdev, hw->hw_addr); ++ igb_reset_sriov_capability(adapter); ++ iounmap(adapter->io_addr); + err_ioremap: + free_netdev(netdev); + err_alloc_etherdev: +@@ -2674,117 +3176,28 @@ + pci_disable_device(pdev); + return err; + } +- +-#ifdef CONFIG_PCI_IOV +-static int igb_disable_sriov(struct pci_dev *pdev) +-{ +- struct net_device *netdev = pci_get_drvdata(pdev); +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- +- /* reclaim resources allocated to VFs */ +- if (adapter->vf_data) { +- /* disable iov and allow time for transactions to clear */ +- if (pci_vfs_assigned(pdev)) { +- dev_warn(&pdev->dev, +- "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); +- return -EPERM; +- } else { +- pci_disable_sriov(pdev); +- msleep(500); +- } +- +- kfree(adapter->vf_data); +- adapter->vf_data = NULL; +- adapter->vfs_allocated_count = 0; +- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); +- wrfl(); +- msleep(100); +- dev_info(&pdev->dev, "IOV Disabled\n"); +- +- /* Re-enable DMA Coalescing flag since IOV is turned off */ +- adapter->flags |= IGB_FLAG_DMAC; +- } +- +- return 0; +-} +- +-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +-{ +- struct net_device *netdev = pci_get_drvdata(pdev); +- struct igb_adapter *adapter = netdev_priv(netdev); +- int old_vfs = pci_num_vf(pdev); +- int err = 0; +- int i; +- +- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { +- err = -EPERM; +- goto out; +- } +- if (!num_vfs) +- goto out; +- +- if (old_vfs) { +- dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", +- old_vfs, max_vfs); +- adapter->vfs_allocated_count = old_vfs; +- } else +- adapter->vfs_allocated_count = num_vfs; +- +- adapter->vf_data = kcalloc(adapter->vfs_allocated_count, +- sizeof(struct vf_data_storage), GFP_KERNEL); +- +- /* if allocation failed then we do not support SR-IOV */ +- if (!adapter->vf_data) { +- adapter->vfs_allocated_count = 0; +- dev_err(&pdev->dev, +- "Unable to allocate memory for VF Data Storage\n"); +- err = -ENOMEM; +- goto out; +- } +- +- /* only call pci_enable_sriov() if no VFs are allocated already */ +- if (!old_vfs) { +- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); +- if (err) +- goto err_out; +- } +- dev_info(&pdev->dev, "%d VFs allocated\n", +- adapter->vfs_allocated_count); +- for (i = 0; i < adapter->vfs_allocated_count; i++) +- igb_vf_configure(adapter, i); +- +- /* DMA Coalescing is not supported in IOV mode. 
*/ +- adapter->flags &= ~IGB_FLAG_DMAC; +- goto out; +- +-err_out: +- kfree(adapter->vf_data); +- adapter->vf_data = NULL; +- adapter->vfs_allocated_count = 0; +-out: +- return err; +-} +- +-#endif +-/** ++#ifdef HAVE_I2C_SUPPORT ++/* + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure +- **/ ++ * ++ */ + static void igb_remove_i2c(struct igb_adapter *adapter) + { ++ + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); + } ++#endif /* HAVE_I2C_SUPPORT */ + + /** +- * igb_remove - Device Removal Routine +- * @pdev: PCI device information struct ++ * igb_remove - Device Removal Routine ++ * @pdev: PCI device information struct + * +- * igb_remove is called by the PCI subsystem to alert the driver +- * that it should release a PCI device. The could be caused by a +- * Hot-Plug event, or because the driver is going to be removed from +- * memory. ++ * igb_remove is called by the PCI subsystem to alert the driver ++ * that it should release a PCI device. The could be caused by a ++ * Hot-Plug event, or because the driver is going to be removed from ++ * memory. + **/ + static void igb_remove(struct pci_dev *pdev) + { +@@ -2793,30 +3206,39 @@ + struct e1000_hw *hw = &adapter->hw; + + pm_runtime_get_noresume(&pdev->dev); +-#ifdef CONFIG_IGB_HWMON +- igb_sysfs_exit(adapter); +-#endif ++#ifdef HAVE_I2C_SUPPORT + igb_remove_i2c(adapter); ++#endif /* HAVE_I2C_SUPPORT */ ++#ifdef HAVE_PTP_1588_CLOCK + igb_ptp_stop(adapter); +- /* The watchdog timer may be rescheduled, so explicitly +- * disable watchdog from being rescheduled. ++#endif /* HAVE_PTP_1588_CLOCK */ ++ ++ /* flush_scheduled work may reschedule our watchdog task, so ++ * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__IGB_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ del_timer_sync(&adapter->dma_err_timer); + del_timer_sync(&adapter->phy_info_timer); + +- cancel_work_sync(&adapter->reset_task); +- cancel_work_sync(&adapter->watchdog_task); ++ flush_scheduled_work(); + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { +- dev_info(&pdev->dev, "DCA disabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); + dca_remove_requester(&pdev->dev); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; +- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); ++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); + } + #endif + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ igb_remove_vmdq_netdevs(adapter); ++#endif ++ ++ igb_reset_sriov_capability(adapter); ++ + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
+ */ +@@ -2826,16 +3248,21 @@ + + igb_clear_interrupt_scheme(adapter); + +-#ifdef CONFIG_PCI_IOV +- igb_disable_sriov(pdev); +-#endif +- +- pci_iounmap(pdev, hw->hw_addr); ++ if (adapter->io_addr) ++ iounmap(adapter->io_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + ++#ifdef IGB_HWMON ++ igb_sysfs_exit(adapter); ++#else ++#ifdef IGB_PROCFS ++ igb_procfs_exit(adapter); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ ++ kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + free_netdev(netdev); + +@@ -2845,110 +3272,12 @@ + } + + /** +- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space +- * @adapter: board private structure to initialize +- * +- * This function initializes the vf specific data storage and then attempts to +- * allocate the VFs. The reason for ordering it this way is because it is much +- * mor expensive time wise to disable SR-IOV than it is to allocate and free +- * the memory for the VFs. +- **/ +-static void igb_probe_vfs(struct igb_adapter *adapter) +-{ +-#ifdef CONFIG_PCI_IOV +- struct pci_dev *pdev = adapter->pdev; +- struct e1000_hw *hw = &adapter->hw; +- +- /* Virtualization features not supported on i210 family. */ +- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) +- return; +- +- pci_sriov_set_totalvfs(pdev, 7); +- igb_pci_enable_sriov(pdev, max_vfs); +- +-#endif /* CONFIG_PCI_IOV */ +-} +- +-static void igb_init_queue_configuration(struct igb_adapter *adapter) +-{ +- struct e1000_hw *hw = &adapter->hw; +- u32 max_rss_queues; +- +- /* Determine the maximum number of RSS queues supported. */ +- switch (hw->mac.type) { +- case e1000_i211: +- max_rss_queues = IGB_MAX_RX_QUEUES_I211; +- break; +- case e1000_82575: +- case e1000_i210: +- max_rss_queues = IGB_MAX_RX_QUEUES_82575; +- break; +- case e1000_i350: +- /* I350 cannot do RSS and SR-IOV at the same time */ +- if (!!adapter->vfs_allocated_count) { +- max_rss_queues = 1; +- break; +- } +- /* fall through */ +- case e1000_82576: +- if (!!adapter->vfs_allocated_count) { +- max_rss_queues = 2; +- break; +- } +- /* fall through */ +- case e1000_82580: +- case e1000_i354: +- default: +- max_rss_queues = IGB_MAX_RX_QUEUES; +- break; +- } +- +- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); +- +- igb_set_flag_queue_pairs(adapter, max_rss_queues); +-} +- +-void igb_set_flag_queue_pairs(struct igb_adapter *adapter, +- const u32 max_rss_queues) +-{ +- struct e1000_hw *hw = &adapter->hw; +- +- /* Determine if we need to pair queues. */ +- switch (hw->mac.type) { +- case e1000_82575: +- case e1000_i211: +- /* Device supports enough interrupts without queue pairing. */ +- break; +- case e1000_82576: +- /* If VFs are going to be allocated with RSS queues then we +- * should pair the queues in order to conserve interrupts due +- * to limited supply. +- */ +- if ((adapter->rss_queues > 1) && +- (adapter->vfs_allocated_count > 6)) +- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; +- /* fall through */ +- case e1000_82580: +- case e1000_i350: +- case e1000_i354: +- case e1000_i210: +- default: +- /* If rss_queues > half of max_rss_queues, pair the queues in +- * order to conserve interrupts due to limited supply. 
+- */ +- if (adapter->rss_queues > (max_rss_queues / 2)) +- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; +- break; +- } +-} +- +-/** +- * igb_sw_init - Initialize general software structures (struct igb_adapter) +- * @adapter: board private structure to initialize ++ * igb_sw_init - Initialize general software structures (struct igb_adapter) ++ * @adapter: board private structure to initialize + * +- * igb_sw_init initializes the Adapter private data structure. +- * Fields are initialized based on PCI device information and +- * OS network device settings (MTU size). ++ * igb_sw_init initializes the Adapter private data structure. ++ * Fields are initialized based on PCI device information and ++ * OS network device settings (MTU size). + **/ + static int igb_sw_init(struct igb_adapter *adapter) + { +@@ -2956,84 +3285,78 @@ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + ++ /* PCI config space info */ ++ ++ hw->vendor_id = pdev->vendor; ++ hw->device_id = pdev->device; ++ hw->subsystem_vendor_id = pdev->subsystem_vendor; ++ hw->subsystem_device_id = pdev->subsystem_device; ++ ++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); ++ + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + +- /* set default ITR values */ +- adapter->rx_itr_setting = IGB_DEFAULT_ITR; +- adapter->tx_itr_setting = IGB_DEFAULT_ITR; +- + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + +- VLAN_HLEN; +- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; ++ VLAN_HLEN; + +- spin_lock_init(&adapter->stats64_lock); +-#ifdef CONFIG_PCI_IOV +- switch (hw->mac.type) { +- case e1000_82576: +- case e1000_i350: +- if (max_vfs > 7) { +- dev_warn(&pdev->dev, +- "Maximum of 7 VFs per PF, using max\n"); +- max_vfs = adapter->vfs_allocated_count = 7; +- } else +- adapter->vfs_allocated_count = max_vfs; +- if (adapter->vfs_allocated_count) +- dev_warn(&pdev->dev, +- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); +- break; +- default: +- break; ++ /* Initialize the hardware-specific values */ ++ if (e1000_setup_init_funcs(hw, TRUE)) { ++ dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n"); ++ return -EIO; + } +-#endif /* CONFIG_PCI_IOV */ + +- igb_init_queue_configuration(adapter); ++ igb_check_options(adapter); ++ ++ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) * ++ hw->mac.rar_entry_count, ++ GFP_ATOMIC); + + /* Setup and initialize a copy of the hw vlan table array */ +- adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), +- GFP_ATOMIC); ++ adapter->shadow_vfta = kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES, ++ GFP_ATOMIC); ++ ++ /* These calls may decrease the number of queues */ ++ if (hw->mac.type < e1000_i210) ++ igb_set_sriov_capability(adapter); + +- /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { +- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); ++ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + +- igb_probe_vfs(adapter); +- + /* Explicitly disable IRQ since the NIC can be in any state. 
*/ + igb_irq_disable(adapter); + +- if (hw->mac.type >= e1000_i350) +- adapter->flags &= ~IGB_FLAG_DMAC; +- + set_bit(__IGB_DOWN, &adapter->state); + return 0; + } + + /** +- * igb_open - Called when a network interface is made active +- * @netdev: network interface device structure ++ * igb_open - Called when a network interface is made active ++ * @netdev: network interface device structure + * +- * Returns 0 on success, negative value on failure ++ * Returns 0 on success, negative value on failure + * +- * The open entry point is called when a network interface is made +- * active by the system (IFF_UP). At this point all resources needed +- * for transmit and receive operations are allocated, the interrupt +- * handler is registered with the OS, the watchdog timer is started, +- * and the stack is notified that the interface is ready. ++ * The open entry point is called when a network interface is made ++ * active by the system (IFF_UP). At this point all resources needed ++ * for transmit and receive operations are allocated, the interrupt ++ * handler is registered with the OS, the watchdog timer is started, ++ * and the stack is notified that the interface is ready. + **/ + static int __igb_open(struct net_device *netdev, bool resuming) + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; ++#ifdef CONFIG_PM_RUNTIME + struct pci_dev *pdev = adapter->pdev; ++#endif /* CONFIG_PM_RUNTIME */ + int err; + int i; + +@@ -3043,8 +3366,10 @@ + return -EBUSY; + } + ++#ifdef CONFIG_PM_RUNTIME + if (!resuming) + pm_runtime_get_sync(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ + + netif_carrier_off(netdev); + +@@ -3072,12 +3397,12 @@ + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ +- err = netif_set_real_num_tx_queues(adapter->netdev, +- adapter->num_tx_queues); +- if (err) +- goto err_set_queues; ++ netif_set_real_num_tx_queues(netdev, ++ adapter->vmdq_pools ? 1 : ++ adapter->num_tx_queues); + +- err = netif_set_real_num_rx_queues(adapter->netdev, ++ err = netif_set_real_num_rx_queues(netdev, ++ adapter->vmdq_pools ? 1 : + adapter->num_rx_queues); + if (err) + goto err_set_queues; +@@ -3087,30 +3412,31 @@ + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); ++ igb_configure_lli(adapter); + + /* Clear any pending interrupts. */ +- rd32(E1000_ICR); ++ E1000_READ_REG(hw, E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { +- u32 reg_data = rd32(E1000_CTRL_EXT); ++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; +- wr32(E1000_CTRL_EXT, reg_data); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(netdev); + +- if (!resuming) +- pm_runtime_put(&pdev->dev); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ schedule_work(&adapter->dma_err_task); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + +- return 0; ++ return E1000_SUCCESS; + + err_set_queues: + igb_free_irq(adapter); +@@ -3122,59 +3448,72 @@ + igb_free_all_tx_resources(adapter); + err_setup_tx: + igb_reset(adapter); ++ ++#ifdef CONFIG_PM_RUNTIME + if (!resuming) + pm_runtime_put(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ + + return err; + } + +-static int igb_open(struct net_device *netdev) ++int igb_open(struct net_device *netdev) + { + return __igb_open(netdev, false); + } + + /** +- * igb_close - Disables a network interface +- * @netdev: network interface device structure ++ * igb_close - Disables a network interface ++ * @netdev: network interface device structure + * +- * Returns 0, this is not allowed to fail ++ * Returns 0, this is not allowed to fail + * +- * The close entry point is called when an interface is de-activated +- * by the OS. The hardware is still under the driver's control, but +- * needs to be disabled. A global MAC reset is issued to stop the +- * hardware, and all transmit and receive resources are freed. ++ * The close entry point is called when an interface is de-activated ++ * by the OS. The hardware is still under the driver's control, but ++ * needs to be disabled. A global MAC reset is issued to stop the ++ * hardware, and all transmit and receive resources are freed. + **/ + static int __igb_close(struct net_device *netdev, bool suspending) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++#ifdef CONFIG_PM_RUNTIME + struct pci_dev *pdev = adapter->pdev; ++#endif /* CONFIG_PM_RUNTIME */ + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + ++#ifdef CONFIG_PM_RUNTIME + if (!suspending) + pm_runtime_get_sync(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ + + igb_down(adapter); ++ ++ igb_release_hw_control(adapter); ++ + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + ++#ifdef CONFIG_PM_RUNTIME + if (!suspending) + pm_runtime_put_sync(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ ++ + return 0; + } + +-static int igb_close(struct net_device *netdev) ++int igb_close(struct net_device *netdev) + { + return __igb_close(netdev, false); + } + + /** +- * igb_setup_tx_resources - allocate Tx resources (Descriptors) +- * @tx_ring: tx descriptor ring (for a specific queue) to setup ++ * igb_setup_tx_resources - allocate Tx resources (Descriptors) ++ * @tx_ring: tx descriptor ring (for a specific queue) to setup + * +- * Return 0 on success, negative on failure ++ * Return 0 on success, negative on failure + **/ + int igb_setup_tx_resources(struct igb_ring *tx_ring) + { +@@ -3182,7 +3521,6 @@ + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; +- + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; +@@ -3193,6 +3531,7 @@ + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); ++ + if (!tx_ring->desc) + goto err; + +@@ -3203,17 +3542,17 @@ + + err: + vfree(tx_ring->tx_buffer_info); +- tx_ring->tx_buffer_info = NULL; +- dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); ++ dev_err(dev, ++ "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + + /** +- * igb_setup_all_tx_resources - wrapper to allocate Tx resources +- * (Descriptors) for all queues +- * @adapter: board private structure ++ * igb_setup_all_tx_resources - wrapper to allocate Tx resources ++ * (Descriptors) for all queues ++ * @adapter: board private 
structure + * +- * Return 0 on success, negative on failure ++ * Return 0 on success, negative on failure + **/ + static int igb_setup_all_tx_resources(struct igb_adapter *adapter) + { +@@ -3223,7 +3562,7 @@ + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); +@@ -3235,8 +3574,8 @@ + } + + /** +- * igb_setup_tctl - configure the transmit control registers +- * @adapter: Board private structure ++ * igb_setup_tctl - configure the transmit control registers ++ * @adapter: Board private structure + **/ + void igb_setup_tctl(struct igb_adapter *adapter) + { +@@ -3244,28 +3583,45 @@ + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ +- wr32(E1000_TXDCTL(0), 0); ++ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ +- tctl = rd32(E1000_TCTL); ++ tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + +- igb_config_collision_dist(hw); ++ igb_e1000_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + +- wr32(E1000_TCTL, tctl); ++ E1000_WRITE_REG(hw, E1000_TCTL, tctl); ++} ++ ++static u32 igb_tx_wthresh(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ ++ switch (hw->mac.type) { ++ case e1000_i354: ++ return 4; ++ case e1000_82576: ++ if (adapter->msix_entries) ++ return 1; ++ default: ++ break; ++ } ++ ++ return 16; + } + + /** +- * igb_configure_tx_ring - Configure transmit ring after Reset +- * @adapter: board private structure +- * @ring: tx ring to configure ++ * igb_configure_tx_ring - Configure transmit ring after Reset ++ * @adapter: board private structure ++ * @ring: tx ring to configure + * +- * Configure a transmit ring after a reset. ++ * Configure a transmit ring after a reset. 
+ **/ + void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +@@ -3276,33 +3632,33 @@ + int reg_idx = ring->reg_idx; + + /* disable the queue */ +- wr32(E1000_TXDCTL(reg_idx), 0); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0); ++ E1000_WRITE_FLUSH(hw); + mdelay(10); + +- wr32(E1000_TDLEN(reg_idx), +- ring->count * sizeof(union e1000_adv_tx_desc)); +- wr32(E1000_TDBAL(reg_idx), +- tdba & 0x00000000ffffffffULL); +- wr32(E1000_TDBAH(reg_idx), tdba >> 32); ++ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx), ++ ring->count * sizeof(union e1000_adv_tx_desc)); ++ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx), ++ tdba & 0x00000000ffffffffULL); ++ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32); + +- ring->tail = hw->hw_addr + E1000_TDT(reg_idx); +- wr32(E1000_TDH(reg_idx), 0); ++ ring->tail = adapter->io_addr + E1000_TDT(reg_idx); ++ E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; +- txdctl |= IGB_TX_WTHRESH << 16; ++ txdctl |= igb_tx_wthresh(adapter) << 16; + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; +- wr32(E1000_TXDCTL(reg_idx), txdctl); ++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl); + } + + /** +- * igb_configure_tx - Configure transmit Unit after Reset +- * @adapter: board private structure ++ * igb_configure_tx - Configure transmit Unit after Reset ++ * @adapter: board private structure + * +- * Configure the Tx unit of the MAC after a reset. ++ * Configure the Tx unit of the MAC after a reset. + **/ + static void igb_configure_tx(struct igb_adapter *adapter) + { +@@ -3313,28 +3669,30 @@ + } + + /** +- * igb_setup_rx_resources - allocate Rx resources (Descriptors) +- * @rx_ring: Rx descriptor ring (for a specific queue) to setup ++ * igb_setup_rx_resources - allocate Rx resources (Descriptors) ++ * @rx_ring: rx descriptor ring (for a specific queue) to setup + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + **/ + int igb_setup_rx_resources(struct igb_ring *rx_ring) + { + struct device *dev = rx_ring->dev; +- int size; ++ int size, desc_len; + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; +- + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + ++ desc_len = sizeof(union e1000_adv_rx_desc); ++ + /* Round up to nearest 4K */ +- rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); ++ rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); ++ + if (!rx_ring->desc) + goto err; + +@@ -3347,16 +3705,17 @@ + err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; +- dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); ++ dev_err(dev, ++ "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + + /** +- * igb_setup_all_rx_resources - wrapper to allocate Rx resources +- * (Descriptors) for all queues +- * @adapter: board private structure ++ * igb_setup_all_rx_resources - wrapper to allocate Rx resources ++ * (Descriptors) for all queues ++ * @adapter: board private structure + * +- * Return 0 on success, negative on failure ++ * Return 0 on success, negative on failure + **/ + static int igb_setup_all_rx_resources(struct igb_adapter *adapter) + { +@@ -3366,7 +3725,7 @@ + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { +- 
dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); +@@ -3378,14 +3737,17 @@ + } + + /** +- * igb_setup_mrqc - configure the multiple receive queue control registers +- * @adapter: Board private structure ++ * igb_setup_mrqc - configure the multiple receive queue control registers ++ * @adapter: Board private structure + **/ + static void igb_setup_mrqc(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; ++#ifndef ETHTOOL_SRXFHINDIR ++ u32 shift = 0, shift2 = 0; ++#endif /* ETHTOOL_SRXFHINDIR */ + static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, + 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, + 0xA32DCB77, 0x0CF23080, 0x3BB7426A, +@@ -3393,33 +3755,72 @@ + + /* Fill out hash function seeds */ + for (j = 0; j < 10; j++) +- wr32(E1000_RSSRK(j), rsskey[j]); ++ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]); + + num_rx_queues = adapter->rss_queues; + +- switch (hw->mac.type) { +- case e1000_82576: ++#ifdef ETHTOOL_SRXFHINDIR ++ if (hw->mac.type == e1000_82576) { + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; +- break; +- default: +- break; + } +- + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = +- (j * num_rx_queues) / IGB_RETA_SIZE; ++ (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); ++#else ++ /* 82575 and 82576 supports 2 RSS queues for VMDq */ ++ switch (hw->mac.type) { ++ case e1000_82575: ++ if (adapter->vmdq_pools) { ++ shift = 2; ++ shift2 = 6; ++ } ++ shift = 6; ++ break; ++ case e1000_82576: ++ /* 82576 supports 2 RSS queues for SR-IOV */ ++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) { ++ shift = 3; ++ num_rx_queues = 2; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ /* ++ * Populate the redirection table 4 entries at a time. To do this ++ * we are generating the results for n and n+2 and then interleaving ++ * those with the results with n+1 and n+3. ++ */ ++ for (j = 0; j < 32; j++) { ++ /* first pass generates n and n+2 */ ++ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; ++ u32 reta = (base & 0x07800780) >> (7 - shift); ++ ++ /* second pass generates n+1 and n+3 */ ++ base += 0x00010001 * num_rx_queues; ++ reta |= (base & 0x07800780) << (1 + shift); ++ ++ /* generate 2nd table for 82575 based parts */ ++ if (shift2) ++ reta |= (0x01010101 * num_rx_queues) << shift2; ++ ++ E1000_WRITE_REG(hw, E1000_RETA(j), reta); ++ } ++#endif /* ETHTOOL_SRXFHINDIR */ + +- /* Disable raw packet checksumming so that RSS hash is placed in ++ /* ++ * Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
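For reference, the indirection-table fill in the ETHTOOL_SRXFHINDIR branch above spreads the 128 RETA entries evenly over the active RSS queues with (j * num_rx_queues) / IGB_RETA_SIZE. A minimal standalone sketch of that spread follows; this is plain userspace C, not driver code, the helper name fill_reta is made up for the sketch, the 128-entry size mirrors IGB_RETA_SIZE, and the four-queue count is only an example.

#include <stdio.h>
#include <stdint.h>

#define RETA_SIZE 128	/* mirrors IGB_RETA_SIZE */

/* Spread the indirection-table entries evenly across num_rx_queues,
 * following the rss_indir_tbl fill in igb_setup_mrqc(). */
static void fill_reta(uint8_t reta[RETA_SIZE], unsigned int num_rx_queues)
{
	unsigned int j;

	for (j = 0; j < RETA_SIZE; j++)
		reta[j] = (uint8_t)((j * num_rx_queues) / RETA_SIZE);
}

int main(void)
{
	uint8_t reta[RETA_SIZE];
	unsigned int counts[4] = { 0, 0, 0, 0 };
	unsigned int j, q;

	fill_reta(reta, 4);			/* example: 4 RSS queues */
	for (j = 0; j < RETA_SIZE; j++)
		counts[reta[j]]++;
	for (q = 0; q < 4; q++)
		printf("queue %u: %u of %d entries\n", q, counts[q], RETA_SIZE);
	return 0;
}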
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ +- rxcsum = rd32(E1000_RXCSUM); ++ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) +@@ -3427,7 +3828,7 @@ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ +- wr32(E1000_RXCSUM, rxcsum); ++ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses +@@ -3447,40 +3848,39 @@ + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ +- if (adapter->vfs_allocated_count) { ++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ +- u32 vtctl = rd32(E1000_VT_CTL); ++ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; +- wr32(E1000_VT_CTL, vtctl); ++ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { +- if (hw->mac.type != e1000_i211) +- mrqc |= E1000_MRQC_ENABLE_RSS_4Q; ++ mrqc |= E1000_MRQC_ENABLE_RSS_4Q; + } + igb_vmm_control(adapter); + +- wr32(E1000_MRQC, mrqc); ++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + } + + /** +- * igb_setup_rctl - configure the receive control registers +- * @adapter: Board private structure ++ * igb_setup_rctl - configure the receive control registers ++ * @adapter: Board private structure + **/ + void igb_setup_rctl(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); +@@ -3488,7 +3888,8 @@ + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + +- /* enable stripping of CRC. It's unlikely this will break BMC ++ /* ++ * enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ +@@ -3501,7 +3902,7 @@ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ +- wr32(E1000_RXDCTL(0), 0); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0); + + /* Attention!!! For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking +@@ -3509,27 +3910,10 @@ + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ +- wr32(E1000_QDE, ALL_QUEUES); +- } +- +- /* This is useful for sniffing bad packets. */ +- if (adapter->netdev->features & NETIF_F_RXALL) { +- /* UPE and MPE will be handled by normal PROMISC logic +- * in e1000e_set_rx_mode +- */ +- rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ +- E1000_RCTL_BAM | /* RX All Bcast Pkts */ +- E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ +- +- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ +- E1000_RCTL_DPF | /* Allow filtered pause */ +- E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ +- /* Do not mess with E1000_CTRL_VME, it affects transmit as well, +- * and that breaks VLANs. 
+- */ ++ E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES); + } + +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } + + static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, +@@ -3543,21 +3927,31 @@ + */ + if (vfn < adapter->vfs_allocated_count && + adapter->vf_data[vfn].vlans_enabled) +- size += VLAN_TAG_SIZE; ++ size += VLAN_HLEN; + +- vmolr = rd32(E1000_VMOLR(vfn)); ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (vfn >= adapter->vfs_allocated_count) { ++ int queue = vfn - adapter->vfs_allocated_count; ++ struct igb_vmdq_adapter *vadapter; ++ ++ vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]); ++ if (vadapter->vlgrp) ++ size += VLAN_HLEN; ++ } ++#endif ++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; +- wr32(E1000_VMOLR(vfn), vmolr); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + + return 0; + } + + /** +- * igb_rlpml_set - set maximum receive packet size +- * @adapter: board private structure ++ * igb_rlpml_set - set maximum receive packet size ++ * @adapter: board private structure + * +- * Configure maximum receivable packet size. ++ * Configure maximum receivable packet size. + **/ + static void igb_rlpml_set(struct igb_adapter *adapter) + { +@@ -3565,9 +3959,13 @@ + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + +- if (pf_id) { +- igb_set_vf_rlpml(adapter, max_frame_size, pf_id); +- /* If we're in VMDQ or SR-IOV mode, then set global RLPML ++ if (adapter->vmdq_pools && hw->mac.type != e1000_82575) { ++ int i; ++ ++ for (i = 0; i < adapter->vmdq_pools; i++) ++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i); ++ /* ++ * If we're in VMDQ or SR-IOV mode, then set global RLPML + * to our max jumbo frame size, in case we need to enable + * jumbo frames on one of the rings later. + * This will not pass over-length frames into the default +@@ -3575,56 +3973,73 @@ + */ + max_frame_size = MAX_JUMBO_FRAME_SIZE; + } ++ /* Set VF RLPML for the PF device. 
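As an aside on the VMOLR update in igb_set_vf_rlpml() above, here is a standalone sketch of the read-modify-write it performs per pool. This is userspace C, not driver code; the helper name set_rlpml is invented for the sketch, and the RLPML mask and LPE bit values are assumptions that mirror the usual e1000 register layout (bits 13:0 and bit 16).

#include <stdint.h>
#include <stdio.h>

#define VMOLR_RLPML_MASK 0x00003fffu	/* assumed: long packet max length, bits 13:0 */
#define VMOLR_LPE        0x00010000u	/* assumed: long packet enable, bit 16 */

/* Replace only the RLPML field of a pool's VMOLR value and allow long
 * packets, as igb_set_vf_rlpml() does for each VF/VMDq pool. */
static uint32_t set_rlpml(uint32_t vmolr, uint32_t max_frame_size)
{
	vmolr &= ~VMOLR_RLPML_MASK;
	vmolr |= max_frame_size | VMOLR_LPE;
	return vmolr;
}

int main(void)
{
	/* example: a jumbo MTU plus a 4-byte VLAN tag (the hunk above adds
	 * VLAN_HLEN when the pool has VLANs enabled); starting value arbitrary */
	printf("VMOLR = 0x%08x\n", set_rlpml(0x02200000u, 9018 + 4));
	return 0;
}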
*/ ++ if (adapter->vfs_allocated_count) ++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id); + +- wr32(E1000_RLPML, max_frame_size); ++ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size); + } + ++static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, ++ int vfn, bool enable) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u32 val; ++ void __iomem *reg; ++ ++ if (hw->mac.type < e1000_82576) ++ return; ++ ++ if (hw->mac.type == e1000_i350) ++ reg = hw->hw_addr + E1000_DVMOLR(vfn); ++ else ++ reg = hw->hw_addr + E1000_VMOLR(vfn); ++ ++ val = readl(reg); ++ if (enable) ++ val |= E1000_VMOLR_STRVLAN; ++ else ++ val &= ~(E1000_VMOLR_STRVLAN); ++ writel(val, reg); ++} + static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) + { + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + +- /* This register exists only on 82576 and newer so if we are older then ++ /* ++ * This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + +- vmolr = rd32(E1000_VMOLR(vfn)); +- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ +- if (hw->mac.type == e1000_i350) { +- u32 dvmolr; ++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); + +- dvmolr = rd32(E1000_DVMOLR(vfn)); +- dvmolr |= E1000_DVMOLR_STRVLAN; +- wr32(E1000_DVMOLR(vfn), dvmolr); +- } + if (aupe) +- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ ++ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ +- vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); ++ vmolr &= ~E1000_VMOLR_RSSE; + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ +- /* for VMDq only allow the VFs and pool 0 to accept broadcast and +- * multicast packets +- */ +- if (vfn <= adapter->vfs_allocated_count) +- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + +- wr32(E1000_VMOLR(vfn), vmolr); ++ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ ++ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */ ++ ++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + } + + /** +- * igb_configure_rx_ring - Configure a receive ring after Reset +- * @adapter: board private structure +- * @ring: receive ring to be configured ++ * igb_configure_rx_ring - Configure a receive ring after Reset ++ * @adapter: board private structure ++ * @ring: receive ring to be configured + * +- * Configure the Rx unit of the MAC after a reset. ++ * Configure the Rx unit of the MAC after a reset. + **/ + void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +@@ -3634,32 +4049,67 @@ + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ /* ++ * RLPML prevents us from receiving a frame larger than max_frame so ++ * it is safe to just set the rx_buffer_len to max_frame without the ++ * risk of an skb over panic. 
++ */ ++ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size, ++ MAXIMUM_ETHERNET_VLAN_SIZE); ++ ++#endif + /* disable the queue */ +- wr32(E1000_RXDCTL(reg_idx), 0); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ +- wr32(E1000_RDBAL(reg_idx), +- rdba & 0x00000000ffffffffULL); +- wr32(E1000_RDBAH(reg_idx), rdba >> 32); +- wr32(E1000_RDLEN(reg_idx), +- ring->count * sizeof(union e1000_adv_rx_desc)); ++ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx), ++ rdba & 0x00000000ffffffffULL); ++ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32); ++ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx), ++ ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ +- ring->tail = hw->hw_addr + E1000_RDT(reg_idx); +- wr32(E1000_RDH(reg_idx), 0); ++ ring->tail = adapter->io_addr + E1000_RDT(reg_idx); ++ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + ++ /* reset next-to- use/clean to place SW in sync with hardwdare */ ++ ring->next_to_clean = 0; ++ ring->next_to_use = 0; ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ ring->next_to_alloc = 0; ++ ++#endif + /* set descriptor configuration */ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; ++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++ srrctl = ALIGN(ring->rx_buffer_len, 1024) >> ++ E1000_SRRCTL_BSIZEPKT_SHIFT; ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; ++#ifdef HAVE_PTP_1588_CLOCK + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; +- /* Only set Drop Enable if we are supporting multiple queues */ +- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) ++#endif /* HAVE_PTP_1588_CLOCK */ ++ /* ++ * We should set the drop enable bit if: ++ * SR-IOV is enabled ++ * or ++ * Flow Control is disabled and number of RX queues > 1 ++ * ++ * This allows us to avoid head of line blocking for security ++ * and performance reasons. ++ */ ++ if (adapter->vfs_allocated_count || ++ (adapter->num_rx_queues > 1 && ++ (hw->fc.requested_mode == e1000_fc_none || ++ hw->fc.requested_mode == e1000_fc_rx_pause))) + srrctl |= E1000_SRRCTL_DROP_EN; + +- wr32(E1000_SRRCTL(reg_idx), srrctl); ++ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); +@@ -3670,14 +4120,14 @@ + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; +- wr32(E1000_RXDCTL(reg_idx), rxdctl); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl); + } + + /** +- * igb_configure_rx - Configure receive Unit after Reset +- * @adapter: board private structure ++ * igb_configure_rx - Configure receive Unit after Reset ++ * @adapter: board private structure + * +- * Configure the Rx unit of the MAC after a reset. ++ * Configure the Rx unit of the MAC after a reset. 
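The drop-enable policy spelled out in the comment above reduces to a small predicate. A standalone restatement of it follows; this is userspace C, not driver code, the helper name want_drop_en is invented for the sketch, and the enum only mirrors the driver's e1000_fc_* requested-mode values.

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* SRRCTL.DROP_EN decision from the hunk above: drop on overflow when
 * SR-IOV is active, or when several RX queues are in use and flow
 * control cannot pause the link partner (none, or RX-pause only). */
static bool want_drop_en(unsigned int vfs_allocated_count,
			 unsigned int num_rx_queues, enum fc_mode requested)
{
	if (vfs_allocated_count)
		return true;
	return num_rx_queues > 1 &&
	       (requested == FC_NONE || requested == FC_RX_PAUSE);
}

int main(void)
{
	printf("0 VFs, 4 queues, full fc : %d\n", want_drop_en(0, 4, FC_FULL));
	printf("0 VFs, 4 queues, no fc   : %d\n", want_drop_en(0, 4, FC_NONE));
	printf("2 VFs, 1 queue,  full fc : %d\n", want_drop_en(2, 1, FC_FULL));
	return 0;
}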
+ **/ + static void igb_configure_rx(struct igb_adapter *adapter) + { +@@ -3698,10 +4148,10 @@ + } + + /** +- * igb_free_tx_resources - Free Tx Resources per Queue +- * @tx_ring: Tx descriptor ring for a specific queue ++ * igb_free_tx_resources - Free Tx Resources per Queue ++ * @tx_ring: Tx descriptor ring for a specific queue + * +- * Free all transmit software resources ++ * Free all transmit software resources + **/ + void igb_free_tx_resources(struct igb_ring *tx_ring) + { +@@ -3721,10 +4171,10 @@ + } + + /** +- * igb_free_all_tx_resources - Free Tx Resources for All Queues +- * @adapter: board private structure ++ * igb_free_all_tx_resources - Free Tx Resources for All Queues ++ * @adapter: board private structure + * +- * Free all transmit software resources ++ * Free all transmit software resources + **/ + static void igb_free_all_tx_resources(struct igb_adapter *adapter) + { +@@ -3746,9 +4196,9 @@ + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, +- dma_unmap_addr(tx_buffer, dma), +- dma_unmap_len(tx_buffer, len), +- DMA_TO_DEVICE); ++ dma_unmap_addr(tx_buffer, dma), ++ dma_unmap_len(tx_buffer, len), ++ DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; +@@ -3757,8 +4207,8 @@ + } + + /** +- * igb_clean_tx_ring - Free Tx Buffers +- * @tx_ring: ring to be cleaned ++ * igb_clean_tx_ring - Free Tx Buffers ++ * @tx_ring: ring to be cleaned + **/ + static void igb_clean_tx_ring(struct igb_ring *tx_ring) + { +@@ -3788,8 +4238,8 @@ + } + + /** +- * igb_clean_all_tx_rings - Free Tx Buffers for all queues +- * @adapter: board private structure ++ * igb_clean_all_tx_rings - Free Tx Buffers for all queues ++ * @adapter: board private structure + **/ + static void igb_clean_all_tx_rings(struct igb_adapter *adapter) + { +@@ -3800,10 +4250,10 @@ + } + + /** +- * igb_free_rx_resources - Free Rx Resources +- * @rx_ring: ring to clean the resources from ++ * igb_free_rx_resources - Free Rx Resources ++ * @rx_ring: ring to clean the resources from + * +- * Free all receive software resources ++ * Free all receive software resources + **/ + void igb_free_rx_resources(struct igb_ring *rx_ring) + { +@@ -3823,10 +4273,10 @@ + } + + /** +- * igb_free_all_rx_resources - Free Rx Resources for All Queues +- * @adapter: board private structure ++ * igb_free_all_rx_resources - Free Rx Resources for All Queues ++ * @adapter: board private structure + * +- * Free all receive software resources ++ * Free all receive software resources + **/ + static void igb_free_all_rx_resources(struct igb_adapter *adapter) + { +@@ -3837,25 +4287,40 @@ + } + + /** +- * igb_clean_rx_ring - Free Rx Buffers per Queue +- * @rx_ring: ring to free buffers from ++ * igb_clean_rx_ring - Free Rx Buffers per Queue ++ * @rx_ring: ring to free buffers from + **/ +-static void igb_clean_rx_ring(struct igb_ring *rx_ring) ++void igb_clean_rx_ring(struct igb_ring *rx_ring) + { + unsigned long size; + u16 i; + ++ if (!rx_ring->rx_buffer_info) ++ return; ++ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + +- if (!rx_ring->rx_buffer_info) +- return; +- ++#endif + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ if (buffer_info->dma) { ++ dma_unmap_single(rx_ring->dev, ++ buffer_info->dma, ++ rx_ring->rx_buffer_len, ++ DMA_FROM_DEVICE); ++ buffer_info->dma = 0; ++ } + ++ 
if (buffer_info->skb) { ++ dev_kfree_skb(buffer_info->skb); ++ buffer_info->skb = NULL; ++ } ++#else + if (!buffer_info->page) + continue; + +@@ -3866,6 +4331,7 @@ + __free_page(buffer_info->page); + + buffer_info->page = NULL; ++#endif + } + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; +@@ -3880,8 +4346,8 @@ + } + + /** +- * igb_clean_all_rx_rings - Free Rx Buffers for all queues +- * @adapter: board private structure ++ * igb_clean_all_rx_rings - Free Rx Buffers for all queues ++ * @adapter: board private structure + **/ + static void igb_clean_all_rx_rings(struct igb_adapter *adapter) + { +@@ -3892,11 +4358,11 @@ + } + + /** +- * igb_set_mac - Change the Ethernet Address of the NIC +- * @netdev: network interface device structure +- * @p: pointer to an address structure ++ * igb_set_mac - Change the Ethernet Address of the NIC ++ * @netdev: network interface device structure ++ * @p: pointer to an address structure + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + **/ + static int igb_set_mac(struct net_device *netdev, void *p) + { +@@ -3910,60 +4376,155 @@ + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + +- /* set the correct pool for the new PF MAC address in entry 0 */ +- igb_rar_set_qsel(adapter, hw->mac.addr, 0, +- adapter->vfs_allocated_count); ++ /* set the correct pool for the new PF MAC address in entry 0 */ ++ igb_rar_set_qsel(adapter, hw->mac.addr, 0, ++ adapter->vfs_allocated_count); ++ ++ return 0; ++} ++ ++/** ++ * igb_write_mc_addr_list - write multicast addresses to MTA ++ * @netdev: network interface device structure ++ * ++ * Writes multicast address list to the MTA hash table. ++ * Returns: -ENOMEM on failure ++ * 0 on no addresses written ++ * X on writing X addresses to MTA ++ **/ ++int igb_write_mc_addr_list(struct net_device *netdev) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *ha; ++#endif ++ u8 *mta_list; ++ int i, count; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ int vm; ++#endif ++ count = netdev_mc_count(netdev); ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ for (vm = 1; vm < adapter->vmdq_pools; vm++) { ++ if (!adapter->vmdq_netdev[vm]) ++ break; ++ if (!netif_running(adapter->vmdq_netdev[vm])) ++ continue; ++ count += netdev_mc_count(adapter->vmdq_netdev[vm]); ++ } ++#endif ++ ++ if (!count) { ++ e1000_update_mc_addr_list(hw, NULL, 0); ++ return 0; ++ } ++ mta_list = kzalloc(count * 6, GFP_ATOMIC); ++ if (!mta_list) ++ return -ENOMEM; ++ ++ /* The shared function expects a packed array of only addresses. 
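The comment above refers to the flat buffer igb_write_mc_addr_list() hands to the shared code: count back-to-back 6-byte addresses with nothing between them. A standalone sketch of that packing follows; this is userspace C, not driver code, the helper name pack_mc_list is invented for the sketch, and the two sample addresses are arbitrary multicast MACs.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

/* Pack n MAC addresses into one contiguous buffer of n * ETH_ALEN bytes,
 * the "packed array of only addresses" the update-MC helper expects. */
static uint8_t *pack_mc_list(const uint8_t (*addrs)[ETH_ALEN], size_t n)
{
	uint8_t *mta_list = calloc(n, ETH_ALEN);
	size_t i;

	if (!mta_list)
		return NULL;
	for (i = 0; i < n; i++)
		memcpy(mta_list + i * ETH_ALEN, addrs[i], ETH_ALEN);
	return mta_list;
}

int main(void)
{
	const uint8_t addrs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	uint8_t *packed = pack_mc_list(addrs, 2);
	size_t i;

	if (!packed)
		return 1;
	for (i = 0; i < 2 * ETH_ALEN; i++)
		printf("%02x%c", packed[i],
		       (i % ETH_ALEN == ETH_ALEN - 1) ? '\n' : ':');
	free(packed);
	return 0;
}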
*/ ++ i = 0; ++ netdev_for_each_mc_addr(ha, netdev) ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); ++#else ++ memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); ++#endif ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ for (vm = 1; vm < adapter->vmdq_pools; vm++) { ++ if (!adapter->vmdq_netdev[vm]) ++ break; ++ if (!netif_running(adapter->vmdq_netdev[vm]) || ++ !netdev_mc_count(adapter->vmdq_netdev[vm])) ++ continue; ++ netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm]) ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ memcpy(mta_list + (i++ * ETH_ALEN), ++ ha->addr, ETH_ALEN); ++#else ++ memcpy(mta_list + (i++ * ETH_ALEN), ++ ha->dmi_addr, ETH_ALEN); ++#endif ++ } ++#endif ++ e1000_update_mc_addr_list(hw, mta_list, i); ++ kfree(mta_list); ++ ++ return count; ++} ++ ++void igb_full_sync_mac_table(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ int i; + +- return 0; ++ for (i = 0; i < hw->mac.rar_entry_count; i++) ++ igb_rar_set(adapter, i); + } + +-/** +- * igb_write_mc_addr_list - write multicast addresses to MTA +- * @netdev: network interface device structure +- * +- * Writes multicast address list to the MTA hash table. +- * Returns: -ENOMEM on failure +- * 0 on no addresses written +- * X on writing X addresses to MTA +- **/ +-static int igb_write_mc_addr_list(struct net_device *netdev) ++void igb_sync_mac_table(struct igb_adapter *adapter) + { +- struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- struct netdev_hw_addr *ha; +- u8 *mta_list; + int i; + +- if (netdev_mc_empty(netdev)) { +- /* nothing to program, so clear mc list */ +- igb_update_mc_addr_list(hw, NULL, 0); +- igb_restore_vf_multicasts(adapter); +- return 0; ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED) ++ igb_rar_set(adapter, i); ++ adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED); + } ++} + +- mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); +- if (!mta_list) +- return -ENOMEM; ++int igb_available_rars(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ int i, count = 0; + +- /* The shared function expects a packed array of only addresses. */ +- i = 0; +- netdev_for_each_mc_addr(ha, netdev) +- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (adapter->mac_table[i].state == 0) ++ count++; ++ } ++ return count; ++} + +- igb_update_mc_addr_list(hw, mta_list, i); +- kfree(mta_list); ++static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, ++ u8 qsel) ++{ ++ u32 rar_low, rar_high; ++ struct e1000_hw *hw = &adapter->hw; ++ ++ /* HW expects these in little endian so we reverse the byte order ++ * from network order (big endian) to little endian ++ */ ++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); ++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); ++ ++ /* Indicate to hardware the Address is Valid. 
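The byte-order note in igb_rar_set_qsel() above is the crux of the routine: the MAC address arrives in network (big-endian) order and is folded into the little-endian RAL/RAH pair, after which the valid bit and pool select are OR'd into RAH. A standalone sketch follows; this is userspace C, not driver code, the helper name pack_rar is invented for the sketch, and the AV and POOL_1 values are assumptions mirroring the usual e1000 defines (bit 31 and bit 18).

#include <stdint.h>
#include <stdio.h>

#define RAH_AV     0x80000000u	/* assumed: address-valid, bit 31 */
#define RAH_POOL_1 0x00040000u	/* assumed: first pool-select bit, bit 18 */

/* Fold a network-order MAC address into the little-endian RAL/RAH pair the
 * way igb_rar_set_qsel() does; per_pool_bit selects the 82576+ behaviour
 * (one bit per pool) instead of the 82575 multiply. */
static void pack_rar(const uint8_t addr[6], uint8_t qsel, int per_pool_bit,
		     uint32_t *ral, uint32_t *rah)
{
	*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
	*rah |= RAH_AV;
	*rah |= per_pool_bit ? (RAH_POOL_1 << qsel) : (RAH_POOL_1 * qsel);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t ral, rah;

	pack_rar(mac, 0, 1, &ral, &rah);
	printf("RAL = 0x%08x  RAH = 0x%08x\n", ral, rah);
	return 0;
}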
*/ ++ rar_high |= E1000_RAH_AV; ++ ++ if (hw->mac.type == e1000_82575) ++ rar_high |= E1000_RAH_POOL_1 * qsel; ++ else ++ rar_high |= E1000_RAH_POOL_1 << qsel; + +- return netdev_mc_count(netdev); ++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); ++ E1000_WRITE_FLUSH(hw); ++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); ++ E1000_WRITE_FLUSH(hw); + } + ++#ifdef HAVE_SET_RX_MODE + /** +- * igb_write_uc_addr_list - write unicast addresses to RAR table +- * @netdev: network interface device structure ++ * igb_write_uc_addr_list - write unicast addresses to RAR table ++ * @netdev: network interface device structure + * +- * Writes unicast address list to the RAR table. +- * Returns: -ENOMEM on failure/insufficient address space +- * 0 on no addresses written +- * X on writing X addresses to the RAR table ++ * Writes unicast address list to the RAR table. ++ * Returns: -ENOMEM on failure/insufficient address space ++ * 0 on no addresses written ++ * X on writing X addresses to the RAR table + **/ + static int igb_write_uc_addr_list(struct net_device *netdev) + { +@@ -3974,39 +4535,48 @@ + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ +- if (netdev_uc_count(netdev) > rar_entries) ++ if (netdev_uc_count(netdev) > igb_available_rars(adapter)) + return -ENOMEM; +- + if (!netdev_uc_empty(netdev) && rar_entries) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; +- ++#else ++ struct dev_mc_list *ha; ++#endif + netdev_for_each_uc_addr(ha, netdev) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST + if (!rar_entries) + break; + igb_rar_set_qsel(adapter, ha->addr, + rar_entries--, + vfn); ++#else ++ igb_rar_set_qsel(adapter, ha->da_addr, ++ rar_entries--, ++ vfn); ++#endif + count++; + } + } ++ + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) { +- wr32(E1000_RAH(rar_entries), 0); +- wr32(E1000_RAL(rar_entries), 0); ++ E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0); ++ E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0); + } +- wrfl(); +- ++ E1000_WRITE_FLUSH(hw); + return count; + } + ++#endif /* HAVE_SET_RX_MODE */ + /** +- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set +- * @netdev: network interface device structure ++ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set ++ * @netdev: network interface device structure + * +- * The set_rx_mode entry point is called whenever the unicast or multicast +- * address lists or the network interface flags are updated. This routine is +- * responsible for configuring the hardware for proper unicast, multicast, +- * promiscuous mode, and all-multi behavior. ++ * The set_rx_mode entry point is called whenever the unicast or multicast ++ * address lists or the network interface flags are updated. This routine is ++ * responsible for configuring the hardware for proper unicast, multicast, ++ * promiscuous mode, and all-multi behavior. 
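Before the body of igb_set_rx_mode() below, a reduced restatement of the promiscuous/all-multi decision it implements may help. This is userspace C, not driver code; it covers only the UPE/MPE/VFE choice and not the MTA/RAR programming, the helper name rx_mode_bits is invented for the sketch, and the RCTL bit values are assumptions mirroring the usual e1000 layout.

#include <stdint.h>
#include <stdio.h>

#define IFF_PROMISC  0x100
#define IFF_ALLMULTI 0x200

#define RCTL_UPE 0x00000008u	/* assumed: unicast promiscuous, bit 3 */
#define RCTL_MPE 0x00000010u	/* assumed: multicast promiscuous, bit 4 */
#define RCTL_VFE 0x00040000u	/* assumed: VLAN filter enable, bit 18 */

/* Promiscuous/all-multi decision as in igb_set_rx_mode(): IFF_PROMISC turns
 * on both UPE and MPE and retains VLAN filtering when VFs/VMDq pools exist;
 * IFF_ALLMULTI turns on MPE only; otherwise the filter tables do the work. */
static uint32_t rx_mode_bits(unsigned int flags, int vt_mode)
{
	uint32_t rctl = 0;

	if (flags & IFF_PROMISC) {
		rctl |= RCTL_UPE | RCTL_MPE;
		if (vt_mode)
			rctl |= RCTL_VFE;
	} else if (flags & IFF_ALLMULTI) {
		rctl |= RCTL_MPE;
	}
	return rctl;
}

int main(void)
{
	printf("promisc + VT : 0x%08x\n", rx_mode_bits(IFF_PROMISC, 1));
	printf("allmulti     : 0x%08x\n", rx_mode_bits(IFF_ALLMULTI, 0));
	printf("filtered     : 0x%08x\n", rx_mode_bits(0, 0));
	return 0;
}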
+ **/ + static void igb_set_rx_mode(struct net_device *netdev) + { +@@ -4017,23 +4587,24 @@ + int count; + + /* Check for Promiscuous and All Multicast modes */ +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* clear the effected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); + + if (netdev->flags & IFF_PROMISC) { +- /* retain VLAN HW filtering if in VT mode */ +- if (adapter->vfs_allocated_count) +- rctl |= E1000_RCTL_VFE; + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); ++ /* retain VLAN HW filtering if in VT mode */ ++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) ++ rctl |= E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { +- /* Write addresses to the MTA, if the attempt fails ++ /* ++ * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ +@@ -4045,7 +4616,9 @@ + vmolr |= E1000_VMOLR_ROMPE; + } + } +- /* Write addresses to available RAR registers, if there is not ++#ifdef HAVE_SET_RX_MODE ++ /* ++ * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ +@@ -4054,21 +4627,23 @@ + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } ++#endif /* HAVE_SET_RX_MODE */ + rctl |= E1000_RCTL_VFE; + } +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + +- /* In order to support SR-IOV and eventually VMDq it is necessary to set ++ /* ++ * In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ +- if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) ++ if (hw->mac.type < e1000_82576) + return; + +- vmolr |= rd32(E1000_VMOLR(vfn)) & +- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); +- wr32(E1000_VMOLR(vfn), vmolr); ++ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) & ++ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + igb_restore_vf_multicasts(adapter); + } + +@@ -4080,7 +4655,7 @@ + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: +- wvbr = rd32(E1000_WVBR); ++ wvbr = E1000_READ_REG(hw, E1000_WVBR); + if (!wvbr) + return; + break; +@@ -4100,15 +4675,34 @@ + if (!adapter->wvbr) + return; + +- for (j = 0; j < adapter->vfs_allocated_count; j++) { +- if (adapter->wvbr & (1 << j) || +- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { +- dev_warn(&adapter->pdev->dev, +- "Spoof event(s) detected on VF %d\n", j); +- adapter->wvbr &= +- ~((1 << j) | +- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); ++ switch (adapter->hw.mac.type) { ++ case e1000_82576: ++ for (j = 0; j < adapter->vfs_allocated_count; j++) { ++ if (adapter->wvbr & (1 << j) || ++ adapter->wvbr & (1 << (j ++ + IGB_STAGGERED_QUEUE_OFFSET))) { ++ DPRINTK(DRV, WARNING, ++ "Spoof event(s) detected on VF %d\n", ++ j); ++ adapter->wvbr &= ++ ~((1 << j) | ++ (1 << (j + ++ IGB_STAGGERED_QUEUE_OFFSET))); ++ } ++ } ++ break; ++ case e1000_i350: ++ for (j = 0; j < adapter->vfs_allocated_count; j++) { ++ if (adapter->wvbr & (1 << j)) { ++ DPRINTK(DRV, WARNING, ++ "Spoof event(s) detected on VF %d\n", ++ j); ++ adapter->wvbr &= ~(1 << j); ++ } + } ++ break; ++ default: ++ break; + } + } + +@@ -4118,21 +4712,22 @@ + static void igb_update_phy_info(unsigned long data) + { + struct igb_adapter *adapter = (struct igb_adapter *) data; +- igb_get_phy_info(&adapter->hw); ++ ++ e1000_get_phy_info(&adapter->hw); + } + + /** +- * igb_has_link - check shared code for link and determine up/down +- * @adapter: pointer to driver private info ++ * igb_has_link - check shared code for link and determine up/down ++ * @adapter: pointer to driver private info + **/ + bool igb_has_link(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +- bool link_active = false; ++ bool link_active = FALSE; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay +- * false until the e1000_check_for_link establishes link ++ * false until the igb_e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { +@@ -4140,11 +4735,11 @@ + if (!hw->mac.get_link_status) + return true; + case e1000_media_type_internal_serdes: +- hw->mac.ops.check_for_link(hw); ++ igb_e1000_check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; +- default: + case e1000_media_type_unknown: ++ default: + break; + } + +@@ -4162,27 +4757,9 @@ + return link_active; + } + +-static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +-{ +- bool ret = false; +- u32 ctrl_ext, thstat; +- +- /* check for thermal sensor event on i350 copper only */ +- if (hw->mac.type == e1000_i350) { +- thstat = rd32(E1000_THSTAT); +- ctrl_ext = rd32(E1000_CTRL_EXT); +- +- if ((hw->phy.media_type == e1000_media_type_copper) && +- !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) +- ret = !!(thstat & event); +- } +- +- return ret; +-} +- + /** +- * igb_watchdog - Timer Call-back +- * @data: pointer to adapter cast into an unsigned long ++ * igb_watchdog - Timer Call-back ++ * @data: pointer to adapter cast into an unsigned long + **/ + static void igb_watchdog(unsigned long data) + { +@@ -4197,29 +4774,28 @@ + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; +- struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; +- u32 link; ++ u32 thstat, ctrl_ext, link; + int i; + u32 connsw; + + link = igb_has_link(adapter); + +- if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { +- if (time_after(jiffies, (adapter->link_check_timeout + HZ))) +- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; +- else +- link = false; +- } +- + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { +- connsw = rd32(E1000_CONNSW); ++ connsw = E1000_READ_REG(hw, E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } ++ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { ++ if (time_after(jiffies, (adapter->link_check_timeout + HZ))) ++ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; ++ else ++ link = FALSE; ++ } ++ + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { +@@ -4227,48 +4803,29 @@ + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } ++ + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + +- hw->mac.ops.get_speed_and_duplex(hw, +- &adapter->link_speed, +- &adapter->link_duplex); ++ igb_e1000_get_speed_and_duplex(hw, ++ &adapter->link_speed, ++ &adapter->link_duplex); + +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, +- "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", +- netdev->name, +- adapter->link_speed, +- adapter->link_duplex == FULL_DUPLEX ? +- "Full" : "Half", +- (ctrl & E1000_CTRL_TFCE) && +- (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : +- (ctrl & E1000_CTRL_RFCE) ? "RX" : +- (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); +- +- /* disable EEE if enabled */ +- if ((adapter->flags & IGB_FLAG_EEE) && +- (adapter->link_duplex == HALF_DUPLEX)) { +- dev_info(&adapter->pdev->dev, +- "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex.\n"); +- adapter->hw.dev_spec._82575.eee_disable = true; +- adapter->flags &= ~IGB_FLAG_EEE; +- } +- +- /* check if SmartSpeed worked */ +- igb_check_downshift(hw); +- if (phy->speed_downgraded) +- netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); +- +- /* check for thermal sensor event */ +- if (igb_thermal_sensor_event(hw, +- E1000_THSTAT_LINK_THROTTLE)) +- netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); +- ++ "igb: %s NIC Link is Up %d Mbps %s, Flow Control: %s\n", ++ netdev->name, ++ adapter->link_speed, ++ adapter->link_duplex == FULL_DUPLEX ? ++ "Full Duplex" : "Half Duplex", ++ ((ctrl & E1000_CTRL_TFCE) && ++ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : ++ ((ctrl & E1000_CTRL_RFCE) ? "RX" : ++ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { +@@ -4278,12 +4835,17 @@ + case SPEED_100: + /* maybe add some timeout factor ? */ + break; ++ default: ++ break; + } + + netif_carrier_on(netdev); ++ netif_tx_wake_all_queues(netdev); + + igb_ping_all_vfs(adapter); ++#ifdef IFLA_VF_MAX + igb_check_vf_rate_limit(adapter); ++#endif /* IFLA_VF_MAX */ + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) +@@ -4294,17 +4856,33 @@ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; +- +- /* check for thermal sensor event */ +- if (igb_thermal_sensor_event(hw, +- E1000_THSTAT_PWR_DOWN)) { +- netdev_err(netdev, "The network adapter was stopped because it overheated\n"); ++ /* check for thermal sensor event on i350 */ ++ if (hw->mac.type == e1000_i350) { ++ thstat = E1000_READ_REG(hw, E1000_THSTAT); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ if ((hw->phy.media_type == ++ e1000_media_type_copper) && ++ !(ctrl_ext & ++ E1000_CTRL_EXT_LINK_MODE_SGMII)) { ++ if (thstat & E1000_THSTAT_PWR_DOWN) { ++ netdev_err(netdev, ++ "igb: %s The network adapter was stopped because it overheated.\n", ++ netdev->name); ++ } ++ if (thstat & ++ E1000_THSTAT_LINK_THROTTLE) { ++ netdev_err(netdev, ++ "igb: %s The network adapter supported link speed was downshifted because it overheated.\n", ++ netdev->name); ++ } ++ } + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); ++ netif_tx_stop_all_queues(netdev); + + igb_ping_all_vfs(adapter); + +@@ -4312,7 +4890,6 @@ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); +- + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); +@@ -4328,6 +4905,7 @@ + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { ++ hw->mac.ops.power_up_serdes(hw); + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); +@@ -4337,12 +4915,11 @@ + } + } + +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, &adapter->stats64); +- spin_unlock(&adapter->stats64_lock); ++ igb_update_stats(adapter); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; ++ + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got 
queued Tx work that's never going +@@ -4361,19 +4938,18 @@ + set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + +- /* Cause software interrupt to ensure Rx ring is cleaned */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ /* Cause software interrupt to ensure rx ring is cleaned */ ++ if (adapter->msix_entries) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; +- wr32(E1000_EICS, eics); ++ E1000_WRITE_REG(hw, E1000_EICS, eics); + } else { +- wr32(E1000_ICS, E1000_ICS_RXDMT0); ++ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0); + } + + igb_spoof_check(adapter); +- igb_ptp_rx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { +@@ -4386,6 +4962,70 @@ + } + } + ++static void igb_dma_err_task(struct work_struct *work) ++{ ++ struct igb_adapter *adapter = container_of(work, ++ struct igb_adapter, ++ dma_err_task); ++ int vf; ++ struct e1000_hw *hw = &adapter->hw; ++ struct net_device *netdev = adapter->netdev; ++ u32 hgptc; ++ u32 ciaa, ciad; ++ ++ hgptc = E1000_READ_REG(hw, E1000_HGPTC); ++ if (hgptc) /* If incrementing then no need for the check below */ ++ goto dma_timer_reset; ++ /* ++ * Check to see if a bad DMA write target from an errant or ++ * malicious VF has caused a PCIe error. If so then we can ++ * issue a VFLR to the offending VF(s) and then resume without ++ * requesting a full slot reset. ++ */ ++ ++ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { ++ ciaa = (vf << 16) | 0x80000000; ++ /* 32 bit read so align, we really want status at offset 6 */ ++ ciaa |= PCI_COMMAND; ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ ciad = E1000_READ_REG(hw, E1000_CIAD); ++ ciaa &= 0x7FFFFFFF; ++ /* disable debug mode asap after reading data */ ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ /* Get the upper 16 bits which will be the PCI status reg */ ++ ciad >>= 16; ++ if (ciad & (PCI_STATUS_REC_MASTER_ABORT | ++ PCI_STATUS_REC_TARGET_ABORT | ++ PCI_STATUS_SIG_SYSTEM_ERROR)) { ++ netdev_err(netdev, "VF %d suffered error\n", vf); ++ /* Issue VFLR */ ++ ciaa = (vf << 16) | 0x80000000; ++ ciaa |= 0xA8; ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ ciad = 0x00008000; /* VFLR */ ++ E1000_WRITE_REG(hw, E1000_CIAD, ciad); ++ ciaa &= 0x7FFFFFFF; ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ } ++ } ++dma_timer_reset: ++ /* Reset the timer */ ++ if (!test_bit(__IGB_DOWN, &adapter->state)) ++ mod_timer(&adapter->dma_err_timer, ++ round_jiffies(jiffies + HZ / 10)); ++} ++ ++/** ++ * igb_dma_err_timer - Timer Call-back ++ * @data: pointer to adapter cast into an unsigned long ++ **/ ++static void igb_dma_err_timer(unsigned long data) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ /* Do the rest outside of interrupt context */ ++ schedule_work(&adapter->dma_err_task); ++} ++ + enum latency_range { + lowest_latency = 0, + low_latency = 1, +@@ -4394,19 +5034,20 @@ + }; + + /** +- * igb_update_ring_itr - update the dynamic ITR value based on packet size +- * @q_vector: pointer to q_vector ++ * igb_update_ring_itr - update the dynamic ITR value based on packet size + * +- * Stores a new ITR value based on strictly on packet size. This +- * algorithm is less sophisticated than that used in igb_update_itr, +- * due to the difficulty of synchronizing statistics across multiple +- * receive rings. 
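The CIAA/CIAD probe in igb_dma_err_task() above reads each VF's config space through the PF and inspects the status word (the upper half of the 32-bit read at PCI_COMMAND) for abort or system-error bits before issuing a VFLR. A standalone sketch of that status test follows; this is userspace C, not driver code, the helper name vf_suffered_error is invented for the sketch, and the bit values are the standard PCI status bits as defined in pci_regs.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_STATUS_REC_TARGET_ABORT 0x1000
#define PCI_STATUS_REC_MASTER_ABORT 0x2000
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000

/* A 32-bit config read at offset PCI_COMMAND returns the command word in
 * the low half and the status word in the high half; igb_dma_err_task()
 * shifts the status down and checks for aborts or a signalled system error. */
static bool vf_suffered_error(uint32_t ciad)
{
	uint16_t status = (uint16_t)(ciad >> 16);

	return status & (PCI_STATUS_REC_MASTER_ABORT |
			 PCI_STATUS_REC_TARGET_ABORT |
			 PCI_STATUS_SIG_SYSTEM_ERROR);
}

int main(void)
{
	printf("clean status      : %d\n", vf_suffered_error(0x00100007u));
	printf("master abort seen : %d\n", vf_suffered_error(0x20100007u));
	return 0;
}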
The divisors and thresholds used by this function +- * were determined based on theoretical maximum wire speed and testing +- * data, in order to minimize response time while increasing bulk +- * throughput. +- * This functionality is controlled by ethtool's coalescing settings. +- * NOTE: This function is called only when operating in a multiqueue +- * receive environment. ++ * Stores a new ITR value based on strictly on packet size. This ++ * algorithm is less sophisticated than that used in igb_update_itr, ++ * due to the difficulty of synchronizing statistics across multiple ++ * receive rings. The divisors and thresholds used by this function ++ * were determined based on theoretical maximum wire speed and testing ++ * data, in order to minimize response time while increasing bulk ++ * throughput. ++ * This functionality is controlled by the InterruptThrottleRate module ++ * parameter (see igb_param.c) ++ * NOTE: This function is called only when operating in a multiqueue ++ * receive environment. ++ * @q_vector: pointer to q_vector + **/ + static void igb_update_ring_itr(struct igb_q_vector *q_vector) + { +@@ -4418,9 +5059,13 @@ + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. + */ +- if (adapter->link_speed != SPEED_1000) { ++ switch (adapter->link_speed) { ++ case SPEED_10: ++ case SPEED_100: + new_val = IGB_4K_ITR; + goto set_itr_val; ++ default: ++ break; + } + + packets = q_vector->rx.total_packets; +@@ -4467,20 +5112,20 @@ + } + + /** +- * igb_update_itr - update the dynamic ITR value based on statistics +- * @q_vector: pointer to q_vector +- * @ring_container: ring info to update the itr for +- * +- * Stores a new ITR value based on packets and byte +- * counts during the last interrupt. The advantage of per interrupt +- * computation is faster updates and more accurate ITR for the current +- * traffic pattern. Constants in this function were computed +- * based on theoretical maximum wire speed and thresholds were set based +- * on testing data as well as attempting to minimize response time +- * while increasing bulk throughput. +- * This functionality is controlled by ethtool's coalescing settings. +- * NOTE: These calculations are only valid when operating in a single- +- * queue environment. ++ * igb_update_itr - update the dynamic ITR value based on statistics ++ * Stores a new ITR value based on packets and byte ++ * counts during the last interrupt. The advantage of per interrupt ++ * computation is faster updates and more accurate ITR for the current ++ * traffic pattern. Constants in this function were computed ++ * based on theoretical maximum wire speed and thresholds were set based ++ * on testing data as well as attempting to minimize response time ++ * while increasing bulk throughput. ++ * this functionality is controlled by the InterruptThrottleRate module ++ * parameter (see igb_param.c) ++ * NOTE: These calculations are only valid when operating in a single- ++ * queue environment. 
++ * @q_vector: pointer to q_vector ++ * @ring_container: ring info to update the itr for + **/ + static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +@@ -4504,12 +5149,13 @@ + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ +- if (bytes/packets > 8000) ++ if (bytes/packets > 8000) { + itrval = bulk_latency; +- else if ((packets < 10) || ((bytes/packets) > 1200)) ++ } else if ((packets < 10) || ((bytes/packets) > 1200)) { + itrval = bulk_latency; +- else if ((packets > 35)) ++ } else if ((packets > 35)) { + itrval = lowest_latency; ++ } + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { +@@ -4541,10 +5187,14 @@ + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ +- if (adapter->link_speed != SPEED_1000) { ++ switch (adapter->link_speed) { ++ case SPEED_10: ++ case SPEED_100: + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; ++ default: ++ break; + } + + igb_update_itr(q_vector, &q_vector->tx); +@@ -4580,9 +5230,9 @@ + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? +- max((new_itr * q_vector->itr_val) / +- (new_itr + (q_vector->itr_val >> 2)), +- new_itr) : new_itr; ++ max((new_itr * q_vector->itr_val) / ++ (new_itr + (q_vector->itr_val >> 2)), ++ new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR +@@ -4594,8 +5244,8 @@ + } + } + +-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, +- u32 type_tucmd, u32 mss_l4len_idx) ++void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, ++ u32 type_tucmd, u32 mss_l4len_idx) + { + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; +@@ -4622,26 +5272,32 @@ + struct igb_tx_buffer *first, + u8 *hdr_len) + { ++#ifdef NETIF_F_TSO + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; +- int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) ++#endif /* NETIF_F_TSO */ + return 0; ++#ifdef NETIF_F_TSO + +- err = skb_cow_head(skb, 0); +- if (err < 0) +- return err; ++ if (skb_header_cloned(skb)) { ++ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++ ++ if (err) ++ return err; ++ } + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); ++ + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, +@@ -4652,6 +5308,7 @@ + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; ++#ifdef NETIF_F_TSO6 + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, +@@ -4659,6 +5316,7 @@ + 0, IPPROTO_TCP, 0); + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; ++#endif + } + + /* compute header lengths */ +@@ -4681,6 +5339,7 @@ + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; ++#endif /* NETIF_F_TSO */ + } + + static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +@@ -4694,38 +5353,42 @@ + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) + return; + } else { +- u8 l4_hdr = 0; ++ u8 nexthdr = 0; + + switch (first->protocol) { +- case 
htons(ETH_P_IP): ++ case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; +- l4_hdr = ip_hdr(skb)->protocol; ++ nexthdr = ip_hdr(skb)->protocol; + break; +- case htons(ETH_P_IPV6): ++#ifdef NETIF_F_IPV6_CSUM ++ case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); +- l4_hdr = ipv6_hdr(skb)->nexthdr; ++ nexthdr = ipv6_hdr(skb)->nexthdr; + break; ++#endif + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, +- "partial checksum but proto=%x!\n", +- first->protocol); ++ "partial checksum but proto=%x!\n", ++ first->protocol); + } + break; + } + +- switch (l4_hdr) { ++ switch (nexthdr) { + case IPPROTO_TCP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + E1000_ADVTXD_L4LEN_SHIFT; + break; ++#ifdef HAVE_SCTP + case IPPROTO_SCTP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + E1000_ADVTXD_L4LEN_SHIFT; + break; ++#endif + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + E1000_ADVTXD_L4LEN_SHIFT; +@@ -4733,8 +5396,8 @@ + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, +- "partial checksum but l4 proto=%x!\n", +- l4_hdr); ++ "partial checksum but l4 proto=%x!\n", ++ nexthdr); + } + break; + } +@@ -4773,9 +5436,6 @@ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + +- /* insert frame checksum */ +- cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); +- + return cmd_type; + } + +@@ -4882,11 +5542,11 @@ + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); +- + /* set the timestamp */ + first->time_stamp = jiffies; + +- /* Force memory writes to complete before letting h/w know there ++ /* ++ * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * +@@ -4907,7 +5567,7 @@ + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail +- * at a time, it synchronizes IO on IA64/Altix systems ++ * at a time, it syncronizes IO on IA64/Altix systems + */ + mmiowb(); + +@@ -4932,9 +5592,12 @@ + + static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) + { +- struct net_device *netdev = tx_ring->netdev; ++ struct net_device *netdev = netdev_ring(tx_ring); + +- netif_stop_subqueue(netdev, tx_ring->queue_index); ++ if (netif_is_multiqueue(netdev)) ++ netif_stop_subqueue(netdev, ring_queue_index(tx_ring)); ++ else ++ netif_stop_queue(netdev); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); +@@ -4949,11 +5612,12 @@ + return -EBUSY; + + /* A reprieve! 
*/ +- netif_wake_subqueue(netdev, tx_ring->queue_index); ++ if (netif_is_multiqueue(netdev)) ++ netif_wake_subqueue(netdev, ring_queue_index(tx_ring)); ++ else ++ netif_wake_queue(netdev); + +- u64_stats_update_begin(&tx_ring->tx_syncp2); +- tx_ring->tx_stats.restart_queue2++; +- u64_stats_update_end(&tx_ring->tx_syncp2); ++ tx_ring->tx_stats.restart_queue++; + + return 0; + } +@@ -4971,25 +5635,26 @@ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; ++#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD ++ unsigned short f; ++#endif + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + +- /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, ++ /* ++ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { +- unsigned short f; +- +- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) +- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +- } else { +- count += skb_shinfo(skb)->nr_frags; +- } +- ++#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD ++ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) ++ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); ++#else ++ count += skb_shinfo(skb)->nr_frags; ++#endif + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; +@@ -5001,12 +5666,21 @@ + first->bytecount = skb->len; + first->gso_segs = 1; + ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef SKB_SHARED_TX_IS_UNION ++ if (unlikely(skb_tx(skb)->hardware)) { ++#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { ++#endif + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { ++#ifdef SKB_SHARED_TX_IS_UNION ++ skb_tx(skb)->in_progress = 1; ++#else + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++#endif + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); +@@ -5015,12 +5689,11 @@ + schedule_work(&adapter->ptp_tx_work); + } + } +- ++#endif /* HAVE_PTP_1588_CLOCK */ + skb_tx_timestamp(skb); +- +- if (vlan_tx_tag_present(skb)) { ++ if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; +- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); ++ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ +@@ -5035,6 +5708,10 @@ + + igb_tx_map(tx_ring, first, hdr_len); + ++#ifndef HAVE_TRANS_START_IN_QUEUE ++ netdev_ring(tx_ring)->trans_start = jiffies; ++ ++#endif + /* Make sure there is space in the ring for the next send. */ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + +@@ -5046,6 +5723,7 @@ + return NETDEV_TX_OK; + } + ++#ifdef HAVE_TX_MQ + static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) + { +@@ -5056,6 +5734,9 @@ + + return adapter->tx_ring[r_idx]; + } ++#else ++#define igb_tx_queue_mapping(_adapter, _skb) ((_adapter)->tx_ring[0]) ++#endif + + static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +@@ -5072,22 +5753,22 @@ + return NETDEV_TX_OK; + } + +- /* The minimum packet size with TCTL.PSP set is 17 so pad the skb ++ /* ++ * The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
+ */ +- if (unlikely(skb->len < 17)) { +- if (skb_pad(skb, 17 - skb->len)) ++ if (skb->len < 17) { ++ if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; +- skb_set_tail_pointer(skb, 17); + } + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); + } + + /** +- * igb_tx_timeout - Respond to a Tx Hang +- * @netdev: network interface device structure ++ * igb_tx_timeout - Respond to a Tx Hang ++ * @netdev: network interface device structure + **/ + static void igb_tx_timeout(struct net_device *netdev) + { +@@ -5101,59 +5782,64 @@ + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); +- wr32(E1000_EICS, +- (adapter->eims_enable_mask & ~adapter->eims_other)); ++ E1000_WRITE_REG(hw, E1000_EICS, ++ (adapter->eims_enable_mask & ~adapter->eims_other)); + } + + static void igb_reset_task(struct work_struct *work) + { + struct igb_adapter *adapter; ++ + adapter = container_of(work, struct igb_adapter, reset_task); + +- igb_dump(adapter); +- netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); + } + + /** +- * igb_get_stats64 - Get System Network Statistics +- * @netdev: network interface device structure +- * @stats: rtnl_link_stats64 pointer ++ * igb_get_stats - Get System Network Statistics ++ * @netdev: network interface device structure ++ * ++ * Returns the address of the device statistics structure. ++ * The statistics are updated here and also from the timer callback. + **/ +-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static struct net_device_stats *igb_get_stats(struct net_device *netdev) + { + struct igb_adapter *adapter = netdev_priv(netdev); + +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, &adapter->stats64); +- memcpy(stats, &adapter->stats64, sizeof(*stats)); +- spin_unlock(&adapter->stats64_lock); ++ if (!test_bit(__IGB_RESETTING, &adapter->state)) ++ igb_update_stats(adapter); + +- return stats; ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ /* only return the current stats */ ++ return &netdev->stats; ++#else ++ /* only return the current stats */ ++ return &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + } + + /** +- * igb_change_mtu - Change the Maximum Transfer Unit +- * @netdev: network interface device structure +- * @new_mtu: new value for maximum frame size ++ * igb_change_mtu - Change the Maximum Transfer Unit ++ * @netdev: network interface device structure ++ * @new_mtu: new value for maximum frame size + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + **/ + static int igb_change_mtu(struct net_device *netdev, int new_mtu) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { +- dev_err(&pdev->dev, "Invalid MTU setting\n"); ++ dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n"); + return -EINVAL; + } + + #define MAX_STD_JUMBO_FRAME_SIZE 9238 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { +- dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); ++ dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n"); + return -EINVAL; + } + +@@ -5170,9 +5856,10 @@ + if (netif_running(netdev)) + igb_down(adapter); + +- dev_info(&pdev->dev, "changing MTU from %d to %d\n", ++ dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n", + 
netdev->mtu, new_mtu); + netdev->mtu = new_mtu; ++ hw->dev_spec._82575.mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); +@@ -5185,53 +5872,74 @@ + } + + /** +- * igb_update_stats - Update the board statistics counters +- * @adapter: board private structure ++ * igb_update_stats - Update the board statistics counters ++ * @adapter: board private structure + **/ +-void igb_update_stats(struct igb_adapter *adapter, +- struct rtnl_link_stats64 *net_stats) ++ ++void igb_update_stats(struct igb_adapter *adapter) + { ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats *net_stats = &adapter->netdev->stats; ++#else ++ struct net_device_stats *net_stats = &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct e1000_hw *hw = &adapter->hw; ++#ifdef HAVE_PCI_ERS + struct pci_dev *pdev = adapter->pdev; ++#endif + u32 reg, mpc; + u16 phy_tmp; + int i; + u64 bytes, packets; +- unsigned int start; +- u64 _bytes, _packets; ++#ifndef IGB_NO_LRO ++ u32 flushed = 0, coal = 0; ++ struct igb_q_vector *q_vector; ++#endif + + #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + +- /* Prevent stats update while adapter is being reset, or if the pci ++ /* ++ * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; ++#ifdef HAVE_PCI_ERS + if (pci_channel_offline(pdev)) + return; + ++#endif ++#ifndef IGB_NO_LRO ++ for (i = 0; i < adapter->num_q_vectors; i++) { ++ q_vector = adapter->q_vector[i]; ++ if (!q_vector) ++ continue; ++ flushed += q_vector->lrolist.stats.flushed; ++ coal += q_vector->lrolist.stats.coal; ++ } ++ adapter->lro_stats.flushed = flushed; ++ adapter->lro_stats.coal = coal; ++ ++#endif + bytes = 0; + packets = 0; +- +- rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; +- u32 rqdpc = rd32(E1000_RQDPC(i)); +- if (hw->mac.type >= e1000_i210) +- wr32(E1000_RQDPC(i), 0); ++ u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF; + +- if (rqdpc) { +- ring->rx_stats.drops += rqdpc; +- net_stats->rx_fifo_errors += rqdpc; ++ if (hw->mac.type >= e1000_i210) ++ E1000_WRITE_REG(hw, E1000_RQDPC(i), 0); ++ ring->rx_stats.drops += rqdpc_tmp; ++ net_stats->rx_fifo_errors += rqdpc_tmp; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (!ring->vmdq_netdev) { ++ bytes += ring->rx_stats.bytes; ++ packets += ring->rx_stats.packets; + } +- +- do { +- start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +- _bytes = ring->rx_stats.bytes; +- _packets = ring->rx_stats.packets; +- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +- bytes += _bytes; +- packets += _packets; ++#else ++ bytes += ring->rx_stats.bytes; ++ packets += ring->rx_stats.packets; ++#endif + } + + net_stats->rx_bytes = bytes; +@@ -5241,98 +5949,98 @@ + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; +- do { +- start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +- _bytes = ring->tx_stats.bytes; +- _packets = ring->tx_stats.packets; +- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +- bytes += _bytes; +- packets += _packets; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (!ring->vmdq_netdev) { ++ bytes += ring->tx_stats.bytes; ++ packets += ring->tx_stats.packets; ++ } ++#else ++ bytes += ring->tx_stats.bytes; ++ packets += ring->tx_stats.packets; ++#endif + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; +- rcu_read_unlock(); + + /* read stats registers */ +- adapter->stats.crcerrs += 
rd32(E1000_CRCERRS); +- adapter->stats.gprc += rd32(E1000_GPRC); +- adapter->stats.gorc += rd32(E1000_GORCL); +- rd32(E1000_GORCH); /* clear GORCL */ +- adapter->stats.bprc += rd32(E1000_BPRC); +- adapter->stats.mprc += rd32(E1000_MPRC); +- adapter->stats.roc += rd32(E1000_ROC); +- +- adapter->stats.prc64 += rd32(E1000_PRC64); +- adapter->stats.prc127 += rd32(E1000_PRC127); +- adapter->stats.prc255 += rd32(E1000_PRC255); +- adapter->stats.prc511 += rd32(E1000_PRC511); +- adapter->stats.prc1023 += rd32(E1000_PRC1023); +- adapter->stats.prc1522 += rd32(E1000_PRC1522); +- adapter->stats.symerrs += rd32(E1000_SYMERRS); +- adapter->stats.sec += rd32(E1000_SEC); ++ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); ++ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC); ++ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL); ++ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */ ++ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC); ++ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC); ++ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC); ++ ++ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64); ++ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127); ++ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255); ++ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511); ++ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023); ++ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522); ++ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS); ++ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC); + +- mpc = rd32(E1000_MPC); ++ mpc = E1000_READ_REG(hw, E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; +- adapter->stats.scc += rd32(E1000_SCC); +- adapter->stats.ecol += rd32(E1000_ECOL); +- adapter->stats.mcc += rd32(E1000_MCC); +- adapter->stats.latecol += rd32(E1000_LATECOL); +- adapter->stats.dc += rd32(E1000_DC); +- adapter->stats.rlec += rd32(E1000_RLEC); +- adapter->stats.xonrxc += rd32(E1000_XONRXC); +- adapter->stats.xontxc += rd32(E1000_XONTXC); +- adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); +- adapter->stats.xofftxc += rd32(E1000_XOFFTXC); +- adapter->stats.fcruc += rd32(E1000_FCRUC); +- adapter->stats.gptc += rd32(E1000_GPTC); +- adapter->stats.gotc += rd32(E1000_GOTCL); +- rd32(E1000_GOTCH); /* clear GOTCL */ +- adapter->stats.rnbc += rd32(E1000_RNBC); +- adapter->stats.ruc += rd32(E1000_RUC); +- adapter->stats.rfc += rd32(E1000_RFC); +- adapter->stats.rjc += rd32(E1000_RJC); +- adapter->stats.tor += rd32(E1000_TORH); +- adapter->stats.tot += rd32(E1000_TOTH); +- adapter->stats.tpr += rd32(E1000_TPR); +- +- adapter->stats.ptc64 += rd32(E1000_PTC64); +- adapter->stats.ptc127 += rd32(E1000_PTC127); +- adapter->stats.ptc255 += rd32(E1000_PTC255); +- adapter->stats.ptc511 += rd32(E1000_PTC511); +- adapter->stats.ptc1023 += rd32(E1000_PTC1023); +- adapter->stats.ptc1522 += rd32(E1000_PTC1522); +- +- adapter->stats.mptc += rd32(E1000_MPTC); +- adapter->stats.bptc += rd32(E1000_BPTC); +- +- adapter->stats.tpt += rd32(E1000_TPT); +- adapter->stats.colc += rd32(E1000_COLC); +- +- adapter->stats.algnerrc += rd32(E1000_ALGNERRC); +- /* read internal phy specific stats */ +- reg = rd32(E1000_CTRL_EXT); ++ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC); ++ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL); ++ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC); ++ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL); ++ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC); ++ adapter->stats.rlec += 
E1000_READ_REG(hw, E1000_RLEC); ++ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC); ++ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC); ++ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC); ++ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); ++ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC); ++ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC); ++ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL); ++ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */ ++ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC); ++ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC); ++ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC); ++ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC); ++ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH); ++ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH); ++ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR); ++ ++ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64); ++ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127); ++ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255); ++ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511); ++ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); ++ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); ++ ++ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC); ++ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC); ++ ++ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT); ++ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC); ++ ++ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); ++ /* read internal phy sepecific stats */ ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { +- adapter->stats.rxerrc += rd32(E1000_RXERRC); ++ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) +- adapter->stats.tncrs += rd32(E1000_TNCRS); ++ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS); + } ++ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC); ++ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + +- adapter->stats.tsctc += rd32(E1000_TSCTC); +- adapter->stats.tsctfc += rd32(E1000_TSCTFC); +- +- adapter->stats.iac += rd32(E1000_IAC); +- adapter->stats.icrxoc += rd32(E1000_ICRXOC); +- adapter->stats.icrxptc += rd32(E1000_ICRXPTC); +- adapter->stats.icrxatc += rd32(E1000_ICRXATC); +- adapter->stats.ictxptc += rd32(E1000_ICTXPTC); +- adapter->stats.ictxatc += rd32(E1000_ICTXATC); +- adapter->stats.ictxqec += rd32(E1000_ICTXQEC); +- adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); +- adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); ++ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC); ++ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); ++ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); ++ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); ++ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); ++ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); ++ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); ++ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); ++ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; +@@ -5365,24 +6073,20 @@ + /* Phy Stats */ + if (hw->phy.media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && +- 
(!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { ++ (!igb_e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + } + + /* Management Stats */ +- adapter->stats.mgptc += rd32(E1000_MGTPTC); +- adapter->stats.mgprc += rd32(E1000_MGTPRC); +- adapter->stats.mgpdc += rd32(E1000_MGTPDC); +- +- /* OS2BMC Stats */ +- reg = rd32(E1000_MANC); +- if (reg & E1000_MANC_EN_BMC2OS) { +- adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); +- adapter->stats.o2bspc += rd32(E1000_O2BSPC); +- adapter->stats.b2ospc += rd32(E1000_B2OSPC); +- adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); ++ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC); ++ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC); ++ if (hw->mac.type > e1000_82580) { ++ adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC); ++ adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC); ++ adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC); ++ adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC); + } + } + +@@ -5390,7 +6094,7 @@ + { + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; +- u32 icr = rd32(E1000_ICR); ++ u32 icr = E1000_READ_REG(hw, E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) +@@ -5417,18 +6121,24 @@ + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + ++#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { +- u32 tsicr = rd32(E1000_TSICR); ++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ +- wr32(E1000_TSICR, E1000_TSICR_TXTS); ++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ + +- wr32(E1000_EIMS, adapter->eims_other); ++ /* Check for MDD event */ ++ if (icr & E1000_ICR_MDDET) ++ igb_process_mdd_event(adapter); ++ ++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; + } +@@ -5465,7 +6175,7 @@ + return IRQ_HANDLED; + } + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +@@ -5474,9 +6184,10 @@ + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) +- txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; ++ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576; + +- /* We can enable relaxed ordering for reads, but not writes when ++ /* ++ * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ +@@ -5484,7 +6195,7 @@ + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + +- wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); ++ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); + } + + static void igb_update_rx_dca(struct igb_adapter *adapter, +@@ -5495,16 +6206,17 @@ + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) +- rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; ++ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576; + +- /* We can enable relaxed ordering for reads, but not writes when ++ /* ++ * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + +- wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); ++ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); + } + + static void igb_update_dca(struct igb_q_vector *q_vector) +@@ -5535,7 +6247,7 @@ + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ +- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); ++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; +@@ -5556,9 +6268,9 @@ + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; +- if (dca_add_requester(dev) == 0) { ++ if (dca_add_requester(dev) == E1000_SUCCESS) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; +- dev_info(&pdev->dev, "DCA enabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } +@@ -5569,14 +6281,15 @@ + * hanging around in the sysfs model + */ + dca_remove_requester(dev); +- dev_info(&pdev->dev, "DCA disabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; +- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); ++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, ++ E1000_DCA_CTRL_DCA_DISABLE); + } + break; + } + +- return 0; ++ return E1000_SUCCESS; + } + + static int igb_notify_dca(struct notifier_block *nb, unsigned long event, +@@ -5585,27 +6298,29 @@ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, +- __igb_notify_dca); ++ __igb_notify_dca); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; + } +-#endif /* CONFIG_IGB_DCA */ ++#endif /* IGB_DCA */ + +-#ifdef CONFIG_PCI_IOV + static int igb_vf_configure(struct igb_adapter *adapter, int vf) + { + unsigned char mac_addr[ETH_ALEN]; + +- eth_zero_addr(mac_addr); ++ random_ether_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + ++#ifdef IFLA_VF_MAX ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; ++#endif ++#endif + +- return 0; ++ return true; + } + +-#endif + static void igb_ping_all_vfs(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +@@ -5616,26 +6331,71 @@ + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; +- igb_write_mbx(hw, &ping, 1, i); ++ e1000_write_mbx(hw, &ping, 1, i); + } + } + ++/** ++ * igb_mta_set_ - Set multicast filter table address ++ * @adapter: pointer to the adapter structure ++ * @hash_value: determines the MTA register and bit to set ++ * ++ * The multicast table address is a register array of 32-bit registers. ++ * The hash_value is used to determine what register the bit is in, the ++ * current value is read, the new bit is OR'd in and the new value is ++ * written back into the register. ++ **/ ++void igb_mta_set(struct igb_adapter *adapter, u32 hash_value) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u32 hash_bit, hash_reg, mta; ++ ++ /* ++ * The MTA is a register array of 32-bit registers. It is ++ * treated like an array of (32*mta_reg_count) bits. We want to ++ * set bit BitArray[hash_value]. So we figure out what register ++ * the bit is in, read it, OR in the new bit, then write ++ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a ++ * mask to bits 31:5 of the hash value which gives us the ++ * register we're modifying. 
The hash bit within that register ++ * is determined by the lower 5 bits of the hash value. ++ */ ++ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); ++ hash_bit = hash_value & 0x1F; ++ ++ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); ++ ++ mta |= (1 << hash_bit); ++ ++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); ++ E1000_WRITE_FLUSH(hw); ++} ++ + static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) + { ++ + struct e1000_hw *hw = &adapter->hw; +- u32 vmolr = rd32(E1000_VMOLR(vf)); ++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + ++#ifdef IGB_ENABLE_VF_PROMISC ++ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) { ++ vmolr |= E1000_VMOLR_ROPE; ++ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC; ++ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST; ++ } ++#endif + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { +- /* if we have hashes and we are clearing a multicast promisc ++ /* ++ * if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ +@@ -5646,17 +6406,18 @@ + + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) +- igb_mta_set(hw, vf_data->vf_mc_hashes[j]); ++ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); + } + } + +- wr32(E1000_VMOLR(vf), vmolr); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; ++ + } + + static int igb_set_vf_multicasts(struct igb_adapter *adapter, +@@ -5694,7 +6455,7 @@ + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { +- u32 vmolr = rd32(E1000_VMOLR(i)); ++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); + + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + +@@ -5706,9 +6467,9 @@ + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) +- igb_mta_set(hw, vf_data->vf_mc_hashes[j]); ++ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); + } +- wr32(E1000_VMOLR(i), vmolr); ++ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); + } + } + +@@ -5716,13 +6477,14 @@ + { + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, reg, vid; ++ u16 vlan_default; + int i; + + pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + + /* remove the vf from the pool */ + reg &= ~pool_mask; +@@ -5732,16 +6494,20 @@ + (reg & E1000_VLVF_VLANID_ENABLE)) { + reg = 0; + vid = reg & E1000_VLVF_VLANID_MASK; +- igb_vfta_set(hw, vid, false); ++ igb_vfta_set(adapter, vid, FALSE); + } + +- wr32(E1000_VLVF(i), reg); ++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); + } + + adapter->vf_data[vf].vlans_enabled = 0; ++ ++ vlan_default = adapter->vf_data[vf].default_vf_vlan_id; ++ if (vlan_default) ++ igb_vlvf_set(adapter, vlan_default, true, vf); + } + +-static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) ++s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) + { + struct e1000_hw *hw = &adapter->hw; + 
u32 reg, i; +@@ -5751,12 +6517,12 @@ + return -1; + + /* we only need to do this if VMDq is enabled */ +- if (!adapter->vfs_allocated_count) ++ if (!adapter->vmdq_pools) + return -1; + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; +@@ -5769,7 +6535,7 @@ + * one without the enable bit set + */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if (!(reg & E1000_VLVF_VLANID_ENABLE)) + break; + } +@@ -5781,26 +6547,26 @@ + /* if !enabled we need to set this up in vfta */ + if (!(reg & E1000_VLVF_VLANID_ENABLE)) { + /* add VID to filter table */ +- igb_vfta_set(hw, vid, true); ++ igb_vfta_set(adapter, vid, TRUE); + reg |= E1000_VLVF_VLANID_ENABLE; + } + reg &= ~E1000_VLVF_VLANID_MASK; + reg |= vid; +- wr32(E1000_VLVF(i), reg); ++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) +- return 0; ++ return E1000_SUCCESS; + + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + +- reg = rd32(E1000_VMOLR(vf)); ++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size += 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; +- wr32(E1000_VMOLR(vf), reg); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); + } + + adapter->vf_data[vf].vlans_enabled++; +@@ -5812,38 +6578,40 @@ + /* if pool is empty then remove entry from vfta */ + if (!(reg & E1000_VLVF_POOLSEL_MASK)) { + reg = 0; +- igb_vfta_set(hw, vid, false); ++ igb_vfta_set(adapter, vid, FALSE); + } +- wr32(E1000_VLVF(i), reg); ++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) +- return 0; ++ return E1000_SUCCESS; + + adapter->vf_data[vf].vlans_enabled--; + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + +- reg = rd32(E1000_VMOLR(vf)); ++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size -= 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; +- wr32(E1000_VMOLR(vf), reg); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); + } + } + } +- return 0; ++ return E1000_SUCCESS; + } + ++#ifdef IFLA_VF_MAX + static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) + { + struct e1000_hw *hw = &adapter->hw; + + if (vid) +- wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); ++ E1000_WRITE_REG(hw, E1000_VMVIR(vf), ++ (vid | E1000_VMVIR_VLANA_DEFAULT)); + else +- wr32(E1000_VMVIR(vf), 0); ++ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); + } + + static int igb_ndo_set_vf_vlan(struct net_device *netdev, +@@ -5852,7 +6620,9 @@ + int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); + +- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) ++ /* VLAN IDs accepted range 0-4094 */ ++ if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1) ++ || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = igb_vlvf_set(adapter, vlan, !!vlan, vf); +@@ -5862,6 +6632,7 @@ + igb_set_vmolr(adapter, vf, !vlan); + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; ++ igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { +@@ -5871,10 +6642,14 @@ + "Bring the PF device up 
before attempting to use the VF device.\n"); + } + } else { ++ if (adapter->vf_data[vf].pf_vlan) ++ dev_info(&adapter->pdev->dev, ++ "Clearing VLAN on VF %d\n", vf); + igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, +- false, vf); ++ false, vf); + igb_set_vmvir(adapter, vlan, vf); + igb_set_vmolr(adapter, vf, true); ++ igb_set_vf_vlan_strip(adapter, vf, false); + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + } +@@ -5882,6 +6657,36 @@ + return err; + } + ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ++static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, ++ bool setting) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; ++ u32 dtxswc, reg_offset; ++ ++ if (!adapter->vfs_allocated_count) ++ return -EOPNOTSUPP; ++ ++ if (vf >= adapter->vfs_allocated_count) ++ return -EINVAL; ++ ++ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; ++ dtxswc = E1000_READ_REG(hw, reg_offset); ++ if (setting) ++ dtxswc |= ((1 << vf) | ++ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); ++ else ++ dtxswc &= ~((1 << vf) | ++ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); ++ E1000_WRITE_REG(hw, reg_offset, dtxswc); ++ ++ adapter->vf_data[vf].spoofchk_enabled = setting; ++ return E1000_SUCCESS; ++} ++#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ ++#endif /* IFLA_VF_MAX */ ++ + static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) + { + struct e1000_hw *hw = &adapter->hw; +@@ -5890,7 +6695,7 @@ + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; +@@ -5909,6 +6714,11 @@ + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int err = 0; + ++ if (vid) ++ igb_set_vf_vlan_strip(adapter, vf, true); ++ else ++ igb_set_vf_vlan_strip(adapter, vf, false); ++ + /* If in promiscuous mode we need to make sure the PF also has + * the VLAN filter set. + */ +@@ -5928,6 +6738,7 @@ + */ + if (!add && (adapter->netdev->flags & IFF_PROMISC)) { + u32 vlvf, bits; ++ + int regndx = igb_find_vlvf_entry(adapter, vid); + + if (regndx < 0) +@@ -5935,7 +6746,7 @@ + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ +- vlvf = bits = rd32(E1000_VLVF(regndx)); ++ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx)); + bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + /* If the filter was removed then ensure PF pool bit +@@ -5943,7 +6754,9 @@ + * because the PF is in promiscuous mode. 
+ */ + if ((vlvf & VLAN_VID_MASK) == vid && ++#ifndef HAVE_VLAN_RX_REGISTER + !test_bit(vid, adapter->active_vlans) && ++#endif + !bits) + igb_vlvf_set(adapter, vid, add, + adapter->vfs_allocated_count); +@@ -5955,7 +6768,9 @@ + + static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) + { +- /* clear flags - except flag that indicates PF has set the MAC */ ++ struct e1000_hw *hw = &adapter->hw; ++ ++ /* clear flags except flag that the PF has set the MAC */ + adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; + adapter->vf_data[vf].last_nack = jiffies; + +@@ -5964,27 +6779,40 @@ + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); ++#ifdef IFLA_VF_MAX + if (adapter->vf_data[vf].pf_vlan) + igb_ndo_set_vf_vlan(adapter->netdev, vf, + adapter->vf_data[vf].pf_vlan, + adapter->vf_data[vf].pf_qos); + else + igb_clear_vf_vfta(adapter, vf); ++#endif + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); ++ ++ /* ++ * Reset the VFs TDWBAL and TDWBAH registers which are not ++ * cleared by a VFLR ++ */ ++ E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0); ++ E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0); ++ if (hw->mac.type == e1000_82576) { ++ E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0); ++ E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0); ++ } + } + + static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) + { + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + +- /* clear mac address as we were hotplug removed/added */ ++ /* generate a new mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) +- eth_zero_addr(vf_mac); ++ random_ether_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +@@ -6005,25 +6833,26 @@ + igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + + /* enable transmit and receive for vf */ +- reg = rd32(E1000_VFTE); +- wr32(E1000_VFTE, reg | (1 << vf)); +- reg = rd32(E1000_VFRE); +- wr32(E1000_VFRE, reg | (1 << vf)); ++ reg = E1000_READ_REG(hw, E1000_VFTE); ++ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf)); ++ reg = E1000_READ_REG(hw, E1000_VFRE); ++ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; +- memcpy(addr, vf_mac, ETH_ALEN); +- igb_write_mbx(hw, msgbuf, 3, vf); ++ memcpy(addr, vf_mac, 6); ++ e1000_write_mbx(hw, msgbuf, 3, vf); + } + + static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) + { +- /* The VF MAC Address is stored in a packed array of bytes ++ /* ++ * The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ +- unsigned char *addr = (char *)&msg[1]; ++ unsigned char *addr = (unsigned char *)&msg[1]; + int err = -1; + + if (is_valid_ether_addr(addr)) +@@ -6041,7 +6870,7 @@ + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { +- igb_write_mbx(hw, &msg, 1, vf); ++ e1000_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } + } +@@ -6054,45 +6883,47 @@ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + +- retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); ++ retval = 
e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + + if (retval) { +- /* if receive failed revoke VF CTS stats and restart init */ +- dev_err(&pdev->dev, "Error receiving message from VF\n"); +- vf_data->flags &= ~IGB_VF_FLAG_CTS; +- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) +- return; +- goto out; ++ dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n"); ++ return; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + return; + +- /* until the vf completes a reset it should not be ++ /* ++ * until the vf completes a reset it should not be + * allowed to start any configuration. + */ ++ + if (msgbuf[0] == E1000_VF_RESET) { + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { +- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) +- return; +- retval = -1; +- goto out; ++ msgbuf[0] = E1000_VT_MSGTYPE_NACK; ++ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) { ++ e1000_write_mbx(hw, msgbuf, 1, vf); ++ vf_data->last_nack = jiffies; ++ } ++ return; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = -EINVAL; ++#ifndef IGB_DISABLE_VF_MAC_SET + if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + else +- dev_warn(&pdev->dev, +- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", +- vf); ++ DPRINTK(DRV, INFO, ++ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", ++ vf); ++#endif + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); +@@ -6105,28 +6936,31 @@ + break; + case E1000_VF_SET_VLAN: + retval = -1; ++#ifdef IFLA_VF_MAX + if (vf_data->pf_vlan) +- dev_warn(&pdev->dev, +- "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", +- vf); ++ DPRINTK(DRV, INFO, ++ "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", ++ vf); + else ++#endif + retval = igb_set_vf_vlan(adapter, msgbuf, vf); + break; + default: +- dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); +- retval = -1; ++ dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", ++ msgbuf[0]); ++ retval = -E1000_ERR_MBX; + break; + } + +- msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +-out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + +- igb_write_mbx(hw, msgbuf, 1, vf); ++ msgbuf[0] |= E1000_VT_MSGTYPE_CTS; ++ ++ e1000_write_mbx(hw, msgbuf, 1, vf); + } + + static void igb_msg_task(struct igb_adapter *adapter) +@@ -6136,15 +6970,15 @@ + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ +- if (!igb_check_for_rst(hw, vf)) ++ if (!e1000_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ +- if (!igb_check_for_msg(hw, vf)) ++ if (!e1000_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ +- if (!igb_check_for_ack(hw, vf)) ++ if (!e1000_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } + } +@@ -6169,17 +7003,17 @@ + return; + + /* we only need to do this if VMDq is enabled */ +- if (!adapter->vfs_allocated_count) ++ if (!adapter->vmdq_pools) + return; + + for (i = 0; i < hw->mac.uta_reg_count; i++) +- array_wr32(E1000_UTA, i, 
~0); ++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0); + } + + /** +- * igb_intr_msi - Interrupt Handler +- * @irq: interrupt number +- * @data: pointer to a network interface device structure ++ * igb_intr_msi - Interrupt Handler ++ * @irq: interrupt number ++ * @data: pointer to a network interface device structure + **/ + static irqreturn_t igb_intr_msi(int irq, void *data) + { +@@ -6187,7 +7021,7 @@ + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ +- u32 icr = rd32(E1000_ICR); ++ u32 icr = E1000_READ_REG(hw, E1000_ICR); + + igb_write_itr(q_vector); + +@@ -6205,16 +7039,18 @@ + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + ++#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { +- u32 tsicr = rd32(E1000_TSICR); ++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ +- wr32(E1000_TSICR, E1000_TSICR_TXTS); ++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ + + napi_schedule(&q_vector->napi); + +@@ -6222,9 +7058,9 @@ + } + + /** +- * igb_intr - Legacy Interrupt Handler +- * @irq: interrupt number +- * @data: pointer to a network interface device structure ++ * igb_intr - Legacy Interrupt Handler ++ * @irq: interrupt number ++ * @data: pointer to a network interface device structure + **/ + static irqreturn_t igb_intr(int irq, void *data) + { +@@ -6234,7 +7070,7 @@ + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ +- u32 icr = rd32(E1000_ICR); ++ u32 icr = E1000_READ_REG(hw, E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt +@@ -6259,23 +7095,25 @@ + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + ++#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { +- u32 tsicr = rd32(E1000_TSICR); ++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ +- wr32(E1000_TSICR, E1000_TSICR_TXTS); ++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; + } + +-static void igb_ring_irq_enable(struct igb_q_vector *q_vector) ++void igb_ring_irq_enable(struct igb_q_vector *q_vector) + { + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; +@@ -6289,26 +7127,25 @@ + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { +- if (adapter->flags & IGB_FLAG_HAS_MSIX) +- wr32(E1000_EIMS, q_vector->eims_value); ++ if (adapter->msix_entries) ++ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } + } + + /** +- * igb_poll - NAPI Rx polling callback +- * @napi: napi polling structure +- * @budget: count of how many packets we should handle ++ * igb_poll - NAPI Rx polling callback ++ * @napi: napi polling structure ++ * @budget: count of how many packets we should handle + **/ + static int igb_poll(struct napi_struct *napi, int budget) + { + struct igb_q_vector *q_vector = container_of(napi, +- struct igb_q_vector, +- napi); ++ struct igb_q_vector, napi); + bool clean_complete = true; + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + 
igb_update_dca(q_vector); + #endif +@@ -6318,6 +7155,12 @@ + if (q_vector->rx.ring) + clean_complete &= igb_clean_rx_irq(q_vector, budget); + ++#ifndef HAVE_NETDEV_NAPI_LIST ++ /* if netdev is disabled we need to stop polling */ ++ if (!netif_running(q_vector->adapter->netdev)) ++ clean_complete = true; ++ ++#endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; +@@ -6330,10 +7173,9 @@ + } + + /** +- * igb_clean_tx_irq - Reclaim resources after transmit completes +- * @q_vector: pointer to q_vector containing needed info +- * +- * returns true if ring is completely cleaned ++ * igb_clean_tx_irq - Reclaim resources after transmit completes ++ * @q_vector: pointer to q_vector containing needed info ++ * returns TRUE if ring is completely cleaned + **/ + static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) + { +@@ -6426,16 +7268,20 @@ + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); ++ + i += tx_ring->count; + tx_ring->next_to_clean = i; +- u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; +- u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + ++#ifdef DEBUG ++ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) && ++ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) { ++#else + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { ++#endif + struct e1000_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the +@@ -6444,10 +7290,23 @@ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + +- (adapter->tx_timeout_factor * HZ)) && +- !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { ++ (adapter->tx_timeout_factor * HZ)) ++ && !(E1000_READ_REG(hw, E1000_STATUS) & ++ E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ ++#ifdef DEBUG ++ adapter->tx_hang_detected = TRUE; ++ if (adapter->disable_hw_reset) { ++ DPRINTK(DRV, WARNING, ++ "Deactivating netdev watchdog timer\n"); ++ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer)) ++ dev_put(netdev_ring(tx_ring)); ++#ifndef HAVE_NET_DEVICE_OPS ++ netdev_ring(tx_ring)->tx_timeout = NULL; ++#endif ++ } ++#endif /* DEBUG */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" +@@ -6461,7 +7320,7 @@ + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, +- rd32(E1000_TDH(tx_ring->reg_idx)), ++ E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, +@@ -6469,8 +7328,11 @@ + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); +- netif_stop_subqueue(tx_ring->netdev, +- tx_ring->queue_index); ++ if (netif_is_multiqueue(netdev_ring(tx_ring))) ++ netif_stop_subqueue(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)); ++ else ++ netif_stop_queue(netdev_ring(tx_ring)); + + /* we are about to reset, no point in enabling stuff */ + return true; +@@ -6479,33 +7341,63 @@ + + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && +- netif_carrier_ok(tx_ring->netdev) && +- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { ++ netif_carrier_ok(netdev_ring(tx_ring)) && ++ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); +- if (__netif_subqueue_stopped(tx_ring->netdev, +- tx_ring->queue_index) && +- !(test_bit(__IGB_DOWN, &adapter->state))) { +- netif_wake_subqueue(tx_ring->netdev, +- tx_ring->queue_index); +- +- u64_stats_update_begin(&tx_ring->tx_syncp); +- tx_ring->tx_stats.restart_queue++; +- u64_stats_update_end(&tx_ring->tx_syncp); ++ if (netif_is_multiqueue(netdev_ring(tx_ring))) { ++ if (__netif_subqueue_stopped(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)) && ++ !(test_bit(__IGB_DOWN, &adapter->state))) { ++ netif_wake_subqueue(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)); ++ tx_ring->tx_stats.restart_queue++; ++ } ++ } else { ++ if (netif_queue_stopped(netdev_ring(tx_ring)) && ++ !(test_bit(__IGB_DOWN, &adapter->state))) { ++ netif_wake_queue(netdev_ring(tx_ring)); ++ tx_ring->tx_stats.restart_queue++; ++ } + } + } + + return !!budget; + } + ++#ifdef HAVE_VLAN_RX_REGISTER ++/** ++ * igb_receive_skb - helper function to handle rx indications ++ * @q_vector: structure containing interrupt and ring information ++ * @skb: packet to send up ++ **/ ++static void igb_receive_skb(struct igb_q_vector *q_vector, ++ struct sk_buff *skb) ++{ ++ struct vlan_group **vlgrp = netdev_priv(skb->dev); ++ ++ if (IGB_CB(skb)->vid) { ++ if (*vlgrp) { ++ vlan_gro_receive(&q_vector->napi, *vlgrp, ++ IGB_CB(skb)->vid, skb); ++ } else { ++ dev_kfree_skb_any(skb); ++ } ++ } else { ++ napi_gro_receive(&q_vector->napi, skb); ++ } ++} ++ ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + /** +- * igb_reuse_rx_page - page flip buffer and store it back on the ring +- * @rx_ring: rx descriptor ring to store buffers on +- * @old_buff: donor buffer to have page reused ++ * igb_reuse_rx_page - page flip buffer and store it back on the ring ++ * @rx_ring: rx descriptor ring to store buffers on ++ * @old_buff: donor buffer to have page reused + * +- * Synchronizes page for reuse by the adapter ++ * Synchronizes page for reuse by the adapter + **/ + static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +@@ -6545,39 +7437,34 @@ + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IGB_RX_BUFSZ; + +- /* since we are the only owner of the page and we need to +- * increment it, just set the value to 2 in order to avoid +- * an unnecessary locked operation +- */ +- atomic_set(&page->_count, 2); + #else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + return false; ++#endif + + /* bump ref count on page before it is given to the stack */ + get_page(page); +-#endif + + return true; + } + + /** +- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff +- * @rx_ring: rx descriptor ring to transact packets on +- * @rx_buffer: buffer containing page to add +- * @rx_desc: descriptor containing length of buffer written by hardware +- * @skb: sk_buff to place the data into +- * +- * This function will add the data contained in rx_buffer->page to the skb. +- * This is done either through a direct copy if the data in the buffer is +- * less than the skb header size, otherwise it will just attach the page as +- * a frag to the skb. 
++ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff ++ * @rx_ring: rx descriptor ring to transact packets on ++ * @rx_buffer: buffer containing page to add ++ * @rx_desc: descriptor containing length of buffer written by hardware ++ * @skb: sk_buff to place the data into ++ * ++ * This function will add the data contained in rx_buffer->page to the skb. ++ * This is done either through a direct copy if the data in the buffer is ++ * less than the skb header size, otherwise it will just attach the page as ++ * a frag to the skb. + * +- * The function will then update the page offset if necessary and return +- * true if the buffer can be reused by the adapter. ++ * The function will then update the page offset if necessary and return ++ * true if the buffer can be reused by the adapter. + **/ + static bool igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, +@@ -6585,22 +7472,27 @@ + struct sk_buff *skb) + { + struct page *page = rx_buffer->page; ++ unsigned char *va = page_address(page) + rx_buffer->page_offset; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + #if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; + #else +- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); ++ unsigned int truesize = SKB_DATA_ALIGN(size); + #endif ++ unsigned int pull_len; + +- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { +- unsigned char *va = page_address(page) + rx_buffer->page_offset; ++ if (unlikely(skb_is_nonlinear(skb))) ++ goto add_tail_frag; + +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { +- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); +- va += IGB_TS_HDR_LEN; +- size -= IGB_TS_HDR_LEN; +- } ++#ifdef HAVE_PTP_1588_CLOCK ++ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { ++ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); ++ va += IGB_TS_HDR_LEN; ++ size -= IGB_TS_HDR_LEN; ++ } ++#endif /* HAVE_PTP_1588_CLOCK */ + ++ if (likely(size <= IGB_RX_HDR_LEN)) { + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ +@@ -6612,8 +7504,21 @@ + return false; + } + ++ /* we need the header to contain the greater of either ETH_HLEN or ++ * 60 bytes if the skb->len is less than 60 for skb_pad. 
++ */ ++ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); ++ ++ /* align pull length to size of long to optimize memcpy performance */ ++ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); ++ ++ /* update all of the pointers */ ++ va += pull_len; ++ size -= pull_len; ++ ++add_tail_frag: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, +- rx_buffer->page_offset, size, truesize); ++ (unsigned long)va & ~PAGE_MASK, size, truesize); + + return igb_can_reuse_rx_page(rx_buffer, page, truesize); + } +@@ -6648,7 +7553,8 @@ + return NULL; + } + +- /* we will be copying header into skb->data in ++ /* ++ * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ +@@ -6672,72 +7578,606 @@ + PAGE_SIZE, DMA_FROM_DEVICE); + } + +- /* clear contents of rx_buffer */ +- rx_buffer->page = NULL; ++ /* clear contents of rx_buffer */ ++ rx_buffer->page = NULL; ++ ++ return skb; ++} ++ ++#endif ++static inline void igb_rx_checksum(struct igb_ring *ring, ++ union e1000_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ skb_checksum_none_assert(skb); ++ ++ /* Ignore Checksum bit is set */ ++ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) ++ return; ++ ++ /* Rx checksum disabled via ethtool */ ++ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) ++ return; ++ ++ /* TCP/UDP checksum error bit is set */ ++ if (igb_test_staterr(rx_desc, ++ E1000_RXDEXT_STATERR_TCPE | ++ E1000_RXDEXT_STATERR_IPE)) { ++ /* ++ * work around errata with sctp packets where the TCPE aka ++ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) ++ * packets, (aka let the stack check the crc32c) ++ */ ++ if (!((skb->len == 60) && ++ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) ++ ring->rx_stats.csum_err++; ++ ++ /* let the stack verify checksum errors */ ++ return; ++ } ++ /* It must be a TCP or UDP packet with a valid checksum */ ++ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | ++ E1000_RXD_STAT_UDPCS)) ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++#ifdef NETIF_F_RXHASH ++static inline void igb_rx_hash(struct igb_ring *ring, ++ union e1000_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ if (netdev_ring(ring)->features & NETIF_F_RXHASH) ++ skb_set_hash(skb, ++ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), ++ PKT_HASH_TYPE_L3); ++} ++ ++#endif ++#ifndef IGB_NO_LRO ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++/** ++ * igb_merge_active_tail - merge active tail into lro skb ++ * @tail: pointer to active tail in frag_list ++ * ++ * This function merges the length and data of an active tail into the ++ * skb containing the frag_list. It resets the tail's pointer to the head, ++ * but it leaves the heads pointer to tail intact. ++ **/ ++static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail) ++{ ++ struct sk_buff *head = IGB_CB(tail)->head; ++ ++ if (!head) ++ return tail; ++ ++ head->len += tail->len; ++ head->data_len += tail->len; ++ head->truesize += tail->len; ++ ++ IGB_CB(tail)->head = NULL; ++ ++ return head; ++} ++ ++/** ++ * igb_add_active_tail - adds an active tail into the skb frag_list ++ * @head: pointer to the start of the skb ++ * @tail: pointer to active tail to add to frag_list ++ * ++ * This function adds an active tail to the end of the frag list. This tail ++ * will still be receiving data so we cannot yet ad it's stats to the main ++ * skb. That is done via igb_merge_active_tail. 
++ **/ ++static inline void igb_add_active_tail(struct sk_buff *head, ++ struct sk_buff *tail) ++{ ++ struct sk_buff *old_tail = IGB_CB(head)->tail; ++ ++ if (old_tail) { ++ igb_merge_active_tail(old_tail); ++ old_tail->next = tail; ++ } else { ++ skb_shinfo(head)->frag_list = tail; ++ } ++ ++ IGB_CB(tail)->head = head; ++ IGB_CB(head)->tail = tail; ++ ++ IGB_CB(head)->append_cnt++; ++} ++ ++/** ++ * igb_close_active_frag_list - cleanup pointers on a frag_list skb ++ * @head: pointer to head of an active frag list ++ * ++ * This function will clear the frag_tail_tracker pointer on an active ++ * frag_list and returns true if the pointer was actually set ++ **/ ++static inline bool igb_close_active_frag_list(struct sk_buff *head) ++{ ++ struct sk_buff *tail = IGB_CB(head)->tail; ++ ++ if (!tail) ++ return false; ++ ++ igb_merge_active_tail(tail); ++ ++ IGB_CB(head)->tail = NULL; ++ ++ return true; ++} ++ ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++/** ++ * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled ++ * @adapter: board private structure ++ * @rx_desc: pointer to the rx descriptor ++ * @skb: pointer to the skb to be merged ++ * ++ **/ ++static inline bool igb_can_lro(struct igb_ring *rx_ring, ++ union e1000_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ struct iphdr *iph = (struct iphdr *)skb->data; ++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; ++ ++ /* verify hardware indicates this is IPv4/TCP */ ++ if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) || ++ !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4)))) ++ return false; ++ ++ /* .. and LRO is enabled */ ++ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO)) ++ return false; ++ ++ /* .. and we are not in promiscuous mode */ ++ if (netdev_ring(rx_ring)->flags & IFF_PROMISC) ++ return false; ++ ++ /* .. and the header is large enough for us to read IP/TCP fields */ ++ if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr))) ++ return false; ++ ++ /* .. and there are no VLANs on packet */ ++ if (skb->protocol != htons(ETH_P_IP)) ++ return false; ++ ++ /* .. and we are version 4 with no options */ ++ if (*(u8 *)iph != 0x45) ++ return false; ++ ++ /* .. and the packet is not fragmented */ ++ if (iph->frag_off & htons(IP_MF | IP_OFFSET)) ++ return false; ++ ++ /* .. and that next header is TCP */ ++ if (iph->protocol != IPPROTO_TCP) ++ return false; ++ ++ return true; ++} ++ ++static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb) ++{ ++ return (struct igb_lrohdr *)skb->data; ++} ++ ++/** ++ * igb_lro_flush - Indicate packets to upper layer. ++ * ++ * Update IP and TCP header part of head skb if more than one ++ * skb's chained and indicate packets to upper layer. 
++ **/ ++static void igb_lro_flush(struct igb_q_vector *q_vector, ++ struct sk_buff *skb) ++{ ++ struct igb_lro_list *lrolist = &q_vector->lrolist; ++ ++ __skb_unlink(skb, &lrolist->active); ++ ++ if (IGB_CB(skb)->append_cnt) { ++ struct igb_lrohdr *lroh = igb_lro_hdr(skb); ++ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ /* close any active lro contexts */ ++ igb_close_active_frag_list(skb); ++ ++#endif ++ /* incorporate ip header and re-calculate checksum */ ++ lroh->iph.tot_len = ntohs(skb->len); ++ lroh->iph.check = 0; ++ ++ /* header length is 5 since we know no options exist */ ++ lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); ++ ++ /* clear TCP checksum to indicate we are an LRO frame */ ++ lroh->th.check = 0; ++ ++ /* incorporate latest timestamp into the tcp header */ ++ if (IGB_CB(skb)->tsecr) { ++ lroh->ts[2] = IGB_CB(skb)->tsecr; ++ lroh->ts[1] = htonl(IGB_CB(skb)->tsval); ++ } ++#ifdef NETIF_F_GSO ++ ++#ifdef NAPI_GRO_CB ++ NAPI_GRO_CB(skb)->data_offset = 0; ++#endif ++ skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss; ++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; ++#endif ++ } ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, skb); ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++ lrolist->stats.flushed++; ++} ++ ++static void igb_lro_flush_all(struct igb_q_vector *q_vector) ++{ ++ struct igb_lro_list *lrolist = &q_vector->lrolist; ++ struct sk_buff *skb, *tmp; ++ ++ skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) ++ igb_lro_flush(q_vector, skb); ++} ++ ++/* ++ * igb_lro_header_ok - Main LRO function. ++ **/ ++static void igb_lro_header_ok(struct sk_buff *skb) ++{ ++ struct igb_lrohdr *lroh = igb_lro_hdr(skb); ++ u16 opt_bytes, data_len; ++ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ IGB_CB(skb)->tail = NULL; ++#endif ++ IGB_CB(skb)->tsecr = 0; ++ IGB_CB(skb)->append_cnt = 0; ++ IGB_CB(skb)->mss = 0; ++ ++ /* ensure that the checksum is valid */ ++ if (skb->ip_summed != CHECKSUM_UNNECESSARY) ++ return; ++ ++ /* If we see CE codepoint in IP header, packet is not mergeable */ ++ if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) ++ return; ++ ++ /* ensure no bits set besides ack or psh */ ++ if (lroh->th.fin || lroh->th.syn || lroh->th.rst || ++ lroh->th.urg || lroh->th.ece || lroh->th.cwr || ++ !lroh->th.ack) ++ return; ++ ++ /* store the total packet length */ ++ data_len = ntohs(lroh->iph.tot_len); ++ ++ /* remove any padding from the end of the skb */ ++ __pskb_trim(skb, data_len); ++ ++ /* remove header length from data length */ ++ data_len -= sizeof(struct igb_lrohdr); ++ ++ /* ++ * check for timestamps. 
Since the only option we handle are timestamps, ++ * we only have to handle the simple case of aligned timestamps ++ */ ++ opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); ++ if (opt_bytes != 0) { ++ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || ++ !pskb_may_pull(skb, sizeof(struct igb_lrohdr) + ++ TCPOLEN_TSTAMP_ALIGNED) || ++ (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | ++ (TCPOPT_NOP << 16) | ++ (TCPOPT_TIMESTAMP << 8) | ++ TCPOLEN_TIMESTAMP)) || ++ (lroh->ts[2] == 0)) { ++ return; ++ } ++ ++ IGB_CB(skb)->tsval = ntohl(lroh->ts[1]); ++ IGB_CB(skb)->tsecr = lroh->ts[2]; ++ ++ data_len -= TCPOLEN_TSTAMP_ALIGNED; ++ } ++ ++ /* record data_len as mss for the packet */ ++ IGB_CB(skb)->mss = data_len; ++ IGB_CB(skb)->next_seq = ntohl(lroh->th.seq); ++} ++ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) ++{ ++ struct skb_shared_info *sh_info; ++ struct skb_shared_info *new_skb_info; ++ unsigned int data_len; ++ ++ sh_info = skb_shinfo(lro_skb); ++ new_skb_info = skb_shinfo(new_skb); ++ ++ /* copy frags into the last skb */ ++ memcpy(sh_info->frags + sh_info->nr_frags, ++ new_skb_info->frags, ++ new_skb_info->nr_frags * sizeof(skb_frag_t)); ++ ++ /* copy size data over */ ++ sh_info->nr_frags += new_skb_info->nr_frags; ++ data_len = IGB_CB(new_skb)->mss; ++ lro_skb->len += data_len; ++ lro_skb->data_len += data_len; ++ lro_skb->truesize += data_len; ++ ++ /* wipe record of data from new_skb */ ++ new_skb_info->nr_frags = 0; ++ new_skb->len = new_skb->data_len = 0; ++ dev_kfree_skb_any(new_skb); ++} ++ ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++/** ++ * igb_lro_receive - if able, queue skb into lro chain ++ * @q_vector: structure containing interrupt and ring information ++ * @new_skb: pointer to current skb being checked ++ * ++ * Checks whether the skb given is eligible for LRO and if that's ++ * fine chains it to the existing lro_skb based on flowid. If an LRO for ++ * the flow doesn't exist create one. 
++ **/ ++static void igb_lro_receive(struct igb_q_vector *q_vector, ++ struct sk_buff *new_skb) ++{ ++ struct sk_buff *lro_skb; ++ struct igb_lro_list *lrolist = &q_vector->lrolist; ++ struct igb_lrohdr *lroh = igb_lro_hdr(new_skb); ++ __be32 saddr = lroh->iph.saddr; ++ __be32 daddr = lroh->iph.daddr; ++ __be32 tcp_ports = *(__be32 *)&lroh->th; ++ u16 data_len; ++#ifdef HAVE_VLAN_RX_REGISTER ++ u16 vid = IGB_CB(new_skb)->vid; ++#else ++ u16 vid = new_skb->vlan_tci; ++#endif ++ ++ igb_lro_header_ok(new_skb); ++ ++ /* ++ * we have a packet that might be eligible for LRO, ++ * so see if it matches anything we might expect ++ */ ++ skb_queue_walk(&lrolist->active, lro_skb) { ++ if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports || ++ igb_lro_hdr(lro_skb)->iph.saddr != saddr || ++ igb_lro_hdr(lro_skb)->iph.daddr != daddr) ++ continue; ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ if (IGB_CB(lro_skb)->vid != vid) ++#else ++ if (lro_skb->vlan_tci != vid) ++#endif ++ continue; ++ ++ /* out of order packet */ ++ if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) { ++ igb_lro_flush(q_vector, lro_skb); ++ IGB_CB(new_skb)->mss = 0; ++ break; ++ } ++ ++ /* TCP timestamp options have changed */ ++ if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) { ++ igb_lro_flush(q_vector, lro_skb); ++ break; ++ } ++ ++ /* make sure timestamp values are increasing */ ++ if (IGB_CB(lro_skb)->tsecr && ++ IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) { ++ igb_lro_flush(q_vector, lro_skb); ++ IGB_CB(new_skb)->mss = 0; ++ break; ++ } ++ ++ data_len = IGB_CB(new_skb)->mss; ++ ++ /* Check for all of the above below ++ * malformed header ++ * no tcp data ++ * resultant packet would be too large ++ * new skb is larger than our current mss ++ * data would remain in header ++ * we would consume more frags then the sk_buff contains ++ * ack sequence numbers changed ++ * window size has changed ++ */ ++ if (data_len == 0 || ++ data_len > IGB_CB(lro_skb)->mss || ++ data_len > IGB_CB(lro_skb)->free || ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ data_len != new_skb->data_len || ++ skb_shinfo(new_skb)->nr_frags >= ++ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) || ++#endif ++ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || ++ igb_lro_hdr(lro_skb)->th.window != lroh->th.window) { ++ igb_lro_flush(q_vector, lro_skb); ++ break; ++ } ++ ++ /* Remove IP and TCP header*/ ++ skb_pull(new_skb, new_skb->len - data_len); ++ ++ /* update timestamp and timestamp echo response */ ++ IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval; ++ IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr; ++ ++ /* update sequence and free space */ ++ IGB_CB(lro_skb)->next_seq += data_len; ++ IGB_CB(lro_skb)->free -= data_len; ++ ++ /* update append_cnt */ ++ IGB_CB(lro_skb)->append_cnt++; ++ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ /* if header is empty pull pages into current skb */ ++ igb_merge_frags(lro_skb, new_skb); ++#else ++ /* chain this new skb in frag_list */ ++ igb_add_active_tail(lro_skb, new_skb); ++#endif ++ ++ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh || ++ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) { ++ igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; ++ igb_lro_flush(q_vector, lro_skb); ++ } ++ ++ lrolist->stats.coal++; ++ return; ++ } ++ ++ if (IGB_CB(new_skb)->mss && !lroh->th.psh) { ++ /* if we are at capacity flush the tail */ ++ if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) { ++ lro_skb = skb_peek_tail(&lrolist->active); ++ if (lro_skb) ++ igb_lro_flush(q_vector, lro_skb); ++ } ++ ++ /* update sequence 
and free space */ ++ IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss; ++ IGB_CB(new_skb)->free = 65521 - new_skb->len; + +- return skb; ++ /* .. and insert at the front of the active list */ ++ __skb_queue_head(&lrolist->active, new_skb); ++ ++ lrolist->stats.coal++; ++ return; ++ } ++ ++ /* packet not handled by any of the above, pass it to the stack */ ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, new_skb); ++#else ++ napi_gro_receive(&q_vector->napi, new_skb); ++#endif + } + +-static inline void igb_rx_checksum(struct igb_ring *ring, ++#endif /* IGB_NO_LRO */ ++/** ++ * igb_process_skb_fields - Populate skb header fields from Rx descriptor ++ * @rx_ring: rx descriptor ring packet is being transacted on ++ * @rx_desc: pointer to the EOP Rx descriptor ++ * @skb: pointer to current skb being populated ++ * ++ * This function checks the ring, descriptor, and packet information in ++ * order to populate the hash, checksum, VLAN, timestamp, protocol, and ++ * other fields within the skb. ++ **/ ++static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- skb_checksum_none_assert(skb); ++ struct net_device *dev = rx_ring->netdev; ++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; ++ bool notype; + +- /* Ignore Checksum bit is set */ +- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) +- return; ++#ifdef NETIF_F_RXHASH ++ igb_rx_hash(rx_ring, rx_desc, skb); + +- /* Rx checksum disabled via ethtool */ +- if (!(ring->netdev->features & NETIF_F_RXCSUM)) +- return; ++#endif ++ igb_rx_checksum(rx_ring, rx_desc, skb); + +- /* TCP/UDP checksum error bit is set */ +- if (igb_test_staterr(rx_desc, +- E1000_RXDEXT_STATERR_TCPE | +- E1000_RXDEXT_STATERR_IPE)) { +- /* work around errata with sctp packets where the TCPE aka +- * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) +- * packets, (aka let the stack check the crc32c) +- */ +- if (!((skb->len == 60) && +- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { +- u64_stats_update_begin(&ring->rx_syncp); +- ring->rx_stats.csum_err++; +- u64_stats_update_end(&ring->rx_syncp); +- } +- /* let the stack verify checksum errors */ +- return; ++ /* update packet type stats */ ++ switch (pkt_info & E1000_RXDADV_PKTTYPE_ILMASK) { ++ case E1000_RXDADV_PKTTYPE_IPV4: ++ rx_ring->pkt_stats.ipv4_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_IPV4_EX: ++ rx_ring->pkt_stats.ipv4e_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_IPV6: ++ rx_ring->pkt_stats.ipv6_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_IPV6_EX: ++ rx_ring->pkt_stats.ipv6e_packets++; ++ break; ++ default: ++ notype = true; ++ break; + } +- /* It must be a TCP or UDP packet with a valid checksum */ +- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | +- E1000_RXD_STAT_UDPCS)) +- skb->ip_summed = CHECKSUM_UNNECESSARY; + +- dev_dbg(ring->dev, "cksum success: bits %08X\n", +- le32_to_cpu(rx_desc->wb.upper.status_error)); +-} ++ switch (pkt_info & E1000_RXDADV_PKTTYPE_TLMASK) { ++ case E1000_RXDADV_PKTTYPE_TCP: ++ rx_ring->pkt_stats.tcp_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_UDP: ++ rx_ring->pkt_stats.udp_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_SCTP: ++ rx_ring->pkt_stats.sctp_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_NFS: ++ rx_ring->pkt_stats.nfs_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_NONE: ++ if (notype) ++ rx_ring->pkt_stats.other_packets++; ++ break; ++ default: ++ break; ++ } + +-static inline void igb_rx_hash(struct igb_ring *ring, +- 
union e1000_adv_rx_desc *rx_desc, +- struct sk_buff *skb) +-{ +- if (ring->netdev->features & NETIF_F_RXHASH) +- skb_set_hash(skb, +- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), +- PKT_HASH_TYPE_L3); ++#ifdef HAVE_PTP_1588_CLOCK ++ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && ++ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) ++ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); ++ ++#endif /* HAVE_PTP_1588_CLOCK */ ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ++#else ++ if ((dev->features & NETIF_F_HW_VLAN_RX) && ++#endif ++ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { ++ u16 vid = 0; ++ ++ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && ++ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) ++ vid = be16_to_cpu(rx_desc->wb.upper.vlan); ++ else ++ vid = le16_to_cpu(rx_desc->wb.upper.vlan); ++#ifdef HAVE_VLAN_RX_REGISTER ++ IGB_CB(skb)->vid = vid; ++ } else { ++ IGB_CB(skb)->vid = 0; ++#else ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); ++#endif ++ } ++ ++ skb_record_rx_queue(skb, rx_ring->queue_index); ++ ++ skb->protocol = eth_type_trans(skb, dev); + } + + /** +- * igb_is_non_eop - process handling of non-EOP buffers +- * @rx_ring: Rx ring being processed +- * @rx_desc: Rx descriptor for current buffer +- * @skb: current socket buffer containing buffer in progress +- * +- * This function updates next to clean. If the buffer is an EOP buffer +- * this function exits returning false, otherwise it will place the +- * sk_buff in the next buffer to be chained and return true indicating +- * that this is in fact a non-EOP buffer. ++ * igb_is_non_eop - process handling of non-EOP buffers ++ * @rx_ring: Rx ring being processed ++ * @rx_desc: Rx descriptor for current buffer ++ * ++ * This function updates next to clean. If the buffer is an EOP buffer ++ * this function exits returning false, otherwise it will place the ++ * sk_buff in the next buffer to be chained and return true indicating ++ * that this is in fact a non-EOP buffer. + **/ + static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +@@ -6756,200 +8196,134 @@ + return true; + } + +-/** +- * igb_get_headlen - determine size of header for LRO/GRO +- * @data: pointer to the start of the headers +- * @max_len: total length of section to find headers in +- * +- * This function is meant to determine the length of headers that will +- * be recognized by hardware for LRO, and GRO offloads. The main +- * motivation of doing this is to only perform one pull for IPv4 TCP +- * packets so that we can do basic things like calculating the gso_size +- * based on the average data per packet. 
+- **/ +-static unsigned int igb_get_headlen(unsigned char *data, +- unsigned int max_len) +-{ +- union { +- unsigned char *network; +- /* l2 headers */ +- struct ethhdr *eth; +- struct vlan_hdr *vlan; +- /* l3 headers */ +- struct iphdr *ipv4; +- struct ipv6hdr *ipv6; +- } hdr; +- __be16 protocol; +- u8 nexthdr = 0; /* default to not TCP */ +- u8 hlen; +- +- /* this should never happen, but better safe than sorry */ +- if (max_len < ETH_HLEN) +- return max_len; +- +- /* initialize network frame pointer */ +- hdr.network = data; +- +- /* set first protocol and move network header forward */ +- protocol = hdr.eth->h_proto; +- hdr.network += ETH_HLEN; +- +- /* handle any vlan tag if present */ +- if (protocol == htons(ETH_P_8021Q)) { +- if ((hdr.network - data) > (max_len - VLAN_HLEN)) +- return max_len; +- +- protocol = hdr.vlan->h_vlan_encapsulated_proto; +- hdr.network += VLAN_HLEN; +- } +- +- /* handle L3 protocols */ +- if (protocol == htons(ETH_P_IP)) { +- if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) +- return max_len; +- +- /* access ihl as a u8 to avoid unaligned access on ia64 */ +- hlen = (hdr.network[0] & 0x0F) << 2; +- +- /* verify hlen meets minimum size requirements */ +- if (hlen < sizeof(struct iphdr)) +- return hdr.network - data; +- +- /* record next protocol if header is present */ +- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) +- nexthdr = hdr.ipv4->protocol; +- } else if (protocol == htons(ETH_P_IPV6)) { +- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) +- return max_len; +- +- /* record next protocol */ +- nexthdr = hdr.ipv6->nexthdr; +- hlen = sizeof(struct ipv6hdr); +- } else { +- return hdr.network - data; +- } ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++/* igb_clean_rx_irq -- * legacy */ ++static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) ++{ ++ struct igb_ring *rx_ring = q_vector->rx.ring; ++ unsigned int total_bytes = 0, total_packets = 0; ++ u16 cleaned_count = igb_desc_unused(rx_ring); + +- /* relocate pointer to start of L4 header */ +- hdr.network += hlen; ++ do { ++ struct igb_rx_buffer *rx_buffer; ++ union e1000_adv_rx_desc *rx_desc; ++ struct sk_buff *skb; ++ u16 ntc; + +- /* finally sort out TCP */ +- if (nexthdr == IPPROTO_TCP) { +- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) +- return max_len; ++ /* return some buffers to hardware, one at a time is too slow */ ++ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { ++ igb_alloc_rx_buffers(rx_ring, cleaned_count); ++ cleaned_count = 0; ++ } + +- /* access doff as a u8 to avoid unaligned access on ia64 */ +- hlen = (hdr.network[12] & 0xF0) >> 2; ++ ntc = rx_ring->next_to_clean; ++ rx_desc = IGB_RX_DESC(rx_ring, ntc); ++ rx_buffer = &rx_ring->rx_buffer_info[ntc]; + +- /* verify hlen meets minimum size requirements */ +- if (hlen < sizeof(struct tcphdr)) +- return hdr.network - data; ++ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) ++ break; + +- hdr.network += hlen; +- } else if (nexthdr == IPPROTO_UDP) { +- if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) +- return max_len; ++ /* ++ * This memory barrier is needed to keep us from reading ++ * any other fields out of the rx_desc until we know the ++ * RXD_STAT_DD bit is set ++ */ ++ rmb(); + +- hdr.network += sizeof(struct udphdr); +- } ++ skb = rx_buffer->skb; + +- /* If everything has gone correctly hdr.network should be the +- * data section of the packet and will be the end of the header. +- * If not then it probably represents the end of the last recognized +- * header. 
+- */ +- if ((hdr.network - data) < max_len) +- return hdr.network - data; +- else +- return max_len; +-} ++ prefetch(skb->data); + +-/** +- * igb_pull_tail - igb specific version of skb_pull_tail +- * @rx_ring: rx descriptor ring packet is being transacted on +- * @rx_desc: pointer to the EOP Rx descriptor +- * @skb: pointer to current skb being adjusted +- * +- * This function is an igb specific version of __pskb_pull_tail. The +- * main difference between this version and the original function is that +- * this function can make several assumptions about the state of things +- * that allow for significant optimizations versus the standard function. +- * As a result we can do things like drop a frag and maintain an accurate +- * truesize for the skb. +- */ +-static void igb_pull_tail(struct igb_ring *rx_ring, +- union e1000_adv_rx_desc *rx_desc, +- struct sk_buff *skb) +-{ +- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; +- unsigned char *va; +- unsigned int pull_len; ++ /* pull the header of the skb in */ ++ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length)); + +- /* it is valid to use page_address instead of kmap since we are +- * working with pages allocated out of the lomem pool per +- * alloc_page(GFP_ATOMIC) +- */ +- va = skb_frag_address(frag); ++ /* clear skb reference in buffer info structure */ ++ rx_buffer->skb = NULL; + +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { +- /* retrieve timestamp from buffer */ +- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); ++ cleaned_count++; + +- /* update pointers to remove timestamp header */ +- skb_frag_size_sub(frag, IGB_TS_HDR_LEN); +- frag->page_offset += IGB_TS_HDR_LEN; +- skb->data_len -= IGB_TS_HDR_LEN; +- skb->len -= IGB_TS_HDR_LEN; ++ BUG_ON(igb_is_non_eop(rx_ring, rx_desc)); + +- /* move va to start of packet data */ +- va += IGB_TS_HDR_LEN; +- } ++ dma_unmap_single(rx_ring->dev, rx_buffer->dma, ++ rx_ring->rx_buffer_len, ++ DMA_FROM_DEVICE); ++ rx_buffer->dma = 0; + +- /* we need the header to contain the greater of either ETH_HLEN or +- * 60 bytes if the skb->len is less than 60 for skb_pad. 
+- */ +- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); ++ if (igb_test_staterr(rx_desc, ++ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { ++ dev_kfree_skb_any(skb); ++ continue; ++ } + +- /* align pull length to size of long to optimize memcpy performance */ +- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); ++ total_bytes += skb->len; + +- /* update all of the pointers */ +- skb_frag_size_sub(frag, pull_len); +- frag->page_offset += pull_len; +- skb->data_len -= pull_len; +- skb->tail += pull_len; ++ /* populate checksum, timestamp, VLAN, and protocol */ ++ igb_process_skb_fields(rx_ring, rx_desc, skb); ++ ++#ifndef IGB_NO_LRO ++ if (igb_can_lro(rx_ring, rx_desc, skb)) ++ igb_lro_receive(q_vector, skb); ++ else ++#endif ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, skb); ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++ ++#ifndef NETIF_F_GRO ++ netdev_ring(rx_ring)->last_rx = jiffies; ++ ++#endif ++ /* update budget accounting */ ++ total_packets++; ++ } while (likely(total_packets < budget)); ++ ++ rx_ring->rx_stats.packets += total_packets; ++ rx_ring->rx_stats.bytes += total_bytes; ++ q_vector->rx.total_packets += total_packets; ++ q_vector->rx.total_bytes += total_bytes; ++ ++ if (cleaned_count) ++ igb_alloc_rx_buffers(rx_ring, cleaned_count); ++ ++#ifndef IGB_NO_LRO ++ igb_lro_flush_all(q_vector); ++ ++#endif /* IGB_NO_LRO */ ++ return (total_packets < budget); + } ++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + + /** +- * igb_cleanup_headers - Correct corrupted or empty headers +- * @rx_ring: rx descriptor ring packet is being transacted on +- * @rx_desc: pointer to the EOP Rx descriptor +- * @skb: pointer to current skb being fixed ++ * igb_cleanup_headers - Correct corrupted or empty headers ++ * @rx_ring: rx descriptor ring packet is being transacted on ++ * @rx_desc: pointer to the EOP Rx descriptor ++ * @skb: pointer to current skb being fixed + * +- * Address the case where we are pulling data in on pages only +- * and as such no data is present in the skb header. ++ * Address the case where we are pulling data in on pages only ++ * and as such no data is present in the skb header. + * +- * In addition if skb is not at least 60 bytes we need to pad it so that +- * it is large enough to qualify as a valid Ethernet frame. ++ * In addition if skb is not at least 60 bytes we need to pad it so that ++ * it is large enough to qualify as a valid Ethernet frame. + * +- * Returns true if an error was encountered and skb was freed. ++ * Returns true if an error was encountered and skb was freed. 
+ **/ + static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { ++ + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; ++ + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + +- /* place header in linear portion of buffer */ +- if (skb_is_nonlinear(skb)) +- igb_pull_tail(rx_ring, rx_desc, skb); +- + /* if skb_pad returns an error the skb was freed */ + if (unlikely(skb->len < 60)) { + int pad_len = 60 - skb->len; +@@ -6962,56 +8336,15 @@ + return false; + } + +-/** +- * igb_process_skb_fields - Populate skb header fields from Rx descriptor +- * @rx_ring: rx descriptor ring packet is being transacted on +- * @rx_desc: pointer to the EOP Rx descriptor +- * @skb: pointer to current skb being populated +- * +- * This function checks the ring, descriptor, and packet information in +- * order to populate the hash, checksum, VLAN, timestamp, protocol, and +- * other fields within the skb. +- **/ +-static void igb_process_skb_fields(struct igb_ring *rx_ring, +- union e1000_adv_rx_desc *rx_desc, +- struct sk_buff *skb) +-{ +- struct net_device *dev = rx_ring->netdev; +- +- igb_rx_hash(rx_ring, rx_desc, skb); +- +- igb_rx_checksum(rx_ring, rx_desc, skb); +- +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && +- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) +- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); +- +- if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && +- igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { +- u16 vid; +- +- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && +- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) +- vid = be16_to_cpu(rx_desc->wb.upper.vlan); +- else +- vid = le16_to_cpu(rx_desc->wb.upper.vlan); +- +- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); +- } +- +- skb_record_rx_queue(skb, rx_ring->queue_index); +- +- skb->protocol = eth_type_trans(skb, rx_ring->netdev); +-} +- +-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ++/* igb_clean_rx_irq -- * packet split */ ++static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) + { + struct igb_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); + +- while (likely(total_packets < budget)) { ++ do { + union e1000_adv_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ +@@ -7025,7 +8358,8 @@ + if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) + break; + +- /* This memory barrier is needed to keep us from reading ++ /* ++ * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ +@@ -7056,31 +8390,89 @@ + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + +- napi_gro_receive(&q_vector->napi, skb); ++#ifndef IGB_NO_LRO ++ if (igb_can_lro(rx_ring, rx_desc, skb)) ++ igb_lro_receive(q_vector, skb); ++ else ++#endif ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, skb); ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++#ifndef NETIF_F_GRO ++ ++ netdev_ring(rx_ring)->last_rx = jiffies; ++#endif + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; +- } ++ } while (likely(total_packets < budget)); + + /* place incomplete 
frames back on ring for completion */ + rx_ring->skb = skb; + +- u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; +- u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + +- return total_packets < budget; ++#ifndef IGB_NO_LRO ++ igb_lro_flush_all(q_vector); ++ ++#endif /* IGB_NO_LRO */ ++ return (total_packets < budget); ++} ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, ++ struct igb_rx_buffer *bi) ++{ ++ struct sk_buff *skb = bi->skb; ++ dma_addr_t dma = bi->dma; ++ ++ if (dma) ++ return true; ++ ++ if (likely(!skb)) { ++ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), ++ rx_ring->rx_buffer_len); ++ bi->skb = skb; ++ if (!skb) { ++ rx_ring->rx_stats.alloc_failed++; ++ return false; ++ } ++ ++ /* initialize skb for ring */ ++ skb_record_rx_queue(skb, ring_queue_index(rx_ring)); ++ } ++ ++ dma = dma_map_single(rx_ring->dev, skb->data, ++ rx_ring->rx_buffer_len, DMA_FROM_DEVICE); ++ ++ /* if mapping failed free memory back to system since ++ * there isn't much point in holding memory we can't use ++ */ ++ if (dma_mapping_error(rx_ring->dev, dma)) { ++ dev_kfree_skb_any(skb); ++ bi->skb = NULL; ++ ++ rx_ring->rx_stats.alloc_failed++; ++ return false; ++ } ++ ++ bi->dma = dma; ++ return true; + } + ++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) + { +@@ -7092,7 +8484,7 @@ + return true; + + /* alloc new page for storage */ +- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); ++ page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; +@@ -7101,7 +8493,8 @@ + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + +- /* if mapping failed free memory back to system since ++ /* ++ * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { +@@ -7118,9 +8511,10 @@ + return true; + } + ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + /** +- * igb_alloc_rx_buffers - Replace used receive buffers; packet split +- * @adapter: address of board private structure ++ * igb_alloc_rx_buffers - Replace used receive buffers; packet split ++ * @adapter: address of board private structure + **/ + void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) + { +@@ -7137,13 +8531,22 @@ + i -= rx_ring->count; + + do { ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ if (!igb_alloc_mapped_skb(rx_ring, bi)) ++#else + if (!igb_alloc_mapped_page(rx_ring, bi)) ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + break; + +- /* Refresh the desc even if buffer_addrs didn't change ++ /* ++ * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); ++#else + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); ++#endif + + rx_desc++; + bi++; +@@ -7166,10 +8569,13 @@ + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + +- /* Force memory writes to complete before letting h/w ++#endif ++ /* ++ * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). +@@ -7179,6 +8585,7 @@ + } + } + ++#ifdef SIOCGMIIPHY + /** + * igb_mii_ioctl - + * @netdev: +@@ -7198,17 +8605,20 @@ + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: +- if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, +- &data->val_out)) ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, ++ &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } +- return 0; ++ return E1000_SUCCESS; + } + ++#endif + /** + * igb_ioctl - + * @netdev: +@@ -7218,156 +8628,295 @@ + static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) + { + switch (cmd) { ++#ifdef SIOCGMIIPHY + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); ++#endif ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); ++#endif + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); ++#endif /* HAVE_PTP_1588_CLOCK */ ++#ifdef ETHTOOL_OPS_COMPAT ++ case SIOCETHTOOL: ++ return ethtool_ioctl(ifr); ++#endif + default: + return -EOPNOTSUPP; + } + } + +-void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) ++void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); + } + +-void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) ++void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); + } + +-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) ++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; ++ u16 cap_offset; + +- if (pcie_capability_read_word(adapter->pdev, reg, value)) ++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); ++ if (!cap_offset) + return -E1000_ERR_CONFIG; + +- return 0; ++ pci_read_config_word(adapter->pdev, cap_offset + reg, value); ++ ++ return E1000_SUCCESS; + } + +-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) ++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; ++ u16 cap_offset; + +- if (pcie_capability_write_word(adapter->pdev, reg, *value)) ++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); ++ if (!cap_offset) + return -E1000_ERR_CONFIG; + +- return 0; ++ pci_write_config_word(adapter->pdev, cap_offset + reg, *value); ++ ++ return E1000_SUCCESS; + } + +-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) ++#ifdef HAVE_VLAN_RX_REGISTER ++static void igb_vlan_mode(struct net_device *netdev, struct vlan_group 
*vlgrp) ++#else ++void igb_vlan_mode(struct net_device *netdev, u32 features) ++#endif /* HAVE_VLAN_RX_REGISTER */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; +- bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); ++ bool enable; ++ int i; ++#ifdef HAVE_VLAN_RX_REGISTER ++ enable = !!vlgrp; ++ igb_irq_disable(adapter); ++ ++ adapter->vlgrp = vlgrp; ++ ++ if (!test_bit(__IGB_DOWN, &adapter->state)) ++ igb_irq_enable(adapter); ++#else ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); ++#else ++ enable = !!(features & NETIF_F_HW_VLAN_RX); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ + + if (enable) { + /* enable VLAN tag insert/strip */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_VME; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Disable CFI check */ +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ } ++ ++#ifndef CONFIG_IGB_VMDQ_NETDEV ++ for (i = 0; i < adapter->vmdq_pools; i++) { ++ igb_set_vf_vlan_strip(adapter, ++ adapter->vfs_allocated_count + i, ++ enable); ++ } ++ ++#else ++ igb_set_vf_vlan_strip(adapter, ++ adapter->vfs_allocated_count, ++ enable); ++ ++ for (i = 1; i < adapter->vmdq_pools; i++) { ++#ifdef HAVE_VLAN_RX_REGISTER ++ struct igb_vmdq_adapter *vadapter; ++ ++ vadapter = netdev_priv(adapter->vmdq_netdev[i-1]); ++ ++ enable = !!vadapter->vlgrp; ++#else ++ struct net_device *vnetdev; ++ ++ vnetdev = adapter->vmdq_netdev[i-1]; ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX); ++#else ++ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ ++ igb_set_vf_vlan_strip(adapter, ++ adapter->vfs_allocated_count + i, ++ enable); + } + ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ + igb_rlpml_set(adapter); + } + ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + static int igb_vlan_rx_add_vid(struct net_device *netdev, +- __be16 proto, u16 vid) ++ __always_unused __be16 proto, u16 vid) ++#else ++static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#else ++static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + { + struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* attempt to add filter to vlvf array */ +- igb_vlvf_set(adapter, vid, true, pf_id); ++ igb_vlvf_set(adapter, vid, TRUE, pf_id); + + /* add the filter since PF can receive vlans w/o entry in vlvf */ +- igb_vfta_set(hw, vid, true); ++ igb_vfta_set(adapter, vid, TRUE); ++#ifndef HAVE_NETDEV_VLAN_FEATURES + +- set_bit(vid, adapter->active_vlans); ++ /* Copy feature flags from netdev to the vlan netdev for this vid. ++ * This allows things like TSO to bubble down to our vlan device. ++ * There is no need to update netdev for vlan 0 (DCB), since it ++ * wouldn't has v_netdev. 
++ */ ++ if (adapter->vlgrp) { ++ struct vlan_group *vlgrp = adapter->vlgrp; ++ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + ++ if (v_netdev) { ++ v_netdev->features |= netdev->features; ++ vlan_group_set_device(vlgrp, vid, v_netdev); ++ } ++ } ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++#ifndef HAVE_VLAN_RX_REGISTER ++ ++ set_bit(vid, adapter->active_vlans); ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } + ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + static int igb_vlan_rx_kill_vid(struct net_device *netdev, +- __be16 proto, u16 vid) ++ __always_unused __be16 proto, u16 vid) ++#else ++static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#else ++static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + { + struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + s32 err; + ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_irq_disable(adapter); ++ ++ vlan_group_set_device(adapter->vlgrp, vid, NULL); ++ ++ if (!test_bit(__IGB_DOWN, &adapter->state)) ++ igb_irq_enable(adapter); ++ ++#endif /* HAVE_VLAN_RX_REGISTER */ + /* remove vlan from VLVF table array */ +- err = igb_vlvf_set(adapter, vid, false, pf_id); ++ err = igb_vlvf_set(adapter, vid, FALSE, pf_id); + + /* if vid was not present in VLVF just remove it from table */ + if (err) +- igb_vfta_set(hw, vid, false); ++ igb_vfta_set(adapter, vid, FALSE); ++#ifndef HAVE_VLAN_RX_REGISTER + + clear_bit(vid, adapter->active_vlans); +- ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } + + static void igb_restore_vlan(struct igb_adapter *adapter) + { ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_vlan_mode(adapter->netdev, adapter->vlgrp); ++ ++ if (adapter->vlgrp) { ++ u16 vid; ++ ++ for (vid = 0; vid < VLAN_N_VID; vid++) { ++ if (!vlan_group_get_device(adapter->vlgrp, vid)) ++ continue; ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ igb_vlan_rx_add_vid(adapter->netdev, ++ htons(ETH_P_8021Q), vid); ++#else ++ igb_vlan_rx_add_vid(adapter->netdev, vid); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++ } ++ } ++#else + u16 vid; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) +- igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ igb_vlan_rx_add_vid(adapter->netdev, ++ htons(ETH_P_8021Q), vid); ++#else ++ igb_vlan_rx_add_vid(adapter->netdev, vid); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ + } + +-int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) ++int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) + { + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + +- /* Make sure dplx is at most 1 bit and lsb of speed is not set +- * for the switch() below to work +- */ +- if ((spd & 1) || (dplx & ~1)) +- goto err_inval; +- +- /* Fiber NIC's only allow 1000 gbps Full duplex +- * and 100Mbps Full duplex for 100baseFx sfp ++ /* SerDes device's does not support 10Mbps Full/duplex ++ * and 100Mbps Half duplex + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { +- switch (spd + dplx) { ++ switch (spddplx) { + case SPEED_10 + 
DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: +- goto err_inval; ++ dev_err(pci_dev_to_dev(pdev), ++ "Unsupported Speed/Duplex configuration\n"); ++ return -EINVAL; + default: + break; + } + } + +- switch (spd + dplx) { ++ switch (spddplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; +@@ -7386,17 +8935,52 @@ + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: +- goto err_inval; ++ dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n"); ++ return -EINVAL; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; ++} + +-err_inval: +- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); +- return -EINVAL; ++/* This function should only be called if RTNL lock is held */ ++int igb_setup_queues(struct igb_adapter *adapter) ++{ ++ struct net_device *dev = adapter->netdev; ++ int err; ++ ++ if (adapter->rss_queues == adapter->num_rx_queues) { ++ if (adapter->tss_queues) { ++ if (adapter->tss_queues == adapter->num_tx_queues) ++ return 0; ++ } else if (adapter->vfs_allocated_count || ++ adapter->rss_queues == adapter->num_tx_queues) { ++ return 0; ++ } ++ } ++ ++ /* ++ * Hardware has to reinitialize queues and interrupts to ++ * match the new configuration. Unfortunately, the hardware ++ * is not flexible enough to do this dynamically. ++ */ ++ if (netif_running(dev)) ++ igb_close(dev); ++ ++ igb_clear_interrupt_scheme(adapter); ++ ++ err = igb_init_interrupt_scheme(adapter, true); ++ if (err) { ++ dev_close(dev); ++ return err; ++ } ++ ++ if (netif_running(dev)) ++ err = igb_open(dev); ++ ++ return err; + } + + static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, +@@ -7413,6 +8997,10 @@ + + netif_device_detach(netdev); + ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if (status & E1000_STATUS_LU) ++ wufc &= ~E1000_WUFC_LNKC; ++ + if (netif_running(netdev)) + __igb_close(netdev, true); + +@@ -7424,37 +9012,31 @@ + return retval; + #endif + +- status = rd32(E1000_STATUS); +- if (status & E1000_STATUS_LU) +- wufc &= ~E1000_WUFC_LNKC; +- + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } + +- ctrl = rd32(E1000_CTRL); +- /* advertise wake from D3Cold */ +- #define E1000_CTRL_ADVD3WUC 0x00100000 ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ +- igb_disable_pcie_master(hw); ++ e1000_disable_pcie_master(hw); + +- wr32(E1000_WUC, E1000_WUC_PME_EN); +- wr32(E1000_WUFC, wufc); ++ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN); ++ E1000_WRITE_REG(hw, E1000_WUFC, wufc); + } else { +- wr32(E1000_WUC, 0); +- wr32(E1000_WUFC, 0); ++ E1000_WRITE_REG(hw, E1000_WUC, 0); ++ E1000_WRITE_REG(hw, E1000_WUFC, 0); + } + + *enable_wake = wufc || adapter->en_mng_pt; +@@ -7474,12 +9056,17 @@ + } + + #ifdef CONFIG_PM +-#ifdef CONFIG_PM_SLEEP ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + static int igb_suspend(struct device *dev) ++#else ++static int igb_suspend(struct pci_dev *pdev, pm_message_t state) ++#endif 
/* HAVE_SYSTEM_SLEEP_PM_OPS */ + { ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS ++ struct pci_dev *pdev = to_pci_dev(dev); ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + int retval; + bool wake; +- struct pci_dev *pdev = to_pci_dev(dev); + + retval = __igb_shutdown(pdev, &wake, 0); + if (retval) +@@ -7494,11 +9081,16 @@ + + return 0; + } +-#endif /* CONFIG_PM_SLEEP */ + ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + static int igb_resume(struct device *dev) ++#else ++static int igb_resume(struct pci_dev *pdev) ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + { ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + struct pci_dev *pdev = to_pci_dev(dev); ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +@@ -7510,7 +9102,7 @@ + + err = pci_enable_device_mem(pdev); + if (err) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "igb: Cannot enable PCI device from suspend\n"); + return err; + } +@@ -7520,18 +9112,18 @@ + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { +- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); ++ dev_err(pci_dev_to_dev(pdev), ++ "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + +- /* let the f/w know that the h/w is now under the control of the +- * driver. ++ /* let the f/w know that the h/w is now under the control of the driver. + */ + igb_get_hw_control(adapter); + +- wr32(E1000_WUS, ~0); ++ E1000_WRITE_REG(hw, E1000_WUS, ~0); + + if (netdev->flags & IFF_UP) { + rtnl_lock(); +@@ -7542,10 +9134,12 @@ + } + + netif_device_attach(netdev); ++ + return 0; + } + + #ifdef CONFIG_PM_RUNTIME ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + static int igb_runtime_idle(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); +@@ -7582,91 +9176,51 @@ + { + return igb_resume(dev); + } ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + #endif /* CONFIG_PM_RUNTIME */ +-#endif ++#endif /* CONFIG_PM */ + +-static void igb_shutdown(struct pci_dev *pdev) ++#ifdef USE_REBOOT_NOTIFIER ++/* only want to do this for 2.4 kernels? 
*/ ++static int igb_notify_reboot(struct notifier_block *nb, unsigned long event, ++ void *p) + { ++ struct pci_dev *pdev = NULL; + bool wake; + +- __igb_shutdown(pdev, &wake, 0); +- +- if (system_state == SYSTEM_POWER_OFF) { +- pci_wake_from_d3(pdev, wake); +- pci_set_power_state(pdev, PCI_D3hot); ++ switch (event) { ++ case SYS_DOWN: ++ case SYS_HALT: ++ case SYS_POWER_OFF: ++ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { ++ if (pci_dev_driver(pdev) == &igb_driver) { ++ __igb_shutdown(pdev, &wake, 0); ++ if (event == SYS_POWER_OFF) { ++ pci_wake_from_d3(pdev, wake); ++ pci_set_power_state(pdev, PCI_D3hot); ++ } ++ } ++ } + } ++ return NOTIFY_DONE; + } +- +-#ifdef CONFIG_PCI_IOV +-static int igb_sriov_reinit(struct pci_dev *dev) ++#else ++static void igb_shutdown(struct pci_dev *pdev) + { +- struct net_device *netdev = pci_get_drvdata(dev); +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct pci_dev *pdev = adapter->pdev; ++ bool wake = false; + +- rtnl_lock(); +- +- if (netif_running(netdev)) +- igb_close(netdev); +- else +- igb_reset(adapter); +- +- igb_clear_interrupt_scheme(adapter); +- +- igb_init_queue_configuration(adapter); ++ __igb_shutdown(pdev, &wake, 0); + +- if (igb_init_interrupt_scheme(adapter, true)) { +- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); +- return -ENOMEM; ++ if (system_state == SYSTEM_POWER_OFF) { ++ pci_wake_from_d3(pdev, wake); ++ pci_set_power_state(pdev, PCI_D3hot); + } +- +- if (netif_running(netdev)) +- igb_open(netdev); +- +- rtnl_unlock(); +- +- return 0; +-} +- +-static int igb_pci_disable_sriov(struct pci_dev *dev) +-{ +- int err = igb_disable_sriov(dev); +- +- if (!err) +- err = igb_sriov_reinit(dev); +- +- return err; +-} +- +-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +-{ +- int err = igb_enable_sriov(dev, num_vfs); +- +- if (err) +- goto out; +- +- err = igb_sriov_reinit(dev); +- if (!err) +- return num_vfs; +- +-out: +- return err; +-} +- +-#endif +-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +-{ +-#ifdef CONFIG_PCI_IOV +- if (num_vfs == 0) +- return igb_pci_disable_sriov(dev); +- else +- return igb_pci_enable_sriov(dev, num_vfs); +-#endif +- return 0; + } ++#endif /* USE_REBOOT_NOTIFIER */ + + #ifdef CONFIG_NET_POLL_CONTROLLER +-/* Polling 'interrupt' - used by things like netconsole to send skbs ++/* ++ * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +@@ -7679,8 +9233,8 @@ + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; +- if (adapter->flags & IGB_FLAG_HAS_MSIX) +- wr32(E1000_EIMC, q_vector->eims_value); ++ if (adapter->msix_entries) ++ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value); + else + igb_irq_disable(adapter); + napi_schedule(&q_vector->napi); +@@ -7688,20 +9242,98 @@ + } + #endif /* CONFIG_NET_POLL_CONTROLLER */ + ++#ifdef HAVE_PCI_ERS ++#define E1000_DEV_ID_82576_VF 0x10CA + /** +- * igb_io_error_detected - called when PCI error is detected +- * @pdev: Pointer to PCI device +- * @state: The current pci connection state ++ * igb_io_error_detected - called when PCI error is detected ++ * @pdev: Pointer to PCI device ++ * @state: The current pci connection state + * +- * This function is called after a PCI bus error affecting +- * this device has been detected. +- **/ ++ * This function is called after a PCI bus error affecting ++ * this device has been detected. 
++ */ + static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + ++#ifdef CONFIG_PCI_IOV ++ struct pci_dev *bdev, *vfdev; ++ u32 dw0, dw1, dw2, dw3; ++ int vf, pos; ++ u16 req_id, pf_func; ++ ++ if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA)) ++ goto skip_bad_vf_detection; ++ ++ bdev = pdev->bus->self; ++ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) ++ bdev = bdev->bus->self; ++ ++ if (!bdev) ++ goto skip_bad_vf_detection; ++ ++ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); ++ if (!pos) ++ goto skip_bad_vf_detection; ++ ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); ++ ++ req_id = dw1 >> 16; ++ /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */ ++ if (!(req_id & 0x0080)) ++ goto skip_bad_vf_detection; ++ ++ pf_func = req_id & 0x01; ++ if ((pf_func & 1) == (pdev->devfn & 1)) { ++ ++ vf = (req_id & 0x7F) >> 1; ++ dev_err(pci_dev_to_dev(pdev), ++ "VF %d has caused a PCIe error\n", vf); ++ dev_err(pci_dev_to_dev(pdev), ++ "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2:\n%8.8x\tdw3: %8.8x\n", ++ dw0, dw1, dw2, dw3); ++ ++ /* Find the pci device of the offending VF */ ++ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, ++ E1000_DEV_ID_82576_VF, NULL); ++ while (vfdev) { ++ if (vfdev->devfn == (req_id & 0xFF)) ++ break; ++ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, ++ E1000_DEV_ID_82576_VF, vfdev); ++ } ++ /* ++ * There's a slim chance the VF could have been hot plugged, ++ * so if it is no longer present we don't need to issue the ++ * VFLR. Just clean up the AER in that case. ++ */ ++ if (vfdev) { ++ dev_err(pci_dev_to_dev(pdev), ++ "Issuing VFLR to VF %d\n", vf); ++ pci_write_config_dword(vfdev, 0xA8, 0x00008000); ++ } ++ ++ pci_cleanup_aer_uncorrect_error_status(pdev); ++ } ++ ++ /* ++ * Even though the error may have occurred on the other port ++ * we still need to increment the vf error reference count for ++ * both ports because the I/O resume function will be called ++ * for both of them. ++ */ ++ adapter->vferr_refcount++; ++ ++ return PCI_ERS_RESULT_RECOVERED; ++ ++skip_bad_vf_detection: ++#endif /* CONFIG_PCI_IOV */ ++ + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) +@@ -7716,22 +9348,21 @@ + } + + /** +- * igb_io_slot_reset - called after the pci bus has been reset. +- * @pdev: Pointer to PCI device ++ * igb_io_slot_reset - called after the pci bus has been reset. ++ * @pdev: Pointer to PCI device + * +- * Restart the card from scratch, as if from a cold-boot. Implementation +- * resembles the first-half of the igb_resume routine. +- **/ ++ * Restart the card from scratch, as if from a cold-boot. Implementation ++ * resembles the first-half of the igb_resume routine. 
++ */ + static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; +- int err; + + if (pci_enable_device_mem(pdev)) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { +@@ -7742,77 +9373,91 @@ + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + +- igb_reset(adapter); +- wr32(E1000_WUS, ~0); ++ schedule_work(&adapter->reset_task); ++ E1000_WRITE_REG(hw, E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + +- err = pci_cleanup_aer_uncorrect_error_status(pdev); +- if (err) { +- dev_err(&pdev->dev, +- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", +- err); +- /* non-fatal, continue */ +- } ++ pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; + } + + /** +- * igb_io_resume - called when traffic can start flowing again. +- * @pdev: Pointer to PCI device ++ * igb_io_resume - called when traffic can start flowing again. ++ * @pdev: Pointer to PCI device + * +- * This callback is called when the error recovery driver tells us that +- * its OK to resume normal operation. Implementation resembles the +- * second-half of the igb_resume routine. ++ * This callback is called when the error recovery driver tells us that ++ * its OK to resume normal operation. Implementation resembles the ++ * second-half of the igb_resume routine. + */ + static void igb_io_resume(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + ++ if (adapter->vferr_refcount) { ++ dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n"); ++ adapter->vferr_refcount--; ++ return; ++ } ++ + if (netif_running(netdev)) { + if (igb_up(adapter)) { +- dev_err(&pdev->dev, "igb_up failed after reset\n"); ++ dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + +- /* let the f/w know that the h/w is now under the control of the +- * driver. ++ /* let the f/w know that the h/w is now under the control of the driver. + */ + igb_get_hw_control(adapter); + } + +-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, +- u8 qsel) ++#endif /* HAVE_PCI_ERS */ ++ ++int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) + { +- u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; ++ int i; + +- /* HW expects these in little endian so we reverse the byte order +- * from network order (big endian) to little endian +- */ +- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | +- ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); +- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); +- +- /* Indicate to hardware the Address is Valid. 
*/ +- rar_high |= E1000_RAH_AV; +- +- if (hw->mac.type == e1000_82575) +- rar_high |= E1000_RAH_POOL_1 * qsel; +- else +- rar_high |= E1000_RAH_POOL_1 << qsel; ++ if (is_zero_ether_addr(addr)) ++ return 0; + +- wr32(E1000_RAL(index), rar_low); +- wrfl(); +- wr32(E1000_RAH(index), rar_high); +- wrfl(); ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) ++ continue; ++ adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED | ++ IGB_MAC_STATE_IN_USE); ++ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); ++ adapter->mac_table[i].queue = queue; ++ igb_sync_mac_table(adapter); ++ return 0; ++ } ++ return -ENOMEM; + } ++int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) ++{ ++ /* search table for addr, if found, set to 0 and sync */ ++ int i; ++ struct e1000_hw *hw = &adapter->hw; + ++ if (is_zero_ether_addr(addr)) ++ return 0; ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (!ether_addr_equal(addr, adapter->mac_table[i].addr) && ++ adapter->mac_table[i].queue == queue) { ++ adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED; ++ memset(adapter->mac_table[i].addr, 0, ETH_ALEN); ++ adapter->mac_table[i].queue = 0; ++ igb_sync_mac_table(adapter); ++ return 0; ++ } ++ } ++ return -ENOMEM; ++} + static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) + { +@@ -7829,15 +9474,17 @@ + return 0; + } + ++#ifdef IFLA_VF_MAX + static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++ + if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + return -EINVAL; + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, +- "Reload the VF driver to make this change effective."); ++ "Reload the VF driver to make this change effective.\n"); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); +@@ -7854,13 +9501,15 @@ + return 100; + case SPEED_1000: + return 1000; ++ case SPEED_2500: ++ return 2500; + default: + return 0; + } + } + + static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, +- int link_speed) ++ int link_speed) + { + int rf_dec, rf_int; + u32 bcnrc_val; +@@ -7869,23 +9518,23 @@ + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); +- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / +- tx_rate; ++ rf_dec = (rf_dec * (1<vf_rate_link_speed == 0) || +- (adapter->hw.mac.type != e1000_82576)) ++ (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); +@@ -7903,7 +9552,7 @@ + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, +- "Link speed has been changed. VF Transmit rate is disabled\n"); ++ "Link speed has been changed. 
VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { +@@ -7911,13 +9560,16 @@ + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, +- adapter->vf_data[i].tx_rate, +- actual_link_speed); ++ adapter->vf_data[i].tx_rate, actual_link_speed); + } + } + +-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, +- int min_tx_rate, int max_tx_rate) ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, ++ int max_tx_rate) ++#else ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +@@ -7926,105 +9578,137 @@ + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + +- if (min_tx_rate) +- return -EINVAL; +- + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || +- (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || +- (max_tx_rate < 0) || +- (max_tx_rate > actual_link_speed)) ++ (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) || ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++ (max_tx_rate < 0) || (max_tx_rate > actual_link_speed)) ++#else ++ (tx_rate < 0) || (tx_rate > actual_link_speed)) ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); ++#else ++ adapter->vf_data[vf].tx_rate = (u16)tx_rate; ++ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + + return 0; + } + +-static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, +- bool setting) +-{ +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- u32 reg_val, reg_offset; +- +- if (!adapter->vfs_allocated_count) +- return -EOPNOTSUPP; +- +- if (vf >= adapter->vfs_allocated_count) +- return -EINVAL; +- +- reg_offset = (hw->mac.type == e1000_82576) ? 
E1000_DTXSWC : E1000_TXSWC; +- reg_val = rd32(reg_offset); +- if (setting) +- reg_val |= ((1 << vf) | +- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); +- else +- reg_val &= ~((1 << vf) | +- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); +- wr32(reg_offset, reg_val); +- +- adapter->vf_data[vf].spoofchk_enabled = setting; +- return 0; +-} +- + static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++ + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; ++#else ++ ivi->tx_rate = adapter->vf_data[vf].tx_rate; ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; ++#endif + return 0; + } +- ++#endif + static void igb_vmm_control(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; ++ int count; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: +- case e1000_i210: +- case e1000_i211: +- case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ +- reg = rd32(E1000_DTXCTL); +- reg |= E1000_DTXCTL_VLAN_ADDED; +- wr32(E1000_DTXCTL, reg); ++ reg = E1000_READ_REG(hw, E1000_DTXCTL); ++ reg |= (E1000_DTXCTL_VLAN_ADDED | ++ E1000_DTXCTL_SPOOF_INT); ++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); + /* Fall through */ + case e1000_82580: + /* enable replication vlan tag stripping */ +- reg = rd32(E1000_RPLOLR); ++ reg = E1000_READ_REG(hw, E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; +- wr32(E1000_RPLOLR, reg); ++ E1000_WRITE_REG(hw, E1000_RPLOLR, reg); + /* Fall through */ + case e1000_i350: ++ case e1000_i354: + /* none of the above registers are supported by i350 */ + break; + } + +- if (adapter->vfs_allocated_count) { +- igb_vmdq_set_loopback_pf(hw, true); +- igb_vmdq_set_replication_pf(hw, true); +- igb_vmdq_set_anti_spoofing_pf(hw, true, +- adapter->vfs_allocated_count); +- } else { +- igb_vmdq_set_loopback_pf(hw, false); +- igb_vmdq_set_replication_pf(hw, false); +- } ++ /* Enable Malicious Driver Detection */ ++ if ((adapter->vfs_allocated_count) && ++ (adapter->mdd)) { ++ if (hw->mac.type == e1000_i350) ++ igb_enable_mdd(adapter); ++ } ++ ++ /* enable replication and loopback support */ ++ count = adapter->vfs_allocated_count || adapter->vmdq_pools; ++ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count) ++ e1000_vmdq_set_loopback_pf(hw, 1); ++ e1000_vmdq_set_anti_spoofing_pf(hw, ++ adapter->vfs_allocated_count || adapter->vmdq_pools, ++ adapter->vfs_allocated_count); ++ e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count || ++ adapter->vmdq_pools); ++} ++ ++static void igb_init_fw(struct igb_adapter *adapter) ++{ ++ struct e1000_fw_drv_info fw_cmd; ++ struct e1000_hw *hw = &adapter->hw; ++ int i; ++ u16 mask; ++ ++ if (hw->mac.type == e1000_i210) ++ mask = E1000_SWFW_EEP_SM; ++ else ++ mask = E1000_SWFW_PHY0_SM; ++ /* i211 parts do not support this feature */ ++ if (hw->mac.type == e1000_i211) ++ hw->mac.arc_subsystem_valid = false; ++ ++ if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) { ++ for (i = 0; i <= FW_MAX_RETRIES; i++) { ++ E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI); ++ 
fw_cmd.hdr.cmd = FW_CMD_DRV_INFO; ++ fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN; ++ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED; ++ fw_cmd.port_num = hw->bus.func; ++ fw_cmd.drv_version = FW_FAMILY_DRV_VER; ++ fw_cmd.hdr.checksum = 0; ++ fw_cmd.hdr.checksum = ++ e1000_calculate_checksum((u8 *)&fw_cmd, ++ (FW_HDR_LEN + ++ fw_cmd.hdr.buf_len)); ++ e1000_host_interface_command(hw, (u8 *)&fw_cmd, ++ sizeof(fw_cmd)); ++ if (fw_cmd.hdr.cmd_or_resp.ret_status ++ == FW_STATUS_SUCCESS) ++ break; ++ } ++ } else ++ dev_warn(pci_dev_to_dev(adapter->pdev), ++ "Unable to get semaphore, firmware init failed.\n"); ++ hw->mac.ops.release_swfw_sync(hw, mask); + } + + static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +@@ -8032,34 +9716,40 @@ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; ++ u32 status; ++ ++ if (hw->mac.type == e1000_i211) ++ return; + + if (hw->mac.type > e1000_82580) { +- if (adapter->flags & IGB_FLAG_DMAC) { ++ if (adapter->dmac != IGB_DMAC_DISABLE) { + u32 reg; + +- /* force threshold to 0. */ +- wr32(E1000_DMCTXTH, 0); ++ /* force threshold to 0. */ ++ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); + +- /* DMA Coalescing high water mark needs to be greater ++ /* ++ * DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * pba - adapter->max_frame_size / 16; + if (hwm < 64 * (pba - 6)) + hwm = 64 * (pba - 6); +- reg = rd32(E1000_FCRTC); ++ reg = E1000_READ_REG(hw, E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); +- wr32(E1000_FCRTC, reg); ++ E1000_WRITE_REG(hw, E1000_FCRTC, reg); + +- /* Set the DMA Coalescing Rx threshold to PBA - 2 * max ++ /* ++ * Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
+ */ + dmac_thr = pba - adapter->max_frame_size / 512; + if (dmac_thr < pba - 10) + dmac_thr = pba - 10; +- reg = rd32(E1000_DMACR); ++ reg = E1000_READ_REG(hw, E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); +@@ -8067,47 +9757,84 @@ + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + +- /* watchdog timer= +-1000 usec in 32usec intervals */ +- reg |= (1000 >> 5); ++ /* Check if status is 2.5Gb backplane connection ++ * before configuration of watchdog timer, which is ++ * in msec values in 12.8usec intervals ++ * watchdog timer= msec values in 32usec intervals ++ * for non 2.5Gb connection ++ */ ++ if (hw->mac.type == e1000_i354) { ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if ((status & E1000_STATUS_2P5_SKU) && ++ (!(status & E1000_STATUS_2P5_SKU_OVER))) ++ reg |= ((adapter->dmac * 5) >> 6); ++ else ++ reg |= ((adapter->dmac) >> 5); ++ } else { ++ reg |= ((adapter->dmac) >> 5); ++ } + +- /* Disable BMC-to-OS Watchdog Enable */ ++ /* ++ * Disable BMC-to-OS Watchdog enable ++ * on devices that support OS-to-BMC ++ */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; ++ E1000_WRITE_REG(hw, E1000_DMACR, reg); + +- wr32(E1000_DMACR, reg); ++ /* no lower threshold to disable coalescing ++ * (smart fifb)-UTRESH=0 ++ */ ++ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); + +- /* no lower threshold to disable +- * coalescing(smart fifb)-UTRESH=0 ++ /* This sets the time to wait before requesting ++ * transition to low power state to number of usecs ++ * needed to receive 1 512 byte frame at gigabit ++ * line rate. On i350 device, time to make transition ++ * to Lx state is delayed by 4 usec with flush disable ++ * bit set to avoid losing mailbox interrupts + */ +- wr32(E1000_DMCRTRH, 0); ++ reg = E1000_READ_REG(hw, E1000_DMCTLX); ++ if (hw->mac.type == e1000_i350) ++ reg |= IGB_DMCTLX_DCFLUSH_DIS; + +- reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); ++ /* in 2.5Gb connection, TTLX unit is 0.4 usec ++ * which is 0x4*2 = 0xA. 
But delay is still 4 usec ++ */ ++ if (hw->mac.type == e1000_i354) { ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if ((status & E1000_STATUS_2P5_SKU) && ++ (!(status & E1000_STATUS_2P5_SKU_OVER))) ++ reg |= 0xA; ++ else ++ reg |= 0x4; ++ } else { ++ reg |= 0x4; ++ } + +- wr32(E1000_DMCTLX, reg); ++ E1000_WRITE_REG(hw, E1000_DMCTLX, reg); + +- /* free space in tx packet buffer to wake from +- * DMA coal +- */ +- wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - +- (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); ++ /* free space in tx pkt buffer to wake from DMA coal */ ++ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - ++ (IGB_TX_BUF_4096 + adapter->max_frame_size)) ++ >> 6); + +- /* make low power state decision controlled +- * by DMA coal +- */ +- reg = rd32(E1000_PCIEMISC); ++ /* low power state decision controlled by DMA coal */ ++ reg = E1000_READ_REG(hw, E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; +- wr32(E1000_PCIEMISC, reg); ++ E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { +- u32 reg = rd32(E1000_PCIEMISC); ++ u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); + +- wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); +- wr32(E1000_DMACR, 0); ++ E1000_WRITE_REG(hw, E1000_PCIEMISC, ++ reg & ~E1000_PCIEMISC_LX_DECISION); ++ E1000_WRITE_REG(hw, E1000_DMACR, 0); + } + } + +-/** +- * igb_read_i2c_byte - Reads 8 bit word over I2C ++#ifdef HAVE_I2C_SUPPORT ++/* igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address +@@ -8115,9 +9842,9 @@ + * + * Performs byte read operation over I2C interface at + * a specified device address. +- **/ ++ */ + s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 *data) ++ u8 dev_addr, u8 *data) + { + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; +@@ -8129,7 +9856,8 @@ + + swfw_mask = E1000_SWFW_PHY0_SM; + +- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) ++ != E1000_SUCCESS) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); +@@ -8139,12 +9867,11 @@ + return E1000_ERR_I2C; + else { + *data = status; +- return 0; ++ return E1000_SUCCESS; + } + } + +-/** +- * igb_write_i2c_byte - Writes 8 bit word over I2C ++/* igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address +@@ -8152,9 +9879,9 @@ + * + * Performs byte write operation over I2C interface at + * a specified device address. 
+- **/ ++ */ + s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 data) ++ u8 dev_addr, u8 data) + { + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; +@@ -8164,7 +9891,7 @@ + if (!this_client) + return E1000_ERR_I2C; + +- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) + return E1000_ERR_SWFW_SYNC; + status = i2c_smbus_write_byte_data(this_client, byte_offset, data); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); +@@ -8172,9 +9899,9 @@ + if (status) + return E1000_ERR_I2C; + else +- return 0; +- ++ return E1000_SUCCESS; + } ++#endif /* HAVE_I2C_SUPPORT */ + + int igb_reinit_queues(struct igb_adapter *adapter) + { +@@ -8197,4 +9924,5 @@ + + return err; + } ++ + /* igb_main.c */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_param.c b/drivers/net/ethernet/intel/igb/igb_param.c +--- a/drivers/net/ethernet/intel/igb/igb_param.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_param.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,872 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include ++ ++#include "igb.h" ++ ++/* This is the only thing that needs to be changed to adjust the ++ * maximum number of ports that the driver can manage. ++ */ ++ ++#define IGB_MAX_NIC 32 ++ ++#define OPTION_UNSET -1 ++#define OPTION_DISABLED 0 ++#define OPTION_ENABLED 1 ++#define MAX_NUM_LIST_OPTS 15 ++ ++/* All parameters are treated the same, as an integer array of values. ++ * This macro just reduces the need to repeat the same declaration code ++ * over and over (plus this helps to avoid typo bugs). ++ */ ++ ++#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET } ++#ifndef module_param_array ++/* Module Parameters are always initialized to -1, so that the driver ++ * can tell the difference between no user specified value or the ++ * user asking for the default value. ++ * The true default values are loaded in when igb_check_options is called. ++ * ++ * This is a GCC extension to ANSI C. ++ * See the item "Labeled Elements in Initializers" in the section ++ * "Extensions to the C Language Family" of the GCC documentation. 
++ */ ++ ++#define IGB_PARAM(X, desc) \ ++ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ ++ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \ ++ MODULE_PARM_DESC(X, desc); ++#else ++#define IGB_PARAM(X, desc) \ ++ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ ++ static unsigned int num_##X; \ ++ module_param_array_named(X, X, int, &num_##X, 0); \ ++ MODULE_PARM_DESC(X, desc); ++#endif ++ ++/* Interrupt Throttle Rate (interrupts/sec) ++ * ++ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) ++ */ ++IGB_PARAM(InterruptThrottleRate, ++ "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive"); ++#define DEFAULT_ITR 3 ++#define MAX_ITR 100000 ++/* #define MIN_ITR 120 */ ++#define MIN_ITR 0 ++/* IntMode (Interrupt Mode) ++ * ++ * Valid Range: 0 - 2 ++ * ++ * Default Value: 2 (MSI-X) ++ */ ++IGB_PARAM(IntMode, ++ "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2"); ++#define MAX_INTMODE IGB_INT_MODE_MSIX ++#define MIN_INTMODE IGB_INT_MODE_LEGACY ++ ++IGB_PARAM(Node, "set the starting node to allocate memory on, default -1"); ++ ++/* LLIPort (Low Latency Interrupt TCP Port) ++ * ++ * Valid Range: 0 - 65535 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IGB_PARAM(LLIPort, ++ "Low Latency Interrupt TCP Port (0-65535), default 0=off"); ++ ++#define DEFAULT_LLIPORT 0 ++#define MAX_LLIPORT 0xFFFF ++#define MIN_LLIPORT 0 ++ ++/* LLIPush (Low Latency Interrupt on TCP Push flag) ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off"); ++ ++#define DEFAULT_LLIPUSH 0 ++#define MAX_LLIPUSH 1 ++#define MIN_LLIPUSH 0 ++ ++/* LLISize (Low Latency Interrupt on Packet Size) ++ * ++ * Valid Range: 0 - 1500 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IGB_PARAM(LLISize, ++ "Low Latency Interrupt on Packet Size (0-1500), default 0=off"); ++ ++#define DEFAULT_LLISIZE 0 ++#define MAX_LLISIZE 1500 ++#define MIN_LLISIZE 0 ++ ++/* RSS (Enable RSS multiqueue receive) ++ * ++ * Valid Range: 0 - 8 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(RSS, ++ "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus"); ++ ++#define DEFAULT_RSS 1 ++#define MAX_RSS 8 ++#define MIN_RSS 0 ++ ++/* VMDQ (Enable VMDq multiqueue receive) ++ * ++ * Valid Range: 0 - 8 ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(VMDQ, ++ "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0"); ++ ++#define DEFAULT_VMDQ 0 ++#define MAX_VMDQ MAX_RSS ++#define MIN_VMDQ 0 ++ ++/* max_vfs (Enable SR-IOV VF devices) ++ * ++ * Valid Range: 0 - 7 ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(max_vfs, ++ "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0"); ++ ++#define DEFAULT_SRIOV 0 ++#define MAX_SRIOV 7 ++#define MIN_SRIOV 0 ++ ++/* MDD (Enable Malicious Driver Detection) ++ * ++ * Only available when SR-IOV is enabled - max_vfs is greater than 0 ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(MDD, ++ "Malicious Driver Detection (0/1), default 1 = enabled. Only available when max_vfs is greater than 0"); ++ ++#ifdef DEBUG ++ ++/* Disable Hardware Reset on Tx Hang ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 0 (disabled, i.e. 
h/w will reset) ++ */ ++IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang"); ++ ++/* Dump Transmit and Receive buffers ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request"); ++ ++#endif /* DEBUG */ ++ ++/* QueuePairs (Enable TX/RX queue pairs for interrupt handling) ++ * ++ * Valid Range: 0 - 1 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(QueuePairs, ++ "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on"); ++ ++#define DEFAULT_QUEUE_PAIRS 1 ++#define MAX_QUEUE_PAIRS 1 ++#define MIN_QUEUE_PAIRS 0 ++ ++/* Enable/disable EEE (a.k.a. IEEE802.3az) ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(EEE, ++ "Enable/disable on parts that support the feature"); ++ ++/* Enable/disable DMA Coalescing ++ * ++ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, ++ * 9000, 10000(msec), 250(usec), 500(usec) ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(DMAC, ++ "Disable or set latency for DMA Coalescing ((0=off, 1000-10000(msec), 250, 500 (usec))"); ++ ++#ifndef IGB_NO_LRO ++/* Enable/disable Large Receive Offload ++ * ++ * Valid Values: 0(off), 1(on) ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off"); ++ ++#endif ++struct igb_opt_list { ++ int i; ++ char *str; ++}; ++struct igb_option { ++ enum { enable_option, range_option, list_option } type; ++ const char *name; ++ const char *err; ++ int def; ++ union { ++ struct { /* range_option info */ ++ int min; ++ int max; ++ } r; ++ struct { /* list_option info */ ++ int nr; ++ struct igb_opt_list *p; ++ } l; ++ } arg; ++}; ++ ++static int igb_validate_option(unsigned int *value, ++ struct igb_option *opt, ++ struct igb_adapter *adapter) ++{ ++ if (*value == OPTION_UNSET) { ++ *value = opt->def; ++ return 0; ++ } ++ ++ switch (opt->type) { ++ case enable_option: ++ switch (*value) { ++ case OPTION_ENABLED: ++ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); ++ return 0; ++ case OPTION_DISABLED: ++ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); ++ return 0; ++ } ++ break; ++ case range_option: ++ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { ++ DPRINTK(PROBE, INFO, ++ "%s set to %d\n", opt->name, *value); ++ return 0; ++ } ++ break; ++ case list_option: { ++ int i; ++ struct igb_opt_list *ent; ++ ++ for (i = 0; i < opt->arg.l.nr; i++) { ++ ent = &opt->arg.l.p[i]; ++ if (*value == ent->i) { ++ if (ent->str[0] != '\0') ++ DPRINTK(PROBE, INFO, "%s\n", ent->str); ++ return 0; ++ } ++ } ++ } ++ break; ++ default: ++ BUG(); ++ } ++ ++ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n", ++ opt->name, *value, opt->err); ++ *value = opt->def; ++ return -1; ++} ++ ++/** ++ * igb_check_options - Range Checking for Command Line Parameters ++ * @adapter: board private structure ++ * ++ * This routine checks all command line parameters for valid user ++ * input. If an invalid value is given, or if no user specified ++ * value exists, a default value is used. The final value is stored ++ * in a variable in the adapter structure. 
++ **/ ++ ++void igb_check_options(struct igb_adapter *adapter) ++{ ++ int bd = adapter->bd_number; ++ struct e1000_hw *hw = &adapter->hw; ++ ++ if (bd >= IGB_MAX_NIC) { ++ DPRINTK(PROBE, NOTICE, ++ "Warning: no configuration for board #%d\n", bd); ++ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); ++#ifndef module_param_array ++ bd = IGB_MAX_NIC; ++#endif ++ } ++ ++ { /* Interrupt Throttling Rate */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Interrupt Throttling Rate (ints/sec)", ++ .err = "using default of "__MODULE_STRING(DEFAULT_ITR), ++ .def = DEFAULT_ITR, ++ .arg = { .r = { .min = MIN_ITR, ++ .max = MAX_ITR } } ++ }; ++ ++#ifdef module_param_array ++ if (num_InterruptThrottleRate > bd) { ++#endif ++ unsigned int itr = InterruptThrottleRate[bd]; ++ ++ switch (itr) { ++ case 0: ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ if (hw->mac.type >= e1000_i350) ++ adapter->dmac = IGB_DMAC_DISABLE; ++ adapter->rx_itr_setting = itr; ++ break; ++ case 1: ++ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", ++ opt.name); ++ adapter->rx_itr_setting = itr; ++ break; ++ case 3: ++ DPRINTK(PROBE, INFO, ++ "%s set to dynamic conservative mode\n", ++ opt.name); ++ adapter->rx_itr_setting = itr; ++ break; ++ default: ++ igb_validate_option(&itr, &opt, adapter); ++ /* Save the setting, because the dynamic bits ++ * change itr. In case of invalid user value, ++ * default to conservative mode, else need to ++ * clear the lower two bits because they are ++ * used as control */ ++ if (itr == 3) { ++ adapter->rx_itr_setting = itr; ++ } else { ++ adapter->rx_itr_setting = 1000000000 ++ / (itr * 256); ++ adapter->rx_itr_setting &= ~3; ++ } ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ adapter->rx_itr_setting = opt.def; ++ } ++#endif ++ adapter->tx_itr_setting = adapter->rx_itr_setting; ++ } ++ { /* Interrupt Mode */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Interrupt Mode", ++ .err = "defaulting to 2 (MSI-X)", ++ .def = IGB_INT_MODE_MSIX, ++ .arg = { .r = { .min = MIN_INTMODE, ++ .max = MAX_INTMODE } } ++ }; ++ ++#ifdef module_param_array ++ if (num_IntMode > bd) { ++#endif ++ unsigned int int_mode = IntMode[bd]; ++ igb_validate_option(&int_mode, &opt, adapter); ++ adapter->int_mode = int_mode; ++#ifdef module_param_array ++ } else { ++ adapter->int_mode = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt TCP Port */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt TCP Port", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIPORT), ++ .def = DEFAULT_LLIPORT, ++ .arg = { .r = { .min = MIN_LLIPORT, ++ .max = MAX_LLIPORT } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPort > bd) { ++#endif ++ adapter->lli_port = LLIPort[bd]; ++ if (adapter->lli_port) { ++ igb_validate_option(&adapter->lli_port, &opt, ++ adapter); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_port = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt on Packet Size */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on Packet Size", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLISIZE), ++ .def = DEFAULT_LLISIZE, ++ .arg = { .r = { .min = MIN_LLISIZE, ++ .max = MAX_LLISIZE } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLISize > bd) { ++#endif ++ adapter->lli_size = LLISize[bd]; ++ if (adapter->lli_size) { ++ igb_validate_option(&adapter->lli_size, &opt, ++ 
adapter); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_size = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt on TCP Push flag */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "Low Latency Interrupt on TCP Push flag", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPush > bd) { ++#endif ++ unsigned int lli_push = LLIPush[bd]; ++ igb_validate_option(&lli_push, &opt, adapter); ++ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0; ++#ifdef module_param_array ++ } else { ++ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0; ++ } ++#endif ++ } ++ { /* SRIOV - Enable SR-IOV VF devices */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "max_vfs - SR-IOV VF devices", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_SRIOV), ++ .def = DEFAULT_SRIOV, ++ .arg = { .r = { .min = MIN_SRIOV, ++ .max = MAX_SRIOV } } ++ }; ++ ++#ifdef module_param_array ++ if (num_max_vfs > bd) { ++#endif ++ adapter->vfs_allocated_count = max_vfs[bd]; ++ igb_validate_option(&adapter->vfs_allocated_count, ++ &opt, adapter); ++ ++#ifdef module_param_array ++ } else { ++ adapter->vfs_allocated_count = opt.def; ++ } ++#endif ++ if (adapter->vfs_allocated_count) { ++ switch (hw->mac.type) { ++ case e1000_82575: ++ case e1000_82580: ++ case e1000_i210: ++ case e1000_i211: ++ case e1000_i354: ++ adapter->vfs_allocated_count = 0; ++ DPRINTK(PROBE, INFO, ++ "SR-IOV option max_vfs not supported.\n"); ++ /* Fall through */ ++ default: ++ break; ++ } ++ } ++ } ++ { /* VMDQ - Enable VMDq multiqueue receive */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "VMDQ - VMDq multiqueue queue count", ++ .err = "using default of "__MODULE_STRING(DEFAULT_VMDQ), ++ .def = DEFAULT_VMDQ, ++ .arg = { .r = { .min = MIN_VMDQ, ++ .max = (MAX_VMDQ ++ - adapter->vfs_allocated_count)} } ++ }; ++ if ((hw->mac.type != e1000_i210) || ++ (hw->mac.type != e1000_i211)) { ++#ifdef module_param_array ++ if (num_VMDQ > bd) { ++#endif ++ adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]); ++ if (adapter->vfs_allocated_count && ++ !adapter->vmdq_pools) { ++ DPRINTK(PROBE, INFO, ++ "Enabling SR-IOV requires VMDq be set to at least 1\n"); ++ adapter->vmdq_pools = 1; ++ } ++ igb_validate_option(&adapter->vmdq_pools, &opt, ++ adapter); ++ ++#ifdef module_param_array ++ } else { ++ if (!adapter->vfs_allocated_count) ++ adapter->vmdq_pools = (opt.def == 1 ? 
0 ++ : opt.def); ++ else ++ adapter->vmdq_pools = 1; ++ } ++#endif ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) { ++ DPRINTK(PROBE, INFO, ++ "VMDq not supported on this part.\n"); ++ adapter->vmdq_pools = 0; ++ } ++#endif ++ ++ } else { ++ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n"); ++ adapter->vmdq_pools = opt.def; ++ } ++ } ++ { /* RSS - Enable RSS multiqueue receives */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "RSS - RSS multiqueue receive count", ++ .err = "using default of "__MODULE_STRING(DEFAULT_RSS), ++ .def = DEFAULT_RSS, ++ .arg = { .r = { .min = MIN_RSS, ++ .max = MAX_RSS } } ++ }; ++ ++ switch (hw->mac.type) { ++ case e1000_82575: ++#ifndef CONFIG_IGB_VMDQ_NETDEV ++ if (!!adapter->vmdq_pools) { ++ if (adapter->vmdq_pools <= 2) { ++ if (adapter->vmdq_pools == 2) ++ opt.arg.r.max = 3; ++ } else { ++ opt.arg.r.max = 1; ++ } ++ } else { ++ opt.arg.r.max = 4; ++ } ++#else ++ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4; ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ break; ++ case e1000_i210: ++ opt.arg.r.max = 4; ++ break; ++ case e1000_i211: ++ opt.arg.r.max = 2; ++ break; ++ case e1000_82576: ++#ifndef CONFIG_IGB_VMDQ_NETDEV ++ if (!!adapter->vmdq_pools) ++ opt.arg.r.max = 2; ++ break; ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ case e1000_82580: ++ case e1000_i350: ++ case e1000_i354: ++ default: ++ if (!!adapter->vmdq_pools) ++ opt.arg.r.max = 1; ++ break; ++ } ++ ++ if (adapter->int_mode != IGB_INT_MODE_MSIX) { ++ DPRINTK(PROBE, INFO, ++ "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n", ++ opt.err); ++ opt.arg.r.max = 1; ++ } ++ ++#ifdef module_param_array ++ if (num_RSS > bd) { ++#endif ++ adapter->rss_queues = RSS[bd]; ++ switch (adapter->rss_queues) { ++ case 1: ++ break; ++ default: ++ igb_validate_option(&adapter->rss_queues, &opt, ++ adapter); ++ if (adapter->rss_queues) ++ break; ++ case 0: ++ adapter->rss_queues = min_t(u32, opt.arg.r.max, ++ num_online_cpus()); ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ adapter->rss_queues = opt.def; ++ } ++#endif ++ } ++ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = ++ "QueuePairs - Tx/Rx queue pairs for interrupt handling", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++#ifdef module_param_array ++ if (num_QueuePairs > bd) { ++#endif ++ unsigned int qp = QueuePairs[bd]; ++ /* ++ * We must enable queue pairs if the number of queues ++ * exceeds the number of available interrupts. We are ++ * limited to 10, or 3 per unallocated vf. On I210 and ++ * I211 devices, we are limited to 5 interrupts. ++ * However, since I211 only supports 2 queues, we do not ++ * need to check and override the user option. ++ */ ++ if (qp == OPTION_DISABLED) { ++ if (adapter->rss_queues > 4) ++ qp = OPTION_ENABLED; ++ ++ if (adapter->vmdq_pools > 4) ++ qp = OPTION_ENABLED; ++ ++ if (adapter->rss_queues > 1 && ++ (adapter->vmdq_pools > 3 || ++ adapter->vfs_allocated_count > 6)) ++ qp = OPTION_ENABLED; ++ ++ if (hw->mac.type == e1000_i210 && ++ adapter->rss_queues > 2) ++ qp = OPTION_ENABLED; ++ ++ if (qp == OPTION_ENABLED) ++ DPRINTK(PROBE, INFO, ++ "Number of queues exceeds available interrupts, %s\n", ++ opt.err); ++ } ++ igb_validate_option(&qp, &opt, adapter); ++ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0; ++#ifdef module_param_array ++ } else { ++ adapter->flags |= opt.def ? 
IGB_FLAG_QUEUE_PAIRS : 0; ++ } ++#endif ++ } ++ { /* EEE - Enable EEE for capable adapters */ ++ ++ if (hw->mac.type >= e1000_i350) { ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "EEE Support", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++#ifdef module_param_array ++ if (num_EEE > bd) { ++#endif ++ unsigned int eee = EEE[bd]; ++ igb_validate_option(&eee, &opt, adapter); ++ adapter->flags |= eee ? IGB_FLAG_EEE : 0; ++ if (eee) ++ hw->dev_spec._82575.eee_disable = false; ++ else ++ hw->dev_spec._82575.eee_disable = true; ++ ++#ifdef module_param_array ++ } else { ++ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0; ++ if (adapter->flags & IGB_FLAG_EEE) ++ hw->dev_spec._82575.eee_disable = false; ++ else ++ hw->dev_spec._82575.eee_disable = true; ++ } ++#endif ++ } ++ } ++ { /* DMAC - Enable DMA Coalescing for capable adapters */ ++ ++ if (hw->mac.type >= e1000_i350) { ++ struct igb_opt_list list[] = { ++ { IGB_DMAC_DISABLE, "DMAC Disable"}, ++ { IGB_DMAC_MIN, "DMAC 250 usec"}, ++ { IGB_DMAC_500, "DMAC 500 usec"}, ++ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"}, ++ { IGB_DMAC_2000, "DMAC 2000 usec"}, ++ { IGB_DMAC_3000, "DMAC 3000 usec"}, ++ { IGB_DMAC_4000, "DMAC 4000 usec"}, ++ { IGB_DMAC_5000, "DMAC 5000 usec"}, ++ { IGB_DMAC_6000, "DMAC 6000 usec"}, ++ { IGB_DMAC_7000, "DMAC 7000 usec"}, ++ { IGB_DMAC_8000, "DMAC 8000 usec"}, ++ { IGB_DMAC_9000, "DMAC 9000 usec"}, ++ { IGB_DMAC_MAX, "DMAC 10000 usec"} ++ }; ++ struct igb_option opt = { ++ .type = list_option, ++ .name = "DMA Coalescing", ++ .err = "using default of " ++ __MODULE_STRING(IGB_DMAC_DISABLE), ++ .def = IGB_DMAC_DISABLE, ++ .arg = { .l = { .nr = 13, ++ .p = list ++ } ++ } ++ }; ++#ifdef module_param_array ++ if (num_DMAC > bd) { ++#endif ++ unsigned int dmac = DMAC[bd]; ++ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE) ++ dmac = IGB_DMAC_DISABLE; ++ igb_validate_option(&dmac, &opt, adapter); ++ switch (dmac) { ++ case IGB_DMAC_DISABLE: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_MIN: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_500: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_EN_DEFAULT: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_2000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_3000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_4000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_5000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_6000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_7000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_8000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_9000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_MAX: ++ adapter->dmac = dmac; ++ break; ++ default: ++ adapter->dmac = opt.def; ++ DPRINTK(PROBE, INFO, ++ "Invalid DMAC setting, resetting DMAC to %d\n", ++ opt.def); ++ } ++#ifdef module_param_array ++ } else ++ adapter->dmac = opt.def; ++#endif ++ } ++ } ++#ifndef IGB_NO_LRO ++ { /* LRO - Enable Large Receive Offload */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "LRO - Large Receive Offload", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ struct net_device *netdev = adapter->netdev; ++#ifdef module_param_array ++ if (num_LRO > bd) { ++#endif ++ unsigned int lro = LRO[bd]; ++ igb_validate_option(&lro, &opt, adapter); ++ netdev->features |= lro ? 
NETIF_F_LRO : 0; ++#ifdef module_param_array ++ } else if (opt.def == OPTION_ENABLED) { ++ netdev->features |= NETIF_F_LRO; ++ } ++#endif ++ } ++#endif /* IGB_NO_LRO */ ++ { /* MDD - Enable Malicious Driver Detection. Only available when ++ SR-IOV is enabled. */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "Malicious Driver Detection", ++ .err = "defaulting to 1", ++ .def = OPTION_ENABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = OPTION_ENABLED } } ++ }; ++ ++#ifdef module_param_array ++ if (num_MDD > bd) { ++#endif ++ adapter->mdd = MDD[bd]; ++ igb_validate_option((uint *)&adapter->mdd, &opt, ++ adapter); ++#ifdef module_param_array ++ } else { ++ adapter->mdd = opt.def; ++ } ++#endif ++ } ++} ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_procfs.c b/drivers/net/ethernet/intel/igb/igb_procfs.c +--- a/drivers/net/ethernet/intel/igb/igb_procfs.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_procfs.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,356 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "igb.h" ++#include "e1000_82575.h" ++#include "e1000_hw.h" ++ ++#ifdef IGB_PROCFS ++#ifndef IGB_HWMON ++ ++#include ++#include ++#include ++#include ++#include ++ ++static struct proc_dir_entry *igb_top_dir; ++ ++bool igb_thermal_present(struct igb_adapter *adapter) ++{ ++ s32 status; ++ struct e1000_hw *hw; ++ ++ if (adapter == NULL) ++ return false; ++ hw = &adapter->hw; ++ ++ /* ++ * Only set I2C bit-bang mode if an external thermal sensor is ++ * supported on this device. 
++ */ ++ if (adapter->ets) { ++ status = e1000_set_i2c_bb(hw); ++ if (status != E1000_SUCCESS) ++ return false; ++ } ++ ++ status = hw->mac.ops.init_thermal_sensor_thresh(hw); ++ if (status != E1000_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++static int igb_macburn(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct e1000_hw *hw; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ if (hw == NULL) ++ return snprintf(page, count, "error: no hw data\n"); ++ ++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", ++ (unsigned int)hw->mac.perm_addr[0], ++ (unsigned int)hw->mac.perm_addr[1], ++ (unsigned int)hw->mac.perm_addr[2], ++ (unsigned int)hw->mac.perm_addr[3], ++ (unsigned int)hw->mac.perm_addr[4], ++ (unsigned int)hw->mac.perm_addr[5]); ++} ++ ++static int igb_macadmn(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct e1000_hw *hw; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ if (hw == NULL) ++ return snprintf(page, count, "error: no hw data\n"); ++ ++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", ++ (unsigned int)hw->mac.addr[0], ++ (unsigned int)hw->mac.addr[1], ++ (unsigned int)hw->mac.addr[2], ++ (unsigned int)hw->mac.addr[3], ++ (unsigned int)hw->mac.addr[4], ++ (unsigned int)hw->mac.addr[5]); ++} ++ ++static int igb_numeports(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct e1000_hw *hw; ++ int ports; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ if (hw == NULL) ++ return snprintf(page, count, "error: no hw data\n"); ++ ++ ports = 4; ++ ++ return snprintf(page, count, "%d\n", ports); ++} ++ ++static int igb_porttype(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", ++ test_bit(__IGB_DOWN, &adapter->state)); ++} ++ ++static int igb_therm_location(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", therm_data->sensor_data->location); ++} ++ ++static int igb_therm_maxopthresh(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", ++ therm_data->sensor_data->max_op_thresh); ++} ++ ++static int igb_therm_cautionthresh(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", ++ therm_data->sensor_data->caution_thresh); ++} ++ ++static int igb_therm_temp(char *page, char **start, off_t 
off, ++ int count, int *eof, void *data) ++{ ++ s32 status; ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ status = e1000_get_thermal_sensor_data(therm_data->hw); ++ if (status != E1000_SUCCESS) ++ snprintf(page, count, "error: status %d returned\n", status); ++ ++ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); ++} ++ ++struct igb_proc_type { ++ char name[32]; ++ int (*read)(char*, char**, off_t, int, int*, void*); ++}; ++ ++struct igb_proc_type igb_proc_entries[] = { ++ {"numeports", &igb_numeports}, ++ {"porttype", &igb_porttype}, ++ {"macburn", &igb_macburn}, ++ {"macadmn", &igb_macadmn}, ++ {"", NULL} ++}; ++ ++struct igb_proc_type igb_internal_entries[] = { ++ {"location", &igb_therm_location}, ++ {"temp", &igb_therm_temp}, ++ {"cautionthresh", &igb_therm_cautionthresh}, ++ {"maxopthresh", &igb_therm_maxopthresh}, ++ {"", NULL} ++}; ++ ++void igb_del_proc_entries(struct igb_adapter *adapter) ++{ ++ int index, i; ++ char buf[16]; /* much larger than the sensor number will ever be */ ++ ++ if (igb_top_dir == NULL) ++ return; ++ ++ for (i = 0; i < E1000_MAX_SENSORS; i++) { ++ if (adapter->therm_dir[i] == NULL) ++ continue; ++ ++ for (index = 0; ; index++) { ++ if (igb_internal_entries[index].read == NULL) ++ break; ++ ++ remove_proc_entry(igb_internal_entries[index].name, ++ adapter->therm_dir[i]); ++ } ++ snprintf(buf, sizeof(buf), "sensor_%d", i); ++ remove_proc_entry(buf, adapter->info_dir); ++ } ++ ++ if (adapter->info_dir != NULL) { ++ for (index = 0; ; index++) { ++ if (igb_proc_entries[index].read == NULL) ++ break; ++ remove_proc_entry(igb_proc_entries[index].name, ++ adapter->info_dir); ++ } ++ remove_proc_entry("info", adapter->eth_dir); ++ } ++ ++ if (adapter->eth_dir != NULL) ++ remove_proc_entry(pci_name(adapter->pdev), igb_top_dir); ++} ++ ++/* called from igb_main.c */ ++void igb_procfs_exit(struct igb_adapter *adapter) ++{ ++ igb_del_proc_entries(adapter); ++} ++ ++int igb_procfs_topdir_init(void) ++{ ++ igb_top_dir = proc_mkdir("driver/igb", NULL); ++ if (igb_top_dir == NULL) ++ return (-ENOMEM); ++ ++ return 0; ++} ++ ++void igb_procfs_topdir_exit(void) ++{ ++ remove_proc_entry("driver/igb", NULL); ++} ++ ++/* called from igb_main.c */ ++int igb_procfs_init(struct igb_adapter *adapter) ++{ ++ int rc = 0; ++ int i; ++ int index; ++ char buf[16]; /* much larger than the sensor number will ever be */ ++ ++ adapter->eth_dir = NULL; ++ adapter->info_dir = NULL; ++ for (i = 0; i < E1000_MAX_SENSORS; i++) ++ adapter->therm_dir[i] = NULL; ++ ++ if (igb_top_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir); ++ if (adapter->eth_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ adapter->info_dir = proc_mkdir("info", adapter->eth_dir); ++ if (adapter->info_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ for (index = 0; ; index++) { ++ if (igb_proc_entries[index].read == NULL) ++ break; ++ if (!(create_proc_read_entry(igb_proc_entries[index].name, ++ 0444, ++ adapter->info_dir, ++ igb_proc_entries[index].read, ++ adapter))) { ++ ++ rc = -ENOMEM; ++ goto fail; ++ } ++ } ++ if (igb_thermal_present(adapter) == false) ++ goto exit; ++ ++ for (i = 0; i < E1000_MAX_SENSORS; i++) { ++ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) ++ continue; ++ ++ snprintf(buf, sizeof(buf), "sensor_%d", i); ++ adapter->therm_dir[i] = proc_mkdir(buf, 
adapter->info_dir); ++ if (adapter->therm_dir[i] == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ for (index = 0; ; index++) { ++ if (igb_internal_entries[index].read == NULL) ++ break; ++ /* ++ * therm_data struct contains pointer the read func ++ * will be needing ++ */ ++ adapter->therm_data[i].hw = &adapter->hw; ++ adapter->therm_data[i].sensor_data = ++ &adapter->hw.mac.thermal_sensor_data.sensor[i]; ++ ++ if (!(create_proc_read_entry( ++ igb_internal_entries[index].name, ++ 0444, ++ adapter->therm_dir[i], ++ igb_internal_entries[index].read, ++ &adapter->therm_data[i]))) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ } ++ } ++ goto exit; ++ ++fail: ++ igb_del_proc_entries(adapter); ++exit: ++ return rc; ++} ++ ++#endif /* !IGB_HWMON */ ++#endif /* IGB_PROCFS */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c +--- a/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,31 +1,46 @@ +-/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 +- * +- * Copyright (C) 2011 Richard Cochran +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/****************************************************************************** ++ Copyright(c) 2011 Richard Cochran for some of the ++ 82576 and 82580 code ++******************************************************************************/ ++ ++#include "igb.h" ++ ++#ifdef HAVE_PTP_1588_CLOCK + #include + #include + #include + #include +- +-#include "igb.h" ++#include + + #define INCVALUE_MASK 0x7fffffff + #define ISGN 0x80000000 + +-/* The 82580 timesync updates the system timer every 8ns by 8ns, ++/* ++ * The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. 
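
For reference, the igb_procfs.c hunks above expose the thermal sensors through the legacy procfs read interface: a table of {name, read callback} pairs is walked and each entry is registered with create_proc_read_entry(), with remove_proc_entry() undoing the work on teardown. The sketch below is not part of the patch; it shows the same pre-3.10 registration pattern in isolation, and the names example_dir, example_read and example_value are hypothetical.

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/proc_fs.h>
	#include <linux/types.h>

	static struct proc_dir_entry *example_dir;
	static int example_value = 42;

	/* read_proc-style callback: format the value into the page buffer */
	static int example_read(char *page, char **start, off_t off,
	                        int count, int *eof, void *data)
	{
		return snprintf(page, count, "%d\n", *(int *)data);
	}

	static int example_procfs_init(void)
	{
		example_dir = proc_mkdir("driver/example", NULL);
		if (example_dir == NULL)
			return -ENOMEM;

		/* 0444: read-only, like the igb entries above */
		if (!create_proc_read_entry("value", 0444, example_dir,
					    example_read, &example_value)) {
			remove_proc_entry("driver/example", NULL);
			return -ENOMEM;
		}
		return 0;
	}

	static void example_procfs_exit(void)
	{
		remove_proc_entry("value", example_dir);
		remove_proc_entry("driver/example", NULL);
	}
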
+ * + * Neither the 82576 nor the 82580 offer registers wide enough to hold +@@ -74,9 +89,10 @@ + #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) + #define IGB_NBITS_82580 40 + +-static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); ++/* ++ * SYSTIM read access for the 82576 ++ */ + +-/* SYSTIM read access for the 82576 */ + static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) + { + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); +@@ -84,8 +100,8 @@ + u64 val; + u32 lo, hi; + +- lo = rd32(E1000_SYSTIML); +- hi = rd32(E1000_SYSTIMH); ++ lo = E1000_READ_REG(hw, E1000_SYSTIML); ++ hi = E1000_READ_REG(hw, E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; +@@ -93,21 +109,24 @@ + return val; + } + +-/* SYSTIM read access for the 82580 */ ++/* ++ * SYSTIM read access for the 82580 ++ */ ++ + static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) + { + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; +- u32 lo, hi; + u64 val; ++ u32 lo, hi; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ +- rd32(E1000_SYSTIMR); +- lo = rd32(E1000_SYSTIML); +- hi = rd32(E1000_SYSTIMH); ++ E1000_READ_REG(hw, E1000_SYSTIMR); ++ lo = E1000_READ_REG(hw, E1000_SYSTIML); ++ hi = E1000_READ_REG(hw, E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; +@@ -115,7 +134,10 @@ + return val; + } + +-/* SYSTIM read access for I210/I211 */ ++/* ++ * SYSTIM read access for I210/I211 ++ */ ++ + static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) + { + struct e1000_hw *hw = &adapter->hw; +@@ -125,9 +147,9 @@ + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ +- rd32(E1000_SYSTIMR); +- nsec = rd32(E1000_SYSTIML); +- sec = rd32(E1000_SYSTIMH); ++ E1000_READ_REG(hw, E1000_SYSTIMR); ++ nsec = E1000_READ_REG(hw, E1000_SYSTIML); ++ sec = E1000_READ_REG(hw, E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +@@ -138,11 +160,12 @@ + { + struct e1000_hw *hw = &adapter->hw; + +- /* Writing the SYSTIMR register is not necessary as it only provides ++ /* ++ * Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ +- wr32(E1000_SYSTIML, ts->tv_nsec); +- wr32(E1000_SYSTIMH, ts->tv_sec); ++ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec); ++ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec); + } + + /** +@@ -172,8 +195,8 @@ + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); +@@ -195,7 +218,10 @@ + } + } + +-/* PTP clock operations */ ++/* ++ * PTP clock operations ++ */ ++ + static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) + { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, +@@ -220,7 +246,8 @@ + else + incvalue += rate; + +- wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 ++ | (incvalue & INCVALUE_82576_MASK)); + + return 0; + } +@@ -242,11 +269,24 @@ + rate <<= 26; + rate = div_u64(rate, 1953125); + ++ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x ++ * as quickly. 
Account for this by dividing the adjustment by 2.5. ++ */ ++ if (hw->mac.type == e1000_i354) { ++ u32 status = E1000_READ_REG(hw, E1000_STATUS); ++ ++ if ((status & E1000_STATUS_2P5_SKU) && ++ !(status & E1000_STATUS_2P5_SKU_OVER)) { ++ rate <<= 1; ++ rate = div_u64(rate, 5); ++ } ++ } ++ + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + +- wr32(E1000_TIMINCA, inca); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, inca); + + return 0; + } +@@ -287,14 +327,13 @@ + return 0; + } + +-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, +- struct timespec *ts) ++static int igb_ptp_gettime64_82576(struct ptp_clock_info *ptp, ++ struct timespec64 *ts64) + { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; +- u32 remainder; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + +@@ -302,28 +341,99 @@ + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + +- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); +- ts->tv_nsec = remainder; ++ *ts64 = ns_to_timespec64(ns); + + return 0; + } + +-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, +- struct timespec *ts) ++static int igb_ptp_gettime64_i210(struct ptp_clock_info *ptp, ++ struct timespec64 *ts64) ++{ ++ struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ++ ptp_caps); ++ struct timespec ts; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&igb->tmreg_lock, flags); ++ ++ igb_ptp_read_i210(igb, &ts); ++ *ts64 = timespec_to_timespec64(ts); ++ ++ spin_unlock_irqrestore(&igb->tmreg_lock, flags); ++ ++ return 0; ++} ++ ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++static int igb_ptp_settime64_82576(struct ptp_clock_info *ptp, ++ const struct timespec64 *ts64) ++{ ++ struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ++ ptp_caps); ++ unsigned long flags; ++ u64 ns; ++ ++ ns = timespec64_to_ns(ts64); ++ ++ spin_lock_irqsave(&igb->tmreg_lock, flags); ++ ++ timecounter_init(&igb->tc, &igb->cc, ns); ++ ++ spin_unlock_irqrestore(&igb->tmreg_lock, flags); ++ ++ return 0; ++} ++ ++#endif ++static int igb_ptp_settime64_i210(struct ptp_clock_info *ptp, ++ const struct timespec64 *ts64) + { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); ++ struct timespec ts; + unsigned long flags; + ++ ts = timespec64_to_timespec(*ts64); + spin_lock_irqsave(&igb->tmreg_lock, flags); + +- igb_ptp_read_i210(igb, ts); ++ igb_ptp_write_i210(igb, &ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; + } + ++#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 ++static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, ++ struct timespec *ts) ++{ ++ struct timespec64 ts64; ++ int err; ++ ++ err = igb_ptp_gettime64_82576(ptp, &ts64); ++ if (err) ++ return err; ++ ++ *ts = timespec64_to_timespec(ts64); ++ ++ return 0; ++} ++ ++static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, ++ struct timespec *ts) ++{ ++ struct timespec64 ts64; ++ int err; ++ ++ err = igb_ptp_gettime64_i210(ptp, &ts64); ++ if (err) ++ return err; ++ ++ *ts = timespec64_to_timespec(ts64); ++ ++ return 0; ++} ++ + static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec *ts) + { +@@ -360,8 +470,9 @@ + return 0; + } + +-static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, +- struct ptp_clock_request *rq, int on) ++#endif ++static int igb_ptp_enable(struct ptp_clock_info *ptp, ++ struct ptp_clock_request *rq, int on) + { + return -EOPNOTSUPP; + } +@@ -372,8 +483,8 @@ + * + * This work function polls the TSYNCTXCTL valid bit to 
determine when a + * timestamp has been taken for the current stored skb. +- **/ +-static void igb_ptp_tx_work(struct work_struct *work) ++ */ ++void igb_ptp_tx_work(struct work_struct *work) + { + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); +@@ -393,7 +504,7 @@ + return; + } + +- tsynctxctl = rd32(E1000_TSYNCTXCTL); ++ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else +@@ -401,15 +512,16 @@ + schedule_work(&adapter->ptp_tx_work); + } + +-static void igb_ptp_overflow_check(struct work_struct *work) ++static void igb_ptp_overflow_check_82576(struct work_struct *work) + { + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); +- struct timespec ts; ++ struct timespec64 ts64; + +- igb->ptp_caps.gettime(&igb->ptp_caps, &ts); ++ igb_ptp_gettime64_82576(&igb->ptp_caps, &ts64); + +- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); ++ pr_debug("igb overflow check at %lld.%09lu\n", ++ (long long)ts64.tv_sec, ts64.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +@@ -423,11 +535,11 @@ + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. +- **/ ++ */ + void igb_ptp_rx_hang(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +- u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); ++ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + unsigned long rx_event; + + if (hw->mac.type != e1000_82576) +@@ -448,7 +560,7 @@ + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { +- rd32(E1000_RXSTMPH); ++ E1000_READ_REG(hw, E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); +@@ -462,15 +574,15 @@ + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. +- **/ +-static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) ++ */ ++void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + +- regval = rd32(E1000_TXSTMPL); +- regval |= (u64)rd32(E1000_TXSTMPH) << 32; ++ regval = E1000_READ_REG(hw, E1000_TXSTMPL); ++ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); +@@ -488,14 +600,15 @@ + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8. +- **/ ++ */ + void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, + unsigned char *va, + struct sk_buff *skb) + { + __le64 *regval = (__le64 *)va; + +- /* The timestamp is recorded in little endian format. ++ /* ++ * The timestamp is recorded in little endian format. + * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ +@@ -510,7 +623,7 @@ + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. 
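
The gettime64/settime64 additions above follow a common out-of-tree compatibility pattern: the timespec64 operations are implemented natively, and on kernels whose ptp_clock_info lacks them (signalled by the HAVE_PTP_CLOCK_INFO_GETTIME64 flag from the driver's kcompat layer) thin 32-bit wrappers convert to and from timespec. A minimal sketch outside the patch, with my_gettime64()/my_gettime() as hypothetical stand-ins for the igb functions:

	#include <linux/ktime.h>
	#include <linux/ptp_clock_kernel.h>
	#include <linux/time64.h>

	static int my_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts64)
	{
		/* the real driver reads its timecounter under tmreg_lock;
		 * returning wall-clock time keeps this sketch self-contained */
		*ts64 = ktime_to_timespec64(ktime_get_real());
		return 0;
	}

	#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
	/* older kernels only know the 32-bit op, so wrap the 64-bit one */
	static int my_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
	{
		struct timespec64 ts64;
		int err;

		err = my_gettime64(ptp, &ts64);
		if (err)
			return err;

		*ts = timespec64_to_timespec(ts64);
		return 0;
	}
	#endif
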
+- **/ ++ */ + void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, + struct sk_buff *skb) + { +@@ -518,7 +631,8 @@ + struct e1000_hw *hw = &adapter->hw; + u64 regval; + +- /* If this bit is set, then the RX registers contain the time stamp. No ++ /* ++ * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register +@@ -528,11 +642,11 @@ + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ +- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) ++ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + +- regval = rd32(E1000_RXSTMPL); +- regval |= (u64)rd32(E1000_RXSTMPH) << 32; ++ regval = E1000_READ_REG(hw, E1000_RXSTMPL); ++ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + +@@ -576,6 +690,7 @@ + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". ++ * + */ + static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +@@ -631,7 +746,8 @@ + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: +- /* 82576 cannot timestamp all packets, which it needs to do to ++ /* ++ * 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { +@@ -651,9 +767,10 @@ + return 0; + } + +- /* Per-packet timestamping only works if all packets are ++ /* ++ * Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as +- * long as one Rx filter was configured. ++ * long as one rx filter was configured. 
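
igb_ptp_set_timestamp_mode() here is the driver half of the standard SIOCSHWTSTAMP handshake: user space fills a struct hwtstamp_config with a tx_type and rx_filter, and the driver maps that onto the TSYNCTXCTL/TSYNCRXCTL and queue-filter registers shown below. A hedged user-space sketch of the request side; the interface name handling and chosen filter are illustrative only.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* ask the NIC driver to timestamp PTP v2 event packets on ifname */
	static int enable_hw_timestamping(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* the driver may widen rx_filter (e.g. to HWTSTAMP_FILTER_ALL);
		 * cfg is updated with the mode actually applied */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}
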
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; +@@ -664,63 +781,63 @@ + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { +- regval = rd32(E1000_RXPBS); ++ regval = E1000_READ_REG(hw, E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; +- wr32(E1000_RXPBS, regval); ++ E1000_WRITE_REG(hw, E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ +- regval = rd32(E1000_TSYNCTXCTL); ++ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; +- wr32(E1000_TSYNCTXCTL, regval); ++ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ +- regval = rd32(E1000_TSYNCRXCTL); ++ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; +- wr32(E1000_TSYNCRXCTL, regval); ++ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ +- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); ++ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) +- wr32(E1000_ETQF(3), ++ E1000_WRITE_REG(hw, E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else +- wr32(E1000_ETQF(3), 0); ++ E1000_WRITE_REG(hw, E1000_ETQF(3), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ +- | E1000_FTQF_VF_BP /* VF not compared */ +- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ +- | E1000_FTQF_MASK); /* mask all inputs */ ++ | E1000_FTQF_VF_BP /* VF not compared */ ++ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamp */ ++ | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + +- wr32(E1000_IMIR(3), htons(PTP_EV_PORT)); +- wr32(E1000_IMIREXT(3), ++ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ +- wr32(E1000_SPQF(3), htons(PTP_EV_PORT)); ++ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } +- wr32(E1000_FTQF(3), ftqf); ++ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf); + } else { +- wr32(E1000_FTQF(3), E1000_FTQF_MASK); ++ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK); + } +- wrfl(); ++ E1000_WRITE_FLUSH(hw); + + /* clear TX/RX time stamp registers, just to be sure */ +- regval = rd32(E1000_TXSTMPL); +- regval = rd32(E1000_TXSTMPH); +- regval = rd32(E1000_RXSTMPL); +- regval = rd32(E1000_RXSTMPH); ++ regval = E1000_READ_REG(hw, E1000_TXSTMPL); ++ regval = E1000_READ_REG(hw, E1000_TXSTMPH); ++ regval = E1000_READ_REG(hw, E1000_RXSTMPL); ++ regval = E1000_READ_REG(hw, E1000_RXSTMPH); + + return 0; + } +@@ -766,19 +883,25 @@ + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576; ++ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576; ++#else + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; +- adapter->ptp_caps.enable = igb_ptp_feature_enable; ++#endif ++ adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = 
igb_ptp_read_82576; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + /* Dial the nominal frequency. */ +- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, ++ INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; +@@ -786,15 +909,20 @@ + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576; ++ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576; ++#else + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; +- adapter->ptp_caps.enable = igb_ptp_feature_enable; ++#endif ++ adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + /* Enable the timer functions by clearing bit 31. */ +- wr32(E1000_TSAUXC, 0x0); ++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + break; + case e1000_i210: + case e1000_i211: +@@ -805,33 +933,38 @@ + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_i210; ++ adapter->ptp_caps.settime64 = igb_ptp_settime64_i210; ++#else + adapter->ptp_caps.gettime = igb_ptp_gettime_i210; + adapter->ptp_caps.settime = igb_ptp_settime_i210; +- adapter->ptp_caps.enable = igb_ptp_feature_enable; ++#endif ++ adapter->ptp_caps.enable = igb_ptp_enable; + /* Enable the timer functions by clearing bit 31. */ +- wr32(E1000_TSAUXC, 0x0); ++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + break; + default: + adapter->ptp_clock = NULL; + return; + } + +- wrfl(); ++ E1000_WRITE_FLUSH(hw); + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + /* Initialize the clock and overflow work for devices that need it. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { +- struct timespec ts = ktime_to_timespec(ktime_get_real()); ++ struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + +- igb_ptp_settime_i210(&adapter->ptp_caps, &ts); ++ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, +- igb_ptp_overflow_check); ++ igb_ptp_overflow_check_82576); + + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +@@ -839,8 +972,8 @@ + + /* Initialize the time sync interrupts for devices that support it. 
*/ + if (hw->mac.type >= e1000_82580) { +- wr32(E1000_TSIM, TSYNC_INTERRUPTS); +- wr32(E1000_IMS, E1000_IMS_TS); ++ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); ++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); + } + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +@@ -869,8 +1002,8 @@ + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + break; + case e1000_i210: +@@ -915,17 +1048,18 @@ + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ +- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | ++ INCVALUE_82576); + break; + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Enable the timer functions and interrupts. */ +- wr32(E1000_TSAUXC, 0x0); +- wr32(E1000_TSIM, TSYNC_INTERRUPTS); +- wr32(E1000_IMS, E1000_IMS_TS); ++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); ++ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); ++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ +@@ -934,11 +1068,12 @@ + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { +- struct timespec ts = ktime_to_timespec(ktime_get_real()); ++ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real()); + +- igb_ptp_settime_i210(&adapter->ptp_caps, &ts); ++ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts64); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_regtest.h b/drivers/net/ethernet/intel/igb/igb_regtest.h +--- a/drivers/net/ethernet/intel/igb/igb_regtest.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_regtest.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,256 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ethtool register test data */ ++struct igb_reg_test { ++ u16 reg; ++ u16 reg_offset; ++ u16 array_len; ++ u16 test_type; ++ u32 mask; ++ u32 write; ++}; ++ ++/* In the hardware, registers are laid out either singly, in arrays ++ * spaced 0x100 bytes apart, or in contiguous tables. We assume ++ * most tests take place on arrays or single registers (handled ++ * as a single-element array) and special-case the tables. ++ * Table tests are always pattern tests. 
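
The struct igb_reg_test rows defined above, and the per-MAC tables that follow, drive the ethtool register self-test: each row names a register, an offset/stride for register arrays, an element count, a test type, a read-back mask and a write mask. The consuming loop lives in igb_ethtool.c and is not part of this hunk; the sketch below, with toy read_reg()/write_reg() accessors, shows how a PATTERN_TEST row is conventionally exercised: write a set of bit patterns through the write mask, read back, and compare under the read mask.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static uint32_t toy_regs[64];		/* toy register file for the sketch */

	static uint32_t read_reg(uint32_t reg)
	{
		return toy_regs[(reg / 4) % 64];
	}

	static void write_reg(uint32_t reg, uint32_t val)
	{
		toy_regs[(reg / 4) % 64] = val;
	}

	static bool pattern_test(uint32_t reg, uint32_t mask, uint32_t write)
	{
		static const uint32_t patterns[] = {
			0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
		};
		uint32_t before = read_reg(reg);
		size_t i;

		for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
			write_reg(reg, patterns[i] & write);
			if ((read_reg(reg) & mask) != (patterns[i] & write & mask)) {
				write_reg(reg, before);
				return false;	/* register failed the self-test */
			}
		}
		write_reg(reg, before);		/* restore the original value */
		return true;
	}
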
++ * ++ * We also make provision for some required setup steps by specifying ++ * registers to be written without any read-back testing. ++ */ ++ ++#define PATTERN_TEST 1 ++#define SET_READ_TEST 2 ++#define WRITE_NO_TEST 3 ++#define TABLE32_TEST 4 ++#define TABLE64_TEST_LO 5 ++#define TABLE64_TEST_HI 6 ++ ++/* i210 reg test */ ++static struct igb_reg_test reg_test_i210[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* RDH is read-only for i210, only test RDT. */ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x900FFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* i350 reg test */ ++static struct igb_reg_test reg_test_i350[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ /* VET is readonly on i350 */ ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* RDH is read-only for i350, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0xC3FFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 16, TABLE64_TEST_HI, ++ 0xC3FFFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* 82580 reg test */ ++static struct igb_reg_test reg_test_82580[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ /* RDH is read-only for 82580, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* 82576 reg test */ ++static struct igb_reg_test reg_test_82576[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ /* Enable all queues before testing. */ ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, ++ E1000_RXDCTL_QUEUE_ENABLE }, ++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, ++ E1000_RXDCTL_QUEUE_ENABLE }, ++ /* RDH is read-only for 82576, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, ++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* 82575 register test */ ++static struct igb_reg_test reg_test_82575[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, ++ 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, ++ 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* Enable all four RX queues before testing. */ ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, ++ E1000_RXDCTL_QUEUE_ENABLE }, ++ /* RDH is read-only for 82575, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, ++ 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, ++ 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, ++ 0x000FFFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x800FFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.c b/drivers/net/ethernet/intel/igb/igb_vmdq.c +--- a/drivers/net/ethernet/intel/igb/igb_vmdq.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_vmdq.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,433 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include ++ ++#include "igb.h" ++#include "igb_vmdq.h" ++#include ++ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++int igb_vmdq_open(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct net_device *main_netdev = adapter->netdev; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ if (test_bit(__IGB_DOWN, &adapter->state)) { ++ DPRINTK(DRV, WARNING, ++ "Open %s before opening this device.\n", ++ main_netdev->name); ++ return -EAGAIN; ++ } ++ netif_carrier_off(dev); ++ vadapter->tx_ring->vmdq_netdev = dev; ++ vadapter->rx_ring->vmdq_netdev = dev; ++ if (is_valid_ether_addr(dev->dev_addr)) { ++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); ++ igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); ++ } ++ netif_carrier_on(dev); ++ return 0; ++} ++ ++int igb_vmdq_close(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ netif_carrier_off(dev); ++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); ++ ++ vadapter->tx_ring->vmdq_netdev = NULL; ++ vadapter->rx_ring->vmdq_netdev = NULL; ++ return 0; ++} ++ ++netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ ++ return igb_xmit_frame_ring(skb, vadapter->tx_ring); ++} ++ ++struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ vadapter->net_stats.rx_packets += ++ E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0); ++ vadapter->net_stats.tx_packets += ++ E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0); ++ vadapter->net_stats.rx_bytes += ++ E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0); ++ vadapter->net_stats.tx_bytes += ++ E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0); ++ vadapter->net_stats.multicast += ++ E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0); ++ /* only return the current stats */ ++ return &vadapter->net_stats; ++} ++ ++/** ++ * igb_write_vm_addr_list - write unicast addresses to RAR table ++ * @netdev: network interface device structure ++ * ++ * Writes unicast address list to the RAR table. 
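
The igb_write_vm_addr_list() helper introduced here walks the per-queue netdev's unicast address list and programs each entry into a shared receive-address filter slot, returning the count written or -ENOMEM when the list exceeds the free slots. A minimal sketch of that walk outside the patch, with add_hw_filter() as a hypothetical stand-in for igb_add_mac_filter():

	#include <linux/errno.h>
	#include <linux/netdevice.h>

	/* hypothetical stand-in for igb_add_mac_filter(): program one address
	 * into a receive-address (RAR) slot owned by the given queue pool */
	static int add_hw_filter(const unsigned char *addr, int queue)
	{
		return 0;
	}

	static int write_uc_addr_list(struct net_device *netdev, int queue,
				      int free_rar_slots)
	{
		struct netdev_hw_addr *ha;
		int count = 0;

		/* refuse if the hardware cannot hold the whole list */
		if (netdev_uc_count(netdev) > free_rar_slots)
			return -ENOMEM;

		netdev_for_each_uc_addr(ha, netdev) {
			add_hw_filter(ha->addr, queue);
			count++;
		}
		return count;	/* number of addresses programmed */
	}
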
++ * Returns: -ENOMEM on failure/insufficient address space ++ * 0 on no addresses written ++ * X on writing X addresses to the RAR table ++ **/ ++static int igb_write_vm_addr_list(struct net_device *netdev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int count = 0; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ /* return ENOMEM indicating insufficient memory for addresses */ ++ if (netdev_uc_count(netdev) > igb_available_rars(adapter)) ++ return -ENOMEM; ++ ++ if (!netdev_uc_empty(netdev)) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *ha; ++#endif ++ netdev_for_each_uc_addr(ha, netdev) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ igb_del_mac_filter(adapter, ha->addr, hw_queue); ++ igb_add_mac_filter(adapter, ha->addr, hw_queue); ++#else ++ igb_del_mac_filter(adapter, ha->da_addr, hw_queue); ++ igb_add_mac_filter(adapter, ha->da_addr, hw_queue); ++#endif ++ count++; ++ } ++ } ++ return count; ++} ++ ++ ++#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */ ++void igb_vmdq_set_rx_mode(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 vmolr, rctl; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ /* Check for Promiscuous and All Multicast modes */ ++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue)); ++ ++ /* clear the affected bits */ ++ vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME | ++ E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE); ++ ++ if (dev->flags & IFF_PROMISC) { ++ vmolr |= E1000_VMOLR_UPE; ++ rctl = E1000_READ_REG(hw, E1000_RCTL); ++ rctl |= E1000_RCTL_UPE; ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ } else { ++ rctl = E1000_READ_REG(hw, E1000_RCTL); ++ rctl &= ~E1000_RCTL_UPE; ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ if (dev->flags & IFF_ALLMULTI) { ++ vmolr |= E1000_VMOLR_MPME; ++ } else { ++ /* ++ * Write addresses to the MTA, if the attempt fails ++ * then we should just turn on promiscous mode so ++ * that we can at least receive multicast traffic ++ */ ++ if (igb_write_mc_addr_list(adapter->netdev) != 0) ++ vmolr |= E1000_VMOLR_ROMPE; ++ } ++#ifdef HAVE_SET_RX_MODE ++ /* ++ * Write addresses to available RAR registers, if there is not ++ * sufficient space to store all the addresses then enable ++ * unicast promiscous mode ++ */ ++ if (igb_write_vm_addr_list(dev) < 0) ++ vmolr |= E1000_VMOLR_UPE; ++#endif ++ } ++ E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr); ++ ++ return; ++} ++ ++int igb_vmdq_set_mac(struct net_device *dev, void *p) ++{ ++ struct sockaddr *addr = p; ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); ++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); ++ return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); ++} ++ ++int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ ++ if (adapter->netdev->mtu < new_mtu) { ++ DPRINTK(PROBE, INFO, ++ "Set MTU on %s to >= %d before changing MTU on %s\n", ++ adapter->netdev->name, new_mtu, dev->name); ++ return -EINVAL; ++ } ++ 
dev->mtu = new_mtu; ++ return 0; ++} ++ ++void igb_vmdq_tx_timeout(struct net_device *dev) ++{ ++ return; ++} ++ ++void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ vadapter->vlgrp = grp; ++ ++ igb_enable_vlan_tags(adapter); ++ E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0); ++ ++ return; ++} ++void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ struct net_device *v_netdev; ++#endif ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ /* attempt to add filter to vlvf array */ ++ igb_vlvf_set(adapter, vid, TRUE, hw_queue); ++ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ ++ /* Copy feature flags from netdev to the vlan netdev for this vid. ++ * This allows things like TSO to bubble down to our vlan device. ++ */ ++ v_netdev = vlan_group_get_device(vadapter->vlgrp, vid); ++ v_netdev->features |= adapter->netdev->features; ++ vlan_group_set_device(vadapter->vlgrp, vid, v_netdev); ++#endif ++ ++ return; ++} ++void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ vlan_group_set_device(vadapter->vlgrp, vid, NULL); ++ /* remove vlan from VLVF table array */ ++ igb_vlvf_set(adapter, vid, FALSE, hw_queue); ++ ++ ++ return; ++} ++ ++static int igb_vmdq_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 status; ++ ++ if (hw->phy.media_type == e1000_media_type_copper) { ++ ++ ecmd->supported = (SUPPORTED_10baseT_Half | ++ SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | ++ SUPPORTED_100baseT_Full | ++ SUPPORTED_1000baseT_Full| ++ SUPPORTED_Autoneg | ++ SUPPORTED_TP); ++ ecmd->advertising = ADVERTISED_TP; ++ ++ if (hw->mac.autoneg == 1) { ++ ecmd->advertising |= ADVERTISED_Autoneg; ++ /* the e1000 autoneg seems to match ethtool nicely */ ++ ecmd->advertising |= hw->phy.autoneg_advertised; ++ } ++ ++ ecmd->port = PORT_TP; ++ ecmd->phy_address = hw->phy.addr; ++ } else { ++ ecmd->supported = (SUPPORTED_1000baseT_Full | ++ SUPPORTED_FIBRE | ++ SUPPORTED_Autoneg); ++ ++ ecmd->advertising = (ADVERTISED_1000baseT_Full | ++ ADVERTISED_FIBRE | ++ ADVERTISED_Autoneg); ++ ++ ecmd->port = PORT_FIBRE; ++ } ++ ++ ecmd->transceiver = XCVR_INTERNAL; ++ ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ ++ if (status & E1000_STATUS_LU) { ++ ++ if ((status & E1000_STATUS_SPEED_1000) || ++ hw->phy.media_type != e1000_media_type_copper) ++ ethtool_cmd_speed_set(ecmd, SPEED_1000); ++ else if (status & E1000_STATUS_SPEED_100) ++ ethtool_cmd_speed_set(ecmd, SPEED_100); ++ else ++ ethtool_cmd_speed_set(ecmd, SPEED_10); ++ ++ if ((status & E1000_STATUS_FD) || ++ hw->phy.media_type != e1000_media_type_copper) ++ ecmd->duplex = DUPLEX_FULL; ++ else ++ ecmd->duplex = DUPLEX_HALF; ++ } else { ++ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); ++ 
ecmd->duplex = -1; ++ } ++ ++ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; ++ return 0; ++} ++ ++ ++static u32 igb_vmdq_get_msglevel(struct net_device *netdev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ return adapter->msg_enable; ++} ++ ++static void igb_vmdq_get_drvinfo(struct net_device *netdev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct net_device *main_netdev = adapter->netdev; ++ ++ strncpy(drvinfo->driver, igb_driver_name, 32); ++ strncpy(drvinfo->version, igb_driver_version, 32); ++ ++ strncpy(drvinfo->fw_version, "N/A", 4); ++ snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name, ++ vadapter->rx_ring->queue_index); ++ drvinfo->n_stats = 0; ++ drvinfo->testinfo_len = 0; ++ drvinfo->regdump_len = 0; ++} ++ ++static void igb_vmdq_get_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ ++ struct igb_ring *tx_ring = vadapter->tx_ring; ++ struct igb_ring *rx_ring = vadapter->rx_ring; ++ ++ ring->rx_max_pending = IGB_MAX_RXD; ++ ring->tx_max_pending = IGB_MAX_TXD; ++ ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; ++ ring->rx_pending = rx_ring->count; ++ ring->tx_pending = tx_ring->count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; ++} ++static u32 igb_vmdq_get_rx_csum(struct net_device *netdev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ ++ return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags); ++} ++ ++ ++static struct ethtool_ops igb_vmdq_ethtool_ops = { ++ .get_settings = igb_vmdq_get_settings, ++ .get_drvinfo = igb_vmdq_get_drvinfo, ++ .get_link = ethtool_op_get_link, ++ .get_ringparam = igb_vmdq_get_ringparam, ++ .get_rx_csum = igb_vmdq_get_rx_csum, ++ .get_tx_csum = ethtool_op_get_tx_csum, ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++ .get_msglevel = igb_vmdq_get_msglevel, ++#ifdef NETIF_F_TSO ++ .get_tso = ethtool_op_get_tso, ++#endif ++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR ++ .get_perm_addr = ethtool_op_get_perm_addr, ++#endif ++}; ++ ++void igb_vmdq_set_ethtool_ops(struct net_device *netdev) ++{ ++ SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops); ++} ++ ++ ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.h b/drivers/net/ethernet/intel/igb/igb_vmdq.h +--- a/drivers/net/ethernet/intel/igb/igb_vmdq.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_vmdq.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,43 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". 
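
kcompat.c, added below, carries backports of library routines for old kernels, compiled only under LINUX_VERSION_CODE guards; the matching kcompat.h (not included in this hunk) normally remaps the missing kernel symbols onto these _kc_ replacements. A hedged sketch of that idiom, assuming the usual Intel out-of-tree layout; the exact macro mapping is illustrative rather than quoted from the patch.

	#include <stdarg.h>
	#include <linux/types.h>
	#include <linux/version.h>

	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8))
	/* implementations live in kcompat.c; the macros make callers pick
	 * them up transparently on kernels that lack the real thing */
	extern int _kc_snprintf(char *buf, size_t size, const char *fmt, ...);
	extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt,
				 va_list args);
	#define snprintf  _kc_snprintf
	#define vsnprintf _kc_vsnprintf
	#endif
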
++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IGB_VMDQ_H_ ++#define _IGB_VMDQ_H_ ++ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++int igb_vmdq_open(struct net_device *dev); ++int igb_vmdq_close(struct net_device *dev); ++netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev); ++struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev); ++void igb_vmdq_set_rx_mode(struct net_device *dev); ++int igb_vmdq_set_mac(struct net_device *dev, void *addr); ++int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu); ++void igb_vmdq_tx_timeout(struct net_device *dev); ++void igb_vmdq_vlan_rx_register(struct net_device *dev, ++ struct vlan_group *grp); ++void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); ++void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); ++void igb_vmdq_set_ethtool_ops(struct net_device *netdev); ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++#endif /* _IGB_VMDQ_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.c b/drivers/net/ethernet/intel/igb/kcompat.c +--- a/drivers/net/ethernet/intel/igb/kcompat.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/kcompat.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,2082 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "igb.h" ++#include "kcompat.h" ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++/* From lib/vsprintf.c */ ++#include ++ ++static int skip_atoi(const char **s) ++{ ++ int i=0; ++ ++ while (isdigit(**s)) ++ i = i*10 + *((*s)++) - '0'; ++ return i; ++} ++ ++#define _kc_ZEROPAD 1 /* pad with zero */ ++#define _kc_SIGN 2 /* unsigned/signed long */ ++#define _kc_PLUS 4 /* show plus */ ++#define _kc_SPACE 8 /* space if plus */ ++#define _kc_LEFT 16 /* left justified */ ++#define _kc_SPECIAL 32 /* 0x */ ++#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ ++ ++static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) ++{ ++ char c,sign,tmp[66]; ++ const char *digits; ++ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; ++ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; ++ int i; ++ ++ digits = (type & _kc_LARGE) ? 
large_digits : small_digits; ++ if (type & _kc_LEFT) ++ type &= ~_kc_ZEROPAD; ++ if (base < 2 || base > 36) ++ return 0; ++ c = (type & _kc_ZEROPAD) ? '0' : ' '; ++ sign = 0; ++ if (type & _kc_SIGN) { ++ if (num < 0) { ++ sign = '-'; ++ num = -num; ++ size--; ++ } else if (type & _kc_PLUS) { ++ sign = '+'; ++ size--; ++ } else if (type & _kc_SPACE) { ++ sign = ' '; ++ size--; ++ } ++ } ++ if (type & _kc_SPECIAL) { ++ if (base == 16) ++ size -= 2; ++ else if (base == 8) ++ size--; ++ } ++ i = 0; ++ if (num == 0) ++ tmp[i++]='0'; ++ else while (num != 0) ++ tmp[i++] = digits[do_div(num,base)]; ++ if (i > precision) ++ precision = i; ++ size -= precision; ++ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { ++ while(size-->0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ } ++ if (sign) { ++ if (buf <= end) ++ *buf = sign; ++ ++buf; ++ } ++ if (type & _kc_SPECIAL) { ++ if (base==8) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } else if (base==16) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ if (buf <= end) ++ *buf = digits[33]; ++ ++buf; ++ } ++ } ++ if (!(type & _kc_LEFT)) { ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = c; ++ ++buf; ++ } ++ } ++ while (i < precision--) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } ++ while (i-- > 0) { ++ if (buf <= end) ++ *buf = tmp[i]; ++ ++buf; ++ } ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ return buf; ++} ++ ++int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) ++{ ++ int len; ++ unsigned long long num; ++ int i, base; ++ char *str, *end, c; ++ const char *s; ++ ++ int flags; /* flags to number() */ ++ ++ int field_width; /* width of output field */ ++ int precision; /* min. # of digits for integers; max ++ number of chars for from string */ ++ int qualifier; /* 'h', 'l', or 'L' for integer fields */ ++ /* 'z' support added 23/7/1999 S.H. 
*/ ++ /* 'z' changed to 'Z' --davidm 1/25/99 */ ++ ++ str = buf; ++ end = buf + size - 1; ++ ++ if (end < buf - 1) { ++ end = ((void *) -1); ++ size = end - buf + 1; ++ } ++ ++ for (; *fmt ; ++fmt) { ++ if (*fmt != '%') { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ continue; ++ } ++ ++ /* process flags */ ++ flags = 0; ++ repeat: ++ ++fmt; /* this also skips first '%' */ ++ switch (*fmt) { ++ case '-': flags |= _kc_LEFT; goto repeat; ++ case '+': flags |= _kc_PLUS; goto repeat; ++ case ' ': flags |= _kc_SPACE; goto repeat; ++ case '#': flags |= _kc_SPECIAL; goto repeat; ++ case '0': flags |= _kc_ZEROPAD; goto repeat; ++ } ++ ++ /* get field width */ ++ field_width = -1; ++ if (isdigit(*fmt)) ++ field_width = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ field_width = va_arg(args, int); ++ if (field_width < 0) { ++ field_width = -field_width; ++ flags |= _kc_LEFT; ++ } ++ } ++ ++ /* get the precision */ ++ precision = -1; ++ if (*fmt == '.') { ++ ++fmt; ++ if (isdigit(*fmt)) ++ precision = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ precision = va_arg(args, int); ++ } ++ if (precision < 0) ++ precision = 0; ++ } ++ ++ /* get the conversion qualifier */ ++ qualifier = -1; ++ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { ++ qualifier = *fmt; ++ ++fmt; ++ } ++ ++ /* default base */ ++ base = 10; ++ ++ switch (*fmt) { ++ case 'c': ++ if (!(flags & _kc_LEFT)) { ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ c = (unsigned char) va_arg(args, int); ++ if (str <= end) ++ *str = c; ++ ++str; ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 's': ++ s = va_arg(args, char *); ++ if (!s) ++ s = ""; ++ ++ len = strnlen(s, precision); ++ ++ if (!(flags & _kc_LEFT)) { ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ for (i = 0; i < len; ++i) { ++ if (str <= end) ++ *str = *s; ++ ++str; ++s; ++ } ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 'p': ++ if (field_width == -1) { ++ field_width = 2*sizeof(void *); ++ flags |= _kc_ZEROPAD; ++ } ++ str = number(str, end, ++ (unsigned long) va_arg(args, void *), ++ 16, field_width, precision, flags); ++ continue; ++ ++ ++ case 'n': ++ /* FIXME: ++ * What does C99 say about the overflow case here? 
*/ ++ if (qualifier == 'l') { ++ long * ip = va_arg(args, long *); ++ *ip = (str - buf); ++ } else if (qualifier == 'Z') { ++ size_t * ip = va_arg(args, size_t *); ++ *ip = (str - buf); ++ } else { ++ int * ip = va_arg(args, int *); ++ *ip = (str - buf); ++ } ++ continue; ++ ++ case '%': ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ continue; ++ ++ /* integer number formats - set up the flags and "break" */ ++ case 'o': ++ base = 8; ++ break; ++ ++ case 'X': ++ flags |= _kc_LARGE; ++ case 'x': ++ base = 16; ++ break; ++ ++ case 'd': ++ case 'i': ++ flags |= _kc_SIGN; ++ case 'u': ++ break; ++ ++ default: ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ if (*fmt) { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ } else { ++ --fmt; ++ } ++ continue; ++ } ++ if (qualifier == 'L') ++ num = va_arg(args, long long); ++ else if (qualifier == 'l') { ++ num = va_arg(args, unsigned long); ++ if (flags & _kc_SIGN) ++ num = (signed long) num; ++ } else if (qualifier == 'Z') { ++ num = va_arg(args, size_t); ++ } else if (qualifier == 'h') { ++ num = (unsigned short) va_arg(args, int); ++ if (flags & _kc_SIGN) ++ num = (signed short) num; ++ } else { ++ num = va_arg(args, unsigned int); ++ if (flags & _kc_SIGN) ++ num = (signed int) num; ++ } ++ str = number(str, end, num, base, ++ field_width, precision, flags); ++ } ++ if (str <= end) ++ *str = '\0'; ++ else if (size > 0) ++ /* don't write out a null byte if the buf size is zero */ ++ *end = '\0'; ++ /* the trailing null byte doesn't count towards the total ++ * ++str; ++ */ ++ return str-buf; ++} ++ ++int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = _kc_vsnprintf(buf,size,fmt,args); ++ va_end(args); ++ return i; ++} ++#endif /* < 2.4.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#if defined(CONFIG_HIGHMEM) ++ ++#ifndef PCI_DRAM_OFFSET ++#define PCI_DRAM_OFFSET 0 ++#endif ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + ++ PCI_DRAM_OFFSET); ++} ++ ++#else /* CONFIG_HIGHMEM */ ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return pci_map_single(dev, (void *)page_address(page) + offset, size, ++ direction); ++} ++ ++#endif /* CONFIG_HIGHMEM */ ++ ++void ++_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, ++ int direction) ++{ ++ return pci_unmap_single(dev, dma_addr, size, direction); ++} ++ ++#endif /* 2.4.13 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++int ++_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) ++{ ++ if (!pci_dma_supported(dev, mask)) ++ return -EIO; ++ dev->dma_mask = mask; ++ return 0; ++} ++ ++int ++_kc_pci_request_regions(struct pci_dev *dev, char *res_name) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) { ++ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } else if 
(pci_resource_flags(dev, i) & IORESOURCE_MEM) { ++ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } ++ } ++ return 0; ++} ++ ++void ++_kc_pci_release_regions(struct pci_dev *dev) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) ++ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ ++ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) ++ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ } ++} ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++struct net_device * ++_kc_alloc_etherdev(int sizeof_priv) ++{ ++ struct net_device *dev; ++ int alloc_size; ++ ++ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; ++ dev = kzalloc(alloc_size, GFP_KERNEL); ++ if (!dev) ++ return NULL; ++ ++ if (sizeof_priv) ++ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); ++ dev->name[0] = '\0'; ++ ether_setup(dev); ++ ++ return dev; ++} ++ ++int ++_kc_is_valid_ether_addr(u8 *addr) ++{ ++ const char zaddr[6] = { 0, }; ++ ++ return !(addr[0] & 1) && memcmp(addr, zaddr, 6); ++} ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++int ++_kc_pci_set_power_state(struct pci_dev *dev, int state) ++{ ++ return 0; ++} ++ ++int ++_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) ++{ ++ return 0; ++} ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, ++ int off, int size) ++{ ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ frag->page = page; ++ frag->page_offset = off; ++ frag->size = size; ++ skb_shinfo(skb)->nr_frags = i + 1; ++} ++ ++/* ++ * Original Copyright: ++ * find_next_bit.c: fallback find next bit implementation ++ * ++ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. ++ * Written by David Howells (dhowells@redhat.com) ++ */ ++ ++/** ++ * find_next_bit - find the next set bit in a memory region ++ * @addr: The address to base the search on ++ * @offset: The bitnumber to start searching at ++ * @size: The maximum size to search ++ */ ++unsigned long find_next_bit(const unsigned long *addr, unsigned long size, ++ unsigned long offset) ++{ ++ const unsigned long *p = addr + BITOP_WORD(offset); ++ unsigned long result = offset & ~(BITS_PER_LONG-1); ++ unsigned long tmp; ++ ++ if (offset >= size) ++ return size; ++ size -= result; ++ offset %= BITS_PER_LONG; ++ if (offset) { ++ tmp = *(p++); ++ tmp &= (~0UL << offset); ++ if (size < BITS_PER_LONG) ++ goto found_first; ++ if (tmp) ++ goto found_middle; ++ size -= BITS_PER_LONG; ++ result += BITS_PER_LONG; ++ } ++ while (size & ~(BITS_PER_LONG-1)) { ++ if ((tmp = *(p++))) ++ goto found_middle; ++ result += BITS_PER_LONG; ++ size -= BITS_PER_LONG; ++ } ++ if (!size) ++ return result; ++ tmp = *p; ++ ++found_first: ++ tmp &= (~0UL >> (BITS_PER_LONG - size)); ++ if (tmp == 0UL) /* Are any bits set? */ ++ return result + size; /* Nope. */ ++found_middle: ++ return result + ffs(tmp); ++} ++ ++size_t _kc_strlcpy(char *dest, const char *src, size_t size) ++{ ++ size_t ret = strlen(src); ++ ++ if (size) { ++ size_t len = (ret >= size) ? 
size - 1 : ret; ++ memcpy(dest, src, len); ++ dest[len] = '\0'; ++ } ++ return ret; ++} ++ ++#ifndef do_div ++#if BITS_PER_LONG == 32 ++uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) ++{ ++ uint64_t rem = *n; ++ uint64_t b = base; ++ uint64_t res, d = 1; ++ uint32_t high = rem >> 32; ++ ++ /* Reduce the thing a bit first */ ++ res = 0; ++ if (high >= base) { ++ high /= base; ++ res = (uint64_t) high << 32; ++ rem -= (uint64_t) (high*base) << 32; ++ } ++ ++ while ((int64_t)b > 0 && b < rem) { ++ b = b+b; ++ d = d+d; ++ } ++ ++ do { ++ if (rem >= b) { ++ rem -= b; ++ res += d; ++ } ++ b >>= 1; ++ d >>= 1; ++ } while (d); ++ ++ *n = res; ++ return rem; ++} ++#endif /* BITS_PER_LONG == 32 */ ++#endif /* do_div */ ++#endif /* 2.6.0 => 2.4.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = vsnprintf(buf, size, fmt, args); ++ va_end(args); ++ return (i >= size) ? (size - 1) : i; ++} ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) ++char *_kc_kstrdup(const char *s, unsigned int gfp) ++{ ++ size_t len; ++ char *buf; ++ ++ if (!s) ++ return NULL; ++ ++ len = strlen(s) + 1; ++ buf = kmalloc(len, gfp); ++ if (buf) ++ memcpy(buf, s, len); ++ return buf; ++} ++#endif /* < 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++void *_kc_kzalloc(size_t size, int flags) ++{ ++ void *ret = kmalloc(size, flags); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif /* <= 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++int _kc_skb_pad(struct sk_buff *skb, int pad) ++{ ++ int ntail; ++ ++ /* If the skbuff is non linear tailroom is always zero.. 
*/ ++ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { ++ memset(skb->data+skb->len, 0, pad); ++ return 0; ++ } ++ ++ ntail = skb->data_len + pad - (skb->end - skb->tail); ++ if (likely(skb_cloned(skb) || ntail > 0)) { ++ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) ++ goto free_skb; ++ } ++ ++#ifdef MAX_SKB_FRAGS ++ if (skb_is_nonlinear(skb) && ++ !__pskb_pull_tail(skb, skb->data_len)) ++ goto free_skb; ++ ++#endif ++ memset(skb->data + skb->len, 0, pad); ++ return 0; ++ ++free_skb: ++ kfree_skb(skb); ++ return -ENOMEM; ++} ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) ++int _kc_pci_save_state(struct pci_dev *pdev) ++{ ++ struct net_device *netdev = pci_get_drvdata(pdev); ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset, pcie_link_status; ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++ /* no ->dev for 2.4 kernels */ ++ WARN_ON(pdev->dev.driver_data == NULL); ++#endif ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset) { ++ if (!pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ } ++ pci_config_space_ich8lan(); ++#ifdef HAVE_PCI_ERS ++ if (adapter->config_space == NULL) ++#else ++ WARN_ON(adapter->config_space != NULL); ++#endif ++ adapter->config_space = kmalloc(size, GFP_KERNEL); ++ if (!adapter->config_space) { ++ printk(KERN_ERR "Out of memory in pci_save_state\n"); ++ return -ENOMEM; ++ } ++ for (i = 0; i < (size / 4); i++) ++ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); ++ return 0; ++} ++ ++void _kc_pci_restore_state(struct pci_dev *pdev) ++{ ++ struct net_device *netdev = pci_get_drvdata(pdev); ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset; ++ u16 pcie_link_status; ++ ++ if (adapter->config_space != NULL) { ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset && ++ !pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ ++ pci_config_space_ich8lan(); ++ for (i = 0; i < (size / 4); i++) ++ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); ++#ifndef HAVE_PCI_ERS ++ kfree(adapter->config_space); ++ adapter->config_space = NULL; ++#endif ++ } ++} ++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ ++ ++#ifdef HAVE_PCI_ERS ++void _kc_free_netdev(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ ++ kfree(adapter->config_space); ++#ifdef CONFIG_SYSFS ++ if (netdev->reg_state == NETREG_UNINITIALIZED) { ++ kfree((char *)netdev - netdev->padded); ++ } else { ++ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); ++ netdev->reg_state = NETREG_RELEASED; ++ class_device_put(&netdev->class_dev); ++ } ++#else ++ kfree((char *)netdev - netdev->padded); ++#endif ++} ++#endif ++ ++void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) ++{ ++ void *p; ++ ++ p = kzalloc(len, gfp); ++ if (p) ++ memcpy(p, src, len); ++ return p; ++} ++#endif /* <= 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) ++{ ++ return ((struct adapter_struct *)netdev_priv(netdev))->pdev; ++} ++#endif /* < 2.6.21 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++/* hexdump code taken from lib/hexdump.c */ ++static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, ++ int groupsize, unsigned char *linebuf, ++ size_t linebuflen, bool ascii) ++{ ++ const u8 *ptr = buf; ++ u8 ch; ++ int j, lx = 0; ++ int ascii_column; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ if (!len) ++ goto nil; ++ if (len > rowsize) /* limit to one line at a time */ ++ len = rowsize; ++ if ((len % groupsize) != 0) /* no mixed size output */ ++ groupsize = 1; ++ ++ switch (groupsize) { ++ case 8: { ++ const u64 *ptr8 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%16.16llx", j ? " " : "", ++ (unsigned long long)*(ptr8 + j)); ++ ascii_column = 17 * ngroups + 2; ++ break; ++ } ++ ++ case 4: { ++ const u32 *ptr4 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%8.8x", j ? " " : "", *(ptr4 + j)); ++ ascii_column = 9 * ngroups + 2; ++ break; ++ } ++ ++ case 2: { ++ const u16 *ptr2 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%4.4x", j ? " " : "", *(ptr2 + j)); ++ ascii_column = 5 * ngroups + 2; ++ break; ++ } ++ ++ default: ++ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ++ ch = ptr[j]; ++ linebuf[lx++] = hex_asc(ch >> 4); ++ linebuf[lx++] = hex_asc(ch & 0x0f); ++ linebuf[lx++] = ' '; ++ } ++ if (j) ++ lx--; ++ ++ ascii_column = 3 * rowsize + 2; ++ break; ++ } ++ if (!ascii) ++ goto nil; ++ ++ while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) ++ linebuf[lx++] = ' '; ++ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) ++ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] ++ : '.'; ++nil: ++ linebuf[lx++] = '\0'; ++} ++ ++void _kc_print_hex_dump(const char *level, ++ const char *prefix_str, int prefix_type, ++ int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii) ++{ ++ const u8 *ptr = buf; ++ int i, linelen, remaining = len; ++ unsigned char linebuf[200]; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ for (i = 0; i < len; i += rowsize) { ++ linelen = min(remaining, rowsize); ++ remaining -= rowsize; ++ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, ++ linebuf, sizeof(linebuf), ascii); ++ ++ switch (prefix_type) { ++ case DUMP_PREFIX_ADDRESS: ++ printk("%s%s%*p: %s\n", level, prefix_str, ++ (int)(2 * sizeof(void *)), ptr + i, linebuf); ++ break; ++ case DUMP_PREFIX_OFFSET: ++ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); ++ break; ++ default: ++ printk("%s%s%s\n", level, prefix_str, linebuf); ++ break; ++ } ++ } ++} ++ ++#ifdef HAVE_I2C_SUPPORT ++struct i2c_client * ++_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) ++{ ++ struct i2c_client *client; ++ int status; ++ ++ client = kzalloc(sizeof *client, GFP_KERNEL); ++ if (!client) ++ return NULL; ++ ++ client->adapter = adap; ++ ++ client->dev.platform_data = info->platform_data; ++ ++ client->flags = info->flags; ++ client->addr = info->addr; ++ ++ strlcpy(client->name, info->type, sizeof(client->name)); ++ ++ /* Check for address business */ ++ status = i2c_check_addr(adap, client->addr); ++ if (status) ++ goto out_err; ++ ++ client->dev.parent = &client->adapter->dev; ++ client->dev.bus = &i2c_bus_type; ++ ++ status = i2c_attach_client(client); ++ if (status) ++ goto out_err; ++ ++ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", ++ client->name, dev_name(&client->dev)); ++ ++ return client; ++ ++out_err: ++ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x " ++ "(%d)\n", client->name, client->addr, status); ++ kfree(client); ++ return NULL; ++} ++#endif /* HAVE_I2C_SUPPORT */ ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifdef NAPI ++struct net_device *napi_to_poll_dev(const struct napi_struct *napi) ++{ ++ struct adapter_q_vector *q_vector = container_of(napi, ++ struct adapter_q_vector, ++ napi); ++ return &q_vector->poll_dev; ++} ++ ++int __kc_adapter_clean(struct net_device *netdev, int *budget) ++{ ++ int work_done; ++ int work_to_do = min(*budget, netdev->quota); ++ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ ++ struct napi_struct *napi = netdev->priv; ++ work_done = napi->poll(napi, work_to_do); ++ *budget -= work_done; ++ netdev->quota -= work_done; ++ return (work_done >= work_to_do) ? 
1 : 0; ++} ++#endif /* NAPI */ ++#endif /* <= 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) ++{ ++ struct pci_dev *parent = pdev->bus->self; ++ u16 link_state; ++ int pos; ++ ++ if (!parent) ++ return; ++ ++ pos = pci_find_capability(parent, PCI_CAP_ID_EXP); ++ if (pos) { ++ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); ++ link_state &= ~state; ++ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); ++ } ++} ++#endif /* < 2.6.26 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++#ifdef HAVE_TX_MQ ++void _kc_netif_tx_stop_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_stop_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_stop_subqueue(netdev, i); ++} ++void _kc_netif_tx_wake_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_wake_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_wake_subqueue(netdev, i); ++} ++void _kc_netif_tx_start_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_start_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_start_subqueue(netdev, i); ++} ++#endif /* HAVE_TX_MQ */ ++ ++void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) ++{ ++ va_list args; ++ ++ printk(KERN_WARNING "------------[ cut here ]------------\n"); ++ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); ++ va_start(args, fmt); ++ vprintk(fmt, args); ++ va_end(args); ++ ++ dump_stack(); ++} ++#endif /* __VMKLNX__ */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++ ++int ++_kc_pci_prepare_to_sleep(struct pci_dev *dev) ++{ ++ pci_power_t target_state; ++ int error; ++ ++ target_state = pci_choose_state(dev, PMSG_SUSPEND); ++ ++ pci_enable_wake(dev, target_state, true); ++ ++ error = pci_set_power_state(dev, target_state); ++ ++ if (error) ++ pci_enable_wake(dev, target_state, false); ++ ++ return error; ++} ++ ++int ++_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) ++{ ++ int err; ++ ++ err = pci_enable_wake(dev, PCI_D3cold, enable); ++ if (err) ++ goto out; ++ ++ err = pci_enable_wake(dev, PCI_D3hot, enable); ++ ++out: ++ return err; ++} ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) ++{ ++ u16 old_cmd, cmd; ++ ++ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); ++ if (enable) ++ cmd = old_cmd | PCI_COMMAND_MASTER; ++ else ++ cmd = old_cmd & ~PCI_COMMAND_MASTER; ++ if (cmd != old_cmd) { ++ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", ++ enable ? 
"enabling" : "disabling"); ++ pci_write_config_word(pdev, PCI_COMMAND, cmd); ++ } ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) ++ pdev->is_busmaster = enable; ++#endif ++} ++ ++void _kc_pci_clear_master(struct pci_dev *dev) ++{ ++ __kc_pci_set_master(dev, false); ++} ++#endif /* < 2.6.29 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) ++{ ++ int num_vf = 0; ++#ifdef CONFIG_PCI_IOV ++ struct pci_dev *vfdev; ++ ++ /* loop through all ethernet devices starting at PF dev */ ++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); ++ while (vfdev) { ++ if (vfdev->is_virtfn && vfdev->physfn == dev) ++ num_vf++; ++ ++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); ++ } ++ ++#endif ++ return num_vf; ++} ++#endif /* RHEL_RELEASE_CODE */ ++#endif /* < 2.6.34 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++#ifdef HAVE_TX_MQ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) ++#ifndef CONFIG_NETDEVICES_MULTIQUEUE ++int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) ++{ ++ unsigned int real_num = dev->real_num_tx_queues; ++ struct Qdisc *qdisc; ++ int i; ++ ++ if (txq < 1 || txq > dev->num_tx_queues) ++ return -EINVAL; ++ ++ else if (txq > real_num) ++ dev->real_num_tx_queues = txq; ++ else if (txq < real_num) { ++ dev->real_num_tx_queues = txq; ++ for (i = txq; i < dev->num_tx_queues; i++) { ++ qdisc = netdev_get_tx_queue(dev, i)->qdisc; ++ if (qdisc) { ++ spin_lock_bh(qdisc_lock(qdisc)); ++ qdisc_reset(qdisc); ++ spin_unlock_bh(qdisc_lock(qdisc)); ++ } ++ } ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ ++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ ++#endif /* HAVE_TX_MQ */ ++ ++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, ++ const void __user *from, size_t count) ++{ ++ loff_t pos = *ppos; ++ size_t res; ++ ++ if (pos < 0) ++ return -EINVAL; ++ if (pos >= available || !count) ++ return 0; ++ if (count > available - pos) ++ count = available - pos; ++ res = copy_from_user(to + pos, from, count); ++ if (res == count) ++ return -EFAULT; ++ count -= res; ++ *ppos = pos + count; ++ return count; ++} ++ ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++static const u32 _kc_flags_dup_features = ++ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); ++ ++u32 _kc_ethtool_op_get_flags(struct net_device *dev) ++{ ++ return dev->features & _kc_flags_dup_features; ++} ++ ++int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) ++{ ++ if (data & ~supported) ++ return -EINVAL; ++ ++ dev->features = ((dev->features & ~_kc_flags_dup_features) | ++ (data & _kc_flags_dup_features)); ++ return 0; ++} ++#endif /* < 2.6.36 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) ++ ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ ++#endif /* < 2.6.39 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) ++void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, ++ int off, int size, unsigned int truesize) ++{ ++ 
skb_fill_page_desc(skb, i, page, off, size); ++ skb->len += size; ++ skb->data_len += size; ++ skb->truesize += truesize; ++} ++ ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++int _kc_simple_open(struct inode *inode, struct file *file) ++{ ++ if (inode->i_private) ++ file->private_data = inode->i_private; ++ ++ return 0; ++} ++#endif /* SLE_VERSION < 11,3,0 */ ++ ++#endif /* < 3.4.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) ++static inline int __kc_pcie_cap_version(struct pci_dev *dev) ++{ ++ int pos; ++ u16 reg16; ++ ++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return 0; ++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); ++ return reg16 & PCI_EXP_FLAGS_VERS; ++} ++ ++static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) ++{ ++ return true; ++} ++ ++static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ type == PCI_EXP_TYPE_ENDPOINT || ++ type == PCI_EXP_TYPE_LEG_END; ++} ++ ++static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ int pos; ++ u16 pcie_flags_reg; ++ ++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return false; ++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ (type == PCI_EXP_TYPE_DOWNSTREAM && ++ pcie_flags_reg & PCI_EXP_FLAGS_SLOT); ++} ++ ++static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ type == PCI_EXP_TYPE_RC_EC; ++} ++ ++static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) ++{ ++ if (!pci_is_pcie(dev)) ++ return false; ++ ++ switch (pos) { ++ case PCI_EXP_FLAGS_TYPE: ++ return true; ++ case PCI_EXP_DEVCAP: ++ case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVSTA: ++ return __kc_pcie_cap_has_devctl(dev); ++ case PCI_EXP_LNKCAP: ++ case PCI_EXP_LNKCTL: ++ case PCI_EXP_LNKSTA: ++ return __kc_pcie_cap_has_lnkctl(dev); ++ case PCI_EXP_SLTCAP: ++ case PCI_EXP_SLTCTL: ++ case PCI_EXP_SLTSTA: ++ return __kc_pcie_cap_has_sltctl(dev); ++ case PCI_EXP_RTCTL: ++ case PCI_EXP_RTCAP: ++ case PCI_EXP_RTSTA: ++ return __kc_pcie_cap_has_rtctl(dev); ++ case PCI_EXP_DEVCAP2: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCAP2: ++ case PCI_EXP_LNKCTL2: ++ case PCI_EXP_LNKSTA2: ++ return __kc_pcie_cap_version(dev) > 1; ++ default: ++ return false; ++ } ++} ++ ++/* ++ * Note that these accessor functions are only for the "PCI Express ++ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the ++ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) ++ */ ++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) ++{ ++ int ret; ++ ++ *val = 0; ++ if (pos & 1) ++ return -EINVAL; ++ ++ if (__kc_pcie_capability_reg_implemented(dev, pos)) { ++ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); ++ /* ++ * Reset *val to 0 if pci_read_config_word() fails, it may ++ * have been written as 0xFFFF if hardware error happens ++ * during pci_read_config_word(). 
++ */ ++ if (ret) ++ *val = 0; ++ return ret; ++ } ++ ++ /* ++ * For Functions that do not implement the Slot Capabilities, ++ * Slot Status, and Slot Control registers, these spaces must ++ * be hardwired to 0b, with the exception of the Presence Detect ++ * State bit in the Slot Status register of Downstream Ports, ++ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) ++ */ ++ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && ++ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { ++ *val = PCI_EXP_SLTSTA_PDS; ++ } ++ ++ return 0; ++} ++ ++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) ++{ ++ if (pos & 1) ++ return -EINVAL; ++ ++ if (!__kc_pcie_capability_reg_implemented(dev, pos)) ++ return 0; ++ ++ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); ++} ++ ++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, ++ u16 clear, u16 set) ++{ ++ int ret; ++ u16 val; ++ ++ ret = __kc_pcie_capability_read_word(dev, pos, &val); ++ if (!ret) { ++ val &= ~clear; ++ val |= set; ++ ret = __kc_pcie_capability_write_word(dev, pos, val); ++ } ++ ++ return ret; ++} ++ ++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, ++ u16 clear) ++{ ++ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); ++} ++#endif /* < 3.7.0 */ ++ ++/****************************************************************************** ++ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, ++ * inferred copyright from kernel ++ */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) ++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, ++ int target, unsigned short *fragoff, int *flags) ++{ ++ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); ++ u8 nexthdr = ipv6_hdr(skb)->nexthdr; ++ unsigned int len; ++ bool found; ++ ++#define __KC_IP6_FH_F_FRAG BIT(0) ++#define __KC_IP6_FH_F_AUTH BIT(1) ++#define __KC_IP6_FH_F_SKIP_RH BIT(2) ++ ++ if (fragoff) ++ *fragoff = 0; ++ ++ if (*offset) { ++ struct ipv6hdr _ip6, *ip6; ++ ++ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); ++ if (!ip6 || (ip6->version != 6)) { ++ printk(KERN_ERR "IPv6 header not found\n"); ++ return -EBADMSG; ++ } ++ start = *offset + sizeof(struct ipv6hdr); ++ nexthdr = ip6->nexthdr; ++ } ++ len = skb->len - start; ++ ++ do { ++ struct ipv6_opt_hdr _hdr, *hp; ++ unsigned int hdrlen; ++ found = (nexthdr == target); ++ ++ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { ++ if (target < 0 || found) ++ break; ++ return -ENOENT; ++ } ++ ++ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); ++ if (!hp) ++ return -EBADMSG; ++ ++ if (nexthdr == NEXTHDR_ROUTING) { ++ struct ipv6_rt_hdr _rh, *rh; ++ ++ rh = skb_header_pointer(skb, start, sizeof(_rh), ++ &_rh); ++ if (!rh) ++ return -EBADMSG; ++ ++ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && ++ rh->segments_left == 0) ++ found = false; ++ } ++ ++ if (nexthdr == NEXTHDR_FRAGMENT) { ++ unsigned short _frag_off; ++ __be16 *fp; ++ ++ if (flags) /* Indicate that this is a fragment */ ++ *flags |= __KC_IP6_FH_F_FRAG; ++ fp = skb_header_pointer(skb, ++ start+offsetof(struct frag_hdr, ++ frag_off), ++ sizeof(_frag_off), ++ &_frag_off); ++ if (!fp) ++ return -EBADMSG; ++ ++ _frag_off = ntohs(*fp) & ~0x7; ++ if (_frag_off) { ++ if (target < 0 && ++ ((!ipv6_ext_hdr(hp->nexthdr)) || ++ hp->nexthdr == NEXTHDR_NONE)) { ++ if (fragoff) ++ *fragoff = _frag_off; ++ return hp->nexthdr; ++ } ++ return -ENOENT; ++ } ++ hdrlen = 8; ++ } else if (nexthdr == 
NEXTHDR_AUTH) { ++ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) ++ break; ++ hdrlen = (hp->hdrlen + 2) << 2; ++ } else ++ hdrlen = ipv6_optlen(hp); ++ ++ if (!found) { ++ nexthdr = hp->nexthdr; ++ len -= hdrlen; ++ start += hdrlen; ++ } ++ } while (!found); ++ ++ *offset = start; ++ return nexthdr; ++} ++#endif /* < 3.8.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) ++#endif /* 3.9.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, const unsigned char *addr, ++ u16 flags) ++#else ++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr, u16 flags) ++#endif ++{ ++ int err = -EINVAL; ++ ++ /* If aging addresses are supported device will need to ++ * implement its own handler for this. ++ */ ++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", dev->name); ++ return err; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) ++ err = dev_uc_add_excl(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_add_excl(dev, addr); ++ ++ /* Only return duplicate errors if NLM_F_EXCL is set */ ++ if (err == -EEXIST && !(flags & NLM_F_EXCL)) ++ err = 0; ++ ++ return err; ++} ++ ++#ifdef USE_CONST_DEV_UC_CHAR ++#ifdef HAVE_FDB_DEL_NLATTR ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, const unsigned char *addr) ++#else ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ const unsigned char *addr) ++#endif ++#else ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr) ++#endif ++{ ++ int err = -EINVAL; ++ ++ /* If aging addresses are supported device will need to ++ * implement its own handler for this. 
++ */ ++ if (!(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", dev->name); ++ return err; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) ++ err = dev_uc_del(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_del(dev, addr); ++ ++ return err; ++} ++ ++#endif /* HAVE_FDB_OPS */ ++#ifdef CONFIG_PCI_IOV ++int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) ++{ ++ unsigned int vfs_assigned = 0; ++#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED ++ int pos; ++ struct pci_dev *vfdev; ++ unsigned short dev_id; ++ ++ /* only search if we are a PF */ ++ if (!dev->is_physfn) ++ return 0; ++ ++ /* find SR-IOV capability */ ++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); ++ if (!pos) ++ return 0; ++ ++ /* ++ * determine the device ID for the VFs, the vendor ID will be the ++ * same as the PF so there is no need to check for that one ++ */ ++ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); ++ ++ /* loop through all the VFs to see if we own any that are assigned */ ++ vfdev = pci_get_device(dev->vendor, dev_id, NULL); ++ while (vfdev) { ++ /* ++ * It is considered assigned if it is a virtual function with ++ * our dev as the physical function and the assigned bit is set ++ */ ++ if (vfdev->is_virtfn && (vfdev->physfn == dev) && ++ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) ++ vfs_assigned++; ++ ++ vfdev = pci_get_device(dev->vendor, dev_id, vfdev); ++ } ++ ++#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ ++ return vfs_assigned; ++} ++ ++#endif /* CONFIG_PCI_IOV */ ++#endif /* 3.10.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) ++const unsigned char pcie_link_speed[] = { ++ PCI_SPEED_UNKNOWN, /* 0 */ ++ PCIE_SPEED_2_5GT, /* 1 */ ++ PCIE_SPEED_5_0GT, /* 2 */ ++ PCIE_SPEED_8_0GT, /* 3 */ ++ PCI_SPEED_UNKNOWN, /* 4 */ ++ PCI_SPEED_UNKNOWN, /* 5 */ ++ PCI_SPEED_UNKNOWN, /* 6 */ ++ PCI_SPEED_UNKNOWN, /* 7 */ ++ PCI_SPEED_UNKNOWN, /* 8 */ ++ PCI_SPEED_UNKNOWN, /* 9 */ ++ PCI_SPEED_UNKNOWN, /* A */ ++ PCI_SPEED_UNKNOWN, /* B */ ++ PCI_SPEED_UNKNOWN, /* C */ ++ PCI_SPEED_UNKNOWN, /* D */ ++ PCI_SPEED_UNKNOWN, /* E */ ++ PCI_SPEED_UNKNOWN /* F */ ++}; ++ ++int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, ++ enum pcie_link_width *width) ++{ ++ int ret; ++ ++ *speed = PCI_SPEED_UNKNOWN; ++ *width = PCIE_LNK_WIDTH_UNKNOWN; ++ ++ while (dev) { ++ u16 lnksta; ++ enum pci_bus_speed next_speed; ++ enum pcie_link_width next_width; ++ ++ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); ++ if (ret) ++ return ret; ++ ++ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; ++ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> ++ PCI_EXP_LNKSTA_NLW_SHIFT; ++ ++ if (next_speed < *speed) ++ *speed = next_speed; ++ ++ if (next_width < *width) ++ *width = next_width; ++ ++ dev = dev->bus->self; ++ } ++ ++ return 0; ++} ++ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) ++int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) ++{ ++ int err = dma_set_mask(dev, mask); ++ ++ if (!err) ++ /* coherent mask for the same size will always succeed if ++ * dma_set_mask does. However we store the error anyways, due ++ * to some kernels which use gcc's warn_unused_result on their ++ * definition of dma_set_coherent_mask. 
++ */ ++ err = dma_set_coherent_mask(dev, mask); ++ return err; ++} ++ ++void __kc_netdev_rss_key_fill(void *buffer, size_t len) ++{ ++ /* Set of random keys generated using kernel random number generator */ ++ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, ++ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, ++ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, ++ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, ++ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, ++ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, ++ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; ++ ++ BUG_ON(len > NETDEV_RSS_KEY_LEN); ++ memcpy(buffer, seed, len); ++} ++#endif /* 3.13.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) ++int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, ++ int minvec, int maxvec) ++{ ++ int nvec = maxvec; ++ int rc; ++ ++ if (maxvec < minvec) ++ return -ERANGE; ++ ++ do { ++ rc = pci_enable_msix(dev, entries, nvec); ++ if (rc < 0) { ++ return rc; ++ } else if (rc > 0) { ++ if (rc < minvec) ++ return -ENOSPC; ++ nvec = rc; ++ } ++ } while (rc); ++ ++ return nvec; ++} ++#endif /* 3.14.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct netdev_hw_addr *ha, *tmp; ++ int err; ++ ++ /* first go through and flush out any stale entries */ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (!ha->synced || ha->refcount != 1) ++#else ++ if (!ha->sync_cnt || ha->refcount != 1) ++#endif ++ continue; ++ ++ if (unsync && unsync(dev, ha->addr)) ++ continue; ++ ++ list_del_rcu(&ha->list); ++ kfree_rcu(ha, rcu_head); ++ list->count--; ++ } ++ ++ /* go through and sync new entries to the list */ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (ha->synced) ++#else ++ if (ha->sync_cnt) ++#endif ++ continue; ++ ++ err = sync(dev, ha->addr); ++ if (err) ++ return err; ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ ha->synced = true; ++#else ++ ha->sync_cnt++; ++#endif ++ ha->refcount++; ++ } ++ ++ return 0; ++} ++ ++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct netdev_hw_addr *ha, *tmp; ++ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (!ha->synced) ++#else ++ if (!ha->sync_cnt) ++#endif ++ continue; ++ ++ if (unsync && unsync(dev, ha->addr)) ++ continue; ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ ha->synced = false; ++#else ++ ha->sync_cnt--; ++#endif ++ if (--ha->refcount) ++ continue; ++ ++ list_del_rcu(&ha->list); ++ kfree_rcu(ha, rcu_head); ++ list->count--; ++ } ++} ++ ++#endif /* NETDEV_HW_ADDR_T_UNICAST */ ++#ifndef NETDEV_HW_ADDR_T_MULTICAST ++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct dev_addr_list *da, **next = list; ++ int err; ++ ++ /* first go through and flush out any stale entries */ ++ while ((da = *next) != NULL) { 
++ if (da->da_synced && da->da_users == 1) { ++ if (!unsync || !unsync(dev, da->da_addr)) { ++ *next = da->next; ++ kfree(da); ++ (*count)--; ++ continue; ++ } ++ } ++ next = &da->next; ++ } ++ ++ /* go through and sync new entries to the list */ ++ for (da = *list; da != NULL; da = da->next) { ++ if (da->da_synced) ++ continue; ++ ++ err = sync(dev, da->da_addr); ++ if (err) ++ return err; ++ ++ da->da_synced++; ++ da->da_users++; ++ } ++ ++ return 0; ++} ++ ++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct dev_addr_list *da; ++ ++ while ((da = *list) != NULL) { ++ if (da->da_synced) { ++ if (!unsync || !unsync(dev, da->da_addr)) { ++ da->da_synced--; ++ if (--da->da_users == 0) { ++ *list = da->next; ++ kfree(da); ++ (*count)--; ++ continue; ++ } ++ } ++ } ++ list = &da->next; ++ } ++} ++#endif /* NETDEV_HW_ADDR_T_MULTICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++#endif /* 3.16.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) ++#ifndef NO_PTP_SUPPORT ++static void __kc_sock_efree(struct sk_buff *skb) ++{ ++ sock_put(skb->sk); ++} ++ ++struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) ++{ ++ struct sock *sk = skb->sk; ++ struct sk_buff *clone; ++ ++ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) ++ return NULL; ++ ++ clone = skb_clone(skb, GFP_ATOMIC); ++ if (!clone) { ++ sock_put(sk); ++ return NULL; ++ } ++ ++ clone->sk = sk; ++ clone->destructor = __kc_sock_efree; ++ ++ return clone; ++} ++ ++void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, ++ struct skb_shared_hwtstamps *hwtstamps) ++{ ++ struct sock_exterr_skb *serr; ++ struct sock *sk = skb->sk; ++ int err; ++ ++ sock_hold(sk); ++ ++ *skb_hwtstamps(skb) = *hwtstamps; ++ ++ serr = SKB_EXT_ERR(skb); ++ memset(serr, 0, sizeof(*serr)); ++ serr->ee.ee_errno = ENOMSG; ++ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; ++ ++ err = sock_queue_err_skb(sk, skb); ++ if (err) ++ kfree_skb(skb); ++ ++ sock_put(sk); ++} ++#endif ++ ++/* include headers needed for get_headlen function */ ++#ifdef HAVE_SCTP ++#include ++#endif ++ ++unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len) ++{ ++ union { ++ unsigned char *network; ++ /* l2 headers */ ++ struct ethhdr *eth; ++ struct vlan_hdr *vlan; ++ /* l3 headers */ ++ struct iphdr *ipv4; ++ struct ipv6hdr *ipv6; ++ } hdr; ++ __be16 proto; ++ u8 nexthdr = 0; /* default to not TCP */ ++ u8 hlen; ++ ++ /* this should never happen, but better safe than sorry */ ++ if (max_len < ETH_HLEN) ++ return max_len; ++ ++ /* initialize network frame pointer */ ++ hdr.network = data; ++ ++ /* set first protocol and move network header forward */ ++ proto = hdr.eth->h_proto; ++ hdr.network += ETH_HLEN; ++ ++again: ++ switch (proto) { ++ /* handle any vlan tag if present */ ++ case __constant_htons(ETH_P_8021AD): ++ case __constant_htons(ETH_P_8021Q): ++ if ((hdr.network - data) > (max_len - VLAN_HLEN)) ++ return max_len; ++ ++ proto = hdr.vlan->h_vlan_encapsulated_proto; ++ hdr.network += VLAN_HLEN; ++ goto again; ++ /* handle L3 protocols */ ++ case __constant_htons(ETH_P_IP): ++ if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) ++ return max_len; ++ ++ /* access ihl as a u8 to avoid unaligned access on ia64 */ ++ hlen = (hdr.network[0] & 0x0F) << 2; ++ ++ /* verify hlen meets minimum size requirements */ ++ if (hlen < sizeof(struct iphdr)) ++ return 
hdr.network - data; ++ ++ /* record next protocol if header is present */ ++ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) ++ nexthdr = hdr.ipv4->protocol; ++ ++ hdr.network += hlen; ++ break; ++#ifdef NETIF_F_TSO6 ++ case __constant_htons(ETH_P_IPV6): ++ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) ++ return max_len; ++ ++ /* record next protocol */ ++ nexthdr = hdr.ipv6->nexthdr; ++ hdr.network += sizeof(struct ipv6hdr); ++ break; ++#endif /* NETIF_F_TSO6 */ ++ default: ++ return hdr.network - data; ++ } ++ ++ /* finally sort out L4 */ ++ switch (nexthdr) { ++ case IPPROTO_TCP: ++ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) ++ return max_len; ++ ++ /* access doff as a u8 to avoid unaligned access on ia64 */ ++ hdr.network += max_t(u8, sizeof(struct tcphdr), ++ (hdr.network[12] & 0xF0) >> 2); ++ ++ break; ++ case IPPROTO_UDP: ++ case IPPROTO_UDPLITE: ++ hdr.network += sizeof(struct udphdr); ++ break; ++#ifdef HAVE_SCTP ++ case IPPROTO_SCTP: ++ hdr.network += sizeof(struct sctphdr); ++ break; ++#endif ++ } ++ ++ /* ++ * If everything has gone correctly hdr.network should be the ++ * data section of the packet and will be the end of the header. ++ * If not then it probably represents the end of the last recognized ++ * header. ++ */ ++ return min_t(unsigned int, hdr.network - data, max_len); ++} ++ ++#endif /* < 3.18.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) ++#ifdef HAVE_NET_GET_RANDOM_ONCE ++static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; ++ ++void __kc_netdev_rss_key_fill(void *buffer, size_t len) ++{ ++ BUG_ON(len > sizeof(__kc_netdev_rss_key)); ++ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); ++ memcpy(buffer, __kc_netdev_rss_key, len); ++} ++#endif ++#endif +diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.h b/drivers/net/ethernet/intel/igb/kcompat.h +--- a/drivers/net/ethernet/intel/igb/kcompat.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/kcompat.h 2016-11-14 14:32:08.583567168 +0000 +@@ -0,0 +1,5071 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _KCOMPAT_H_ ++#define _KCOMPAT_H_ ++ ++#ifndef LINUX_VERSION_CODE ++#include ++#else ++#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ ++#ifndef UTS_RELEASE ++/* utsrelease.h changed locations in 2.6.33 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#include ++#else ++#include ++#endif ++#endif ++ ++/* NAPI enable/disable flags here */ ++#define NAPI ++ ++#define adapter_struct igb_adapter ++#define adapter_q_vector igb_q_vector ++#define NAPI ++ ++/* and finally set defines so that the code sees the changes */ ++#ifdef NAPI ++#else ++#endif /* NAPI */ ++ ++/* Dynamic LTR and deeper C-State support disable/enable */ ++ ++/* packet split disable/enable */ ++#ifdef DISABLE_PACKET_SPLIT ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++#define CONFIG_IGB_DISABLE_PACKET_SPLIT ++#endif ++#endif /* DISABLE_PACKET_SPLIT */ ++ ++/* MSI compatibility code for all kernels and drivers */ ++#ifdef DISABLE_PCI_MSI ++#undef CONFIG_PCI_MSI ++#endif ++#ifndef CONFIG_PCI_MSI ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++struct msix_entry { ++ u16 vector; /* kernel uses to write allocated vector */ ++ u16 entry; /* driver uses to specify entry, OS writes */ ++}; ++#endif ++#undef pci_enable_msi ++#define pci_enable_msi(a) -ENOTSUPP ++#undef pci_disable_msi ++#define pci_disable_msi(a) do {} while (0) ++#undef pci_enable_msix ++#define pci_enable_msix(a, b, c) -ENOTSUPP ++#undef pci_disable_msix ++#define pci_disable_msix(a) do {} while (0) ++#define msi_remove_pci_irq_vectors(a) do {} while (0) ++#endif /* CONFIG_PCI_MSI */ ++#ifdef DISABLE_PM ++#undef CONFIG_PM ++#endif ++ ++#ifdef DISABLE_NET_POLL_CONTROLLER ++#undef CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef PMSG_SUSPEND ++#define PMSG_SUSPEND 3 ++#endif ++ ++/* generic boolean compatibility */ ++#undef TRUE ++#undef FALSE ++#define TRUE true ++#define FALSE false ++#ifdef GCC_VERSION ++#if ( GCC_VERSION < 3000 ) ++#define _Bool char ++#endif ++#else ++#define _Bool char ++#endif ++ ++#undef __always_unused ++#define __always_unused __attribute__((__unused__)) ++ ++#undef __maybe_unused ++#define __maybe_unused __attribute__((__unused__)) ++ ++/* kernels less than 2.4.14 don't have this */ ++#ifndef ETH_P_8021Q ++#define ETH_P_8021Q 0x8100 ++#endif ++ ++#ifndef module_param ++#define module_param(v,t,p) MODULE_PARM(v, "i"); ++#endif ++ ++#ifndef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffffffffffffULL ++#endif ++ ++#ifndef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0x00000000ffffffffULL ++#endif ++ ++#ifndef PCI_CAP_ID_EXP ++#define PCI_CAP_ID_EXP 0x10 ++#endif ++ ++#ifndef uninitialized_var ++#define uninitialized_var(x) x = x ++#endif ++ ++#ifndef PCIE_LINK_STATE_L0S ++#define PCIE_LINK_STATE_L0S 1 ++#endif ++#ifndef PCIE_LINK_STATE_L1 ++#define PCIE_LINK_STATE_L1 2 ++#endif ++ ++#ifndef mmiowb ++#ifdef CONFIG_IA64 ++#define mmiowb() asm volatile ("mf.a" ::: "memory") ++#else ++#define mmiowb() ++#endif ++#endif ++ ++#ifndef SET_NETDEV_DEV ++#define SET_NETDEV_DEV(net, pdev) ++#endif ++ ++#if !defined(HAVE_FREE_NETDEV) && ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) ++#define free_netdev(x) kfree(x) ++#endif ++ ++#ifdef HAVE_POLL_CONTROLLER ++#define CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef SKB_DATAREF_SHIFT ++/* if we do not have the infrastructure to detect if skb_header is cloned ++ just return false in all cases */ ++#define skb_header_cloned(x) 0 ++#endif ++ ++#ifndef NETIF_F_GSO ++#define gso_size tso_size ++#define gso_segs tso_segs ++#endif ++ ++#ifndef NETIF_F_GRO ++#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ ++ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) ++#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) ++#endif ++ ++#ifndef NETIF_F_SCTP_CSUM ++#define NETIF_F_SCTP_CSUM 0 ++#endif ++ ++#ifndef NETIF_F_LRO ++#define NETIF_F_LRO (1 << 15) ++#endif ++ ++#ifndef NETIF_F_NTUPLE ++#define NETIF_F_NTUPLE (1 << 27) ++#endif ++ ++#ifndef NETIF_F_ALL_FCOE ++#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ ++ NETIF_F_FSO) ++#endif ++ ++#ifndef IPPROTO_SCTP ++#define IPPROTO_SCTP 132 ++#endif ++ ++#ifndef IPPROTO_UDPLITE ++#define IPPROTO_UDPLITE 136 ++#endif ++ ++#ifndef CHECKSUM_PARTIAL ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#define CHECKSUM_COMPLETE CHECKSUM_HW ++#endif ++ ++#ifndef __read_mostly ++#define __read_mostly ++#endif ++ ++#ifndef MII_RESV1 ++#define MII_RESV1 0x17 /* Reserved... */ ++#endif ++ ++#ifndef unlikely ++#define unlikely(_x) _x ++#define likely(_x) _x ++#endif ++ ++#ifndef WARN_ON ++#define WARN_ON(x) ++#endif ++ ++#ifndef PCI_DEVICE ++#define PCI_DEVICE(vend,dev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID ++#endif ++ ++#ifndef node_online ++#define node_online(node) ((node) == 0) ++#endif ++ ++#ifndef num_online_cpus ++#define num_online_cpus() smp_num_cpus ++#endif ++ ++#ifndef cpu_online ++#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) ++#endif ++ ++#ifndef _LINUX_RANDOM_H ++#include ++#endif ++ ++#ifndef DECLARE_BITMAP ++#ifndef BITS_TO_LONGS ++#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) ++#endif ++#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] ++#endif ++ ++#ifndef VLAN_HLEN ++#define VLAN_HLEN 4 ++#endif ++ ++#ifndef VLAN_ETH_HLEN ++#define VLAN_ETH_HLEN 18 ++#endif ++ ++#ifndef VLAN_ETH_FRAME_LEN ++#define VLAN_ETH_FRAME_LEN 1518 ++#endif ++ ++#ifndef DCA_GET_TAG_TWO_ARGS ++#define dca3_get_tag(a,b) dca_get_tag(b) ++#endif ++ ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#if defined(__i386__) || defined(__x86_64__) ++#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#endif ++#endif ++ ++/* taken from 2.6.24 definition in linux/kernel.h */ ++#ifndef IS_ALIGNED ++#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) ++#endif ++ ++#ifdef IS_ENABLED ++#undef IS_ENABLED ++#undef __ARG_PLACEHOLDER_1 ++#undef config_enabled ++#undef _config_enabled ++#undef __config_enabled ++#undef ___config_enabled ++#endif ++ ++#define __ARG_PLACEHOLDER_1 0, ++#define config_enabled(cfg) _config_enabled(cfg) ++#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) ++#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) ++#define ___config_enabled(__ignored, val, ...) 
val ++ ++#define IS_ENABLED(option) \ ++ (config_enabled(option) || config_enabled(option##_MODULE)) ++ ++#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) ++struct _kc_vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ __be16 h_vlan_proto; ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++#define vlan_ethhdr _kc_vlan_ethhdr ++struct _kc_vlan_hdr { ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++#define vlan_hdr _kc_vlan_hdr ++#define vlan_tx_tag_present(_skb) 0 ++#define vlan_tx_tag_get(_skb) 0 ++#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ ++ ++#ifndef VLAN_PRIO_SHIFT ++#define VLAN_PRIO_SHIFT 13 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_2_5GB ++#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_5_0GB ++#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_8_0GB ++#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X1 ++#define PCI_EXP_LNKSTA_NLW_X1 0x0010 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X2 ++#define PCI_EXP_LNKSTA_NLW_X2 0x0020 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X4 ++#define PCI_EXP_LNKSTA_NLW_X4 0x0040 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X8 ++#define PCI_EXP_LNKSTA_NLW_X8 0x0080 ++#endif ++ ++#ifndef __GFP_COLD ++#define __GFP_COLD 0 ++#endif ++ ++#ifndef __GFP_COMP ++#define __GFP_COMP 0 ++#endif ++ ++#ifndef IP_OFFSET ++#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ ++#endif ++ ++/*****************************************************************************/ ++/* Installations with ethtool version without eeprom, adapter id, or statistics ++ * support */ ++ ++#ifndef ETH_GSTRING_LEN ++#define ETH_GSTRING_LEN 32 ++#endif ++ ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x1d ++#undef ethtool_drvinfo ++#define ethtool_drvinfo k_ethtool_drvinfo ++struct k_ethtool_drvinfo { ++ u32 cmd; ++ char driver[32]; ++ char version[32]; ++ char fw_version[32]; ++ char bus_info[32]; ++ char reserved1[32]; ++ char reserved2[16]; ++ u32 n_stats; ++ u32 testinfo_len; ++ u32 eedump_len; ++ u32 regdump_len; ++}; ++ ++struct ethtool_stats { ++ u32 cmd; ++ u32 n_stats; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_GSTATS */ ++ ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x1c ++#endif /* ETHTOOL_PHYS_ID */ ++ ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x1b ++enum ethtool_stringset { ++ ETH_SS_TEST = 0, ++ ETH_SS_STATS, ++}; ++struct ethtool_gstrings { ++ u32 cmd; /* ETHTOOL_GSTRINGS */ ++ u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ ++ u32 len; /* number of strings in the string set */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GSTRINGS */ ++ ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x1a ++enum ethtool_test_flags { ++ ETH_TEST_FL_OFFLINE = (1 << 0), ++ ETH_TEST_FL_FAILED = (1 << 1), ++}; ++struct ethtool_test { ++ u32 cmd; ++ u32 flags; ++ u32 reserved; ++ u32 len; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_TEST */ ++ ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0xb ++#undef ETHTOOL_GREGS ++struct ethtool_eeprom { ++ u32 cmd; ++ u32 magic; ++ u32 offset; ++ u32 len; ++ u8 data[0]; ++}; ++ ++struct ethtool_value { ++ u32 cmd; ++ u32 data; ++}; ++#endif /* ETHTOOL_GEEPROM */ ++ ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0xa ++#endif /* ETHTOOL_GLINK */ ++ ++#ifndef ETHTOOL_GWOL ++#define ETHTOOL_GWOL 0x5 ++#define ETHTOOL_SWOL 0x6 ++#define SOPASS_MAX 6 ++struct ethtool_wolinfo { ++ u32 cmd; ++ u32 supported; ++ u32 wolopts; ++ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ ++}; ++#endif /* ETHTOOL_GWOL */ ++ ++#ifndef ETHTOOL_GREGS ++#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ ++#define ethtool_regs _kc_ethtool_regs ++/* for passing big chunks of data */ ++struct _kc_ethtool_regs { ++ u32 cmd; ++ u32 version; /* driver-specific, indicates different chips/revs */ ++ u32 len; /* bytes */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GREGS */ ++ ++#ifndef ETHTOOL_GMSGLVL ++#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ ++#endif ++#ifndef ETHTOOL_SMSGLVL ++#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ ++#endif ++#ifndef ETHTOOL_NWAY_RST ++#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ ++#endif ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0x0000000a /* Get link status */ ++#endif ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ ++#endif ++#ifndef ETHTOOL_SEEPROM ++#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ ++#endif ++#ifndef ETHTOOL_GCOALESCE ++#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ ++/* for configuring coalescing parameters of chip */ ++#define ethtool_coalesce _kc_ethtool_coalesce ++struct _kc_ethtool_coalesce { ++ u32 cmd; /* ETHTOOL_{G,S}COALESCE */ ++ ++ /* How many usecs to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_max_coalesced_frames ++ * is used. ++ */ ++ u32 rx_coalesce_usecs; ++ ++ /* How many packets to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause RX interrupts to never be ++ * generated. ++ */ ++ u32 rx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 rx_coalesce_usecs_irq; ++ u32 rx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_max_coalesced_frames ++ * is used. ++ */ ++ u32 tx_coalesce_usecs; ++ ++ /* How many packets to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause TX interrupts to never be ++ * generated. ++ */ ++ u32 tx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. 
Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 tx_coalesce_usecs_irq; ++ u32 tx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay in-memory statistics ++ * block updates. Some drivers do not have an in-memory ++ * statistic block, and in such cases this value is ignored. ++ * This value must not be zero. ++ */ ++ u32 stats_block_coalesce_usecs; ++ ++ /* Adaptive RX/TX coalescing is an algorithm implemented by ++ * some drivers to improve latency under low packet rates and ++ * improve throughput under high packet rates. Some drivers ++ * only implement one of RX or TX adaptive coalescing. Anything ++ * not implemented by the driver causes these values to be ++ * silently ignored. ++ */ ++ u32 use_adaptive_rx_coalesce; ++ u32 use_adaptive_tx_coalesce; ++ ++ /* When the packet rate (measured in packets per second) ++ * is below pkt_rate_low, the {rx,tx}_*_low parameters are ++ * used. ++ */ ++ u32 pkt_rate_low; ++ u32 rx_coalesce_usecs_low; ++ u32 rx_max_coalesced_frames_low; ++ u32 tx_coalesce_usecs_low; ++ u32 tx_max_coalesced_frames_low; ++ ++ /* When the packet rate is below pkt_rate_high but above ++ * pkt_rate_low (both measured in packets per second) the ++ * normal {rx,tx}_* coalescing parameters are used. ++ */ ++ ++ /* When the packet rate is (measured in packets per second) ++ * is above pkt_rate_high, the {rx,tx}_*_high parameters are ++ * used. ++ */ ++ u32 pkt_rate_high; ++ u32 rx_coalesce_usecs_high; ++ u32 rx_max_coalesced_frames_high; ++ u32 tx_coalesce_usecs_high; ++ u32 tx_max_coalesced_frames_high; ++ ++ /* How often to do adaptive coalescing packet rate sampling, ++ * measured in seconds. Must not be zero. ++ */ ++ u32 rate_sample_interval; ++}; ++#endif /* ETHTOOL_GCOALESCE */ ++ ++#ifndef ETHTOOL_SCOALESCE ++#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ ++#endif ++#ifndef ETHTOOL_GRINGPARAM ++#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ ++/* for configuring RX/TX ring parameters */ ++#define ethtool_ringparam _kc_ethtool_ringparam ++struct _kc_ethtool_ringparam { ++ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ ++ ++ /* Read only attributes. These indicate the maximum number ++ * of pending RX/TX ring entries the driver will allow the ++ * user to set. ++ */ ++ u32 rx_max_pending; ++ u32 rx_mini_max_pending; ++ u32 rx_jumbo_max_pending; ++ u32 tx_max_pending; ++ ++ /* Values changeable by the user. The valid values are ++ * in the range 1 to the "*_max_pending" counterpart above. ++ */ ++ u32 rx_pending; ++ u32 rx_mini_pending; ++ u32 rx_jumbo_pending; ++ u32 tx_pending; ++}; ++#endif /* ETHTOOL_GRINGPARAM */ ++ ++#ifndef ETHTOOL_SRINGPARAM ++#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ ++#endif ++#ifndef ETHTOOL_GPAUSEPARAM ++#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ ++/* for configuring link flow control parameters */ ++#define ethtool_pauseparam _kc_ethtool_pauseparam ++struct _kc_ethtool_pauseparam { ++ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ ++ ++ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg ++ * being true) the user may set 'autoneg' here non-zero to have the ++ * pause parameters be auto-negotiated too. In such a case, the ++ * {rx,tx}_pause values below determine what capabilities are ++ * advertised. ++ * ++ * If 'autoneg' is zero or the link is not being auto-negotiated, ++ * then {rx,tx}_pause force the driver to use/not-use pause ++ * flow control. 
++ */ ++ u32 autoneg; ++ u32 rx_pause; ++ u32 tx_pause; ++}; ++#endif /* ETHTOOL_GPAUSEPARAM */ ++ ++#ifndef ETHTOOL_SPAUSEPARAM ++#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ ++#endif ++#ifndef ETHTOOL_GRXCSUM ++#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SRXCSUM ++#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GTXCSUM ++#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STXCSUM ++#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GSG ++#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable ++ * (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SSG ++#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable ++ * (ethtool_value). */ ++#endif ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ ++#endif ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ ++#endif ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ ++#endif ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ ++#endif ++#ifndef ETHTOOL_GTSO ++#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STSO ++#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ ++#endif ++ ++#ifndef ETHTOOL_BUSINFO_LEN ++#define ETHTOOL_BUSINFO_LEN 32 ++#endif ++ ++#ifndef SPEED_2500 ++#define SPEED_2500 2500 ++#endif ++#ifndef SPEED_5000 ++#define SPEED_5000 5000 ++#endif ++ ++#ifndef RHEL_RELEASE_VERSION ++#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) ++#endif ++#ifndef AX_RELEASE_VERSION ++#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) ++#endif ++ ++#ifndef AX_RELEASE_CODE ++#define AX_RELEASE_CODE 0 ++#endif ++ ++#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) ++#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) ++#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) ++#endif ++ ++#ifndef RHEL_RELEASE_CODE ++/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ ++#define RHEL_RELEASE_CODE 0 ++#endif ++ ++/* RHEL 7 didn't backport the parameter change in ++ * create_singlethread_workqueue. ++ * If/when RH corrects this we will want to tighten up the version check. ++ */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) ++#undef create_singlethread_workqueue ++#define create_singlethread_workqueue(name) \ ++ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) ++#endif ++ ++/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find ++ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new ++ * enough versions of Ubuntu. Otherwise you can simply see it in the output of ++ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in ++ * the linux-source package, but in the linux-headers package. It begins to ++ * appear in later releases of 14.04 and 14.10. 
++ * ++ * Ex: ++ * ++ * $uname -r ++ * 3.13.0-45-generic ++ * ABI is 45 ++ * ++ * ++ * $uname -r ++ * 3.16.0-23-generic ++ * ABI is 23 ++ */ ++#ifndef UTS_UBUNTU_RELEASE_ABI ++#define UTS_UBUNTU_RELEASE_ABI 0 ++#define UBUNTU_VERSION_CODE 0 ++#else ++/* Ubuntu does not provide actual release version macro, so we use the kernel ++ * version plus the ABI to generate a unique version code specific to Ubuntu. ++ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to ++ * ignore differences in sublevel which are not important since we have the ++ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for ++ * ordering checks. ++ */ ++#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ ++ UTS_UBUNTU_RELEASE_ABI) ++ ++#if UTS_UBUNTU_RELEASE_ABI > 255 ++#error UTS_UBUNTU_RELEASE_ABI is too large... ++#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ ++ ++#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) ++/* Our version code scheme does not make sense for non 3.x or newer kernels, ++ * and we have no support in kcompat for this scenario. Thus, treat this as a ++ * non-Ubuntu kernel. Possibly might be better to error here. ++ */ ++#define UTS_UBUNTU_RELEASE_ABI 0 ++#define UBUNTU_VERSION_CODE 0 ++#endif ++ ++#endif ++ ++/* Note that the 3rd digit is always zero, and will be ignored. This is ++ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux ++ * version codes are 3 digit, this 3rd digit is superseded by the ABI value. ++ */ ++#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) ++ ++/* SuSE version macro is the same as Linux kernel version */ ++#ifndef SLE_VERSION ++#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) ++#endif ++#ifdef CONFIG_SUSE_KERNEL ++#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) ++/* SLES11 GA is 2.6.27 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,0,0) ++#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) ++/* SLES11 SP1 is 2.6.32 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,1,0) ++#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) ++/* SLES11 SP2 is 3.0.13 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,2,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) ++/* SLES11 SP3 is 3.0.76 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,3,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))) ++/* SLES11 SP4 is 3.0.101 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,4,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))) ++/* SLES12 GA is 3.12.28 based */ ++#define SLE_VERSION_CODE SLE_VERSION(12,0,0) ++/* new SLES kernels must be added here with >= based on kernel ++ * the idea is to order from newest to oldest and just catch all ++ * of them using the >= ++ */ ++#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47))) ++/* SLES12 SP1 is 3.12.47-based */ ++#define SLE_VERSION_CODE SLE_VERSION(12,1,0) ++#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ ++#endif /* CONFIG_SUSE_KERNEL */ ++#ifndef SLE_VERSION_CODE ++#define SLE_VERSION_CODE 0 ++#endif /* SLE_VERSION_CODE */ ++ ++#ifdef __KLOCWORK__ ++#ifdef ARRAY_SIZE ++#undef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++#endif /* __KLOCWORK__ */ ++ ++/*****************************************************************************/ ++/* 2.4.3 => 2.4.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++#ifndef pci_set_dma_mask ++#define pci_set_dma_mask _kc_pci_set_dma_mask 
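Aside (editorial, not part of the patch): the distro-version macros above pack release numbers into single integers so that ordinary `>=` comparisons can order them; for Ubuntu, the sublevel byte of LINUX_VERSION_CODE is masked off, the result is shifted left by 8, and the release ABI is added in the low byte. A minimal user-space sketch of that arithmetic follows, using the 3.13.0-45-generic example from the comment above; the parameterised UBUNTU_VERSION_CODE_OF() helper is illustrative only (in the header the value is a constant built from LINUX_VERSION_CODE and UTS_UBUNTU_RELEASE_ABI).

#include <stdio.h>

#define KERNEL_VERSION(a,b,c)     (((a) << 16) + ((b) << 8) + (c))
#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
/* same arithmetic as UBUNTU_VERSION_CODE, parameterised for the demo */
#define UBUNTU_VERSION_CODE_OF(linux_code, abi) \
        (((~0xFF & (linux_code)) << 8) + (abi))
#define UBUNTU_VERSION(a,b,c,d)   ((KERNEL_VERSION(a,b,0) << 8) + (d))

int main(void)
{
        /* 3.13.0-45-generic: kernel 3.13.0, Ubuntu release ABI 45 */
        unsigned int linux_code = KERNEL_VERSION(3, 13, 0);
        unsigned int code = UBUNTU_VERSION_CODE_OF(linux_code, 45);

        printf("LINUX_VERSION_CODE        = 0x%06x\n", linux_code); /* 0x030d00 */
        printf("UBUNTU_VERSION_CODE       = 0x%08x\n", code);       /* 0x030d002d */
        printf("UBUNTU_VERSION(3,13,0,45) = 0x%08x\n",
               UBUNTU_VERSION(3, 13, 0, 45));                       /* same value */
        printf("RHEL_RELEASE_VERSION(7,0) = 0x%04x\n",
               RHEL_RELEASE_VERSION(7, 0));                         /* 0x0700 */
        return 0;
}

Note that UBUNTU_VERSION(3,13,0,45) and the computed UBUNTU_VERSION_CODE agree, which is what allows the header to compare the two forms directly in version checks.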
++extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); ++#endif ++ ++#ifndef pci_request_regions ++#define pci_request_regions _kc_pci_request_regions ++extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); ++#endif ++ ++#ifndef pci_release_regions ++#define pci_release_regions _kc_pci_release_regions ++extern void _kc_pci_release_regions(struct pci_dev *pdev); ++#endif ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++#ifndef alloc_etherdev ++#define alloc_etherdev _kc_alloc_etherdev ++extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); ++#endif ++ ++#ifndef is_valid_ether_addr ++#define is_valid_ether_addr _kc_is_valid_ether_addr ++extern int _kc_is_valid_ether_addr(u8 *addr); ++#endif ++ ++/**************************************/ ++/* MISCELLANEOUS */ ++ ++#ifndef INIT_TQUEUE ++#define INIT_TQUEUE(_tq, _routine, _data) \ ++ do { \ ++ INIT_LIST_HEAD(&(_tq)->list); \ ++ (_tq)->sync = 0; \ ++ (_tq)->routine = _routine; \ ++ (_tq)->data = _data; \ ++ } while (0) ++#endif ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) ++/* Generic MII registers. */ ++#define MII_BMCR 0x00 /* Basic mode control register */ ++#define MII_BMSR 0x01 /* Basic mode status register */ ++#define MII_PHYSID1 0x02 /* PHYS ID 1 */ ++#define MII_PHYSID2 0x03 /* PHYS ID 2 */ ++#define MII_ADVERTISE 0x04 /* Advertisement control reg */ ++#define MII_LPA 0x05 /* Link partner ability reg */ ++#define MII_EXPANSION 0x06 /* Expansion register */ ++/* Basic mode control register. */ ++#define BMCR_FULLDPLX 0x0100 /* Full duplex */ ++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ ++/* Basic mode status register. */ ++#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ ++#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ ++#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ ++#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ ++#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ ++#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ ++/* Advertisement control register. */ ++#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ ++#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ ++#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ ++#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ ++#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ ++#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ ++ ADVERTISE_100HALF | ADVERTISE_100FULL) ++/* Expansion register for auto-negotiation. 
*/ ++#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.6 => 2.4.3 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++#ifndef pci_set_power_state ++#define pci_set_power_state _kc_pci_set_power_state ++extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); ++#endif ++ ++#ifndef pci_enable_wake ++#define pci_enable_wake _kc_pci_enable_wake ++extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); ++#endif ++ ++#ifndef pci_disable_device ++#define pci_disable_device _kc_pci_disable_device ++extern void _kc_pci_disable_device(struct pci_dev *pdev); ++#endif ++ ++/* PCI PM entry point syntax changed, so don't support suspend/resume */ ++#undef CONFIG_PM ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++#ifndef HAVE_PCI_SET_MWI ++#define pci_set_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ ++ PCI_COMMAND_INVALIDATE); ++#define pci_clear_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ ++ ~PCI_COMMAND_INVALIDATE); ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.10 => 2.4.9 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) ++ ++/**************************************/ ++/* MODULE API */ ++ ++#ifndef MODULE_LICENSE ++ #define MODULE_LICENSE(X) ++#endif ++ ++/**************************************/ ++/* OTHER */ ++ ++#undef min ++#define min(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x < _y ? _x : _y; }) ++ ++#undef max ++#define max(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x > _y ? _x : _y; }) ++ ++#define min_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x < _y ? _x : _y; }) ++ ++#define max_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x > _y ? _x : _y; }) ++ ++#ifndef list_for_each_safe ++#define list_for_each_safe(pos, n, head) \ ++ for (pos = (head)->next, n = pos->next; pos != (head); \ ++ pos = n, n = pos->next) ++#endif ++ ++#ifndef ____cacheline_aligned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_aligned_in_smp ____cacheline_aligned ++#else ++#define ____cacheline_aligned_in_smp ++#endif /* CONFIG_SMP */ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); ++#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) ++extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) ++#else /* 2.4.8 => 2.4.9 */ ++extern int snprintf(char * buf, size_t size, const char *fmt, ...); ++extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#endif ++#endif /* 2.4.10 -> 2.4.6 */ ++ ++ ++/*****************************************************************************/ ++/* 2.4.12 => 2.4.10 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) ++#ifndef HAVE_NETIF_MSG ++#define HAVE_NETIF_MSG 1 ++enum { ++ NETIF_MSG_DRV = 0x0001, ++ NETIF_MSG_PROBE = 0x0002, ++ NETIF_MSG_LINK = 0x0004, ++ NETIF_MSG_TIMER = 0x0008, ++ NETIF_MSG_IFDOWN = 0x0010, ++ NETIF_MSG_IFUP = 0x0020, ++ NETIF_MSG_RX_ERR = 0x0040, ++ NETIF_MSG_TX_ERR = 0x0080, ++ NETIF_MSG_TX_QUEUED = 0x0100, ++ NETIF_MSG_INTR = 0x0200, ++ NETIF_MSG_TX_DONE = 0x0400, ++ NETIF_MSG_RX_STATUS = 0x0800, ++ NETIF_MSG_PKTDATA = 0x1000, ++ NETIF_MSG_HW = 0x2000, ++ NETIF_MSG_WOL = 0x4000, ++}; ++ ++#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) ++#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) ++#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) ++#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) ++#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) ++#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) ++#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) ++#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) ++#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) ++#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) ++#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) ++#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) ++#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) ++#endif /* !HAVE_NETIF_MSG */ ++#endif /* 2.4.12 => 2.4.10 */ ++ ++/*****************************************************************************/ ++/* 2.4.13 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#ifndef virt_to_page ++ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) ++#endif ++ ++#ifndef pci_map_page ++#define pci_map_page _kc_pci_map_page ++extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); ++#endif ++ ++#ifndef pci_unmap_page ++#define pci_unmap_page _kc_pci_unmap_page ++extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); ++#endif ++ ++/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ ++ ++#undef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0xffffffff ++#undef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffff ++ ++/**************************************/ ++/* OTHER */ ++ ++#ifndef cpu_relax ++#define cpu_relax() rep_nop() ++#endif ++ ++struct vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ unsigned short h_vlan_proto; ++ unsigned short h_vlan_TCI; ++ unsigned short h_vlan_encapsulated_proto; ++}; ++#endif /* 2.4.13 => 2.4.12 */ ++ ++/*****************************************************************************/ ++/* 2.4.17 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) ++ ++#ifndef __devexit_p ++ #define __devexit_p(x) &(x) ++#endif ++ ++#endif /* 
2.4.17 => 2.4.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) ++#define NETIF_MSG_HW 0x2000 ++#define NETIF_MSG_WOL 0x4000 ++ ++#ifndef netif_msg_hw ++#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) ++#endif ++#ifndef netif_msg_wol ++#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) ++#endif ++#endif /* 2.4.18 */ ++ ++/*****************************************************************************/ ++ ++/*****************************************************************************/ ++/* 2.4.20 => 2.4.19 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) ++ ++/* we won't support NAPI on less than 2.4.20 */ ++#ifdef NAPI ++#undef NAPI ++#endif ++ ++#endif /* 2.4.20 => 2.4.19 */ ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#define pci_name(x) ((x)->slot_name) ++ ++#ifndef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#endif ++#ifndef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#endif ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#ifndef IGB_NO_LRO ++#define IGB_NO_LRO ++#endif ++#endif ++ ++/*****************************************************************************/ ++/*****************************************************************************/ ++/* 2.4.23 => 2.4.22 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) ++/*****************************************************************************/ ++#ifdef NAPI ++#ifndef netif_poll_disable ++#define netif_poll_disable(x) _kc_netif_poll_disable(x) ++static inline void _kc_netif_poll_disable(struct net_device *netdev) ++{ ++ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { ++ /* No hurry */ ++ current->state = TASK_INTERRUPTIBLE; ++ schedule_timeout(1); ++ } ++} ++#endif ++#ifndef netif_poll_enable ++#define netif_poll_enable(x) _kc_netif_poll_enable(x) ++static inline void _kc_netif_poll_enable(struct net_device *netdev) ++{ ++ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); ++} ++#endif ++#endif /* NAPI */ ++#ifndef netif_tx_disable ++#define netif_tx_disable(x) _kc_netif_tx_disable(x) ++static inline void _kc_netif_tx_disable(struct net_device *dev) ++{ ++ spin_lock_bh(&dev->xmit_lock); ++ netif_stop_queue(dev); ++ spin_unlock_bh(&dev->xmit_lock); ++} ++#endif ++#else /* 2.4.23 => 2.4.22 */ ++#define HAVE_SCTP ++#endif /* 2.4.23 => 2.4.22 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) ++#define ETHTOOL_OPS_COMPAT ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) ++#define __user ++#endif /* < 2.4.27 */ ++ ++/*****************************************************************************/ ++/* 2.5.71 => 2.4.x */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) ++#define sk_protocol protocol ++#define pci_get_device pci_find_device ++#endif /* 2.5.70 => 2.4.x */ ++ ++/*****************************************************************************/ ++/* 
< 2.4.27 or 2.6.0 <= 2.6.5 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) ++ ++#ifndef netif_msg_init ++#define netif_msg_init _kc_netif_msg_init ++static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) ++{ ++ /* use default */ ++ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) ++ return default_msg_enable_bits; ++ if (debug_value == 0) /* no output */ ++ return 0; ++ /* set low N bits */ ++ return (1 << debug_value) -1; ++} ++#endif ++ ++#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ ++/*****************************************************************************/ ++#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ ++ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ ++ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) ++#define netdev_priv(x) x->priv ++#endif ++ ++/*****************************************************************************/ ++/* <= 2.5.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) ++#include ++#undef pci_register_driver ++#define pci_register_driver pci_module_init ++ ++/* ++ * Most of the dma compat code is copied/modifed from the 2.4.37 ++ * /include/linux/libata-compat.h header file ++ */ ++/* These definitions mirror those in pci.h, so they can be used ++ * interchangeably with their PCI_ counterparts */ ++enum dma_data_direction { ++ DMA_BIDIRECTIONAL = 0, ++ DMA_TO_DEVICE = 1, ++ DMA_FROM_DEVICE = 2, ++ DMA_NONE = 3, ++}; ++ ++struct device { ++ struct pci_dev pdev; ++}; ++ ++static inline struct pci_dev *to_pci_dev (struct device *dev) ++{ ++ return (struct pci_dev *) dev; ++} ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return (struct device *) pdev; ++} ++ ++#define pdev_printk(lvl, pdev, fmt, args...) \ ++ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) ++#define dev_err(dev, fmt, args...) \ ++ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) ++#define dev_info(dev, fmt, args...) \ ++ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) ++#define dev_warn(dev, fmt, args...) \ ++ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) ++#define dev_notice(dev, fmt, args...) \ ++ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) ++#define dev_dbg(dev, fmt, args...) \ ++ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) ++ ++/* NOTE: dangerous! 
we ignore the 'gfp' argument */ ++#define dma_alloc_coherent(dev,sz,dma,gfp) \ ++ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) ++#define dma_free_coherent(dev,sz,addr,dma_addr) \ ++ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) ++ ++#define dma_map_page(dev,a,b,c,d) \ ++ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) ++#define dma_unmap_page(dev,a,b,c) \ ++ pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_single(dev,a,b,c) \ ++ pci_map_single(to_pci_dev(dev),(a),(b),(c)) ++#define dma_unmap_single(dev,a,b,c) \ ++ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_sg(dev, sg, nents, dir) \ ++ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) ++#define dma_unmap_sg(dev, sg, nents, dir) \ ++ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) ++ ++#define dma_sync_single(dev,a,b,c) \ ++ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) ++ ++/* for range just sync everything, that's all the pci API can do */ ++#define dma_sync_single_range(dev,addr,off,sz,dir) \ ++ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) ++ ++#define dma_set_mask(dev,mask) \ ++ pci_set_dma_mask(to_pci_dev(dev),(mask)) ++ ++/* hlist_* code - double linked lists */ ++struct hlist_head { ++ struct hlist_node *first; ++}; ++ ++struct hlist_node { ++ struct hlist_node *next, **pprev; ++}; ++ ++static inline void __hlist_del(struct hlist_node *n) ++{ ++ struct hlist_node *next = n->next; ++ struct hlist_node **pprev = n->pprev; ++ *pprev = next; ++ if (next) ++ next->pprev = pprev; ++} ++ ++static inline void hlist_del(struct hlist_node *n) ++{ ++ __hlist_del(n); ++ n->next = NULL; ++ n->pprev = NULL; ++} ++ ++static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) ++{ ++ struct hlist_node *first = h->first; ++ n->next = first; ++ if (first) ++ first->pprev = &n->next; ++ h->first = n; ++ n->pprev = &h->first; ++} ++ ++static inline int hlist_empty(const struct hlist_head *h) ++{ ++ return !h->first; ++} ++#define HLIST_HEAD_INIT { .first = NULL } ++#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } ++#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) ++static inline void INIT_HLIST_NODE(struct hlist_node *h) ++{ ++ h->next = NULL; ++ h->pprev = NULL; ++} ++ ++#ifndef might_sleep ++#define might_sleep() ++#endif ++#else ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return &pdev->dev; ++} ++#endif /* <= 2.5.0 */ ++ ++/*****************************************************************************/ ++/* 2.5.28 => 2.4.23 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ ++#include ++#define work_struct tq_struct ++#undef INIT_WORK ++#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) ++#undef container_of ++#define container_of list_entry ++#define schedule_work schedule_task ++#define flush_scheduled_work flush_scheduled_tasks ++#define cancel_work_sync(x) flush_scheduled_work() ++ ++#endif /* 2.5.28 => 2.4.17 */ ++ ++/*****************************************************************************/ ++/* 2.6.0 => 2.5.28 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#ifndef read_barrier_depends ++#define read_barrier_depends() rmb() ++#endif ++ ++#ifndef rcu_head ++struct __kc_callback_head { ++ struct __kc_callback_head *next; ++ void (*func)(struct callback_head *head); ++}; ++#define rcu_head __kc_callback_head ++#endif ++ ++#undef get_cpu ++#define get_cpu() smp_processor_id() ++#undef put_cpu ++#define put_cpu() do { } while(0) ++#define MODULE_INFO(version, _version) 
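Aside (editorial, not part of the patch): the hlist_* helpers earlier in this hunk keep a pointer-to-pointer (pprev) to the previous node's next field rather than a pointer to the previous node, which is what lets a node unlink itself without a reference to the list head. A self-contained user-space sketch of that behaviour follows; the add/delete bodies mirror the ones in the hunk, while struct item, the offsetof() walk, and main() are illustrative additions.

#include <stdio.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;          /* unlink via the previous next-pointer; no head needed */
        if (next)
                next->pprev = pprev;
        n->next = NULL;
        n->pprev = NULL;
}

struct item { int value; struct hlist_node node; };

int main(void)
{
        struct hlist_head head = { NULL };
        struct item a = { 1, { NULL, NULL } };
        struct item b = { 2, { NULL, NULL } };
        struct item c = { 3, { NULL, NULL } };
        struct hlist_node *pos;

        hlist_add_head(&c.node, &head);  /* list: c */
        hlist_add_head(&b.node, &head);  /* list: b, c */
        hlist_add_head(&a.node, &head);  /* list: a, b, c */
        hlist_del(&b.node);              /* list: a, c */

        for (pos = head.first; pos; pos = pos->next) {
                struct item *it =
                        (struct item *)((char *)pos - offsetof(struct item, node));
                printf("%d\n", it->value);  /* prints 1 then 3 */
        }
        return 0;
}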
++#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT ++#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 ++#endif ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 ++#endif ++ ++#define dma_set_coherent_mask(dev,mask) 1 ++ ++#undef dev_put ++#define dev_put(dev) __dev_put(dev) ++ ++#ifndef skb_fill_page_desc ++#define skb_fill_page_desc _kc_skb_fill_page_desc ++extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); ++#endif ++ ++#undef ALIGN ++#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) ++ ++#ifndef page_count ++#define page_count(p) atomic_read(&(p)->count) ++#endif ++ ++#ifdef MAX_NUMNODES ++#undef MAX_NUMNODES ++#endif ++#define MAX_NUMNODES 1 ++ ++/* find_first_bit and find_next bit are not defined for most ++ * 2.4 kernels (except for the redhat 2.4.21 kernels ++ */ ++#include ++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) ++#undef find_next_bit ++#define find_next_bit _kc_find_next_bit ++extern unsigned long _kc_find_next_bit(const unsigned long *addr, ++ unsigned long size, ++ unsigned long offset); ++#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) ++ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (strchr(dev->name, '%')) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#ifndef strlcpy ++#define strlcpy _kc_strlcpy ++extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); ++#endif /* strlcpy */ ++ ++#ifndef do_div ++#if BITS_PER_LONG == 64 ++# define do_div(n,base) ({ \ ++ uint32_t __base = (base); \ ++ uint32_t __rem; \ ++ __rem = ((uint64_t)(n)) % __base; \ ++ (n) = ((uint64_t)(n)) / __base; \ ++ __rem; \ ++ }) ++#elif BITS_PER_LONG == 32 ++extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); ++# define do_div(n,base) ({ \ ++ uint32_t __base = (base); \ ++ uint32_t __rem; \ ++ if (likely(((n) >> 32) == 0)) { \ ++ __rem = (uint32_t)(n) % __base; \ ++ (n) = (uint32_t)(n) / __base; \ ++ } else \ ++ __rem = _kc__div64_32(&(n), __base); \ ++ __rem; \ ++ }) ++#else /* BITS_PER_LONG == ?? 
*/ ++# error do_div() does not yet support the C64 ++#endif /* BITS_PER_LONG */ ++#endif /* do_div */ ++ ++#ifndef NSEC_PER_SEC ++#define NSEC_PER_SEC 1000000000L ++#endif ++ ++#undef HAVE_I2C_SUPPORT ++#else /* 2.6.0 */ ++#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \ ++ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9))) ++#define HAVE_I2C_SUPPORT ++#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */ ++ ++#endif /* 2.6.0 => 2.5.28 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) ++#define dma_pool pci_pool ++#define dma_pool_destroy pci_pool_destroy ++#define dma_pool_alloc pci_pool_alloc ++#define dma_pool_free pci_pool_free ++ ++#define dma_pool_create(name,dev,size,align,allocation) \ ++ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) ++#endif /* < 2.6.3 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++#define MODULE_VERSION(_version) MODULE_INFO(version, _version) ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++/* 2.6.5 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ++#define dma_sync_single_for_cpu dma_sync_single ++#define dma_sync_single_for_device dma_sync_single ++#define dma_sync_single_range_for_cpu dma_sync_single_range ++#define dma_sync_single_range_for_device dma_sync_single_range ++#ifndef pci_dma_mapping_error ++#define pci_dma_mapping_error _kc_pci_dma_mapping_error ++static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) ++{ ++ return dma_addr == 0; ++} ++#endif ++#endif /* 2.6.5 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); ++#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) ++/* taken from 2.6 include/linux/bitmap.h */ ++#undef bitmap_zero ++#define bitmap_zero _kc_bitmap_zero ++static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) ++{ ++ if (nbits <= BITS_PER_LONG) ++ *dst = 0UL; ++ else { ++ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ++ memset(dst, 0, len); ++ } ++} ++#define page_to_nid(x) 0 ++ ++#endif /* < 2.6.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) ++#undef if_mii ++#define if_mii _kc_if_mii ++static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) ++{ ++ return (struct mii_ioctl_data *) &rq->ifr_ifru; ++} ++ ++#ifndef __force ++#define __force ++#endif ++#endif /* < 2.6.7 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++#ifndef PCI_EXP_DEVCTL ++#define PCI_EXP_DEVCTL 8 ++#endif ++#ifndef PCI_EXP_DEVCTL_CERE ++#define PCI_EXP_DEVCTL_CERE 0x0001 ++#endif ++#define PCI_EXP_FLAGS 2 /* Capabilities register */ ++#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ ++#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ ++#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ ++#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ ++#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ ++#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ ++#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ ++#define PCI_EXP_DEVCAP 4 /* Device capabilities */ ++#define PCI_EXP_DEVSTA 10 /* Device Status */ ++#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ ++ schedule_timeout((x * HZ)/1000 + 2); \ ++ } while (0) ++ ++#endif /* < 2.6.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) ++#include ++#define __iomem ++ ++#ifndef kcalloc ++#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++#define MSEC_PER_SEC 1000L ++static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) ++{ ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (MSEC_PER_SEC / HZ) * j; ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); ++#else ++ return (j * MSEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return m * (HZ / MSEC_PER_SEC); ++#else ++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; ++#endif ++} ++ ++#define msleep_interruptible _kc_msleep_interruptible ++static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) ++{ ++ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; ++ ++ while (timeout && !signal_pending(current)) { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ timeout = schedule_timeout(timeout); ++ } ++ return _kc_jiffies_to_msecs(timeout); ++} ++ ++/* Basic mode control register. 
*/ ++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ ++ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++#endif ++#ifndef __be16 ++#define __be16 u16 ++#endif ++#ifndef __be32 ++#define __be32 u32 ++#endif ++#ifndef __be64 ++#define __be64 u64 ++#endif ++ ++static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) ++{ ++ return (struct vlan_ethhdr *)skb->mac.raw; ++} ++ ++/* Wake-On-Lan options. */ ++#define WAKE_PHY (1 << 0) ++#define WAKE_UCAST (1 << 1) ++#define WAKE_MCAST (1 << 2) ++#define WAKE_BCAST (1 << 3) ++#define WAKE_ARP (1 << 4) ++#define WAKE_MAGIC (1 << 5) ++#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ ++ ++#define skb_header_pointer _kc_skb_header_pointer ++static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, ++ int offset, int len, void *buffer) ++{ ++ int hlen = skb_headlen(skb); ++ ++ if (hlen - offset >= len) ++ return skb->data + offset; ++ ++#ifdef MAX_SKB_FRAGS ++ if (skb_copy_bits(skb, offset, buffer, len) < 0) ++ return NULL; ++ ++ return buffer; ++#else ++ return NULL; ++#endif ++ ++#ifndef NETDEV_TX_OK ++#define NETDEV_TX_OK 0 ++#endif ++#ifndef NETDEV_TX_BUSY ++#define NETDEV_TX_BUSY 1 ++#endif ++#ifndef NETDEV_TX_LOCKED ++#define NETDEV_TX_LOCKED -1 ++#endif ++} ++ ++#ifndef __bitwise ++#define __bitwise ++#endif ++#endif /* < 2.6.9 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++#ifdef module_param_array_named ++#undef module_param_array_named ++#define module_param_array_named(name, array, type, nump, perm) \ ++ static struct kparam_array __param_arr_##name \ ++ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ ++ sizeof(array[0]), array }; \ ++ module_param_call(name, param_array_set, param_array_get, \ ++ &__param_arr_##name, perm) ++#endif /* module_param_array_named */ ++/* ++ * num_online is broken for all < 2.6.10 kernels. This is needed to support ++ * Node module parameter of ixgbe. 
++ */ ++#undef num_online_nodes ++#define num_online_nodes(n) 1 ++extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); ++#undef node_online_map ++#define node_online_map _kcompat_node_online_map ++#define pci_get_class pci_find_class ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) ++#define PCI_D0 0 ++#define PCI_D1 1 ++#define PCI_D2 2 ++#define PCI_D3hot 3 ++#define PCI_D3cold 4 ++typedef int pci_power_t; ++#define pci_choose_state(pdev,state) state ++#define PMSG_SUSPEND 3 ++#define PCI_EXP_LNKCTL 16 ++ ++#undef NETIF_F_LLTX ++ ++#ifndef ARCH_HAS_PREFETCH ++#define prefetch(X) ++#endif ++ ++#ifndef NET_IP_ALIGN ++#define NET_IP_ALIGN 2 ++#endif ++ ++#define KC_USEC_PER_SEC 1000000L ++#define usecs_to_jiffies _kc_usecs_to_jiffies ++static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) ++{ ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (KC_USEC_PER_SEC / HZ) * j; ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); ++#else ++ return (j * KC_USEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return m * (HZ / KC_USEC_PER_SEC); ++#else ++ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; ++#endif ++} ++ ++#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ ++#define PCI_EXP_LNKSTA 18 /* Link Status */ ++#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ ++#define PCI_EXP_SLTCTL 24 /* Slot Control */ ++#define PCI_EXP_SLTSTA 26 /* Slot Status */ ++#define PCI_EXP_RTCTL 28 /* Root Control */ ++#define PCI_EXP_RTCAP 30 /* Root Capabilities */ ++#define PCI_EXP_RTSTA 32 /* Root Status */ ++#endif /* < 2.6.11 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) ++#include ++#define USE_REBOOT_NOTIFIER ++ ++/* Generic MII registers. */ ++#define MII_CTRL1000 0x09 /* 1000BASE-T control */ ++#define MII_STAT1000 0x0a /* 1000BASE-T status */ ++/* Advertisement control register. */ ++#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ ++#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ ++/* Link partner ability register. 
*/ ++#define LPA_PAUSE_CAP 0x0400 /* Can pause */ ++#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ ++/* 1000BASE-T Control register */ ++#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ ++#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ ++/* 1000BASE-T Status register */ ++#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ ++#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ ++ ++#ifndef is_zero_ether_addr ++#define is_zero_ether_addr _kc_is_zero_ether_addr ++static inline int _kc_is_zero_ether_addr(const u8 *addr) ++{ ++ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); ++} ++#endif /* is_zero_ether_addr */ ++#ifndef is_multicast_ether_addr ++#define is_multicast_ether_addr _kc_is_multicast_ether_addr ++static inline int _kc_is_multicast_ether_addr(const u8 *addr) ++{ ++ return addr[0] & 0x01; ++} ++#endif /* is_multicast_ether_addr */ ++#endif /* < 2.6.12 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) ++#ifndef kstrdup ++#define kstrdup _kc_kstrdup ++extern char *_kc_kstrdup(const char *s, unsigned int gfp); ++#endif ++#endif /* < 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++#define pm_message_t u32 ++#ifndef kzalloc ++#define kzalloc _kc_kzalloc ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++ ++/* Generic MII registers. */ ++#define MII_ESTATUS 0x0f /* Extended Status */ ++/* Basic mode status register. */ ++#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ ++/* Extended status register. */ ++#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ ++#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ ++ ++#define SUPPORTED_Pause (1 << 13) ++#define SUPPORTED_Asym_Pause (1 << 14) ++#define ADVERTISED_Pause (1 << 13) ++#define ADVERTISED_Asym_Pause (1 << 14) ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) ++#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) ++#define gfp_t unsigned ++#else ++typedef unsigned gfp_t; ++#endif ++#endif /* !RHEL4.3->RHEL5.0 */ ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) ++#ifdef CONFIG_X86_64 ++#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ ++ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) ++#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ ++ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) ++#endif ++#endif ++#endif /* < 2.6.14 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) ++#ifndef kfree_rcu ++/* this is placed here due to a lack of rcu_barrier in previous kernels */ ++#define kfree_rcu(_ptr, _offset) kfree(_ptr) ++#endif /* kfree_rcu */ ++#ifndef vmalloc_node ++#define vmalloc_node(a,b) vmalloc(a) ++#endif /* vmalloc_node*/ ++ ++#define setup_timer(_timer, _function, _data) \ ++do { \ ++ (_timer)->function = _function; \ ++ (_timer)->data = _data; \ ++ init_timer(_timer); \ ++} while (0) ++#ifndef device_can_wakeup ++#define device_can_wakeup(dev) (1) ++#endif ++#ifndef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) do{}while(0) ++#endif ++#ifndef device_init_wakeup ++#define 
device_init_wakeup(dev,val) do {} while (0) ++#endif ++static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) ++{ ++ const u16 *a = (const u16 *) addr1; ++ const u16 *b = (const u16 *) addr2; ++ ++ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; ++} ++#undef compare_ether_addr ++#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) ++#endif /* < 2.6.15 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) ++#undef DEFINE_MUTEX ++#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) ++#define mutex_lock(x) down_interruptible(x) ++#define mutex_unlock(x) up(x) ++ ++#ifndef ____cacheline_internodealigned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp ++#else ++#define ____cacheline_internodealigned_in_smp ++#endif /* CONFIG_SMP */ ++#endif /* ____cacheline_internodealigned_in_smp */ ++#undef HAVE_PCI_ERS ++#else /* 2.6.16 and above */ ++#undef HAVE_PCI_ERS ++#define HAVE_PCI_ERS ++#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) ++#ifdef device_can_wakeup ++#undef device_can_wakeup ++#endif /* device_can_wakeup */ ++#define device_can_wakeup(dev) 1 ++#endif /* SLE_VERSION(10,4,0) */ ++#endif /* < 2.6.16 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) ++#ifndef dev_notice ++#define dev_notice(dev, fmt, args...) \ ++ dev_printk(KERN_NOTICE, dev, fmt, ## args) ++#endif ++ ++#ifndef first_online_node ++#define first_online_node 0 ++#endif ++#ifndef NET_SKB_PAD ++#define NET_SKB_PAD 16 ++#endif ++#endif /* < 2.6.17 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) ++ ++#ifndef IRQ_HANDLED ++#define irqreturn_t void ++#define IRQ_HANDLED ++#define IRQ_NONE ++#endif ++ ++#ifndef IRQF_PROBE_SHARED ++#ifdef SA_PROBEIRQ ++#define IRQF_PROBE_SHARED SA_PROBEIRQ ++#else ++#define IRQF_PROBE_SHARED 0 ++#endif ++#endif ++ ++#ifndef IRQF_SHARED ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#ifndef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++ ++#ifndef FIELD_SIZEOF ++#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) ++#endif ++ ++#ifndef skb_is_gso ++#ifdef NETIF_F_TSO ++#define skb_is_gso _kc_skb_is_gso ++static inline int _kc_skb_is_gso(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_size; ++} ++#else ++#define skb_is_gso(a) 0 ++#endif ++#endif ++ ++#ifndef resource_size_t ++#define resource_size_t unsigned long ++#endif ++ ++#ifdef skb_pad ++#undef skb_pad ++#endif ++#define skb_pad(x,y) _kc_skb_pad(x, y) ++int _kc_skb_pad(struct sk_buff *skb, int pad); ++#ifdef skb_padto ++#undef skb_padto ++#endif ++#define skb_padto(x,y) _kc_skb_padto(x, y) ++static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int size = skb->len; ++ if(likely(size >= len)) ++ return 0; ++ return _kc_skb_pad(skb, len - size); ++} ++ ++#ifndef DECLARE_PCI_UNMAP_ADDR ++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ ++ dma_addr_t ADDR_NAME ++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ ++ u32 LEN_NAME ++#define pci_unmap_addr(PTR, ADDR_NAME) \ ++ ((PTR)->ADDR_NAME) ++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ ++ (((PTR)->ADDR_NAME) = (VAL)) ++#define pci_unmap_len(PTR, LEN_NAME) \ ++ ((PTR)->LEN_NAME) ++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ ++ (((PTR)->LEN_NAME) 
= (VAL)) ++#endif /* DECLARE_PCI_UNMAP_ADDR */ ++#endif /* < 2.6.18 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++enum pcie_link_width { ++ PCIE_LNK_WIDTH_RESRV = 0x00, ++ PCIE_LNK_X1 = 0x01, ++ PCIE_LNK_X2 = 0x02, ++ PCIE_LNK_X4 = 0x04, ++ PCIE_LNK_X8 = 0x08, ++ PCIE_LNK_X12 = 0x0C, ++ PCIE_LNK_X16 = 0x10, ++ PCIE_LNK_X32 = 0x20, ++ PCIE_LNK_WIDTH_UNKNOWN = 0xFF, ++}; ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) ++#define i_private u.generic_ip ++#endif /* >= RHEL 5.0 */ ++ ++#ifndef DIV_ROUND_UP ++#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) ++#endif ++#ifndef __ALIGN_MASK ++#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) ++#endif ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) ++#if (!((RHEL_RELEASE_CODE && \ ++ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) ++typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); ++#endif ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++#undef CONFIG_INET_LRO ++#undef CONFIG_INET_LRO_MODULE ++#endif ++typedef irqreturn_t (*new_handler_t)(int, void*); ++static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#else /* 2.4.x */ ++typedef void (*irq_handler_t)(int, void*, struct pt_regs *); ++typedef void (*new_handler_t)(int, void*); ++static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#endif /* >= 2.5.x */ ++{ ++ irq_handler_t new_handler = (irq_handler_t) handler; ++ return request_irq(irq, new_handler, flags, devname, dev_id); ++} ++ ++#undef request_irq ++#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) ++ ++#define irq_handler_t new_handler_t ++/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) ++#define PCIE_CONFIG_SPACE_LEN 256 ++#define PCI_CONFIG_SPACE_LEN 64 ++#define PCIE_LINK_STATUS 0x12 ++#define pci_config_space_ich8lan() do {} while(0) ++#undef pci_save_state ++extern int _kc_pci_save_state(struct pci_dev *); ++#define pci_save_state(pdev) _kc_pci_save_state(pdev) ++#undef pci_restore_state ++extern void _kc_pci_restore_state(struct pci_dev *); ++#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) ++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ ++ ++#ifdef HAVE_PCI_ERS ++#undef free_netdev ++extern void _kc_free_netdev(struct net_device *); ++#define free_netdev(netdev) _kc_free_netdev(netdev) ++#endif ++static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) ++{ ++ return 0; ++} ++#define pci_disable_pcie_error_reporting(dev) do {} while (0) ++#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) ++ ++extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); ++#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) ++#ifndef bool ++#define bool _Bool ++#define true 1 ++#define false 0 ++#endif ++#else /* 2.6.19 */ ++#include ++#include ++#include ++#endif /* < 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) ++#if ( LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,5,28) ) ++#undef INIT_WORK ++#define INIT_WORK(_work, _func) \ ++do { \ ++ INIT_LIST_HEAD(&(_work)->entry); \ ++ (_work)->pending = 0; \ ++ (_work)->func = (void (*)(void *))_func; \ ++ (_work)->data = _work; \ ++ init_timer(&(_work)->timer); \ ++} while (0) ++#endif ++ ++#ifndef PCI_VDEVICE ++#define PCI_VDEVICE(ven, dev) \ ++ PCI_VENDOR_ID_##ven, (dev), \ ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0 ++#endif ++ ++#ifndef PCI_VENDOR_ID_INTEL ++#define PCI_VENDOR_ID_INTEL 0x8086 ++#endif ++ ++#ifndef round_jiffies ++#define round_jiffies(x) x ++#endif ++ ++#define csum_offset csum ++ ++#define HAVE_EARLY_VMALLOC_NODE ++#define dev_to_node(dev) -1 ++#undef set_dev_node ++/* remove compiler warning with b=b, for unused variable */ ++#define set_dev_node(a, b) do { (b) = (b); } while(0) ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) ++typedef __u16 __bitwise __sum16; ++typedef __u32 __bitwise __wsum; ++#endif ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) ++static inline __wsum csum_unfold(__sum16 n) ++{ ++ return (__force __wsum)n; ++} ++#endif ++ ++#else /* < 2.6.20 */ ++#define HAVE_DEVICE_NUMA_NODE ++#endif /* < 2.6.20 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define to_net_dev(class) container_of(class, struct net_device, class_dev) ++#define NETDEV_CLASS_DEV ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) ++#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) ++#define vlan_group_set_device(vg, id, dev) \ ++ do { \ ++ if (vg) vg->vlan_devices[id] = dev; \ ++ } while (0) ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ ++#define pci_channel_offline(pdev) (pdev->error_state && \ ++ pdev->error_state != pci_channel_io_normal) ++#define pci_request_selected_regions(pdev, bars, name) \ ++ pci_request_regions(pdev, name) ++#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); ++ ++#ifndef __aligned ++#define __aligned(x) __attribute__((aligned(x))) ++#endif ++ ++extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); ++#define netdev_to_dev(netdev) \ ++ pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) ++#else ++static inline struct device *netdev_to_dev(struct net_device *netdev) ++{ ++ return &netdev->dev; ++} ++ ++#endif /* < 2.6.21 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define tcp_hdr(skb) (skb->h.th) ++#define tcp_hdrlen(skb) (skb->h.th->doff << 2) ++#define skb_transport_offset(skb) (skb->h.raw - skb->data) ++#define skb_transport_header(skb) (skb->h.raw) ++#define ipv6_hdr(skb) (skb->nh.ipv6h) ++#define ip_hdr(skb) (skb->nh.iph) ++#define skb_network_offset(skb) (skb->nh.raw - skb->data) ++#define skb_network_header(skb) (skb->nh.raw) ++#define skb_tail_pointer(skb) skb->tail ++#define skb_reset_tail_pointer(skb) \ ++ do { \ ++ skb->tail = skb->data; \ ++ } while (0) ++#define skb_set_tail_pointer(skb, offset) \ ++ do { \ ++ skb->tail = 
skb->data + offset; \ ++ } while (0) ++#define skb_copy_to_linear_data(skb, from, len) \ ++ memcpy(skb->data, from, len) ++#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ ++ memcpy(skb->data + offset, from, len) ++#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) ++#define pci_register_driver pci_module_init ++#define skb_mac_header(skb) skb->mac.raw ++ ++#ifdef NETIF_F_MULTI_QUEUE ++#ifndef alloc_etherdev_mq ++#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) ++#endif ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef ETH_FCS_LEN ++#define ETH_FCS_LEN 4 ++#endif ++#define cancel_work_sync(x) flush_scheduled_work() ++#ifndef udp_hdr ++#define udp_hdr _udp_hdr ++static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) ++{ ++ return (struct udphdr *)skb_transport_header(skb); ++} ++#endif ++ ++#ifdef cpu_to_be16 ++#undef cpu_to_be16 ++#endif ++#define cpu_to_be16(x) __constant_htons(x) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) ++enum { ++ DUMP_PREFIX_NONE, ++ DUMP_PREFIX_ADDRESS, ++ DUMP_PREFIX_OFFSET ++}; ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ ++#ifndef hex_asc ++#define hex_asc(x) "0123456789abcdef"[x] ++#endif ++#include ++extern void _kc_print_hex_dump(const char *level, const char *prefix_str, ++ int prefix_type, int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii); ++#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ ++ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) ++#ifndef ADVERTISED_2500baseX_Full ++#define ADVERTISED_2500baseX_Full (1 << 15) ++#endif ++#ifndef SUPPORTED_2500baseX_Full ++#define SUPPORTED_2500baseX_Full (1 << 15) ++#endif ++ ++#ifdef HAVE_I2C_SUPPORT ++#include ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) ++struct i2c_board_info { ++ char driver_name[KOBJ_NAME_LEN]; ++ char type[I2C_NAME_SIZE]; ++ unsigned short flags; ++ unsigned short addr; ++ void *platform_data; ++}; ++#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\ ++ .addr = (dev_addr) ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ ++#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info) ++extern struct i2c_client * ++_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); ++#endif /* HAVE_I2C_SUPPORT */ ++ ++#ifndef ETH_P_PAUSE ++#define ETH_P_PAUSE 0x8808 ++#endif ++ ++#else /* 2.6.22 */ ++#define ETH_TYPE_TRANS_SETS_DEV ++#define HAVE_NETDEV_STATS_IN_NETDEV ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) ++#undef SET_MODULE_OWNER ++#define SET_MODULE_OWNER(dev) do { } while (0) ++#endif /* > 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) ++#define netif_subqueue_stopped(_a, _b) 0 ++#ifndef PTR_ALIGN ++#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) ++#endif ++ ++#ifndef CONFIG_PM_SLEEP ++#define CONFIG_PM_SLEEP CONFIG_PM ++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) ++#define HAVE_ETHTOOL_GET_PERM_ADDR ++#endif /* 2.6.14 through 2.6.22 */ ++ ++static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) ++{ ++ int delta = 0; ++ ++ if (headroom > (skb->data - skb->head)) ++ delta = headroom - (skb->data - skb->head); ++ ++ if (delta || skb_header_cloned(skb)) ++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 
0, ++ GFP_ATOMIC); ++ return 0; ++} ++#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) ++#endif /* < 2.6.23 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifndef ETH_FLAG_LRO ++#define ETH_FLAG_LRO NETIF_F_LRO ++#endif ++ ++#ifndef ACCESS_ONCE ++#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#endif ++ ++/* if GRO is supported then the napi struct must already exist */ ++#ifndef NETIF_F_GRO ++/* NAPI API changes in 2.6.24 break everything */ ++struct napi_struct { ++ /* used to look up the real NAPI polling routine */ ++ int (*poll)(struct napi_struct *, int); ++ struct net_device *dev; ++ int weight; ++}; ++#endif ++ ++#ifdef NAPI ++extern int __kc_adapter_clean(struct net_device *, int *); ++/* The following definitions are multi-queue aware, and thus we have a driver ++ * define list which determines which drivers support multiple queues, and ++ * thus need these stronger defines. If a driver does not support multi-queue ++ * functionality, you don't need to add it to this list. ++ */ ++extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); ++ ++static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, ++ int (*poll)(struct napi_struct *, int), int weight) ++{ ++ struct net_device *poll_dev = napi_to_poll_dev(napi); ++ poll_dev->poll = __kc_adapter_clean; ++ poll_dev->priv = napi; ++ poll_dev->weight = weight; ++ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); ++ set_bit(__LINK_STATE_START, &poll_dev->state); ++ dev_hold(poll_dev); ++ napi->poll = poll; ++ napi->weight = weight; ++ napi->dev = dev; ++} ++#define netif_napi_add __kc_mq_netif_napi_add ++ ++static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) ++{ ++ struct net_device *poll_dev = napi_to_poll_dev(napi); ++ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); ++ dev_put(poll_dev); ++ memset(poll_dev, 0, sizeof(struct net_device)); ++} ++ ++#define netif_napi_del __kc_mq_netif_napi_del ++ ++static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) ++{ ++ return netif_running(napi->dev) && ++ netif_rx_schedule_prep(napi_to_poll_dev(napi)); ++} ++#define napi_schedule_prep __kc_mq_napi_schedule_prep ++ ++static inline void __kc_mq_napi_schedule(struct napi_struct *napi) ++{ ++ if (napi_schedule_prep(napi)) ++ __netif_rx_schedule(napi_to_poll_dev(napi)); ++} ++#define napi_schedule __kc_mq_napi_schedule ++ ++#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) ++#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) ++#ifdef CONFIG_SMP ++static inline void napi_synchronize(const struct napi_struct *n) ++{ ++ struct net_device *dev = napi_to_poll_dev(n); ++ ++ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { ++ /* No hurry. */ ++ msleep(1); ++ } ++} ++#else ++#define napi_synchronize(n) barrier() ++#endif /* CONFIG_SMP */ ++#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) ++static inline void _kc_napi_complete(struct napi_struct *napi) ++{ ++#ifdef NETIF_F_GRO ++ napi_gro_flush(napi); ++#endif ++ netif_rx_complete(napi_to_poll_dev(napi)); ++} ++#define napi_complete _kc_napi_complete ++#else /* NAPI */ ++ ++/* The following definitions are only used if we don't support NAPI at all. 
*/ ++ ++static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, ++ int (*poll)(struct napi_struct *, int), int weight) ++{ ++ dev->poll = poll; ++ dev->weight = weight; ++ napi->poll = poll; ++ napi->weight = weight; ++ napi->dev = dev; ++} ++#define netif_napi_del(_a) do {} while (0) ++#endif /* NAPI */ ++ ++#undef dev_get_by_name ++#define dev_get_by_name(_a, _b) dev_get_by_name(_b) ++#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) ++#endif ++ ++#ifdef NETIF_F_TSO6 ++#define skb_is_gso_v6 _kc_skb_is_gso_v6 ++static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; ++} ++#endif /* NETIF_F_TSO6 */ ++ ++#ifndef KERN_CONT ++#define KERN_CONT "" ++#endif ++#ifndef pr_err ++#define pr_err(fmt, arg...) \ ++ printk(KERN_ERR fmt, ##arg) ++#endif ++ ++#ifndef rounddown_pow_of_two ++#define rounddown_pow_of_two(n) \ ++ __builtin_constant_p(n) ? ( \ ++ (n == 1) ? 0 : \ ++ (1UL << ilog2(n))) : \ ++ (1UL << (fls_long(n) - 1)) ++#endif ++ ++#ifndef BIT ++#define BIT(nr) (1UL << (nr)) ++#endif ++ ++#else /* < 2.6.24 */ ++#define HAVE_ETHTOOL_GET_SSET_COUNT ++#define HAVE_NETDEV_NAPI_LIST ++#endif /* < 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) ++#define INCLUDE_PM_QOS_PARAMS_H ++#include ++#else /* >= 3.2.0 */ ++#include ++#endif /* else >= 3.2.0 */ ++#endif /* > 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) ++#define PM_QOS_CPU_DMA_LATENCY 1 ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) ++#include ++#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY ++#define pm_qos_add_requirement(pm_qos_class, name, value) \ ++ set_acceptable_latency(name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) \ ++ remove_acceptable_latency(name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) \ ++ modify_acceptable_latency(name, value) ++#else ++#define PM_QOS_DEFAULT_VALUE -1 ++#define pm_qos_add_requirement(pm_qos_class, name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) { \ ++ if (value != PM_QOS_DEFAULT_VALUE) { \ ++ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ ++ pci_name(adapter->pdev)); \ ++ } \ ++} ++ ++#endif /* > 2.6.18 */ ++ ++#define pci_enable_device_mem(pdev) pci_enable_device(pdev) ++ ++#ifndef DEFINE_PCI_DEVICE_TABLE ++#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] ++#endif /* DEFINE_PCI_DEVICE_TABLE */ ++ ++#ifndef strict_strtol ++#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) ++static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) ++{ ++ /* adapted from strict_strtoul() in 2.6.25 */ ++ char *tail; ++ long val; ++ size_t len; ++ ++ *res = 0; ++ len = strlen(buf); ++ if (!len) ++ return -EINVAL; ++ val = simple_strtol(buf, &tail, base); ++ if (tail == buf) ++ return -EINVAL; ++ if ((*tail == '\0') || ++ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { ++ *res = val; ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++#endif ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++#ifndef IGB_PROCFS ++#define IGB_PROCFS ++#endif /* IGB_PROCFS 
*/ ++#endif /* >= 2.6.0 */ ++ ++#else /* < 2.6.25 */ ++ ++#if IS_ENABLED(CONFIG_HWMON) ++#ifndef IGB_HWMON ++#define IGB_HWMON ++#endif /* IGB_HWMON */ ++#endif /* CONFIG_HWMON */ ++ ++#endif /* < 2.6.25 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++#ifndef clamp_t ++#define clamp_t(type, val, min, max) ({ \ ++ type __val = (val); \ ++ type __min = (min); \ ++ type __max = (max); \ ++ __val = __val < __min ? __min : __val; \ ++ __val > __max ? __max : __val; }) ++#endif /* clamp_t */ ++#undef kzalloc_node ++#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) ++ ++extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); ++#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) ++#else /* < 2.6.26 */ ++#define NETDEV_CAN_SET_GSO_MAX_SIZE ++#include ++#define HAVE_NETDEV_VLAN_FEATURES ++#ifndef PCI_EXP_LNKCAP_ASPMS ++#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ ++#endif /* PCI_EXP_LNKCAP_ASPMS */ ++#endif /* < 2.6.26 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, ++ __u32 speed) ++{ ++ ep->speed = (__u16)speed; ++ /* ep->speed_hi = (__u16)(speed >> 16); */ ++} ++#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set ++ ++static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) ++{ ++ /* no speed_hi before 2.6.27, and probably no need for it yet */ ++ return (__u32)ep->speed; ++} ++#define ethtool_cmd_speed _kc_ethtool_cmd_speed ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) ++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) ++#define ANCIENT_PM 1 ++#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ ++ defined(CONFIG_PM_SLEEP)) ++#define NEWER_PM 1 ++#endif ++#if defined(ANCIENT_PM) || defined(NEWER_PM) ++#undef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) \ ++ do { \ ++ u16 pmc = 0; \ ++ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ ++ if (pm) { \ ++ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ ++ &pmc); \ ++ } \ ++ (dev)->power.can_wakeup = !!(pmc >> 11); \ ++ (dev)->power.should_wakeup = (val && (pmc >> 11)); \ ++ } while (0) ++#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ ++#endif /* 2.6.15 through 2.6.27 */ ++#ifndef netif_napi_del ++#define netif_napi_del(_a) do {} while (0) ++#ifdef NAPI ++#ifdef CONFIG_NETPOLL ++#undef netif_napi_del ++#define netif_napi_del(_a) list_del(&(_a)->dev_list); ++#endif ++#endif ++#endif /* netif_napi_del */ ++#ifdef dma_mapping_error ++#undef dma_mapping_error ++#endif ++#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) ++ ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++#define HAVE_TX_MQ ++#endif ++ ++#ifdef HAVE_TX_MQ ++extern void _kc_netif_tx_stop_all_queues(struct net_device *); ++extern void _kc_netif_tx_wake_all_queues(struct net_device *); ++extern void _kc_netif_tx_start_all_queues(struct net_device *); ++#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) ++#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) ++#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) ++#undef netif_stop_subqueue ++#define netif_stop_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ 
netif_stop_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_stop_queue((_ndev)); \ ++ } while (0) ++#undef netif_start_subqueue ++#define netif_start_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ netif_start_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_start_queue((_ndev)); \ ++ } while (0) ++#else /* HAVE_TX_MQ */ ++#define netif_tx_stop_all_queues(a) netif_stop_queue(a) ++#define netif_tx_wake_all_queues(a) netif_wake_queue(a) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) ++#define netif_tx_start_all_queues(a) netif_start_queue(a) ++#else ++#define netif_tx_start_all_queues(a) do {} while (0) ++#endif ++#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) ++#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) ++#endif /* HAVE_TX_MQ */ ++#ifndef NETIF_F_MULTI_QUEUE ++#define NETIF_F_MULTI_QUEUE 0 ++#define netif_is_multiqueue(a) 0 ++#define netif_wake_subqueue(a, b) ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef __WARN_printf ++extern void __kc_warn_slowpath(const char *file, const int line, ++ const char *fmt, ...) __attribute__((format(printf, 3, 4))); ++#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) ++#endif /* __WARN_printf */ ++ ++#ifndef WARN ++#define WARN(condition, format...) ({ \ ++ int __ret_warn_on = !!(condition); \ ++ if (unlikely(__ret_warn_on)) \ ++ __WARN_printf(format); \ ++ unlikely(__ret_warn_on); \ ++}) ++#endif /* WARN */ ++#undef HAVE_IXGBE_DEBUG_FS ++#undef HAVE_IGB_DEBUG_FS ++#else /* < 2.6.27 */ ++#define HAVE_TX_MQ ++#define HAVE_NETDEV_SELECT_QUEUE ++#ifdef CONFIG_DEBUG_FS ++#define HAVE_IXGBE_DEBUG_FS ++#define HAVE_IGB_DEBUG_FS ++#endif /* CONFIG_DEBUG_FS */ ++#endif /* < 2.6.27 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ ++ pci_resource_len(pdev, bar)) ++#define pci_wake_from_d3 _kc_pci_wake_from_d3 ++#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep ++extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); ++extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); ++#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) ++#ifndef __skb_queue_head_init ++static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) ++{ ++ list->prev = list->next = (struct sk_buff *)list; ++ list->qlen = 0; ++} ++#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) ++#endif ++ ++#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ ++#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ ++ ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++#ifndef swap ++#define swap(a, b) \ ++ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) ++#endif ++#define pci_request_selected_regions_exclusive(pdev, bars, name) \ ++ pci_request_selected_regions(pdev, bars, name) ++#ifndef CONFIG_NR_CPUS ++#define CONFIG_NR_CPUS 1 ++#endif /* CONFIG_NR_CPUS */ ++#ifndef pcie_aspm_enabled ++#define pcie_aspm_enabled() (1) ++#endif /* pcie_aspm_enabled */ ++ ++#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ ++ ++#ifndef PCI_EXP_LNKSTA_CLS ++#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ ++#endif ++#ifndef PCI_EXP_LNKSTA_NLW ++#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ ++#endif ++ ++#ifndef pci_clear_master ++extern void _kc_pci_clear_master(struct 
pci_dev *dev); ++#define pci_clear_master(dev) _kc_pci_clear_master(dev) ++#endif ++ ++#ifndef PCI_EXP_LNKCTL_ASPMC ++#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ ++#endif ++#else /* < 2.6.29 */ ++#ifndef HAVE_NET_DEVICE_OPS ++#define HAVE_NET_DEVICE_OPS ++#endif ++#ifdef CONFIG_DCB ++#define HAVE_PFC_MODE_ENABLE ++#endif /* CONFIG_DCB */ ++#endif /* < 2.6.29 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) ++#define NO_PTP_SUPPORT ++#define skb_rx_queue_recorded(a) false ++#define skb_get_rx_queue(a) 0 ++#define skb_record_rx_queue(a, b) do {} while (0) ++#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) ++#ifndef CONFIG_PCI_IOV ++#undef pci_enable_sriov ++#define pci_enable_sriov(a, b) -ENOTSUPP ++#undef pci_disable_sriov ++#define pci_disable_sriov(a) do {} while (0) ++#endif /* CONFIG_PCI_IOV */ ++#ifndef pr_cont ++#define pr_cont(fmt, ...) \ ++ printk(KERN_CONT fmt, ##__VA_ARGS__) ++#endif /* pr_cont */ ++static inline void _kc_synchronize_irq(unsigned int a) ++{ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ synchronize_irq(); ++#else /* < 2.5.28 */ ++ synchronize_irq(a); ++#endif /* < 2.5.28 */ ++} ++#undef synchronize_irq ++#define synchronize_irq(a) _kc_synchronize_irq(a) ++ ++#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ ++ ++#ifdef nr_cpus_node ++#undef nr_cpus_node ++#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) ++#endif ++ ++#else /* < 2.6.30 */ ++#define HAVE_ASPM_QUIRKS ++#endif /* < 2.6.30 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) ++#define ETH_P_1588 0x88F7 ++#define ETH_P_FIP 0x8914 ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc_count) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(uclist, dev) \ ++ for (uclist = dev->uc_list; uclist; uclist = uclist->next) ++#endif ++#ifndef PORT_OTHER ++#define PORT_OTHER 0xff ++#endif ++#ifndef MDIO_PHY_ID_PRTAD ++#define MDIO_PHY_ID_PRTAD 0x03e0 ++#endif ++#ifndef MDIO_PHY_ID_DEVAD ++#define MDIO_PHY_ID_DEVAD 0x001f ++#endif ++#ifndef skb_dst ++#define skb_dst(s) ((s)->dst) ++#endif ++ ++#ifndef SUPPORTED_1000baseKX_Full ++#define SUPPORTED_1000baseKX_Full (1 << 17) ++#endif ++#ifndef SUPPORTED_10000baseKX4_Full ++#define SUPPORTED_10000baseKX4_Full (1 << 18) ++#endif ++#ifndef SUPPORTED_10000baseKR_Full ++#define SUPPORTED_10000baseKR_Full (1 << 19) ++#endif ++ ++#ifndef ADVERTISED_1000baseKX_Full ++#define ADVERTISED_1000baseKX_Full (1 << 17) ++#endif ++#ifndef ADVERTISED_10000baseKX4_Full ++#define ADVERTISED_10000baseKX4_Full (1 << 18) ++#endif ++#ifndef ADVERTISED_10000baseKR_Full ++#define ADVERTISED_10000baseKR_Full (1 << 19) ++#endif ++ ++#else /* < 2.6.31 */ ++#ifndef HAVE_NETDEV_STORAGE_ADDRESS ++#define HAVE_NETDEV_STORAGE_ADDRESS ++#endif ++#ifndef HAVE_NETDEV_HW_ADDR ++#define HAVE_NETDEV_HW_ADDR ++#endif ++#ifndef HAVE_TRANS_START_IN_QUEUE ++#define HAVE_TRANS_START_IN_QUEUE ++#endif ++#ifndef HAVE_INCLUDE_LINUX_MDIO_H ++#define HAVE_INCLUDE_LINUX_MDIO_H ++#endif ++#include ++#endif /* < 2.6.31 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) ++#undef netdev_tx_t ++#define netdev_tx_t int ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++static inline int _kc_pm_runtime_get_sync() ++{ ++ return 1; ++} ++#define 
pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() ++#else /* 2.6.0 => 2.6.32 */ ++static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) ++{ ++ return 1; ++} ++#ifndef pm_runtime_get_sync ++#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) ++#endif ++#endif /* 2.6.0 => 2.6.32 */ ++#ifndef pm_runtime_put ++#define pm_runtime_put(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_sync ++#define pm_runtime_put_sync(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_resume ++#define pm_runtime_resume(dev) do {} while (0) ++#endif ++#ifndef pm_schedule_suspend ++#define pm_schedule_suspend(dev, t) do {} while (0) ++#endif ++#ifndef pm_runtime_set_suspended ++#define pm_runtime_set_suspended(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_disable ++#define pm_runtime_disable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_noidle ++#define pm_runtime_put_noidle(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_set_active ++#define pm_runtime_set_active(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_enable ++#define pm_runtime_enable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_get_noresume ++#define pm_runtime_get_noresume(dev) do {} while (0) ++#endif ++#else /* < 2.6.32 */ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_NET_DEVICE_EXTENDED ++#endif /* RHEL >= 6.2 && RHEL < 7.0 */ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_NET_DEVICE_OPS_EXT ++#define HAVE_NDO_SET_FEATURES ++#endif /* RHEL >= 6.6 && RHEL < 7.0 */ ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_OPS_GETAPP ++#define HAVE_DCBNL_OPS_GETAPP ++#endif ++#endif /* CONFIG_DCB */ ++#include ++/* IOV bad DMA target work arounds require at least this kernel rev support */ ++#define HAVE_PCIE_TYPE ++#endif /* < 2.6.32 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#ifndef pci_pcie_cap ++#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) ++#endif ++#ifndef IPV4_FLOW ++#define IPV4_FLOW 0x10 ++#endif /* IPV4_FLOW */ ++#ifndef IPV6_FLOW ++#define IPV6_FLOW 0x11 ++#endif /* IPV6_FLOW */ ++/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ ++#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) ++#endif /* RHEL6 or SLES11 SP1 */ ++#ifndef __percpu ++#define __percpu ++#endif /* __percpu */ ++#ifndef PORT_DA ++#define PORT_DA PORT_OTHER ++#endif ++#ifndef PORT_NONE ++#define PORT_NONE PORT_OTHER ++#endif ++ ++#if ((RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ++#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) ++#undef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME ++#undef DEFINE_DMA_UNMAP_LEN ++#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME ++#undef dma_unmap_addr ++#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) ++#undef dma_unmap_addr_set ++#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) ++#undef dma_unmap_len ++#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) ++#undef dma_unmap_len_set ++#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = 
(VAL)) ++#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ ++ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) ++static inline bool pci_is_pcie(struct pci_dev *dev) ++{ ++ return !!pci_pcie_cap(dev); ++} ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) ++#define sk_tx_queue_get(_sk) (-1) ++#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) ++#endif /* !(RHEL >= 6.2) */ ++ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#define HAVE_ETHTOOL_SET_PHYS_ID ++#define HAVE_ETHTOOL_GET_TS_INFO ++#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) ++#define HAVE_ETHTOOL_GSRSSH ++#define HAVE_RHEL6_SRIOV_CONFIGURE ++#define HAVE_RXFH_NONCONST ++#endif /* RHEL > 6.5 */ ++#endif /* RHEL >= 6.4 && RHEL < 7.0 */ ++ ++#else /* < 2.6.33 */ ++#endif /* < 2.6.33 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++#ifndef pci_num_vf ++#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) ++extern int _kc_pci_num_vf(struct pci_dev *dev); ++#endif ++#endif /* RHEL_RELEASE_CODE */ ++ ++#ifndef ETH_FLAG_NTUPLE ++#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE ++#endif ++ ++#ifndef netdev_mc_count ++#define netdev_mc_count(dev) ((dev)->mc_count) ++#endif ++#ifndef netdev_mc_empty ++#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(mclist, dev) \ ++ for (mclist = dev->mc_list; mclist; mclist = mclist->next) ++#endif ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc.count) ++#endif ++#ifndef netdev_uc_empty ++#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(ha, dev) \ ++ list_for_each_entry(ha, &dev->uc.list, list) ++#endif ++#ifndef dma_set_coherent_mask ++#define dma_set_coherent_mask(dev,mask) \ ++ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) ++#endif ++#ifndef pci_dev_run_wake ++#define pci_dev_run_wake(pdev) (0) ++#endif ++ ++/* netdev logging taken from include/linux/netdevice.h */ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (dev->reg_state != NETREG_REGISTERED) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#undef netdev_printk ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#define netdev_printk(level, netdev, format, args...) \ ++do { \ ++ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ ++ printk(level "%s: " format, pci_name(pdev), ##args); \ ++} while(0) ++#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define netdev_printk(level, netdev, format, args...) 
\ ++do { \ ++ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ ++ struct device *dev = pci_dev_to_dev(pdev); \ ++ dev_printk(level, dev, "%s: " format, \ ++ netdev_name(netdev), ##args); \ ++} while(0) ++#else /* 2.6.21 => 2.6.34 */ ++#define netdev_printk(level, netdev, format, args...) \ ++ dev_printk(level, (netdev)->dev.parent, \ ++ "%s: " format, \ ++ netdev_name(netdev), ##args) ++#endif /* <2.6.0 <2.6.21 <2.6.34 */ ++#undef netdev_emerg ++#define netdev_emerg(dev, format, args...) \ ++ netdev_printk(KERN_EMERG, dev, format, ##args) ++#undef netdev_alert ++#define netdev_alert(dev, format, args...) \ ++ netdev_printk(KERN_ALERT, dev, format, ##args) ++#undef netdev_crit ++#define netdev_crit(dev, format, args...) \ ++ netdev_printk(KERN_CRIT, dev, format, ##args) ++#undef netdev_err ++#define netdev_err(dev, format, args...) \ ++ netdev_printk(KERN_ERR, dev, format, ##args) ++#undef netdev_warn ++#define netdev_warn(dev, format, args...) \ ++ netdev_printk(KERN_WARNING, dev, format, ##args) ++#undef netdev_notice ++#define netdev_notice(dev, format, args...) \ ++ netdev_printk(KERN_NOTICE, dev, format, ##args) ++#undef netdev_info ++#define netdev_info(dev, format, args...) \ ++ netdev_printk(KERN_INFO, dev, format, ##args) ++#undef netdev_dbg ++#if defined(DEBUG) ++#define netdev_dbg(__dev, format, args...) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args) ++#elif defined(CONFIG_DYNAMIC_DEBUG) ++#define netdev_dbg(__dev, format, args...) \ ++do { \ ++ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ ++ netdev_name(__dev), ##args); \ ++} while (0) ++#else /* DEBUG */ ++#define netdev_dbg(__dev, format, args...) \ ++({ \ ++ if (0) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ ++ 0; \ ++}) ++#endif /* DEBUG */ ++ ++#undef netif_printk ++#define netif_printk(priv, type, level, dev, fmt, args...) \ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_printk(level, (dev), fmt, ##args); \ ++} while (0) ++ ++#undef netif_emerg ++#define netif_emerg(priv, type, dev, fmt, args...) \ ++ netif_level(emerg, priv, type, dev, fmt, ##args) ++#undef netif_alert ++#define netif_alert(priv, type, dev, fmt, args...) \ ++ netif_level(alert, priv, type, dev, fmt, ##args) ++#undef netif_crit ++#define netif_crit(priv, type, dev, fmt, args...) \ ++ netif_level(crit, priv, type, dev, fmt, ##args) ++#undef netif_err ++#define netif_err(priv, type, dev, fmt, args...) \ ++ netif_level(err, priv, type, dev, fmt, ##args) ++#undef netif_warn ++#define netif_warn(priv, type, dev, fmt, args...) \ ++ netif_level(warn, priv, type, dev, fmt, ##args) ++#undef netif_notice ++#define netif_notice(priv, type, dev, fmt, args...) \ ++ netif_level(notice, priv, type, dev, fmt, ##args) ++#undef netif_info ++#define netif_info(priv, type, dev, fmt, args...) \ ++ netif_level(info, priv, type, dev, fmt, ##args) ++#undef netif_dbg ++#define netif_dbg(priv, type, dev, fmt, args...) 
\ ++ netif_level(dbg, priv, type, dev, fmt, ##args) ++ ++#ifdef SET_SYSTEM_SLEEP_PM_OPS ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#endif ++ ++#ifndef for_each_set_bit ++#define for_each_set_bit(bit, addr, size) \ ++ for ((bit) = find_first_bit((addr), (size)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++#endif /* for_each_set_bit */ ++ ++#ifndef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN ++#define dma_unmap_addr pci_unmap_addr ++#define dma_unmap_addr_set pci_unmap_addr_set ++#define dma_unmap_len pci_unmap_len ++#define dma_unmap_len_set pci_unmap_len_set ++#endif /* DEFINE_DMA_UNMAP_ADDR */ ++ ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) ++#ifdef IGB_HWMON ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++#define sysfs_attr_init(attr) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ (attr)->key = &__key; \ ++ } while (0) ++#else ++#define sysfs_attr_init(attr) do {} while (0) ++#endif /* CONFIG_DEBUG_LOCK_ALLOC */ ++#endif /* IGB_HWMON */ ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++static inline bool _kc_pm_runtime_suspended() ++{ ++ return false; ++} ++#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() ++#else /* 2.6.0 => 2.6.34 */ ++static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) ++{ ++ return false; ++} ++#ifndef pm_runtime_suspended ++#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) ++#endif ++#endif /* 2.6.0 => 2.6.34 */ ++ ++#ifndef pci_bus_speed ++/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ ++enum _kc_pci_bus_speed { ++ _KC_PCIE_SPEED_2_5GT = 0x14, ++ _KC_PCIE_SPEED_5_0GT = 0x15, ++ _KC_PCIE_SPEED_8_0GT = 0x16, ++ _KC_PCI_SPEED_UNKNOWN = 0xff, ++}; ++#define pci_bus_speed _kc_pci_bus_speed ++#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT ++#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT ++#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT ++#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN ++#endif /* pci_bus_speed */ ++ ++#else /* < 2.6.34 */ ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#ifndef HAVE_SET_RX_MODE ++#define HAVE_SET_RX_MODE ++#endif ++ ++#endif /* < 2.6.34 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, ++ const void __user *from, size_t count); ++#define simple_write_to_buffer _kc_simple_write_to_buffer ++ ++#ifndef PCI_EXP_LNKSTA_NLW_SHIFT ++#define PCI_EXP_LNKSTA_NLW_SHIFT 4 ++#endif ++ ++#ifndef numa_node_id ++#define numa_node_id() 0 ++#endif ++#ifndef numa_mem_id ++#define numa_mem_id numa_node_id ++#endif ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) ++#ifdef HAVE_TX_MQ ++#include ++#ifndef CONFIG_NETDEVICES_MULTIQUEUE ++int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); ++#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ dev->egress_subqueue_count = txq; ++ return 0; ++} ++#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ ++#else /* HAVE_TX_MQ */ ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, ++ unsigned int __always_unused txq) ++{ ++ return 0; ++} ++#endif /* HAVE_TX_MQ */ ++#define netif_set_real_num_tx_queues(dev, txq) \ ++ _kc_netif_set_real_num_tx_queues(dev, txq) 
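The compat macro just above reroutes netif_set_real_num_tx_queues() to the _kc_ wrapper on kernels that predate the real helper (and to a stub when multiqueue TX is unavailable). A minimal sketch of how driver code consumes that shim follows; this is a hypothetical fragment, not part of the patch, and example_setup_tx and num_txq are invented names:

#include <linux/netdevice.h>

/* Hypothetical fragment for illustration only. The call below is written
 * against the modern API; on pre-2.6.35 kernels the macro defined in the
 * patch expands it to _kc_netif_set_real_num_tx_queues() instead. */
static int example_setup_tx(struct net_device *netdev, unsigned int num_txq)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, num_txq);
	if (err)
		return err;

	netif_tx_start_all_queues(netdev);
	return 0;
}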
++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ ++#ifndef ETH_FLAG_RXHASH ++#define ETH_FLAG_RXHASH (1<<28) ++#endif /* ETH_FLAG_RXHASH */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) ++#define HAVE_IRQ_AFFINITY_HINT ++#endif ++#else /* < 2.6.35 */ ++#define HAVE_PM_QOS_REQUEST_LIST ++#define HAVE_IRQ_AFFINITY_HINT ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); ++#define ethtool_op_set_flags _kc_ethtool_op_set_flags ++extern u32 _kc_ethtool_op_get_flags(struct net_device *); ++#define ethtool_op_get_flags _kc_ethtool_op_get_flags ++ ++enum { ++ WQ_UNBOUND = 0, ++ WQ_RESCUER = 0, ++}; ++ ++#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#ifdef NET_IP_ALIGN ++#undef NET_IP_ALIGN ++#endif ++#define NET_IP_ALIGN 0 ++#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ ++ ++#ifdef NET_SKB_PAD ++#undef NET_SKB_PAD ++#endif ++ ++#if (L1_CACHE_BYTES > 32) ++#define NET_SKB_PAD L1_CACHE_BYTES ++#else ++#define NET_SKB_PAD 32 ++#endif ++ ++static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) ++{ ++ struct sk_buff *skb; ++ ++ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); ++ if (skb) { ++#if (NET_IP_ALIGN + NET_SKB_PAD) ++ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); ++#endif ++ skb->dev = dev; ++ } ++ return skb; ++} ++ ++#ifdef netdev_alloc_skb_ip_align ++#undef netdev_alloc_skb_ip_align ++#endif ++#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) ++ ++#undef netif_level ++#define netif_level(level, priv, type, dev, fmt, args...) \ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_##level(dev, fmt, ##args); \ ++} while (0) ++ ++#undef usleep_range ++#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) ++ ++#define u64_stats_update_begin(a) do { } while(0) ++#define u64_stats_update_end(a) do { } while(0) ++#define u64_stats_fetch_begin(a) do { } while(0) ++#define u64_stats_fetch_retry_bh(a,b) (0) ++#define u64_stats_fetch_begin_bh(a) (0) ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) ++#define HAVE_8021P_SUPPORT ++#endif ++ ++/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) ++static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) ++{ ++ return; ++} ++#endif ++ ++#else /* < 2.6.36 */ ++ ++#define HAVE_PM_QOS_REQUEST_ACTIVE ++#define HAVE_8021P_SUPPORT ++#define HAVE_NDO_GET_STATS64 ++#endif /* < 2.6.36 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) ++#define HAVE_NON_CONST_PCI_DRIVER_NAME ++#ifndef netif_set_real_num_tx_queues ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ netif_set_real_num_tx_queues(dev, txq); ++ return 0; ++} ++#define netif_set_real_num_tx_queues(dev, txq) \ ++ _kc_netif_set_real_num_tx_queues(dev, txq) ++#endif ++#ifndef netif_set_real_num_rx_queues ++static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, ++ unsigned int __always_unused rxq) ++{ ++ return 0; ++} ++#define netif_set_real_num_rx_queues(dev, rxq) \ ++ __kc_netif_set_real_num_rx_queues((dev), (rxq)) ++#endif ++#ifndef 
ETHTOOL_RXNTUPLE_ACTION_CLEAR ++#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) ++#endif ++#ifndef VLAN_N_VID ++#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN ++#endif /* VLAN_N_VID */ ++#ifndef ETH_FLAG_TXVLAN ++#define ETH_FLAG_TXVLAN (1 << 7) ++#endif /* ETH_FLAG_TXVLAN */ ++#ifndef ETH_FLAG_RXVLAN ++#define ETH_FLAG_RXVLAN (1 << 8) ++#endif /* ETH_FLAG_RXVLAN */ ++ ++#define WQ_MEM_RECLAIM WQ_RESCUER ++ ++static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) ++{ ++ WARN_ON(skb->ip_summed != CHECKSUM_NONE); ++} ++#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) ++ ++static inline void *_kc_vzalloc_node(unsigned long size, int node) ++{ ++ void *addr = vmalloc_node(size, node); ++ if (addr) ++ memset(addr, 0, size); ++ return addr; ++} ++#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) ++ ++static inline void *_kc_vzalloc(unsigned long size) ++{ ++ void *addr = vmalloc(size); ++ if (addr) ++ memset(addr, 0, size); ++ return addr; ++} ++#define vzalloc(_size) _kc_vzalloc(_size) ++ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ ++ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) ++static inline __be16 vlan_get_protocol(const struct sk_buff *skb) ++{ ++ if (vlan_tx_tag_present(skb) || ++ skb->protocol != cpu_to_be16(ETH_P_8021Q)) ++ return skb->protocol; ++ ++ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) ++ return 0; ++ ++ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; ++} ++#endif /* !RHEL5.7+ || RHEL6.0 */ ++ ++#ifdef HAVE_HW_TIME_STAMP ++#define SKBTX_HW_TSTAMP (1 << 0) ++#define SKBTX_IN_PROGRESS (1 << 2) ++#define SKB_SHARED_TX_IS_UNION ++#endif ++ ++#ifndef device_wakeup_enable ++#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) ++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) ++#ifndef HAVE_VLAN_RX_REGISTER ++#define HAVE_VLAN_RX_REGISTER ++#endif ++#endif /* > 2.4.18 */ ++#endif /* < 2.6.37 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define skb_checksum_start_offset(skb) skb_transport_offset(skb) ++#else /* 2.6.22 -> 2.6.37 */ ++static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) ++{ ++ return skb->csum_start - skb_headroom(skb); ++} ++#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) ++#endif /* 2.6.22 -> 2.6.37 */ ++#if IS_ENABLED(CONFIG_DCB) ++#ifndef IEEE_8021QAZ_MAX_TCS ++#define IEEE_8021QAZ_MAX_TCS 8 ++#endif ++#ifndef DCB_CAP_DCBX_HOST ++#define DCB_CAP_DCBX_HOST 0x01 ++#endif ++#ifndef DCB_CAP_DCBX_LLD_MANAGED ++#define DCB_CAP_DCBX_LLD_MANAGED 0x02 ++#endif ++#ifndef DCB_CAP_DCBX_VER_CEE ++#define DCB_CAP_DCBX_VER_CEE 0x04 ++#endif ++#ifndef DCB_CAP_DCBX_VER_IEEE ++#define DCB_CAP_DCBX_VER_IEEE 0x08 ++#endif ++#ifndef DCB_CAP_DCBX_STATIC ++#define DCB_CAP_DCBX_STATIC 0x10 ++#endif ++#endif /* CONFIG_DCB */ ++#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) ++#define CONFIG_XPS ++#endif /* RHEL_RELEASE_VERSION(6,2) */ ++#endif /* < 2.6.38 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) ++#ifndef TC_BITMASK ++#define TC_BITMASK 15 ++#endif ++#ifndef NETIF_F_RXCSUM ++#define NETIF_F_RXCSUM (1 << 29) ++#endif ++#ifndef skb_queue_reverse_walk_safe ++#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ ++ for (skb = (queue)->prev, tmp = skb->prev; \ ++ skb != 
(struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->prev) ++#endif ++ ++#ifndef udp_csum ++#define udp_csum __kc_udp_csum ++static inline __wsum __kc_udp_csum(struct sk_buff *skb) ++{ ++ __wsum csum = csum_partial(skb_transport_header(skb), ++ sizeof(struct udphdr), skb->csum); ++ ++ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { ++ csum = csum_add(csum, skb->csum); ++ } ++ return csum; ++} ++#endif /* udp_csum */ ++#else /* < 2.6.39 */ ++#ifndef HAVE_MQPRIO ++#define HAVE_MQPRIO ++#endif ++#ifndef HAVE_SETUP_TC ++#define HAVE_SETUP_TC ++#endif ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_IEEE ++#define HAVE_DCBNL_IEEE ++#endif ++#endif /* CONFIG_DCB */ ++#ifndef HAVE_NDO_SET_FEATURES ++#define HAVE_NDO_SET_FEATURES ++#endif ++#endif /* < 2.6.39 */ ++ ++/*****************************************************************************/ ++/* use < 2.6.40 because of a Fedora 15 kernel update where they ++ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features ++ * like set_phys_id for ethtool. ++ */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) ++#ifdef ETHTOOL_GRXRINGS ++#ifndef FLOW_EXT ++#define FLOW_EXT 0x80000000 ++union _kc_ethtool_flow_union { ++ struct ethtool_tcpip4_spec tcp_ip4_spec; ++ struct ethtool_usrip4_spec usr_ip4_spec; ++ __u8 hdata[60]; ++}; ++struct _kc_ethtool_flow_ext { ++ __be16 vlan_etype; ++ __be16 vlan_tci; ++ __be32 data[2]; ++}; ++struct _kc_ethtool_rx_flow_spec { ++ __u32 flow_type; ++ union _kc_ethtool_flow_union h_u; ++ struct _kc_ethtool_flow_ext h_ext; ++ union _kc_ethtool_flow_union m_u; ++ struct _kc_ethtool_flow_ext m_ext; ++ __u64 ring_cookie; ++ __u32 location; ++}; ++#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec ++#endif /* FLOW_EXT */ ++#endif ++ ++#define pci_disable_link_state_locked pci_disable_link_state ++ ++#ifndef PCI_LTR_VALUE_MASK ++#define PCI_LTR_VALUE_MASK 0x000003ff ++#endif ++#ifndef PCI_LTR_SCALE_MASK ++#define PCI_LTR_SCALE_MASK 0x00001c00 ++#endif ++#ifndef PCI_LTR_SCALE_SHIFT ++#define PCI_LTR_SCALE_SHIFT 10 ++#endif ++ ++#else /* < 2.6.40 */ ++#define HAVE_ETHTOOL_SET_PHYS_ID ++#endif /* < 2.6.40 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) ++#define USE_LEGACY_PM_SUPPORT ++#ifndef kfree_rcu ++#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) ++#endif /* kfree_rcu */ ++#ifndef kstrtol_from_user ++#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) ++static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, ++ unsigned int base, long *res) ++{ ++ /* sign, base 2 representation, newline, terminator */ ++ char buf[1 + sizeof(long) * 8 + 1 + 1]; ++ ++ count = min(count, sizeof(buf) - 1); ++ if (copy_from_user(buf, s, count)) ++ return -EFAULT; ++ buf[count] = '\0'; ++ return strict_strtol(buf, base, res); ++} ++#endif ++ ++/* 20000base_blah_full Supported and Advertised Registers */ ++#define SUPPORTED_20000baseMLD2_Full (1 << 21) ++#define SUPPORTED_20000baseKR2_Full (1 << 22) ++#define ADVERTISED_20000baseMLD2_Full (1 << 21) ++#define ADVERTISED_20000baseKR2_Full (1 << 22) ++#endif /* < 3.0.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) ++#ifndef __netdev_alloc_skb_ip_align ++#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) ++#endif /* __netdev_alloc_skb_ip_align */ ++#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) ++#define 
dcb_ieee_delapp(dev, app) 0 ++#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) ++ ++/* 1000BASE-T Control register */ ++#define CTL1000_AS_MASTER 0x0800 ++#define CTL1000_ENABLE_MASTER 0x1000 ++ ++/* kernels less than 3.0.0 don't have this */ ++#ifndef ETH_P_8021AD ++#define ETH_P_8021AD 0x88A8 ++#endif ++#else /* < 3.1.0 */ ++#ifndef HAVE_DCBNL_IEEE_DELAPP ++#define HAVE_DCBNL_IEEE_DELAPP ++#endif ++#endif /* < 3.1.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) ++#ifndef dma_zalloc_coherent ++#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) ++static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, ++ dma_addr_t *dma_handle, gfp_t flag) ++{ ++ void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif ++#ifdef ETHTOOL_GRXRINGS ++#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS ++#endif /* ETHTOOL_GRXRINGS */ ++ ++#ifndef skb_frag_size ++#define skb_frag_size(frag) _kc_skb_frag_size(frag) ++static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) ++{ ++ return frag->size; ++} ++#endif /* skb_frag_size */ ++ ++#ifndef skb_frag_size_sub ++#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) ++static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) ++{ ++ frag->size -= delta; ++} ++#endif /* skb_frag_size_sub */ ++ ++#ifndef skb_frag_page ++#define skb_frag_page(frag) _kc_skb_frag_page(frag) ++static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) ++{ ++ return frag->page; ++} ++#endif /* skb_frag_page */ ++ ++#ifndef skb_frag_address ++#define skb_frag_address(frag) _kc_skb_frag_address(frag) ++static inline void *_kc_skb_frag_address(const skb_frag_t *frag) ++{ ++ return page_address(skb_frag_page(frag)) + frag->page_offset; ++} ++#endif /* skb_frag_address */ ++ ++#ifndef skb_frag_dma_map ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++#include ++#endif ++#define skb_frag_dma_map(dev,frag,offset,size,dir) \ ++ _kc_skb_frag_dma_map(dev,frag,offset,size,dir) ++static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, ++ const skb_frag_t *frag, ++ size_t offset, size_t size, ++ enum dma_data_direction dir) ++{ ++ return dma_map_page(dev, skb_frag_page(frag), ++ frag->page_offset + offset, size, dir); ++} ++#endif /* skb_frag_dma_map */ ++ ++#ifndef __skb_frag_unref ++#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) ++static inline void __kc_skb_frag_unref(skb_frag_t *frag) ++{ ++ put_page(skb_frag_page(frag)); ++} ++#endif /* __skb_frag_unref */ ++ ++#ifndef SPEED_UNKNOWN ++#define SPEED_UNKNOWN -1 ++#endif ++#ifndef DUPLEX_UNKNOWN ++#define DUPLEX_UNKNOWN 0xff ++#endif ++#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) ++#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_PCI_DEV_FLAGS_ASSIGNED ++#endif ++#endif ++#else /* < 3.2.0 */ ++#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_VF_SPOOFCHK_CONFIGURE ++#endif ++#ifndef HAVE_SKB_L4_RXHASH ++#define HAVE_SKB_L4_RXHASH ++#endif ++#define HAVE_IOMMU_PRESENT ++#define HAVE_PM_QOS_REQUEST_LIST_NEW ++#endif /* < 3.2.0 */ ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) ++#undef ixgbe_get_netdev_tc_txq ++#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) ++#endif 
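The < 3.2.0 block above supplies dma_zalloc_coherent() and the skb_frag_*() accessors as fallbacks built from the older primitives. A small sketch of the calling convention follows; this is a hypothetical fragment, not part of the patch, and example_ring and example_alloc_ring are invented names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical fragment for illustration only. On kernels older than 3.2
 * the compat macro substitutes _kc_dma_zalloc_coherent(), which falls back
 * to dma_alloc_coherent() followed by memset(). */
struct example_ring {
	void *desc;
	dma_addr_t dma;
	unsigned int size;
};

static int example_alloc_ring(struct device *dev, struct example_ring *ring)
{
	ring->desc = dma_zalloc_coherent(dev, ring->size, &ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}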
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) ++/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than ++ * alloc_workqueue() to avoid compiler warning from -Wvarargs ++ */ ++static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) ++_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, ++ const char *fmt, ...) ++{ ++ struct workqueue_struct *wq; ++ va_list args, temp; ++ unsigned int len; ++ char *p; ++ ++ va_start(args, fmt); ++ va_copy(temp, args); ++ len = vsnprintf(NULL, 0, fmt, temp); ++ va_end(temp); ++ ++ p = kmalloc(len + 1, GFP_KERNEL); ++ if (!p) { ++ va_end(args); ++ return NULL; ++ } ++ ++ vsnprintf(p, len + 1, fmt, args); ++ va_end(args); ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++ wq = create_workqueue(p); ++#else ++ wq = alloc_workqueue(p, flags, max_active); ++#endif ++ kfree(p); ++ ++ return wq; ++} ++#ifdef alloc_workqueue ++#undef alloc_workqueue ++#endif ++#define alloc_workqueue(fmt, flags, max_active, args...) \ ++ _kc_alloc_workqueue(flags, max_active, fmt, ##args) ++ ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) ++typedef u32 netdev_features_t; ++#endif ++#undef PCI_EXP_TYPE_RC_EC ++#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ ++#ifndef CONFIG_BQL ++#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) ++#define netdev_completed_queue(_n, _p, _b) do {} while (0) ++#define netdev_tx_sent_queue(_q, _b) do {} while (0) ++#define netdev_sent_queue(_n, _b) do {} while (0) ++#define netdev_tx_reset_queue(_q) do {} while (0) ++#define netdev_reset_queue(_n) do {} while (0) ++#endif ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#endif /* SLE_VERSION(11,3,0) */ ++#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) ++static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, ++ u8 *nexthdrp, ++ __be16 __always_unused *frag_offp) ++{ ++ return ipv6_skip_exthdr(skb, start, nexthdrp); ++} ++#undef ipv6_skip_exthdr ++#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) ++#endif /* !SLES11sp4 or greater */ ++ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) ++static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) ++{ ++ return index % n_rx_rings; ++} ++#endif ++ ++#else /* ! 
< 3.3.0 */ ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#define HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef ETHTOOL_SRXNTUPLE ++#undef ETHTOOL_SRXNTUPLE ++#endif ++#endif /* < 3.3.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) ++#ifndef NETIF_F_RXFCS ++#define NETIF_F_RXFCS 0 ++#endif /* NETIF_F_RXFCS */ ++#ifndef NETIF_F_RXALL ++#define NETIF_F_RXALL 0 ++#endif /* NETIF_F_RXALL */ ++ ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define NUMTCS_RETURNS_U8 ++ ++int _kc_simple_open(struct inode *inode, struct file *file); ++#define simple_open _kc_simple_open ++#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ ++ ++#ifndef skb_add_rx_frag ++#define skb_add_rx_frag _kc_skb_add_rx_frag ++extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, ++ int, int, unsigned int); ++#endif ++#ifdef NET_ADDR_RANDOM ++#define eth_hw_addr_random(N) do { \ ++ eth_random_addr(N->dev_addr); \ ++ N->addr_assign_type |= NET_ADDR_RANDOM; \ ++ } while (0) ++#else /* NET_ADDR_RANDOM */ ++#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) ++#endif /* NET_ADDR_RANDOM */ ++ ++#ifndef for_each_set_bit_from ++#define for_each_set_bit_from(bit, addr, size) \ ++ for ((bit) = find_next_bit((addr), (size), (bit)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++#endif /* for_each_set_bit_from */ ++ ++#else /* < 3.4.0 */ ++#include ++#endif /* >= 3.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ ++ ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ++#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) ++#define HAVE_PTP_1588_CLOCK ++#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) ++ ++#ifndef ether_addr_equal ++static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) ++{ ++ return !compare_ether_addr(addr1, addr2); ++} ++#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) ++#endif ++ ++#else ++#define HAVE_FDB_OPS ++#define HAVE_ETHTOOL_GET_TS_INFO ++#endif /* < 3.5.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) ++#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ ++ ++#ifndef MDIO_EEE_100TX ++#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ ++#endif ++#ifndef MDIO_EEE_1000T ++#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GT ++#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ ++#endif ++#ifndef MDIO_EEE_1000KX ++#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GKX4 ++#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GKR ++#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ ++#endif ++ ++#ifndef __GFP_MEMALLOC ++#define __GFP_MEMALLOC 0 ++#endif ++ ++#ifndef eth_random_addr ++#define eth_random_addr _kc_eth_random_addr ++static inline void _kc_eth_random_addr(u8 *addr) ++{ ++ get_random_bytes(addr, ETH_ALEN); ++ addr[0] &= 0xfe; /* clear multicast */ ++ addr[0] |= 0x02; /* set local assignment */ ++} ++#endif /* eth_random_addr */ ++#else /* < 3.6.0 */ ++#define HAVE_STRUCT_PAGE_PFMEMALLOC 
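The < 3.4.0 and < 3.6.0 blocks above provide eth_hw_addr_random() and eth_random_addr() fallbacks that generate a random, locally administered MAC address. A brief usage sketch follows; this is a hypothetical fragment, not part of the patch, and example_assign_random_mac is an invented name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical fragment for illustration only. On pre-3.6 kernels
 * eth_random_addr() resolves to _kc_eth_random_addr() above
 * (get_random_bytes() plus clearing the multicast bit and setting the
 * locally-administered bit); eth_hw_addr_random() is shimmed similarly
 * for pre-3.4 kernels. */
static void example_assign_random_mac(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev_info(netdev, "assigned random MAC %pM\n", netdev->dev_addr);
}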
++#endif /* < 3.6.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) ++#ifndef ADVERTISED_40000baseKR4_Full ++/* these defines were all added in one commit, so should be safe ++ * to trigger activiation on one define ++ */ ++#define SUPPORTED_40000baseKR4_Full (1 << 23) ++#define SUPPORTED_40000baseCR4_Full (1 << 24) ++#define SUPPORTED_40000baseSR4_Full (1 << 25) ++#define SUPPORTED_40000baseLR4_Full (1 << 26) ++#define ADVERTISED_40000baseKR4_Full (1 << 23) ++#define ADVERTISED_40000baseCR4_Full (1 << 24) ++#define ADVERTISED_40000baseSR4_Full (1 << 25) ++#define ADVERTISED_40000baseLR4_Full (1 << 26) ++#endif ++ ++#ifndef mmd_eee_cap_to_ethtool_sup_t ++/** ++ * mmd_eee_cap_to_ethtool_sup_t ++ * @eee_cap: value of the MMD EEE Capability register ++ * ++ * A small helper function that translates MMD EEE Capability (3.20) bits ++ * to ethtool supported settings. ++ */ ++static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) ++{ ++ u32 supported = 0; ++ ++ if (eee_cap & MDIO_EEE_100TX) ++ supported |= SUPPORTED_100baseT_Full; ++ if (eee_cap & MDIO_EEE_1000T) ++ supported |= SUPPORTED_1000baseT_Full; ++ if (eee_cap & MDIO_EEE_10GT) ++ supported |= SUPPORTED_10000baseT_Full; ++ if (eee_cap & MDIO_EEE_1000KX) ++ supported |= SUPPORTED_1000baseKX_Full; ++ if (eee_cap & MDIO_EEE_10GKX4) ++ supported |= SUPPORTED_10000baseKX4_Full; ++ if (eee_cap & MDIO_EEE_10GKR) ++ supported |= SUPPORTED_10000baseKR_Full; ++ ++ return supported; ++} ++#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ ++ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) ++#endif /* mmd_eee_cap_to_ethtool_sup_t */ ++ ++#ifndef mmd_eee_adv_to_ethtool_adv_t ++/** ++ * mmd_eee_adv_to_ethtool_adv_t ++ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers ++ * ++ * A small helper function that translates the MMD EEE Advertisment (7.60) ++ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement ++ * settings. ++ */ ++static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) ++{ ++ u32 adv = 0; ++ ++ if (eee_adv & MDIO_EEE_100TX) ++ adv |= ADVERTISED_100baseT_Full; ++ if (eee_adv & MDIO_EEE_1000T) ++ adv |= ADVERTISED_1000baseT_Full; ++ if (eee_adv & MDIO_EEE_10GT) ++ adv |= ADVERTISED_10000baseT_Full; ++ if (eee_adv & MDIO_EEE_1000KX) ++ adv |= ADVERTISED_1000baseKX_Full; ++ if (eee_adv & MDIO_EEE_10GKX4) ++ adv |= ADVERTISED_10000baseKX4_Full; ++ if (eee_adv & MDIO_EEE_10GKR) ++ adv |= ADVERTISED_10000baseKR_Full; ++ ++ return adv; ++} ++ ++#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ ++ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) ++#endif /* mmd_eee_adv_to_ethtool_adv_t */ ++ ++#ifndef ethtool_adv_to_mmd_eee_adv_t ++/** ++ * ethtool_adv_to_mmd_eee_adv_t ++ * @adv: the ethtool advertisement settings ++ * ++ * A small helper function that translates ethtool advertisement settings ++ * to EEE advertisements for the MMD EEE Advertisement (7.60) and ++ * MMD EEE Link Partner Ability (7.61) registers. 
++ */ ++static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) ++{ ++ u16 reg = 0; ++ ++ if (adv & ADVERTISED_100baseT_Full) ++ reg |= MDIO_EEE_100TX; ++ if (adv & ADVERTISED_1000baseT_Full) ++ reg |= MDIO_EEE_1000T; ++ if (adv & ADVERTISED_10000baseT_Full) ++ reg |= MDIO_EEE_10GT; ++ if (adv & ADVERTISED_1000baseKX_Full) ++ reg |= MDIO_EEE_1000KX; ++ if (adv & ADVERTISED_10000baseKX4_Full) ++ reg |= MDIO_EEE_10GKX4; ++ if (adv & ADVERTISED_10000baseKR_Full) ++ reg |= MDIO_EEE_10GKR; ++ ++ return reg; ++} ++#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) ++#endif /* ethtool_adv_to_mmd_eee_adv_t */ ++ ++#ifndef pci_pcie_type ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++static inline u8 pci_pcie_type(struct pci_dev *pdev) ++{ ++ int pos; ++ u16 reg16; ++ ++ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ BUG_ON(!pos); ++ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); ++ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; ++} ++#else /* < 2.6.24 */ ++#define pci_pcie_type(x) (x)->pcie_type ++#endif /* < 2.6.24 */ ++#endif /* pci_pcie_type */ ++ ++#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ ++ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) ++#define ptp_clock_register(caps, args...) ptp_clock_register(caps) ++#endif ++ ++#ifndef pcie_capability_read_word ++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); ++#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) ++#endif /* pcie_capability_read_word */ ++ ++#ifndef pcie_capability_write_word ++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); ++#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) ++#endif /* pcie_capability_write_word */ ++ ++#ifndef pcie_capability_clear_and_set_word ++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, ++ u16 clear, u16 set); ++#define pcie_capability_clear_and_set_word(d,p,c,s) \ ++ __kc_pcie_capability_clear_and_set_word(d,p,c,s) ++#endif /* pcie_capability_clear_and_set_word */ ++ ++#ifndef pcie_capability_clear_word ++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, ++ u16 clear); ++#define pcie_capability_clear_word(d, p, c) \ ++ __kc_pcie_capability_clear_word(d, p, c) ++#endif /* pcie_capability_clear_word */ ++ ++#ifndef PCI_EXP_LNKSTA2 ++#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ ++#endif ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define USE_CONST_DEV_UC_CHAR ++#endif ++ ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) ++#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) ++#endif /* !RHEL6.8+ */ ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ++#include ++#else ++ ++#define DEFINE_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] = \ ++ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } ++ ++#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] __read_mostly = \ ++ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } ++ ++#define DECLARE_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] ++ ++#define HASH_SIZE(name) (ARRAY_SIZE(name)) ++#define HASH_BITS(name) ilog2(HASH_SIZE(name)) ++ ++/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ ++#define hash_min(val, bits) \ ++ (sizeof(val) <= 4 ? 
hash_32(val, bits) : hash_long(val, bits)) ++ ++static inline void __hash_init(struct hlist_head *ht, unsigned int sz) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < sz; i++) ++ INIT_HLIST_HEAD(&ht[i]); ++} ++ ++#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) ++ ++#define hash_add(hashtable, node, key) \ ++ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) ++ ++static inline bool hash_hashed(struct hlist_node *node) ++{ ++ return !hlist_unhashed(node); ++} ++ ++static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < sz; i++) ++ if (!hlist_empty(&ht[i])) ++ return false; ++ ++ return true; ++} ++ ++#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) ++ ++static inline void hash_del(struct hlist_node *node) ++{ ++ hlist_del_init(node); ++} ++#endif /* RHEL >= 6.6 */ ++ ++#else /* >= 3.7.0 */ ++#include ++#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS ++#define USE_CONST_DEV_UC_CHAR ++#endif /* >= 3.7.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) ++#ifndef pci_sriov_set_totalvfs ++static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) ++{ ++ return 0; ++} ++#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) ++#endif ++#ifndef PCI_EXP_LNKCTL_ASPM_L0S ++#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ ++#endif ++#ifndef PCI_EXP_LNKCTL_ASPM_L1 ++#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ ++#endif ++#define HAVE_CONFIG_HOTPLUG ++/* Reserved Ethernet Addresses per IEEE 802.1Q */ ++static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { ++ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; ++ ++#ifndef is_link_local_ether_addr ++static inline bool __kc_is_link_local_ether_addr(const u8 *addr) ++{ ++ __be16 *a = (__be16 *)addr; ++ static const __be16 *b = (const __be16 *)eth_reserved_addr_base; ++ static const __be16 m = cpu_to_be16(0xfff0); ++ ++ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; ++} ++#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) ++#endif /* is_link_local_ether_addr */ ++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, ++ int target, unsigned short *fragoff, int *flags); ++#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) ++ ++#ifndef FLOW_MAC_EXT ++#define FLOW_MAC_EXT 0x40000000 ++#endif /* FLOW_MAC_EXT */ ++ ++#else /* >= 3.8.0 */ ++#ifndef __devinit ++#define __devinit ++#endif ++ ++#ifndef __devinitdata ++#define __devinitdata ++#endif ++ ++#ifndef __devinitconst ++#define __devinitconst ++#endif ++ ++#ifndef __devexit ++#define __devexit ++#endif ++ ++#ifndef __devexit_p ++#define __devexit_p ++#endif ++ ++#ifndef HAVE_ENCAP_CSUM_OFFLOAD ++#define HAVE_ENCAP_CSUM_OFFLOAD ++#endif ++ ++#ifndef HAVE_GRE_ENCAP_OFFLOAD ++#define HAVE_GRE_ENCAP_OFFLOAD ++#endif ++ ++#ifndef HAVE_SRIOV_CONFIGURE ++#define HAVE_SRIOV_CONFIGURE ++#endif ++ ++#define HAVE_BRIDGE_ATTRIBS ++#ifndef BRIDGE_MODE_VEB ++#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ ++#endif /* BRIDGE_MODE_VEB */ ++#ifndef BRIDGE_MODE_VEPA ++#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ ++#endif /* BRIDGE_MODE_VEPA */ ++#endif /* >= 3.8.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) ++ ++#undef 
BUILD_BUG_ON ++#ifdef __CHECKER__ ++#define BUILD_BUG_ON(condition) (0) ++#else /* __CHECKER__ */ ++#ifndef __compiletime_warning ++#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) ++#define __compiletime_warning(message) __attribute__((warning(message))) ++#else /* __GNUC__ */ ++#define __compiletime_warning(message) ++#endif /* __GNUC__ */ ++#endif /* __compiletime_warning */ ++#ifndef __compiletime_error ++#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) ++#define __compiletime_error(message) __attribute__((error(message))) ++#define __compiletime_error_fallback(condition) do { } while (0) ++#else /* __GNUC__ */ ++#define __compiletime_error(message) ++#define __compiletime_error_fallback(condition) \ ++ do { ((void)sizeof(char[1 - 2 * condition])); } while (0) ++#endif /* __GNUC__ */ ++#else /* __compiletime_error */ ++#define __compiletime_error_fallback(condition) do { } while (0) ++#endif /* __compiletime_error */ ++#define __compiletime_assert(condition, msg, prefix, suffix) \ ++ do { \ ++ bool __cond = !(condition); \ ++ extern void prefix ## suffix(void) __compiletime_error(msg); \ ++ if (__cond) \ ++ prefix ## suffix(); \ ++ __compiletime_error_fallback(__cond); \ ++ } while (0) ++ ++#define _compiletime_assert(condition, msg, prefix, suffix) \ ++ __compiletime_assert(condition, msg, prefix, suffix) ++#define compiletime_assert(condition, msg) \ ++ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ++#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) ++#ifndef __OPTIMIZE__ ++#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) ++#else /* __OPTIMIZE__ */ ++#define BUILD_BUG_ON(condition) \ ++ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) ++#endif /* __OPTIMIZE__ */ ++#endif /* __CHECKER__ */ ++ ++#undef hlist_entry ++#define hlist_entry(ptr, type, member) container_of(ptr,type,member) ++ ++#undef hlist_entry_safe ++#define hlist_entry_safe(ptr, type, member) \ ++ ({ typeof(ptr) ____ptr = (ptr); \ ++ ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ ++ }) ++ ++#undef hlist_for_each_entry ++#define hlist_for_each_entry(pos, head, member) \ ++ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ ++ pos; \ ++ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) ++ ++#undef hlist_for_each_entry_safe ++#define hlist_for_each_entry_safe(pos, n, head, member) \ ++ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ ++ pos && ({ n = pos->member.next; 1; }); \ ++ pos = hlist_entry_safe(n, typeof(*pos), member)) ++ ++#undef hash_for_each ++#define hash_for_each(name, bkt, obj, member) \ ++ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ ++ (bkt)++)\ ++ hlist_for_each_entry(obj, &name[bkt], member) ++ ++#undef hash_for_each_safe ++#define hash_for_each_safe(name, bkt, tmp, obj, member) \ ++ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ ++ (bkt)++)\ ++ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) ++ ++#undef hash_for_each_possible ++#define hash_for_each_possible(name, obj, member, key) \ ++ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) ++ ++#undef hash_for_each_possible_safe ++#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ ++ hlist_for_each_entry_safe(obj, tmp,\ ++ &name[hash_min(key, HASH_BITS(name))], member) ++ ++#ifdef CONFIG_XPS ++extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16); ++#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) ++#else /* CONFIG_XPS */ ++#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) ++#endif /* CONFIG_XPS */ ++ ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#define _kc_hashrnd 0xd631614b /* not so random hash salt */ ++extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); ++#define __netdev_pick_tx __kc_netdev_pick_tx ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#else ++#define HAVE_BRIDGE_FILTER ++#define HAVE_FDB_DEL_NLATTR ++#endif /* < 3.9.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++#ifndef NAPI_POLL_WEIGHT ++#define NAPI_POLL_WEIGHT 64 ++#endif ++#ifdef CONFIG_PCI_IOV ++extern int __kc_pci_vfs_assigned(struct pci_dev *dev); ++#else ++static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) ++{ ++ return 0; ++} ++#endif ++#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) ++ ++#ifndef list_first_entry_or_null ++#define list_first_entry_or_null(ptr, type, member) \ ++ (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) ++#endif ++ ++#ifndef VLAN_TX_COOKIE_MAGIC ++static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, ++ u16 vlan_tci) ++{ ++#ifdef VLAN_TAG_PRESENT ++ vlan_tci |= VLAN_TAG_PRESENT; ++#endif ++ skb->vlan_tci = vlan_tci; ++ return skb; ++} ++#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ ++ __kc__vlan_hwaccel_put_tag(skb, vlan_tci) ++#endif ++ ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr, u16 flags); ++#ifdef HAVE_FDB_DEL_NLATTR ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr); ++#else ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ const unsigned char *addr); ++#endif ++#else ++extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr, u16 flags); ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr); ++#endif ++#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add ++#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del ++#endif /* HAVE_FDB_OPS */ ++ ++#ifndef PCI_DEVID ++#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) ++#endif ++#else /* >= 3.10.0 */ ++#define HAVE_ENCAP_TSO_OFFLOAD ++#define USE_DEFAULT_FDB_DEL_DUMP ++#define HAVE_SKB_INNER_NETWORK_HEADER ++#endif /* >= 3.10.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) ++#define netdev_notifier_info_to_dev(ptr) ptr ++#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) ++#define HAVE_NDO_SET_VF_LINK_STATE ++#endif ++#else /* >= 3.11.0 */ ++#define HAVE_NDO_SET_VF_LINK_STATE ++#define HAVE_SKB_INNER_PROTOCOL ++#define HAVE_MPLS_FEATURES ++#endif /* >= 3.11.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) ++extern int __kc_pcie_get_minimum_link(struct pci_dev *dev, ++ enum pci_bus_speed *speed, ++ enum pcie_link_width *width); ++#ifndef pcie_get_minimum_link ++#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) ++#endif ++#else /* >= 3.12.0 */ ++#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) ++#define HAVE_VXLAN_RX_OFFLOAD ++#endif /* < 4.8.0 */ ++#define HAVE_NDO_GET_PHYS_PORT_ID ++#endif /* >= 3.12.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) ++#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) ++extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); ++#ifndef u64_stats_init ++#define u64_stats_init(a) do { } while(0) ++#endif ++#ifndef BIT_ULL ++#define BIT_ULL(n) (1ULL << (n)) ++#endif ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) ++#undef HAVE_STRUCT_PAGE_PFMEMALLOC ++#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT ++#endif ++#ifndef list_next_entry ++#define list_next_entry(pos, member) \ ++ list_entry((pos)->member.next, typeof(*(pos)), member) ++#endif ++ ++#else /* >= 3.13.0 */ ++#define HAVE_VXLAN_CHECKS ++#if 
(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#else ++#define HAVE_NDO_SELECT_QUEUE_ACCEL ++#endif ++#define HAVE_NET_GET_RANDOM_ONCE ++#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS ++#endif ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) ++ ++#ifndef U16_MAX ++#define U16_MAX ((u16)~0U) ++#endif ++ ++#ifndef U32_MAX ++#define U32_MAX ((u32)~0U) ++#endif ++ ++#define dev_consume_skb_any(x) dev_kfree_skb_any(x) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) ++ ++/* it isn't expected that this would be a #define unless we made it so */ ++#ifndef skb_set_hash ++ ++#define PKT_HASH_TYPE_NONE 0 ++#define PKT_HASH_TYPE_L2 1 ++#define PKT_HASH_TYPE_L3 2 ++#define PKT_HASH_TYPE_L4 3 ++ ++#define skb_set_hash __kc_skb_set_hash ++static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, ++ u32 __maybe_unused hash, ++ int __maybe_unused type) ++{ ++#ifdef HAVE_SKB_L4_RXHASH ++ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); ++#endif ++#ifdef NETIF_F_RXHASH ++ skb->rxhash = hash; ++#endif ++} ++#endif /* !skb_set_hash */ ++ ++#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ ++ ++#ifndef HAVE_VXLAN_RX_OFFLOAD ++#define HAVE_VXLAN_RX_OFFLOAD ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ ++#ifndef HAVE_VXLAN_CHECKS ++#define HAVE_VXLAN_CHECKS ++#endif /* HAVE_VXLAN_CHECKS */ ++#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ ++ ++#ifndef pci_enable_msix_range ++extern int __kc_pci_enable_msix_range(struct pci_dev *dev, ++ struct msix_entry *entries, ++ int minvec, int maxvec); ++#define pci_enable_msix_range __kc_pci_enable_msix_range ++#endif ++ ++#ifndef ether_addr_copy ++#define ether_addr_copy __kc_ether_addr_copy ++static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) ++{ ++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ++ *(u32 *)dst = *(const u32 *)src; ++ *(u16 *)(dst + 4) = *(const u16 *)(src + 4); ++#else ++ u16 *a = (u16 *)dst; ++ const u16 *b = (const u16 *)src; ++ ++ a[0] = b[0]; ++ a[1] = b[1]; ++ a[2] = b[2]; ++#endif ++} ++#endif /* ether_addr_copy */ ++ ++#else /* >= 3.14.0 */ ++ ++/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ ++#ifndef HAVE_NDO_DFWD_OPS ++#define HAVE_NDO_DFWD_OPS ++#endif ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif /* 3.14.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ ++ !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) ++#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh ++#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh ++#endif ++ ++#else ++#define HAVE_PTP_1588_CLOCK_PINS ++#define HAVE_NETDEV_PORT ++#endif /* 3.15.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) ++#ifndef smp_mb__before_atomic ++#define smp_mb__before_atomic() smp_mb() ++#define smp_mb__after_atomic() smp_mb() ++#endif ++#ifndef __dev_uc_sync ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, 
const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)); ++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)); ++#endif ++#ifndef NETDEV_HW_ADDR_T_MULTICAST ++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)); ++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)); ++#endif ++#endif /* HAVE_SET_RX_MODE */ ++ ++static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); ++#elif defined(HAVE_SET_RX_MODE) ++ return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, ++ dev, sync, unsync); ++#else ++ return 0; ++#endif ++} ++#define __dev_uc_sync __kc_dev_uc_sync ++ ++static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); ++#else /* NETDEV_HW_ADDR_T_MULTICAST */ ++ __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); ++#endif /* NETDEV_HW_ADDR_T_UNICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++} ++#define __dev_uc_unsync __kc_dev_uc_unsync ++ ++static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); ++#elif defined(HAVE_SET_RX_MODE) ++ return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, ++ dev, sync, unsync); ++#else ++ return 0; ++#endif ++ ++} ++#define __dev_mc_sync __kc_dev_mc_sync ++ ++static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); ++#else /* NETDEV_HW_ADDR_T_MULTICAST */ ++ __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); ++#endif /* NETDEV_HW_ADDR_T_MULTICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++} ++#define __dev_mc_unsync __kc_dev_mc_unsync ++#endif /* __dev_uc_sync */ ++ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++#endif ++ ++#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM ++/* if someone backports this, hopefully they backport as a #define. 
++ * declare it as zero on older kernels so that if it get's or'd in ++ * it won't effect anything, therefore preventing core driver changes ++ */ ++#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 ++#define SKB_GSO_UDP_TUNNEL_CSUM 0 ++#endif ++ ++#else ++#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY ++#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++#endif /* 3.16.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ ++ !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) ++#ifndef timespec64 ++#define timespec64 timespec ++static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) ++{ ++ return ts; ++} ++static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) ++{ ++ return ts64; ++} ++#define timespec64_equal timespec_equal ++#define timespec64_compare timespec_compare ++#define set_normalized_timespec64 set_normalized_timespec ++#define timespec64_add_safe timespec_add_safe ++#define timespec64_add timespec_add ++#define timespec64_sub timespec_sub ++#define timespec64_valid timespec_valid ++#define timespec64_valid_strict timespec_valid_strict ++#define timespec64_to_ns timespec_to_ns ++#define ns_to_timespec64 ns_to_timespec ++#define ktime_to_timespec64 ktime_to_timespec ++#define timespec64_add_ns timespec_add_ns ++#endif /* timespec64 */ ++#endif /* !(RHEL6.8 ++extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); ++extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, ++ struct skb_shared_hwtstamps *hwtstamps); ++#define skb_clone_sk __kc_skb_clone_sk ++#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp ++#endif ++extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); ++#define eth_get_headlen __kc_eth_get_headlen ++#ifndef ETH_P_XDSA ++#define ETH_P_XDSA 0x00F8 ++#endif ++/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_SKBUFF_CSUM_LEVEL ++#endif /* >= RH 7.1 */ ++ ++#undef GENMASK ++#define GENMASK(h, l) \ ++ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++#undef GENMASK_ULL ++#define GENMASK_ULL(h, l) \ ++ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) ++ ++#else /* 3.18.0 */ ++#define HAVE_SKBUFF_CSUM_LEVEL ++#define HAVE_SKB_XMIT_MORE ++#define HAVE_SKB_INNER_PROTOCOL_TYPE ++#endif /* 3.18.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) ++#else ++#define HAVE_NDO_FEATURES_CHECK ++#endif /* 3.18.4 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) ++/* netdev_phys_port_id renamed to netdev_phys_item_id */ ++#define netdev_phys_item_id netdev_phys_port_id ++ ++static inline void _kc_napi_complete_done(struct napi_struct *napi, ++ int __always_unused work_done) { ++ napi_complete(napi); ++} ++#define napi_complete_done _kc_napi_complete_done ++ ++#ifndef NETDEV_RSS_KEY_LEN ++#define NETDEV_RSS_KEY_LEN (13 * 4) ++#endif ++#if ( !(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ) ++#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) ++#endif /* RHEL_RELEASE_CODE */ ++extern void __kc_netdev_rss_key_fill(void *buffer, size_t len); ++#define SPEED_20000 20000 ++#define SPEED_40000 40000 ++#ifndef dma_rmb ++#define dma_rmb() rmb() ++#endif ++#ifndef dev_alloc_pages ++#define dev_alloc_pages(_order) 
alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) ++#endif ++#ifndef dev_alloc_page ++#define dev_alloc_page() dev_alloc_pages(0) ++#endif ++#if !defined(eth_skb_pad) && !defined(skb_put_padto) ++/** ++ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size ++ * @skb: buffer to pad ++ * @len: minimal length ++ * ++ * Pads up a buffer to ensure the trailing bytes exist and are ++ * blanked. If the buffer already contains sufficient data it ++ * is untouched. Otherwise it is extended. Returns zero on ++ * success. The skb is freed on error. ++ */ ++static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int size = skb->len; ++ ++ if (unlikely(size < len)) { ++ len -= size; ++ if (skb_pad(skb, len)) ++ return -ENOMEM; ++ __skb_put(skb, len); ++ } ++ return 0; ++} ++#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) ++ ++static inline int __kc_eth_skb_pad(struct sk_buff *skb) ++{ ++ return __kc_skb_put_padto(skb, ETH_ZLEN); ++} ++#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) ++#endif /* eth_skb_pad && skb_put_padto */ ++ ++#ifndef SKB_ALLOC_NAPI ++/* RHEL 7.2 backported napi_alloc_skb and friends */ ++static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) ++{ ++ return netdev_alloc_skb_ip_align(napi->dev, length); ++} ++#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) ++#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) ++#endif /* SKB_ALLOC_NAPI */ ++#define HAVE_CONFIG_PM_RUNTIME ++#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RXFH_HASHFUNC ++#endif /* 6.7 < RHEL < 7.0 */ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_RXFH_HASHFUNC ++#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++#endif /* RHEL > 7.1 */ ++#ifndef napi_schedule_irqoff ++#define napi_schedule_irqoff napi_schedule ++#endif ++#ifndef READ_ONCE ++#define READ_ONCE(_x) ACCESS_ONCE(_x) ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_FDB_ADD_VID ++#endif ++#else /* 3.19.0 */ ++#define HAVE_NDO_FDB_ADD_VID ++#define HAVE_RXFH_HASHFUNC ++#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++#endif /* 3.19.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) ++/* vlan_tx_xx functions got renamed to skb_vlan */ ++#ifndef skb_vlan_tag_get ++#define skb_vlan_tag_get vlan_tx_tag_get ++#endif ++#ifndef skb_vlan_tag_present ++#define skb_vlan_tag_present vlan_tx_tag_present ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++#endif ++#else ++#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++#endif /* 3.20.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) ++#ifndef NO_PTP_SUPPORT ++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#include ++#else ++#include ++#endif ++static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) ++{ ++ tc->nsec += delta; ++} ++#define timecounter_adjtime __kc_timecounter_adjtime ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++#endif ++#else ++#define HAVE_PTP_CLOCK_INFO_GETTIME64 ++#define 
HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++#define HAVE_PASSTHRU_FEATURES_CHECK ++#define HAVE_NDO_SET_VF_RSS_QUERY_EN ++#endif /* 4,1,0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) ++#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) && \ ++ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) ++{ ++#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC ++ return page->pfmemalloc; ++#else ++ return false; ++#endif ++} ++#endif /* !SLES12sp1 */ ++#else ++#undef HAVE_STRUCT_PAGE_PFMEMALLOC ++#endif /* 4.1.9 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) ++#else ++#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++#endif /* 4.2.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) ++#ifndef CONFIG_64BIT ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) ++#include /* 32-bit readq/writeq */ ++#else /* 3.3.0 => 4.3.x */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) ++#include ++#endif /* 2.6.26 => 3.3.0 */ ++#ifndef readq ++static inline __u64 readq(const volatile void __iomem *addr) ++{ ++ const volatile u32 __iomem *p = addr; ++ u32 low, high; ++ ++ low = readl(p); ++ high = readl(p + 1); ++ ++ return low + ((u64)high << 32); ++} ++#define readq readq ++#endif ++ ++#ifndef writeq ++static inline void writeq(__u64 val, volatile void __iomem *addr) ++{ ++ writel(val, addr); ++ writel(val >> 32, addr + 4); ++} ++#define writeq writeq ++#endif ++#endif /* < 3.3.0 */ ++#endif /* !CONFIG_64BIT */ ++#else ++#define HAVE_NDO_SET_VF_TRUST ++ ++#ifndef CONFIG_64BIT ++#include /* 32-bit readq/writeq */ ++#endif /* !CONFIG_64BIT */ ++#endif /* 4.4.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) ++/* protect against a likely backport */ ++#ifndef NETIF_F_CSUM_MASK ++#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM ++#endif /* NETIF_F_CSUM_MASK */ ++#ifndef NETIF_F_SCTP_CRC ++#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM ++#endif /* NETIF_F_SCTP_CRC */ ++#else ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) ++#define HAVE_GENEVE_RX_OFFLOAD ++#endif /* < 4.8.0 */ ++#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD ++#endif /* 4.5.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) ++#if !(UBUNTU_VERSION_CODE && \ ++ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ ++ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline void napi_consume_skb(struct sk_buff *skb, ++ int __always_unused budget) ++{ ++ dev_consume_skb_any(skb); ++} ++ ++#endif /* UBUNTU_VERSION(4,4,0,21) */ ++static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) ++{ ++ * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); ++} ++ ++#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline void page_ref_inc(struct page *page) ++{ ++ atomic_inc(&page->_count); ++} ++ ++#endif ++ ++#endif /* 4.6.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) ++#else ++#define HAVE_NETIF_TRANS_UPDATE ++#endif /* 4.7.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) ++enum udp_parsable_tunnel_type { ++ UDP_TUNNEL_TYPE_VXLAN, ++ UDP_TUNNEL_TYPE_GENEVE, ++}; ++struct udp_tunnel_info { ++ unsigned short type; ++ sa_family_t sa_family; ++ __be16 port; ++}; ++#else ++#define HAVE_UDP_ENC_RX_OFFLOAD ++#endif /* 4.8.0 */ ++ ++#endif /* _KCOMPAT_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c +--- a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c 1970-01-01 
00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c 2016-11-14 14:32:08.583567168 +0000 +@@ -0,0 +1,1169 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ++ * net/core/ethtool.c - Ethtool ioctl handler ++ * Copyright (c) 2003 Matthew Wilcox ++ * ++ * This file is where we call all the ethtool_ops commands to get ++ * the information ethtool needs. We fall back to calling do_ioctl() ++ * for drivers which haven't been converted to ethtool_ops yet. ++ * ++ * It's GPL, stupid. ++ * ++ * Modification by sfeldma@pobox.com to work as backward compat ++ * solution for pre-ethtool_ops kernels. ++ * - copied struct ethtool_ops from ethtool.h ++ * - defined SET_ETHTOOL_OPS ++ * - put in some #ifndef NETIF_F_xxx wrappers ++ * - changes refs to dev->ethtool_ops to ethtool_ops ++ * - changed dev_ethtool to ethtool_ioctl ++ * - remove EXPORT_SYMBOL()s ++ * - added _kc_ prefix in built-in ethtool_op_xxx ops. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "kcompat.h" ++ ++#undef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#undef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#undef SPEED_10000 ++#define SPEED_10000 10000 ++ ++#undef ethtool_ops ++#define ethtool_ops _kc_ethtool_ops ++ ++struct _kc_ethtool_ops { ++ int (*get_settings)(struct net_device *, struct ethtool_cmd *); ++ int (*set_settings)(struct net_device *, struct ethtool_cmd *); ++ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); ++ int (*get_regs_len)(struct net_device *); ++ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); ++ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); ++ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); ++ u32 (*get_msglevel)(struct net_device *); ++ void (*set_msglevel)(struct net_device *, u32); ++ int (*nway_reset)(struct net_device *); ++ u32 (*get_link)(struct net_device *); ++ int (*get_eeprom_len)(struct net_device *); ++ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ void (*get_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ int (*set_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ u32 (*get_rx_csum)(struct net_device *); ++ int (*set_rx_csum)(struct net_device *, u32); ++ u32 (*get_tx_csum)(struct net_device *); ++ int (*set_tx_csum)(struct net_device *, u32); ++ u32 (*get_sg)(struct net_device *); ++ int (*set_sg)(struct net_device *, u32); ++ u32 (*get_tso)(struct net_device *); ++ int (*set_tso)(struct net_device *, u32); ++ int (*self_test_count)(struct net_device *); ++ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); ++ void (*get_strings)(struct net_device *, u32 stringset, u8 *); ++ int (*phys_id)(struct net_device *, u32); ++ int (*get_stats_count)(struct net_device *); ++ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, ++ u64 *); ++} *ethtool_ops = NULL; ++ ++#undef SET_ETHTOOL_OPS ++#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) ++ ++/* ++ * Some useful ethtool_ops methods that are device independent. If we find that ++ * all drivers want to do the same thing here, we can turn these into dev_() ++ * function calls. ++ */ ++ ++#undef ethtool_op_get_link ++#define ethtool_op_get_link _kc_ethtool_op_get_link ++u32 _kc_ethtool_op_get_link(struct net_device *dev) ++{ ++ return netif_carrier_ok(dev) ? 
1 : 0; ++} ++ ++#undef ethtool_op_get_tx_csum ++#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum ++u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) ++{ ++#ifdef NETIF_F_IP_CSUM ++ return (dev->features & NETIF_F_IP_CSUM) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tx_csum ++#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum ++int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_IP_CSUM ++ if (data) ++#ifdef NETIF_F_IPV6_CSUM ++ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++ else ++ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++#else ++ dev->features |= NETIF_F_IP_CSUM; ++ else ++ dev->features &= ~NETIF_F_IP_CSUM; ++#endif ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_sg ++#define ethtool_op_get_sg _kc_ethtool_op_get_sg ++u32 _kc_ethtool_op_get_sg(struct net_device *dev) ++{ ++#ifdef NETIF_F_SG ++ return (dev->features & NETIF_F_SG) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_sg ++#define ethtool_op_set_sg _kc_ethtool_op_set_sg ++int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_SG ++ if (data) ++ dev->features |= NETIF_F_SG; ++ else ++ dev->features &= ~NETIF_F_SG; ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_tso ++#define ethtool_op_get_tso _kc_ethtool_op_get_tso ++u32 _kc_ethtool_op_get_tso(struct net_device *dev) ++{ ++#ifdef NETIF_F_TSO ++ return (dev->features & NETIF_F_TSO) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tso ++#define ethtool_op_set_tso _kc_ethtool_op_set_tso ++int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_TSO ++ if (data) ++ dev->features |= NETIF_F_TSO; ++ else ++ dev->features &= ~NETIF_F_TSO; ++#endif ++ ++ return 0; ++} ++ ++/* Handlers for each ethtool command */ ++ ++static int ethtool_get_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd = { ETHTOOL_GSET }; ++ int err; ++ ++ if (!ethtool_ops->get_settings) ++ return -EOPNOTSUPP; ++ ++ err = ethtool_ops->get_settings(dev, &cmd); ++ if (err < 0) ++ return err; ++ ++ if (copy_to_user(useraddr, &cmd, sizeof(cmd))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd; ++ ++ if (!ethtool_ops->set_settings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&cmd, useraddr, sizeof(cmd))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_settings(dev, &cmd); ++} ++ ++static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_drvinfo info; ++ struct ethtool_ops *ops = ethtool_ops; ++ ++ if (!ops->get_drvinfo) ++ return -EOPNOTSUPP; ++ ++ memset(&info, 0, sizeof(info)); ++ info.cmd = ETHTOOL_GDRVINFO; ++ ops->get_drvinfo(dev, &info); ++ ++ if (ops->self_test_count) ++ info.testinfo_len = ops->self_test_count(dev); ++ if (ops->get_stats_count) ++ info.n_stats = ops->get_stats_count(dev); ++ if (ops->get_regs_len) ++ info.regdump_len = ops->get_regs_len(dev); ++ if (ops->get_eeprom_len) ++ info.eedump_len = ops->get_eeprom_len(dev); ++ ++ if (copy_to_user(useraddr, &info, sizeof(info))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_regs(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_regs regs; ++ struct ethtool_ops *ops = ethtool_ops; ++ void *regbuf; ++ int reglen, ret; ++ ++ if (!ops->get_regs || !ops->get_regs_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(®s, useraddr, sizeof(regs))) ++ return -EFAULT; 
++ ++ reglen = ops->get_regs_len(dev); ++ if (regs.len > reglen) ++ regs.len = reglen; ++ ++ regbuf = kmalloc(reglen, GFP_USER); ++ if (!regbuf) ++ return -ENOMEM; ++ ++ ops->get_regs(dev, ®s, regbuf); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, ®s, sizeof(regs))) ++ goto out; ++ useraddr += offsetof(struct ethtool_regs, data); ++ if (copy_to_user(useraddr, regbuf, reglen)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(regbuf); ++ return ret; ++} ++ ++static int ethtool_get_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; ++ ++ if (!ethtool_ops->get_wol) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_wol(dev, &wol); ++ ++ if (copy_to_user(useraddr, &wol, sizeof(wol))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol; ++ ++ if (!ethtool_ops->set_wol) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&wol, useraddr, sizeof(wol))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_wol(dev, &wol); ++} ++ ++static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GMSGLVL }; ++ ++ if (!ethtool_ops->get_msglevel) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_msglevel(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_msglevel) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_msglevel(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_nway_reset(struct net_device *dev) ++{ ++ if (!ethtool_ops->nway_reset) ++ return -EOPNOTSUPP; ++ ++ return ethtool_ops->nway_reset(dev); ++} ++ ++static int ethtool_get_link(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GLINK }; ++ ++ if (!ethtool_ops->get_link) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_link(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->get_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) ++ goto out; ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->set_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr, 
sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->set_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ ret = -EFAULT; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_coalesce(dev, &coalesce); ++ ++ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_coalesce(dev, &coalesce); ++} ++ ++static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_ringparam(dev, &ringparam); ++ ++ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_ringparam(dev, &ringparam); ++} ++ ++static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_pauseparam(dev, &pauseparam); ++ ++ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_pauseparam(dev, &pauseparam); ++} ++ ++static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GRXCSUM }; ++ ++ if (!ethtool_ops->get_rx_csum) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_rx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_rx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_rx_csum(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTXCSUM }; ++ ++ if (!ethtool_ops->get_tx_csum) ++ 
return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tx_csum(dev, edata.data); ++} ++ ++static int ethtool_get_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GSG }; ++ ++ if (!ethtool_ops->get_sg) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_sg(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_sg) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_sg(dev, edata.data); ++} ++ ++static int ethtool_get_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTSO }; ++ ++ if (!ethtool_ops->get_tso) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tso(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tso) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tso(dev, edata.data); ++} ++ ++static int ethtool_self_test(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_test test; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->self_test || !ops->self_test_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&test, useraddr, sizeof(test))) ++ return -EFAULT; ++ ++ test.len = ops->self_test_count(dev); ++ data = kmalloc(test.len * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->self_test(dev, &test, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &test, sizeof(test))) ++ goto out; ++ useraddr += sizeof(test); ++ if (copy_to_user(useraddr, data, test.len * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_strings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_gstrings gstrings; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_strings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) ++ return -EFAULT; ++ ++ switch (gstrings.string_set) { ++ case ETH_SS_TEST: ++ if (!ops->self_test_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->self_test_count(dev); ++ break; ++ case ETH_SS_STATS: ++ if (!ops->get_stats_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->get_stats_count(dev); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_strings(dev, gstrings.string_set, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) ++ goto out; ++ useraddr += sizeof(gstrings); ++ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_phys_id(struct 
net_device *dev, void *useraddr) ++{ ++ struct ethtool_value id; ++ ++ if (!ethtool_ops->phys_id) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&id, useraddr, sizeof(id))) ++ return -EFAULT; ++ ++ return ethtool_ops->phys_id(dev, id.data); ++} ++ ++static int ethtool_get_stats(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_stats stats; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->get_ethtool_stats || !ops->get_stats_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&stats, useraddr, sizeof(stats))) ++ return -EFAULT; ++ ++ stats.n_stats = ops->get_stats_count(dev); ++ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_ethtool_stats(dev, &stats, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &stats, sizeof(stats))) ++ goto out; ++ useraddr += sizeof(stats); ++ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++/* The main entry point in this file. Called from net/core/dev.c */ ++ ++#define ETHTOOL_OPS_COMPAT ++int ethtool_ioctl(struct ifreq *ifr) ++{ ++ struct net_device *dev = __dev_get_by_name(ifr->ifr_name); ++ void *useraddr = (void *) ifr->ifr_data; ++ u32 ethcmd; ++ ++ /* ++ * XXX: This can be pushed down into the ethtool_* handlers that ++ * need it. Keep existing behavior for the moment. ++ */ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (!dev || !netif_device_present(dev)) ++ return -ENODEV; ++ ++ if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) ++ return -EFAULT; ++ ++ switch (ethcmd) { ++ case ETHTOOL_GSET: ++ return ethtool_get_settings(dev, useraddr); ++ case ETHTOOL_SSET: ++ return ethtool_set_settings(dev, useraddr); ++ case ETHTOOL_GDRVINFO: ++ return ethtool_get_drvinfo(dev, useraddr); ++ case ETHTOOL_GREGS: ++ return ethtool_get_regs(dev, useraddr); ++ case ETHTOOL_GWOL: ++ return ethtool_get_wol(dev, useraddr); ++ case ETHTOOL_SWOL: ++ return ethtool_set_wol(dev, useraddr); ++ case ETHTOOL_GMSGLVL: ++ return ethtool_get_msglevel(dev, useraddr); ++ case ETHTOOL_SMSGLVL: ++ return ethtool_set_msglevel(dev, useraddr); ++ case ETHTOOL_NWAY_RST: ++ return ethtool_nway_reset(dev); ++ case ETHTOOL_GLINK: ++ return ethtool_get_link(dev, useraddr); ++ case ETHTOOL_GEEPROM: ++ return ethtool_get_eeprom(dev, useraddr); ++ case ETHTOOL_SEEPROM: ++ return ethtool_set_eeprom(dev, useraddr); ++ case ETHTOOL_GCOALESCE: ++ return ethtool_get_coalesce(dev, useraddr); ++ case ETHTOOL_SCOALESCE: ++ return ethtool_set_coalesce(dev, useraddr); ++ case ETHTOOL_GRINGPARAM: ++ return ethtool_get_ringparam(dev, useraddr); ++ case ETHTOOL_SRINGPARAM: ++ return ethtool_set_ringparam(dev, useraddr); ++ case ETHTOOL_GPAUSEPARAM: ++ return ethtool_get_pauseparam(dev, useraddr); ++ case ETHTOOL_SPAUSEPARAM: ++ return ethtool_set_pauseparam(dev, useraddr); ++ case ETHTOOL_GRXCSUM: ++ return ethtool_get_rx_csum(dev, useraddr); ++ case ETHTOOL_SRXCSUM: ++ return ethtool_set_rx_csum(dev, useraddr); ++ case ETHTOOL_GTXCSUM: ++ return ethtool_get_tx_csum(dev, useraddr); ++ case ETHTOOL_STXCSUM: ++ return ethtool_set_tx_csum(dev, useraddr); ++ case ETHTOOL_GSG: ++ return ethtool_get_sg(dev, useraddr); ++ case ETHTOOL_SSG: ++ return ethtool_set_sg(dev, useraddr); ++ case ETHTOOL_GTSO: ++ return ethtool_get_tso(dev, useraddr); ++ case ETHTOOL_STSO: ++ return ethtool_set_tso(dev, useraddr); ++ case ETHTOOL_TEST: ++ return ethtool_self_test(dev, useraddr); ++ case ETHTOOL_GSTRINGS: ++ 
return ethtool_get_strings(dev, useraddr); ++ case ETHTOOL_PHYS_ID: ++ return ethtool_phys_id(dev, useraddr); ++ case ETHTOOL_GSTATS: ++ return ethtool_get_stats(dev, useraddr); ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++#define mii_if_info _kc_mii_if_info ++struct _kc_mii_if_info { ++ int phy_id; ++ int advertising; ++ int phy_id_mask; ++ int reg_num_mask; ++ ++ unsigned int full_duplex : 1; /* is full duplex? */ ++ unsigned int force_media : 1; /* is autoneg. disabled? */ ++ ++ struct net_device *dev; ++ int (*mdio_read) (struct net_device *dev, int phy_id, int location); ++ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); ++}; ++ ++struct ethtool_cmd; ++struct mii_ioctl_data; ++ ++#undef mii_link_ok ++#define mii_link_ok _kc_mii_link_ok ++#undef mii_nway_restart ++#define mii_nway_restart _kc_mii_nway_restart ++#undef mii_ethtool_gset ++#define mii_ethtool_gset _kc_mii_ethtool_gset ++#undef mii_ethtool_sset ++#define mii_ethtool_sset _kc_mii_ethtool_sset ++#undef mii_check_link ++#define mii_check_link _kc_mii_check_link ++extern int _kc_mii_link_ok (struct mii_if_info *mii); ++extern int _kc_mii_nway_restart (struct mii_if_info *mii); ++extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern void _kc_mii_check_link (struct mii_if_info *mii); ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++#undef generic_mii_ioctl ++#define generic_mii_ioctl _kc_generic_mii_ioctl ++extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_changed); ++#endif /* > 2.4.6 */ ++ ++ ++struct _kc_pci_dev_ext { ++ struct pci_dev *dev; ++ void *pci_drvdata; ++ struct pci_driver *driver; ++}; ++ ++struct _kc_net_dev_ext { ++ struct net_device *dev; ++ unsigned int carrier; ++}; ++ ++ ++/**************************************/ ++/* mii support */ ++ ++int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ u32 advert, bmcr, lpa, nego; ++ ++ ecmd->supported = ++ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | ++ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ++ ++ /* only supports twisted-pair */ ++ ecmd->port = PORT_MII; ++ ++ /* only supports internal transceiver */ ++ ecmd->transceiver = XCVR_INTERNAL; ++ ++ /* this isn't fully supported at higher layers */ ++ ecmd->phy_address = mii->phy_id; ++ ++ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ if (advert & ADVERTISE_10HALF) ++ ecmd->advertising |= ADVERTISED_10baseT_Half; ++ if (advert & ADVERTISE_10FULL) ++ ecmd->advertising |= ADVERTISED_10baseT_Full; ++ if (advert & ADVERTISE_100HALF) ++ ecmd->advertising |= ADVERTISED_100baseT_Half; ++ if (advert & ADVERTISE_100FULL) ++ ecmd->advertising |= ADVERTISED_100baseT_Full; ++ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); ++ if (bmcr & BMCR_ANENABLE) { ++ ecmd->advertising |= ADVERTISED_Autoneg; ++ ecmd->autoneg = AUTONEG_ENABLE; ++ ++ nego = mii_nway_result(advert & lpa); ++ if (nego == LPA_100FULL || nego == LPA_100HALF) ++ ecmd->speed = SPEED_100; ++ else ++ ecmd->speed = SPEED_10; ++ if (nego == LPA_100FULL || nego == LPA_10FULL) { ++ ecmd->duplex = DUPLEX_FULL; ++ mii->full_duplex = 1; ++ } 
else { ++ ecmd->duplex = DUPLEX_HALF; ++ mii->full_duplex = 0; ++ } ++ } else { ++ ecmd->autoneg = AUTONEG_DISABLE; ++ ++ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; ++ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; ++ } ++ ++ /* ignore maxtxpkt, maxrxpkt for now */ ++ ++ return 0; ++} ++ ++int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ ++ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) ++ return -EINVAL; ++ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) ++ return -EINVAL; ++ if (ecmd->port != PORT_MII) ++ return -EINVAL; ++ if (ecmd->transceiver != XCVR_INTERNAL) ++ return -EINVAL; ++ if (ecmd->phy_address != mii->phy_id) ++ return -EINVAL; ++ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) ++ return -EINVAL; ++ ++ /* ignore supported, maxtxpkt, maxrxpkt */ ++ ++ if (ecmd->autoneg == AUTONEG_ENABLE) { ++ u32 bmcr, advert, tmp; ++ ++ if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full)) == 0) ++ return -EINVAL; ++ ++ /* advertise only what has been requested */ ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); ++ if (ADVERTISED_10baseT_Half) ++ tmp |= ADVERTISE_10HALF; ++ if (ADVERTISED_10baseT_Full) ++ tmp |= ADVERTISE_10FULL; ++ if (ADVERTISED_100baseT_Half) ++ tmp |= ADVERTISE_100HALF; ++ if (ADVERTISED_100baseT_Full) ++ tmp |= ADVERTISE_100FULL; ++ if (advert != tmp) { ++ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); ++ mii->advertising = tmp; ++ } ++ ++ /* turn on autonegotiation, and force a renegotiate */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); ++ ++ mii->force_media = 0; ++ } else { ++ u32 bmcr, tmp; ++ ++ /* turn off auto negotiation, set speed and duplexity */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); ++ if (ecmd->speed == SPEED_100) ++ tmp |= BMCR_SPEED100; ++ if (ecmd->duplex == DUPLEX_FULL) { ++ tmp |= BMCR_FULLDPLX; ++ mii->full_duplex = 1; ++ } else ++ mii->full_duplex = 0; ++ if (bmcr != tmp) ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); ++ ++ mii->force_media = 1; ++ } ++ return 0; ++} ++ ++int _kc_mii_link_ok (struct mii_if_info *mii) ++{ ++ /* first, a dummy read, needed to latch some MII phys */ ++ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); ++ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) ++ return 1; ++ return 0; ++} ++ ++int _kc_mii_nway_restart (struct mii_if_info *mii) ++{ ++ int bmcr; ++ int r = -EINVAL; ++ ++ /* if autoneg is off, it's an error */ ++ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); ++ ++ if (bmcr & BMCR_ANENABLE) { ++ bmcr |= BMCR_ANRESTART; ++ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); ++ r = 0; ++ } ++ ++ return r; ++} ++ ++void _kc_mii_check_link (struct mii_if_info *mii) ++{ ++ int cur_link = mii_link_ok(mii); ++ int prev_link = netif_carrier_ok(mii->dev); ++ ++ if (cur_link && !prev_link) ++ netif_carrier_on(mii->dev); ++ else if (prev_link && !cur_link) ++ netif_carrier_off(mii->dev); ++} ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_chg_out) ++{ 
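++	/* Dispatch summary, mirroring the switch below: SIOCGMIIPHY returns
++	 * the interface's default PHY address and falls through to a register
++	 * read; SIOCGMIIREG reads a register via mdio_read(); SIOCSMIIREG
++	 * requires CAP_NET_ADMIN, writes via mdio_write(), and for BMCR and
++	 * ADVERTISE writes updates force_media/full_duplex/advertising so a
++	 * duplex change is reported through *duplex_chg_out.  The old
++	 * SIOCDEVPRIVATE codes are kept only for binary compatibility.
++	 */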
++ int rc = 0; ++ unsigned int duplex_changed = 0; ++ ++ if (duplex_chg_out) ++ *duplex_chg_out = 0; ++ ++ mii_data->phy_id &= mii_if->phy_id_mask; ++ mii_data->reg_num &= mii_if->reg_num_mask; ++ ++ switch(cmd) { ++ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ ++ case SIOCGMIIPHY: ++ mii_data->phy_id = mii_if->phy_id; ++ /* fall through */ ++ ++ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ ++ case SIOCGMIIREG: ++ mii_data->val_out = ++ mii_if->mdio_read(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num); ++ break; ++ ++ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ ++ case SIOCSMIIREG: { ++ u16 val = mii_data->val_in; ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (mii_data->phy_id == mii_if->phy_id) { ++ switch(mii_data->reg_num) { ++ case MII_BMCR: { ++ unsigned int new_duplex = 0; ++ if (val & (BMCR_RESET|BMCR_ANENABLE)) ++ mii_if->force_media = 0; ++ else ++ mii_if->force_media = 1; ++ if (mii_if->force_media && ++ (val & BMCR_FULLDPLX)) ++ new_duplex = 1; ++ if (mii_if->full_duplex != new_duplex) { ++ duplex_changed = 1; ++ mii_if->full_duplex = new_duplex; ++ } ++ break; ++ } ++ case MII_ADVERTISE: ++ mii_if->advertising = val; ++ break; ++ default: ++ /* do nothing */ ++ break; ++ } ++ } ++ ++ mii_if->mdio_write(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num, val); ++ break; ++ } ++ ++ default: ++ rc = -EOPNOTSUPP; ++ break; ++ } ++ ++ if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) ++ *duplex_chg_out = 1; ++ ++ return rc; ++} ++#endif /* > 2.4.6 */ ++ diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch index e258426c..5de8cb5b 100644 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch +++ b/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch @@ -1,175 +1,145 @@ -+ 0x75diff -urpN a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c ---- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-03-02 10:31:21.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-09-15 19:55:02.680725611 +0000 -@@ -179,8 +179,11 @@ static s32 igb_init_phy_params_82575(str - ctrl_ext = rd32(E1000_CTRL_EXT); - - if (igb_sgmii_active_82575(hw)) { -- phy->ops.reset = igb_phy_hw_reset_sgmii_82575; -- ctrl_ext |= E1000_CTRL_I2C_ENA; -+ if(phy->type == e1000_phy_bcm5461s) -+ phy->ops.reset = igb_phy_hw_reset; -+ else -+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575; -+ ctrl_ext |= E1000_CTRL_I2C_ENA; - } else { - phy->ops.reset = igb_phy_hw_reset; - ctrl_ext &= ~E1000_CTRL_I2C_ENA; -@@ -286,6 +289,19 @@ static s32 igb_init_phy_params_82575(str - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c +--- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 15:48:41.379628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-15 09:36:04.608478513 +0000 +@@ -302,6 +302,16 @@ + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; -+ -+ case BCM5461S_PHY_ID: -+ phy->type = e1000_phy_bcm5461s; -+ phy->ops.check_polarity = NULL; -+ phy->ops.get_phy_info = igb_get_phy_info_5461s; -+ phy->ops.get_cable_length = NULL; 
-+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; -+ break; -+ ++ case BCM5461S_PHY_ID: ++ phy->type = e1000_phy_bcm5461s; ++ phy->ops.check_polarity = NULL; ++ phy->ops.get_info = igb_get_phy_info_5461s; ++ phy->ops.get_cable_length = NULL; ++ phy->ops.force_speed_duplex = igb_e1000_phy_force_speed_duplex_82577; ++ break; + case BCM54616_E_PHY_ID: -+ phy->type = e1000_phy_bcm54616; -+ break; -+ ++ phy->type = e1000_phy_bcm54616; ++ break; default: ret_val = -E1000_ERR_PHY; goto out; -@@ -827,9 +843,9 @@ static s32 igb_get_phy_id_82575(struct e - break; - case e1000_82580: - case e1000_i350: -- case e1000_i354: - case e1000_i210: - case e1000_i211: -+ case e1000_i354: - mdic = rd32(E1000_MDICNFG); - mdic &= E1000_MDICNFG_PHY_MASK; - phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; -@@ -840,6 +856,17 @@ static s32 igb_get_phy_id_82575(struct e +@@ -701,6 +711,17 @@ break; } - ret_val = igb_get_phy_id(hw); + ret_val = e1000_get_phy_id(hw); + + if (ret_val && hw->mac.type == e1000_i354) { -+ /* we do a special check for bcm5461s phy by setting -+ * the phy->addr to 5 and doing the phy check again. This -+ * call will succeed and retrieve a valid phy id if we have -+ * the bcm5461s phy -+ */ -+ phy->addr = 5; -+ phy->type = e1000_phy_bcm5461s; -+ ret_val = igb_get_phy_id(hw); ++ /* we do a special check for bcm5461s phy by setting ++ * the phy->addr to 5 and doing the phy check again. This ++ * call will succeed and retrieve a valid phy id if we have ++ * the bcm5461s phy ++ */ ++ phy->addr = 5; ++ phy->type = e1000_phy_bcm5461s; ++ ret_val = e1000_get_phy_id(hw); + } goto out; } - -@@ -1220,6 +1247,9 @@ static s32 igb_get_cfg_done_82575(struct + +@@ -1148,6 +1169,9 @@ (hw->phy.type == e1000_phy_igp_3)) - igb_phy_init_script_igp3(hw); - + e1000_phy_init_script_igp3(hw); + + if (hw->phy.type == e1000_phy_bcm5461s) -+ igb_phy_init_script_5461s(hw); ++ igb_phy_init_script_5461s(hw); + - return 0; + return E1000_SUCCESS; } - -@@ -1552,6 +1582,7 @@ static s32 igb_setup_copper_link_82575(s + +@@ -1557,6 +1581,7 @@ case e1000_i350: case e1000_i210: case e1000_i211: + case e1000_i354: - phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); phpm_reg &= ~E1000_82580_PM_GO_LINKD; - wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); -@@ -1595,6 +1626,10 @@ static s32 igb_setup_copper_link_82575(s + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); +@@ -1602,6 +1627,10 @@ case e1000_phy_82580: - ret_val = igb_copper_link_setup_82580(hw); + ret_val = igb_e1000_copper_link_setup_82577(hw); break; + case e1000_phy_bcm54616: + break; + case e1000_phy_bcm5461s: -+ break; ++ break; default: ret_val = -E1000_ERR_PHY; break; -diff -urpN a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h ---- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-03-02 10:31:21.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-09-15 19:55:27.068726140 +0000 -@@ -860,6 +860,8 @@ - #define M88_VENDOR 0x0141 - #define I210_I_PHY_ID 0x01410C00 - #define M88E1543_E_PHY_ID 0x01410EA0 -+#define BCM54616_E_PHY_ID 0x3625D10 -+#define BCM5461S_PHY_ID 0x002060C0 - +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h +--- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 15:48:41.383628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 17:13:16.567695539 +0000 +@@ -1184,6 +1184,8 @@ + #define I350_I_PHY_ID 0x015403B0 + 
#define I210_I_PHY_ID 0x01410C00 + #define IGP04E1000_E_PHY_ID 0x02A80391 ++#define BCM54616_E_PHY_ID 0x3625D10 ++#define BCM5461S_PHY_ID 0x002060C0 + #define M88_VENDOR 0x0141 + /* M88E1000 Specific Registers */ - #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -diff -urpN a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h ---- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-03-02 10:31:21.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-09-15 19:55:44.584726520 +0000 -@@ -128,6 +128,8 @@ enum e1000_phy_type { - e1000_phy_ife, +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h +--- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 15:48:41.387628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 17:11:55.735694465 +0000 +@@ -133,6 +133,8 @@ e1000_phy_82580, + e1000_phy_vf, e1000_phy_i210, + e1000_phy_bcm54616, + e1000_phy_bcm5461s, }; - + enum e1000_bus_type { -diff -urpN a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c ---- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-03-02 10:31:21.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-09-15 20:07:34.964741935 +0000 -@@ -148,6 +148,14 @@ s32 igb_read_phy_reg_mdic(struct e1000_h +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c +--- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 15:48:41.403628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-15 09:48:09.668488140 +0000 +@@ -272,6 +272,13 @@ * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ -+ + if (phy->type == e1000_phy_bcm5461s) { -+ mdic = rd32(E1000_MDICNFG); -+ mdic &= ~E1000_MDICNFG_PHY_MASK; -+ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); -+ wr32(E1000_MDICNFG, mdic); ++ mdic = E1000_READ_REG(hw, E1000_MDICNFG); ++ mdic &= ~E1000_MDICNFG_PHY_MASK; ++ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdic); + } + mdic = ((offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_READ)); -@@ -204,6 +212,14 @@ s32 igb_write_phy_reg_mdic(struct e1000_ +@@ -331,6 +338,13 @@ * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ -+ + if (phy->type == e1000_phy_bcm5461s) { -+ mdic = rd32(E1000_MDICNFG); -+ mdic &= ~E1000_MDICNFG_PHY_MASK; -+ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); -+ wr32(E1000_MDICNFG, mdic); ++ mdic = E1000_READ_REG(hw, E1000_MDICNFG); ++ mdic &= ~E1000_MDICNFG_PHY_MASK; ++ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdic); + } + mdic = (((u32)data) | (offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | -@@ -1115,11 +1131,13 @@ s32 igb_setup_copper_link(struct e1000_h +@@ -1614,10 +1628,12 @@ * depending on user settings. 
*/ - hw_dbg("Forcing Speed and Duplex\n"); + DEBUGOUT("Forcing Speed and Duplex\n"); - ret_val = hw->phy.ops.force_speed_duplex(hw); - if (ret_val) { -+ if(hw->phy.ops.force_speed_duplex) { -+ ret_val = hw->phy.ops.force_speed_duplex(hw); -+ if (ret_val) { - hw_dbg("Error Forcing Speed and Duplex\n"); - goto out; -- } -+ } -+ } +- DEBUGOUT("Error Forcing Speed and Duplex\n"); +- return ret_val; ++ if (hw->phy.ops.force_speed_duplex) { ++ ret_val = hw->phy.ops.force_speed_duplex(hw); ++ if (ret_val) { ++ DEBUGOUT("Error Forcing Speed and Duplex\n"); ++ return ret_val; ++ } + } } - - /* Check link status. Wait up to 100 microseconds for link to become -@@ -2509,3 +2527,67 @@ static s32 igb_set_master_slave_mode(str - - return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); + +@@ -3407,3 +3423,67 @@ + + return ready; } + +/** @@ -216,7 +186,7 @@ diff -urpN a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/i + s32 ret_val; + bool link; + -+ ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + @@ -235,46 +205,38 @@ diff -urpN a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/i +out: + return ret_val; +} -diff -urpN a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h ---- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-03-02 10:31:21.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-09-15 19:41:43.584708271 +0000 -@@ -61,6 +61,8 @@ s32 igb_phy_has_link(struct e1000_hw *h - void igb_power_up_phy_copper(struct e1000_hw *hw); - void igb_power_down_phy_copper(struct e1000_hw *hw); - s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h +--- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 15:48:41.403628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 17:21:08.243701801 +0000 +@@ -74,6 +74,8 @@ + s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); + s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_phy_init_script_5461s(struct e1000_hw *hw); +s32 igb_get_phy_info_5461s(struct e1000_hw *hw); - s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); - s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); - s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); -diff -urpN a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c ---- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-03-02 10:31:21.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-09-15 19:56:53.276728011 +0000 -@@ -108,6 +108,7 @@ static const struct pci_device_id igb_pc - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 }, - /* required last entry */ - {0, } - }; -@@ -7198,11 +7199,19 @@ static int igb_mii_ioctl(struct net_devi - data->phy_id = adapter->hw.phy.addr; - break; + enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); + s32 e1000_determine_phy_address(struct e1000_hw *hw); + s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c 
b/drivers/net/ethernet/intel/igb/igb_main.c +--- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 15:48:41.411628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 19:07:51.867786828 +0000 +@@ -8607,11 +8607,19 @@ case SIOCGMIIREG: -+ adapter->hw.phy.addr = data->phy_id; - if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, - &data->val_out)) + if (!capable(CAP_NET_ADMIN)) + return -EPERM; ++ adapter->hw.phy.addr = data->phy_id; + if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) return -EIO; break; case SIOCSMIIREG: -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ adapter->hw.phy.addr = data->phy_id; -+ if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, -+ data->val_in)) -+ return -EIO; -+ break; ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ adapter->hw.phy.addr = data->phy_id; ++ if (igb_e1000_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, ++ data->val_in)) ++ return -EIO; ++ break; default: return -EOPNOTSUPP; } diff --git a/packages/base/any/kernels/3.16+deb8/patches/series b/packages/base/any/kernels/3.16+deb8/patches/series index a3d4952f..f1785767 100644 --- a/packages/base/any/kernels/3.16+deb8/patches/series +++ b/packages/base/any/kernels/3.16+deb8/patches/series @@ -11,6 +11,5 @@ driver-hwmon-pmbus-add-dps460-support.patch driver-hwmon-pmbus-ucd9200-mlnx.patch driver-arista-piix4-mux-patch.patch 3.16-fs-overlayfs.patch +driver-igb-version-5.3.54.patch driver-support-intel-igb-bcm5461X-phy.patch - - diff --git a/packages/platforms/mellanox/vendor-config/Makefile b/packages/platforms/mellanox/vendor-config/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/mellanox/vendor-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/mellanox/vendor-config/PKG.yml b/packages/platforms/mellanox/vendor-config/PKG.yml new file mode 100644 index 00000000..5622b833 --- /dev/null +++ b/packages/platforms/mellanox/vendor-config/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-vendor.yml VENDOR=mellanox Vendor=Mellanox diff --git a/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py b/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py new file mode 100644 index 00000000..2d089e30 --- /dev/null +++ b/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/python + +from onl.platform.base import * + +class OnlPlatformMellanox(OnlPlatformBase): + MANUFACTURER='Mellanox' + PRIVATE_ENTERPRISE_NUMBER=33049 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/PKG.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/PKG.yml new file mode 100644 index 00000000..d029cd80 --- /dev/null +++ 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=x86-64-mlnx-msn2100 ARCH=amd64 TOOLCHAIN=x86_64-linux-gnu diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/Makefile new file mode 100644 index 00000000..e7437cb2 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/Makefile @@ -0,0 +1,2 @@ +FILTER=src +include $(ONL)/make/subdirs.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/Makefile new file mode 100644 index 00000000..7ed38df4 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +MODULE := libonlp-x86-64-mlnx-msn2100 +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF x86_64_mlnx_msn2100 onlplib +DEPENDMODULE_HEADERS := sff + +include $(BUILDER)/dependmodules.mk + +SHAREDLIB := libonlp-x86-64-mlnx-msn2100.so +$(SHAREDLIB)_TARGETS := $(ALL_TARGETS) +include $(BUILDER)/so.mk +.DEFAULT_GOAL := $(SHAREDLIB) + +GLOBAL_CFLAGS += -I$(onlp_BASEDIR)/module/inc +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -fPIC +GLOBAL_LINK_LIBS += -lpthread + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100-r0.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100-r0.mk new file mode 100644 index 00000000..d89e7d7b --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100-r0.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the libonlp-x86-64-mlnx-msn2100-r0 module. 
+# +# Autogenerated 2015-12-23 23:45:22.249911 +# +############################################################################### +libonlp-x86-64-mlnx-msn2100-r0_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100.mk new file mode 100644 index 00000000..fc3761e3 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/libonlp-x86-64-mlnx-msn2100.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the libonlp-x86-64-mlnx-msn2100 module. +# +# Autogenerated 2016-10-13 22:58:39.095824 +# +############################################################################### +libonlp-x86-64-mlnx-msn2100_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/x86_64_mlnx_msn2100.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/x86_64_mlnx_msn2100.mk new file mode 100644 index 00000000..a286a5dd --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/lib/x86_64_mlnx_msn2100.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the x86_64_mlnx_msn2100 module. +# +# Autogenerated 2015-12-23 23:45:22.262891 +# +############################################################################### +x86_64_mlnx_msn2100_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/Makefile new file mode 100644 index 00000000..491f363d --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/Makefile @@ -0,0 +1,46 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +.DEFAULT_GOAL := onlpdump + +MODULE := onlpdump +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF onlp x86_64_mlnx_msn2100 onlplib onlp_platform_defaults sff cjson cjson_util timer_wheel OS + +include $(BUILDER)/dependmodules.mk + +BINARY := onlpdump +$(BINARY)_LIBRARIES := $(LIBRARY_TARGETS) +include $(BUILDER)/bin.mk + +GLOBAL_CFLAGS += -DAIM_CONFIG_AIM_MAIN_FUNCTION=onlpdump_main +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MAIN=1 +GLOBAL_LINK_LIBS += -lpthread -lm + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/onlpdump.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/onlpdump.mk new file mode 100644 index 00000000..77d7d005 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/onlpdump/onlpdump.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the onlpdump module. +# +# Autogenerated 2016-10-13 22:58:37.393320 +# +############################################################################### +onlpdump_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/.module b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/.module new file mode 100644 index 00000000..c4f80c2c --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/.module @@ -0,0 +1 @@ +name: x86_64_mlnx_msn2100 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/Makefile new file mode 100644 index 00000000..c8d80db2 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### +include ../../init.mk +MODULE := x86_64_mlnx_msn2100 +AUTOMODULE := x86_64_mlnx_msn2100 +include $(BUILDER)/definemodule.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/README b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/README new file mode 100644 index 00000000..c6578436 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/README @@ -0,0 +1,6 @@ +############################################################################### +# +# x86_64_mlnx_msn2100 README +# +############################################################################### + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/make.mk new file mode 100644 index 00000000..02701211 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# x86_64_mlnx_msn2100 Autogeneration +# +############################################################################### 
+x86_64_mlnx_msn2100_AUTO_DEFS := module/auto/x86_64_mlnx_msn2100.yml +x86_64_mlnx_msn2100_AUTO_DIRS := module/inc/x86_64_mlnx_msn2100 module/src +include $(BUILDER)/auto.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/x86_64_mlnx_msn2100.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/x86_64_mlnx_msn2100.yml new file mode 100644 index 00000000..2f25156f --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/auto/x86_64_mlnx_msn2100.yml @@ -0,0 +1,50 @@ +############################################################################### +# +# x86_64_mlnx_msn2100 Autogeneration Definitions. +# +############################################################################### + +cdefs: &cdefs +- X86_64_MLNX_MSN2100_CONFIG_INCLUDE_LOGGING: + doc: "Include or exclude logging." + default: 1 +- X86_64_MLNX_MSN2100_CONFIG_LOG_OPTIONS_DEFAULT: + doc: "Default enabled log options." + default: AIM_LOG_OPTIONS_DEFAULT +- X86_64_MLNX_MSN2100_CONFIG_LOG_BITS_DEFAULT: + doc: "Default enabled log bits." + default: AIM_LOG_BITS_DEFAULT +- X86_64_MLNX_MSN2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT: + doc: "Default enabled custom log bits." + default: 0 +- X86_64_MLNX_MSN2100_CONFIG_PORTING_STDLIB: + doc: "Default all porting macros to use the C standard libraries." + default: 1 +- X86_64_MLNX_MSN2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS: + doc: "Include standard library headers for stdlib porting macros." + default: x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB +- X86_64_MLNX_MSN2100_CONFIG_INCLUDE_UCLI: + doc: "Include generic uCli support." + default: 0 +- X86_64_MLNX_MSN2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION: + doc: "Assume chassis fan direction is the same as the PSU fan direction." 
+ default: 0 + + +definitions: + cdefs: + X86_64_MLNX_MSN2100_CONFIG_HEADER: + defs: *cdefs + basename: x86_64_mlnx_msn2100_config + + portingmacro: + x86_64_mlnx_msn2100: + macros: + - malloc + - free + - memset + - memcpy + - strncpy + - vsnprintf + - snprintf + - strlen diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100.x b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100.x new file mode 100644 index 00000000..c9bd20db --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100.x @@ -0,0 +1,14 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.xmacro(ALL).define> */ +/* */ + +/* <--auto.start.xenum(ALL).define> */ +/* */ + + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_config.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_config.h new file mode 100644 index 00000000..35d90487 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_config.h @@ -0,0 +1,137 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_mlnx_msn2100 Configuration Header + * + * @addtogroup x86_64_mlnx_msn2100-config + * @{ + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2100_CONFIG_H__ +#define __x86_64_mlnx_msn2100_CONFIG_H__ + +#ifdef GLOBAL_INCLUDE_CUSTOM_CONFIG +#include +#endif +#ifdef x86_64_mlnx_msn2100_INCLUDE_CUSTOM_CONFIG +#include +#endif + +/* */ +#include +/** + * x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING + * + * Include or exclude logging. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING +#define x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING 1 +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT + * + * Default enabled log options. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT +#define x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT AIM_LOG_OPTIONS_DEFAULT +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT + * + * Default enabled log bits. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT +#define x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT AIM_LOG_BITS_DEFAULT +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT + * + * Default enabled custom log bits. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT +#define x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT 0 +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB + * + * Default all porting macros to use the C standard libraries. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB +#define x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB 1 +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + * + * Include standard library headers for stdlib porting macros. 
*/ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS +#define x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI + * + * Include generic uCli support. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI +#define x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI 0 +#endif + +/** + * x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + * + * Assume chassis fan direction is the same as the PSU fan direction. */ + + +#ifndef x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION +#define x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION 0 +#endif + + + +/** + * All compile time options can be queried or displayed + */ + +/** Configuration settings structure. */ +typedef struct x86_64_mlnx_msn2100_config_settings_s { + /** name */ + const char* name; + /** value */ + const char* value; +} x86_64_mlnx_msn2100_config_settings_t; + +/** Configuration settings table. */ +/** x86_64_mlnx_msn2100_config_settings table. */ +extern x86_64_mlnx_msn2100_config_settings_t x86_64_mlnx_msn2100_config_settings[]; + +/** + * @brief Lookup a configuration setting. + * @param setting The name of the configuration option to lookup. + */ +const char* x86_64_mlnx_msn2100_config_lookup(const char* setting); + +/** + * @brief Show the compile-time configuration. + * @param pvs The output stream. + */ +int x86_64_mlnx_msn2100_config_show(struct aim_pvs_s* pvs); + +/* */ + +#include "x86_64_mlnx_msn2100_porting.h" + +#endif /* __x86_64_mlnx_msn2100_CONFIG_H__ */ +/* @} */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_dox.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_dox.h new file mode 100644 index 00000000..14059acd --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_dox.h @@ -0,0 +1,26 @@ +/**************************************************************************//** + * + * x86_64_mlnx_msn2100 Doxygen Header + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2100_DOX_H__ +#define __x86_64_mlnx_msn2100_DOX_H__ + +/** + * @defgroup x86_64_mlnx_msn2100 x86_64_mlnx_msn2100 - x86_64_mlnx_msn2100 Description + * + +The documentation overview for this module should go here. + + * + * @{ + * + * @defgroup x86_64_mlnx_msn2100-x86_64_mlnx_msn2100 Public Interface + * @defgroup x86_64_mlnx_msn2100-config Compile Time Configuration + * @defgroup x86_64_mlnx_msn2100-porting Porting Macros + * + * @} + * + */ + +#endif /* __x86_64_mlnx_msn2100_DOX_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_porting.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_porting.h new file mode 100644 index 00000000..e640eca2 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/inc/x86_64_mlnx_msn2100/x86_64_mlnx_msn2100_porting.h @@ -0,0 +1,107 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_mlnx_msn2100 Porting Macros. 
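+ *
+ * Each macro below resolves in the same order: an explicit GLOBAL_*
+ * override if the build defines one, otherwise the C standard library
+ * function when x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB is 1, otherwise
+ * a compile-time error.  For example, a build could pass
+ * -DGLOBAL_MALLOC=my_malloc -DGLOBAL_FREE=my_free (placeholder names) to
+ * route this module's allocations through a custom allocator.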
+ * + * @addtogroup x86_64_mlnx_msn2100-porting + * @{ + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2100_PORTING_H__ +#define __x86_64_mlnx_msn2100_PORTING_H__ + + +/* */ +#if x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS == 1 +#include +#include +#include +#include +#include +#endif + +#ifndef x86_64_mlnx_msn2100_MALLOC + #if defined(GLOBAL_MALLOC) + #define x86_64_mlnx_msn2100_MALLOC GLOBAL_MALLOC + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_MALLOC malloc + #else + #error The macro x86_64_mlnx_msn2100_MALLOC is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_FREE + #if defined(GLOBAL_FREE) + #define x86_64_mlnx_msn2100_FREE GLOBAL_FREE + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_FREE free + #else + #error The macro x86_64_mlnx_msn2100_FREE is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_MEMSET + #if defined(GLOBAL_MEMSET) + #define x86_64_mlnx_msn2100_MEMSET GLOBAL_MEMSET + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_MEMSET memset + #else + #error The macro x86_64_mlnx_msn2100_MEMSET is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_MEMCPY + #if defined(GLOBAL_MEMCPY) + #define x86_64_mlnx_msn2100_MEMCPY GLOBAL_MEMCPY + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_MEMCPY memcpy + #else + #error The macro x86_64_mlnx_msn2100_MEMCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_STRNCPY + #if defined(GLOBAL_STRNCPY) + #define x86_64_mlnx_msn2100_STRNCPY GLOBAL_STRNCPY + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_STRNCPY strncpy + #else + #error The macro x86_64_mlnx_msn2100_STRNCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_VSNPRINTF + #if defined(GLOBAL_VSNPRINTF) + #define x86_64_mlnx_msn2100_VSNPRINTF GLOBAL_VSNPRINTF + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_VSNPRINTF vsnprintf + #else + #error The macro x86_64_mlnx_msn2100_VSNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_SNPRINTF + #if defined(GLOBAL_SNPRINTF) + #define x86_64_mlnx_msn2100_SNPRINTF GLOBAL_SNPRINTF + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_SNPRINTF snprintf + #else + #error The macro x86_64_mlnx_msn2100_SNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2100_STRLEN + #if defined(GLOBAL_STRLEN) + #define x86_64_mlnx_msn2100_STRLEN GLOBAL_STRLEN + #elif x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2100_STRLEN strlen + #else + #error The macro x86_64_mlnx_msn2100_STRLEN is required but cannot be defined. 
+ #endif +#endif + +/* */ + + +#endif /* __x86_64_mlnx_msn2100_PORTING_H__ */ +/* @} */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/make.mk new file mode 100644 index 00000000..db833277 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/make.mk @@ -0,0 +1,10 @@ +############################################################################### +# +# +# +############################################################################### +THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +x86_64_mlnx_msn2100_INCLUDES := -I $(THIS_DIR)inc +x86_64_mlnx_msn2100_INTERNAL_INCLUDES := -I $(THIS_DIR)src +x86_64_mlnx_msn2100_DEPENDMODULE_ENTRIES := init:x86_64_mlnx_msn2100 ucli:x86_64_mlnx_msn2100 + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/Makefile new file mode 100644 index 00000000..bff8ee55 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# Local source generation targets. +# +############################################################################### + +ucli: + @../../../../tools/uclihandlers.py x86_64_mlnx_msn2100_ucli.c + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c new file mode 100644 index 00000000..7dd59e0d --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c @@ -0,0 +1,356 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Fan Platform Implementation Defaults. 
+ * + ***********************************************************/ +#include +#include +#include +#include +#include "platform_lib.h" + +#define PREFIX_PATH "/bsp/fan/" + +#define FAN_STATUS_OK 1 + +#define PERCENTAGE_MIN 60.0 +#define PERCENTAGE_MAX 100.0 +#define RPM_MAGIC_MIN 153.0 +#define RPM_MAGIC_MAX 255.0 + +#define PROJECT_NAME +#define LEN_FILE_NAME 80 + +#define FAN_RESERVED 0 +#define FAN_1_ON_MAIN_BOARD 1 +#define FAN_2_ON_MAIN_BOARD 2 +#define FAN_3_ON_MAIN_BOARD 3 +#define FAN_4_ON_MAIN_BOARD 4 +#define FAN_MODEL "MEC012579" + +static int min_fan_speed[CHASSIS_FAN_COUNT+1] = {0}; +static int max_fan_speed[CHASSIS_FAN_COUNT+1] = {0}; + +typedef struct fan_path_S +{ + char status[LEN_FILE_NAME]; + char r_speed_get[LEN_FILE_NAME]; + char r_speed_set[LEN_FILE_NAME]; + char min[LEN_FILE_NAME]; + char max[LEN_FILE_NAME]; +}fan_path_T; + +#define _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) \ + { #prj"fan"#id"_status", \ + #prj"fan"#id"_speed_get", \ + #prj"fan"#id"_speed_set", \ + #prj"fan"#id"_min", \ + #prj"fan"#id"_max" } + +#define MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) + +static fan_path_T fan_path[] = /* must map with onlp_fan_id */ +{ + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_RESERVED), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_1_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_2_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_3_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_4_ON_MAIN_BOARD) +}; + +#define MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(id) \ + { \ + { ONLP_FAN_ID_CREATE(FAN_##id##_ON_MAIN_BOARD), "Chassis Fan "#id, 0 }, \ + 0x0, \ + (ONLP_FAN_CAPS_SET_PERCENTAGE | ONLP_FAN_CAPS_GET_PERCENTAGE | \ + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_SET_RPM), \ + 0, \ + 0, \ + ONLP_FAN_MODE_INVALID, \ + } + +/* Static fan information */ +onlp_fan_info_t linfo[] = { + { }, /* Not used */ + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(1), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(2), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(3), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(4) +}; + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_FAN(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +#define OPEN_READ_FILE(fullpath, data, nbytes, len) \ + if (onlp_file_read((uint8_t*)data, nbytes, &len, fullpath) < 0) \ + return ONLP_STATUS_E_INTERNAL; \ + else \ + AIM_LOG_VERBOSE("read data: %s\n", r_data); \ + +static int +_onlp_fani_info_get_fan(int local_id, onlp_fan_info_t* info) +{ + int len = 0, nbytes = 10; + float range = 0; + float temp = 0; + char r_data[10] = {0}; + char fullpath[65] = {0}; + const char fan_model[]=FAN_MODEL; + + /* Fixed system FAN is always present */ + info->status |= ONLP_FAN_STATUS_PRESENT; + + strncpy(info->model, fan_model, sizeof(info->model)); + + /* get fan speed */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].r_speed_get); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + info->rpm = atoi(r_data); + + /* check failure */ + if (info->rpm <= 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + if (ONLP_FAN_CAPS_GET_PERCENTAGE & info->caps) { + /* get fan min speed */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].min); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + min_fan_speed[local_id] = atoi(r_data); + + /* get fan max speed */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].max); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + max_fan_speed[local_id] = 
atoi(r_data); + + /* get speed percentage from rpm */ + range = max_fan_speed[local_id] - min_fan_speed[local_id]; + if (range > 0) { + temp = ((float)info->rpm - (float)min_fan_speed[local_id]) / range * 40.0 + 60.0; + if (temp < PERCENTAGE_MIN) { + temp = PERCENTAGE_MIN; + } + info->percentage = (int)temp; + } else { + return ONLP_STATUS_E_INTERNAL; + } + } + + return ONLP_STATUS_OK; +} + +/* + * This function will be called prior to all of onlp_fani_* functions. + */ +int +onlp_fani_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info) +{ + int rc = 0; + int local_id = 0; + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + *info = linfo[local_id]; + + switch (local_id) + { + case FAN_1_ON_MAIN_BOARD: + case FAN_2_ON_MAIN_BOARD: + case FAN_3_ON_MAIN_BOARD: + case FAN_4_ON_MAIN_BOARD: + rc =_onlp_fani_info_get_fan(local_id, info); + break; + default: + rc = ONLP_STATUS_E_INVALID; + break; + } + + return rc; +} + +/* + * This function sets the speed of the given fan in RPM. + * + * This function will only be called if the fan supprots the RPM_SET + * capability. + * + * It is optional if you have no fans at all with this feature. + */ +int +onlp_fani_rpm_set(onlp_oid_t id, int rpm) +{ + float temp = 0.0; + int rv = 0, local_id = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[LEN_FILE_NAME] = {0}; + onlp_fan_info_t* info = NULL; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + info = &linfo[local_id]; + + if (0 == (ONLP_FAN_CAPS_SET_RPM & info->caps)) { + return ONLP_STATUS_E_UNSUPPORTED; + } + + /* reject rpm=0% (rpm=0%, stop fan) */ + if (0 == rpm) { + return ONLP_STATUS_E_INVALID; + } + + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); + + /* Set fan speed + Converting percent to driver value. + Driver accept value in range between 153 and 255. + Value 153 is minimum rpm. + Value 255 is maximum rpm. + */ + if (local_id > sizeof(min_fan_speed)/sizeof(min_fan_speed[0])) { + return ONLP_STATUS_E_INTERNAL; + } + if (max_fan_speed[local_id] - min_fan_speed[local_id] < 0) { + return ONLP_STATUS_E_INTERNAL; + } + if (rpm < min_fan_speed[local_id] || rpm > max_fan_speed[local_id]) { + return ONLP_STATUS_E_PARAM; + } + + temp = (rpm - min_fan_speed[local_id]) * (RPM_MAGIC_MAX - RPM_MAGIC_MIN) / + (max_fan_speed[local_id] - min_fan_speed[local_id]) + RPM_MAGIC_MIN; + + snprintf(r_data, sizeof(r_data), "%d", (int)temp); + nbytes = strnlen(r_data, sizeof(r_data)); + rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function sets the fan speed of the given OID as a percentage. + * + * This will only be called if the OID has the PERCENTAGE_SET + * capability. + * + * It is optional if you have no fans at all with this feature. 
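+ *
+ * For illustration, using the constants defined above (PERCENTAGE_MIN 60,
+ * PERCENTAGE_MAX 100, RPM_MAGIC_MIN 153, RPM_MAGIC_MAX 255), a request of
+ * p = 80 maps to (80 - 60) * (255 - 153) / (100 - 60) + 153 = 204, and the
+ * string "204" is written to the corresponding /bsp/fan/fan<N>_speed_set
+ * node.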
+ */ +int +onlp_fani_percentage_set(onlp_oid_t id, int p) +{ + float temp = 0.0; + int rv = 0, local_id = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[LEN_FILE_NAME] = {0}; + onlp_fan_info_t* info = NULL; + + VALIDATE(id); + local_id = ONLP_OID_ID_GET(id); + info = &linfo[local_id]; + + if (0 == (ONLP_FAN_CAPS_SET_PERCENTAGE & info->caps)) { + return ONLP_STATUS_E_UNSUPPORTED; + } + + /* reject p=0% (p=0%, stop fan) */ + if (0 == p) { + return ONLP_STATUS_E_INVALID; + } + + if (p < PERCENTAGE_MIN || p > PERCENTAGE_MAX) { + return ONLP_STATUS_E_PARAM; + } + + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); + + /* Set fan speed + Converting percent to driver value. + Driver accept value in range between 153 and 255. + Value 153 is 60%. + Value 255 is 100%. + */ + temp = (p - PERCENTAGE_MIN) * (RPM_MAGIC_MAX - RPM_MAGIC_MIN) / + (PERCENTAGE_MAX - PERCENTAGE_MIN) + RPM_MAGIC_MIN; + + snprintf(r_data, sizeof(r_data), "%d", (int)temp); + nbytes = strnlen(r_data, sizeof(r_data)); + rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function sets the fan speed of the given OID as per + * the predefined ONLP fan speed modes: off, slow, normal, fast, max. + * + * Interpretation of these modes is up to the platform. + * + */ +int +onlp_fani_mode_set(onlp_oid_t id, onlp_fan_mode_t mode) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function sets the fan direction of the given OID. + * + * This function is only relevant if the fan OID supports both direction + * capabilities. + * + * This function is optional unless the functionality is available. + */ +int +onlp_fani_dir_set(onlp_oid_t id, onlp_fan_dir_t dir) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * Generic fan ioctl. Optional. + */ +int +onlp_fani_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c new file mode 100644 index 00000000..459207e7 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c @@ -0,0 +1,284 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define prefix_path "/bsp/led/led_" +#define driver_value_len 50 + +#define LED_MODE_OFF "none" +#define LED_MODE_GREEN "green" +#define LED_MODE_RED "red" +#define LED_MODE_BLUE "blue" +#define LED_MODE_GREEN_BLINK "green_blink" +#define LED_MODE_RED_BLINK "red_blink" +#define LED_MODE_BLUE_BLINK "blue_blink" +#define LED_MODE_AUTO "cpld_control" + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_LED(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +/* LED related data + */ +enum onlp_led_id +{ + LED_RESERVED = 0, + LED_SYSTEM, + LED_FAN, + LED_PSU1, + LED_PSU2, + LED_UID +}; + +typedef struct led_light_mode_map { + enum onlp_led_id id; + char* driver_led_mode; + enum onlp_led_mode_e onlp_led_mode; +} led_light_mode_map_t; + +led_light_mode_map_t led_map[] = { +{LED_SYSTEM, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_SYSTEM, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_SYSTEM, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_SYSTEM, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_SYSTEM, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_SYSTEM, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_PSU1, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_PSU1, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_PSU1, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_PSU1, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_PSU1, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_PSU1, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_PSU2, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_PSU2, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_PSU2, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_PSU2, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_PSU2, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_PSU2, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_UID, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_UID, LED_MODE_BLUE, ONLP_LED_MODE_BLUE}, +{LED_UID, LED_MODE_BLUE_BLINK, ONLP_LED_MODE_BLUE_BLINKING}, +{LED_UID, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, +}; + +static char file_names[][10] = /* must map with onlp_led_id */ +{ + "reserved", + "status", + "fan", + "psu1", + "psu2", + "uid" +}; + +/* + * Get the information for the given LED OID. 
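+ *
+ * The entries below carry only the fixed OID headers, status and
+ * capability masks; the current mode is read at runtime from
+ * /bsp/led/led_<name> (see file_names[] above) and translated through
+ * driver_to_onlp_led_mode().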
+ */ +static onlp_led_info_t linfo[] = +{ + { }, /* Not used */ + { + { ONLP_LED_ID_CREATE(LED_SYSTEM), "Chassis LED 1 (SYSTEM LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN), "Chassis LED 2 (FAN LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_PSU1), "Chassis LED 3 (PSU1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_PSU2), "Chassis LED 4 (PSU2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_UID), "Chassis LED 5 (UID LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_BLUE | ONLP_LED_CAPS_BLUE_BLINKING | + ONLP_LED_CAPS_AUTO, + } +}; + +static int driver_to_onlp_led_mode(enum onlp_led_id id, char* driver_led_mode) +{ + int i, nsize = sizeof(led_map)/sizeof(led_map[0]); + + for (i = 0; i < nsize; i++) + { + if (id == led_map[i].id && + !strncmp(led_map[i].driver_led_mode, driver_led_mode, driver_value_len)) + { + return led_map[i].onlp_led_mode; + } + } + + return 0; +} + +static char* onlp_to_driver_led_mode(enum onlp_led_id id, onlp_led_mode_t onlp_led_mode) +{ + int i, nsize = sizeof(led_map)/sizeof(led_map[0]); + + for (i = 0; i < nsize; i++) + { + if (id == led_map[i].id && onlp_led_mode == led_map[i].onlp_led_mode) + { + return led_map[i].driver_led_mode; + } + } + + return LED_MODE_OFF; +} + +/* + * This function will be called prior to any other onlp_ledi_* functions. + */ +int +onlp_ledi_init(void) +{ + /* + * TODO setting UI LED to off when it will be supported + */ + + return ONLP_STATUS_OK; +} + +int +onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) +{ + int len, local_id = 0; + uint8_t data[driver_value_len] = {0}; + char fullpath[50] = {0}; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + /* get fullpath */ + snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); + + /* Set the onlp_oid_hdr_t and capabilities */ + *info = linfo[ONLP_OID_ID_GET(id)]; + + /* Get LED mode */ + if (onlp_file_read(data, sizeof(data), &len, fullpath) != 0) { + return ONLP_STATUS_E_INTERNAL; + } + + info->mode = driver_to_onlp_led_mode(local_id, (char*)data); + + /* Set the on/off status */ + if (info->mode != ONLP_LED_MODE_OFF) { + info->status |= ONLP_LED_STATUS_ON; + } + + return ONLP_STATUS_OK; +} + +/* + * Turn an LED on or off. + * + * This function will only be called if the LED OID supports the ONOFF + * capability. + * + * What 'on' means in terms of colors or modes for multimode LEDs is + * up to the platform to decide. This is intended as baseline toggle mechanism. + */ +int +onlp_ledi_set(onlp_oid_t id, int on_or_off) +{ + VALIDATE(id); + + if (!on_or_off) { + return onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF); + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function puts the LED into the given mode. It is a more functional + * interface for multimode LEDs. 
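+ *
+ * For example, given the tables above, setting LED_UID to
+ * ONLP_LED_MODE_BLUE writes the string "blue" to /bsp/led/led_uid;
+ * combinations not present in led_map fall back to "none" (off).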
+ * + * Only modes reported in the LED's capabilities will be attempted. + */ +int +onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int local_id; + char fullpath[50] = {0}; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); + + if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, fullpath) != 0) + { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * Generic LED ioctl interface. + */ +int +onlp_ledi_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/make.mk new file mode 100644 index 00000000..415fc8f4 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### + +LIBRARY := x86_64_mlnx_msn2100 +$(LIBRARY)_SUBDIR := $(dir $(lastword $(MAKEFILE_LIST))) +include $(BUILDER)/lib.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.c new file mode 100644 index 00000000..07b40173 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.c @@ -0,0 +1,35 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +/* Nothing on this platform */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.h new file mode 100644 index 00000000..863da59d --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/platform_lib.h @@ -0,0 +1,52 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#ifndef __PLATFORM_LIB_H__ +#define __PLATFORM_LIB_H__ + +#include +#include +#include "x86_64_mlnx_msn2100_log.h" + +#define CHASSIS_LED_COUNT 5 +#define CHASSIS_PSU_COUNT 2 +#define CHASSIS_FAN_COUNT 4 +#define CHASSIS_THERMAL_COUNT 7 + +#define PSU1_ID 1 +#define PSU2_ID 2 + +#define PSU_MODULE_PREFIX "/bsp/module/psu%d_%s" +#define PSU_POWER_PREFIX "/bsp/power/psu%d_%s" +#define IDPROM_PATH "/bsp/eeprom/%s%d_info" + +typedef enum psu_type { + PSU_TYPE_UNKNOWN, + PSU_TYPE_AC_F2B, + PSU_TYPE_AC_B2F +} psu_type_t; + +psu_type_t get_psu_type(int id, char* modelname, int modelname_len); + +#endif /* __PLATFORM_LIB_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c new file mode 100644 index 00000000..e48cee9f --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c @@ -0,0 +1,176 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define PSU_STATUS_PRESENT 1 +#define PSU_CABLE_PRESENT 1 + +#define PSU_NODE_MAX_INT_LEN 8 +#define PSU_NODE_MAX_PATH_LEN 64 + +#define PSU_MODEL "POW000167" + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_PSU(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +static int +psu_module_info_get(int id, char *node, int *value) +{ + int len, ret = 0; + char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; + char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; + + *value = 0; + + sprintf(node_path, PSU_MODULE_PREFIX, id, node); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + if (ret == 0) { + *value = atoi(buf); + } + + return ret; +} + +static int +psu_power_info_get(int id, char *node, int *value) +{ + int len, ret = 0; + char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; + char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; + + *value = 0; + + sprintf(node_path, PSU_POWER_PREFIX, id, node); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + if (ret == 0) { + *value = atoi(buf); + } + + return ret; +} + +int +onlp_psui_init(void) +{ + return ONLP_STATUS_OK; +} + +static int +_psu_info_get(onlp_psu_info_t* info) +{ + int val = 0; + int index = ONLP_OID_ID_GET(info->hdr.id); + + /* Set capability */ + info->caps = ONLP_PSU_CAPS_AC; + + if (info->status & ONLP_PSU_STATUS_FAILED) { + return ONLP_STATUS_OK; + } + + /* Read voltage, current and power */ + if (psu_power_info_get(index, "volt", &val) == 0) { + info->mvout = val; + info->caps |= ONLP_PSU_CAPS_VOUT; + } + + if (psu_power_info_get(index, "curr", &val) == 0) { + info->miout = val; + info->caps |= ONLP_PSU_CAPS_IOUT; + } + + info->mpout = info->mvout * info->miout; + info->caps |= ONLP_PSU_CAPS_POUT; + + info->mpin = ((int)(info->mpout / 91)) * 100; + info->caps |= ONLP_PSU_CAPS_PIN; + + return ONLP_STATUS_OK; +} + +/* + * Get all information about the given PSU oid. 
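+ *
+ * Note on the derived fields filled in by _psu_info_get() above: output
+ * power is reported as the product of the measured voltage and current, and
+ * input power is then estimated from it, apparently assuming roughly 91%
+ * conversion efficiency:
+ *
+ *     info->mpout = info->mvout * info->miout;
+ *     info->mpin  = ((int)(info->mpout / 91)) * 100;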
+ */ +static onlp_psu_info_t pinfo[] = +{ + { }, /* Not used */ + { + { ONLP_PSU_ID_CREATE(PSU1_ID), "PSU-1", 0 }, + }, + { + { ONLP_PSU_ID_CREATE(PSU2_ID), "PSU-2", 0 }, + } +}; + +int +onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) +{ + int val = 0; + int ret = ONLP_STATUS_OK; + int index = ONLP_OID_ID_GET(id); + const char psu_model[]=PSU_MODEL; + + VALIDATE(id); + + memset(info, 0, sizeof(onlp_psu_info_t)); + *info = pinfo[index]; /* Set the onlp_oid_hdr_t */ + + /* Fixed system, PSU is always present */ + info->status |= ONLP_PSU_STATUS_PRESENT; + + strncpy(info->model, psu_model, sizeof(info->model)); + + /* Get the cable preset state */ + if (psu_module_info_get(index, "pwr_status", &val) != 0) { + AIM_LOG_ERROR("Unable to read PSU(%d) node(cable_present)\r\n", index); + } + + if (val != PSU_CABLE_PRESENT) { + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } + + info->status |= ONLP_PSU_STATUS_PRESENT; + + ret = _psu_info_get(info); + + return ret; +} + +int +onlp_psui_ioctl(onlp_oid_t pid, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sfpi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sfpi.c new file mode 100644 index 00000000..51fa0147 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sfpi.c @@ -0,0 +1,197 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include + +#include /* For O_RDWR && open */ +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define MAX_SFP_PATH 64 +#define SFP_SYSFS_VALUE_LEN 20 +static char sfp_node_path[MAX_SFP_PATH] = {0}; +#define NUM_OF_SFP_PORT 16 +#define SFP_PRESENT_STATUS "good" +#define SFP_NOT_PRESENT_STATUS "not_connected" + +static int +msn2100_sfp_node_read_int(char *node_path, int *value) +{ + int data_len = 0, ret = 0; + char buf[SFP_SYSFS_VALUE_LEN] = {0}; + *value = -1; + + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &data_len, node_path); + + if (ret == 0) { + if (!strncmp(buf, SFP_PRESENT_STATUS, strlen(SFP_PRESENT_STATUS))) { + *value = 1; + } else if (!strncmp(buf, SFP_NOT_PRESENT_STATUS, strlen(SFP_NOT_PRESENT_STATUS))) { + *value = 0; + } + } + + return ret; +} + +static char* +msn2100_sfp_get_port_path(int port, char *node_name) +{ + sprintf(sfp_node_path, "/bsp/qsfp/qsfp%d%s", port, node_name); + return sfp_node_path; +} + +/************************************************************ + * + * SFPI Entry Points + * + ***********************************************************/ +int +onlp_sfpi_init(void) +{ + /* Called at initialization time */ + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) +{ + int p = 1; + AIM_BITMAP_CLR_ALL(bmap); + + for (; p <= NUM_OF_SFP_PORT; p++) { + AIM_BITMAP_SET(bmap, p); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_is_present(int port) +{ + /* + * Return 1 if present. + * Return 0 if not present. + * Return < 0 if error. + */ + int present = -1; + char* path = msn2100_sfp_get_port_path(port, "_status"); + + if (msn2100_sfp_node_read_int(path, &present) != 0) { + AIM_LOG_ERROR("Unable to read present status from port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + return present; +} + +int +onlp_sfpi_presence_bitmap_get(onlp_sfp_bitmap_t* dst) +{ + int ii = 1; + int rc = 0; + + for (;ii <= NUM_OF_SFP_PORT; ii++) { + rc = onlp_sfpi_is_present(ii); + AIM_BITMAP_MOD(dst, ii, (1 == rc) ? 1 : 0); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_eeprom_read(int port, uint8_t data[256]) +{ + char* path = msn2100_sfp_get_port_path(port, ""); + + /* + * Read the SFP eeprom into data[] + * + * Return MISSING if SFP is missing. 
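+ * (In this implementation a failed read is reported as
+ * ONLP_STATUS_E_INTERNAL rather than a dedicated MISSING code.)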
+ * Return OK if eeprom is read + */ + memset(data, 0, 256); + + if (onlplib_sfp_eeprom_read_file(path, data) != 0) { + AIM_LOG_ERROR("Unable to read eeprom from port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_supported(int port, onlp_sfp_control_t control, int* rv) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_get(int port, onlp_sfp_control_t control, int* value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_denit(void) +{ + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c new file mode 100644 index 00000000..74b17de7 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c @@ -0,0 +1,258 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" +#include "x86_64_mlnx_msn2100_int.h" +#include "x86_64_mlnx_msn2100_log.h" + + + +#define COMMAND_OUTPUT_BUFFER 256 + +#define PREFIX_PATH_ON_CPLD_DEV "/bsp/cpld" +#define NUM_OF_CPLD 2 +static char arr_cplddev_name[NUM_OF_CPLD][30] = +{ + "cpld_brd_version", + "cpld_mgmt_version" +}; + +static void +_onlp_sysi_execute_command(char *command, char buffer[COMMAND_OUTPUT_BUFFER]) +{ + FILE *fp = NULL; + + /* Open the command for reading. 
*/ + fp = popen(command, "r"); + if (NULL == fp) { + AIM_LOG_WARN("Failed to run command '%s'\n", command); + } + + /* Read the output */ + if (fgets(buffer, COMMAND_OUTPUT_BUFFER-1, fp) == NULL) { + AIM_LOG_WARN("Failed to read output of command '%s'\n", command); + pclose(fp); + } + + /* The last symbol is '\n', so remote it */ + buffer[strnlen(buffer, COMMAND_OUTPUT_BUFFER) - 1] = '\0'; + + /* close */ + pclose(fp); +} + +const char* +onlp_sysi_platform_get(void) +{ + return "x86-64-mlnx-msn2100-r0"; +} + +int +onlp_sysi_platform_info_get(onlp_platform_info_t* pi) +{ + int i, v[NUM_OF_CPLD]={0}; + + for (i=0; i < NUM_OF_CPLD; i++) { + v[i] = 0; + if(onlp_file_read_int(v+i, "%s/%s", PREFIX_PATH_ON_CPLD_DEV, arr_cplddev_name[i]) < 0) { + return ONLP_STATUS_E_INTERNAL; + } + } + pi->cpld_versions = aim_fstrdup("brd=%d, mgmt=%d", v[0], v[1]); + + return ONLP_STATUS_OK; +} + +void +onlp_sysi_platform_info_free(onlp_platform_info_t* pi) +{ + aim_free(pi->cpld_versions); +} + + +int +onlp_sysi_oids_get(onlp_oid_t* table, int max) +{ + int i; + onlp_oid_t* e = table; + memset(table, 0, max*sizeof(onlp_oid_t)); + + for (i = 1; i <= CHASSIS_THERMAL_COUNT; i++) + { + *e++ = ONLP_THERMAL_ID_CREATE(i); + } + + for (i = 1; i <= CHASSIS_LED_COUNT; i++) + { + *e++ = ONLP_LED_ID_CREATE(i); + } + + for (i = 1; i <= CHASSIS_PSU_COUNT; i++) + { + *e++ = ONLP_PSU_ID_CREATE(i); + } + + for (i = 1; i <= CHASSIS_FAN_COUNT; i++) + { + *e++ = ONLP_FAN_ID_CREATE(i); + } + + return 0; +} + +static int +_onlp_sysi_grep_output(char value[256], const char *attr, const char *tmp_file) +{ + int value_offset = 30; /* value offset in onie-syseeprom */ + char command[256] = {0}; + char buffer[COMMAND_OUTPUT_BUFFER] = {0}; + int v = 0; + + snprintf(command, sizeof(command), "cat '%s' | grep '%s'", tmp_file, attr); + _onlp_sysi_execute_command(command, buffer); + + /* Reading value from buffer with command output */ + while (buffer[value_offset] != '\n' && + buffer[value_offset] != '\r' && + buffer[value_offset] != '\0') { + value[v] = buffer[value_offset]; + v++; + value_offset++; + } + value[v] = '\0'; + + AIM_LOG_VERBOSE("Value for sytem attribute '%s' is '%s' \n", attr, value); + + return ONLP_STATUS_OK; +} + +int +onlp_sysi_onie_info_get(onlp_onie_info_t* onie) +{ + + const char onie_version_file[] = "/bsp/onie-version"; + const char onie_version_command[] = "onie-shell -c 'onie-sysinfo -v' > /bsp/onie-version"; + const char onie_syseeprom_file[] = "/bsp/onie-syseeprom"; + const char onie_syseeprom_command[] = "onie-shell -c onie-syseeprom > /bsp/onie-syseeprom"; + struct stat stat_buf; + char value[256] = {0}; + char command[256] = {0}; + int rc = 0; + int exit_status; + + /* We must initialize this otherwise crash occurs while free memory */ + list_init(&onie->vx_list); + + /* Check if cache file exist */ + rc = stat(onie_syseeprom_file, &stat_buf); + if (-1 == rc) { + rc = system(onie_syseeprom_command); + if (-1 == rc) { + return rc; + } + exit_status = WEXITSTATUS(rc); + if (EXIT_SUCCESS != exit_status) { + return ONLP_STATUS_E_GENERIC; + } + } + + rc = _onlp_sysi_grep_output(value, "Product Name", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->product_name = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Part Number", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->part_number = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Serial Number", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->serial_number = 
aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Base MAC Address", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + strncpy((char*)onie->mac, value, sizeof(onie->mac)); + rc = _onlp_sysi_grep_output(value, "Manufacture Date", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacture_date = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Device Version", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->device_version = atoi(value); + rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacturer = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacturer = aim_strdup(value); + onie->vendor = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "MAC Addresses", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->mac_range = atoi(value); + /* Check if onie version first run and cache file exist */ + rc = stat(onie_version_file, &stat_buf); + if (-1 == rc) + { + rc = system(onie_version_command); + if (-1 == rc) { + return rc; + } + exit_status = WEXITSTATUS(rc); + if (EXIT_SUCCESS != exit_status) { + return ONLP_STATUS_E_GENERIC; + }} + snprintf(command, sizeof(command), "cat '%s'", onie_version_file); + _onlp_sysi_execute_command(command, value); + /* ONIE version */ + onie->onie_version = aim_strdup(value); + + /* Platform name */ + onie->platform_name = aim_strdup("x86_64-mlnx_msn2100-r0"); + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c new file mode 100644 index 00000000..32b9d254 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c @@ -0,0 +1,169 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Thermal Sensor Platform Implementation. + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define prefix_path "/bsp/thermal" + +/* CPU thermal_threshold */ +typedef enum cpu_thermal_threshold_e { + CPU_THERMAL_THRESHOLD_WARNING_DEFAULT = 87000, + CPU_THERMAL_THRESHOLD_ERROR_DEFAULT = 100000, + CPU_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT = 105000, +} cpu_thermal_threshold_t; + +/* Shortcut for CPU thermal threshold value. 
*/ +#define CPU_THERMAL_THRESHOLD_INIT_DEFAULTS \ + { CPU_THERMAL_THRESHOLD_WARNING_DEFAULT, \ + CPU_THERMAL_THRESHOLD_ERROR_DEFAULT, \ + CPU_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT } + +/* Asic thermal_threshold */ +typedef enum asic_thermal_threshold_e { + ASIC_THERMAL_THRESHOLD_WARNING_DEFAULT = 105000, + ASIC_THERMAL_THRESHOLD_ERROR_DEFAULT = 115000, + ASIC_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT = 120000, +} asic_thermal_threshold_t; + +/* Shortcut for CPU thermal threshold value. */ +#define ASIC_THERMAL_THRESHOLD_INIT_DEFAULTS \ + { ASIC_THERMAL_THRESHOLD_WARNING_DEFAULT, \ + ASIC_THERMAL_THRESHOLD_ERROR_DEFAULT, \ + ASIC_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT } + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_THERMAL(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +enum onlp_thermal_id +{ + THERMAL_RESERVED = 0, + THERMAL_CPU_CORE_0, + THERMAL_CPU_CORE_1, + THERMAL_CPU_CORE_2, + THERMAL_CPU_CORE_3, + THERMAL_ASIC, + THERMAL_BOARD_AMB, + THERMAL_PORT +}; + +static char* last_path[] = /* must map with onlp_thermal_id */ +{ + "reserved", + "cpu_core0", + "cpu_core1", + "cpu_core2", + "cpu_core3", + "asic", + "board_amb", + "port_amb" +}; + +/* Static values */ +static onlp_thermal_info_t linfo[] = { + { }, /* Not used */ + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_0), "CPU Core 0", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_1), "CPU Core 1", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_2), "CPU Core 2", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_3), "CPU Core 3", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ASIC), "Asic Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, ASIC_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_BOARD_AMB), "Board AMB Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_PORT), "Port AMB Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + } +}; + +/* + * This will be called to intiialize the thermali subsystem. + */ +int +onlp_thermali_init(void) +{ + return ONLP_STATUS_OK; +} + +/* + * Retrieve the information structure for the given thermal OID. + * + * If the OID is invalid, return ONLP_E_STATUS_INVALID. + * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. + * Otherwise, return ONLP_STATUS_OK with the OID's information. + * + * Note -- it is expected that you fill out the information + * structure even if the sensor described by the OID is not present. 
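+ *
+ * The raw value is read from /bsp/thermal/<sensor name> (see last_path[]
+ * above) and interpreted as milli-degrees Celsius, the same unit as the
+ * threshold defaults earlier in this file.  For example, the ASIC sensor
+ * path is built as follows and resolves to "/bsp/thermal/asic" (sketch only):
+ *
+ *     snprintf(fullpath, sizeof(fullpath), "%s/%s",
+ *              prefix_path, last_path[THERMAL_ASIC]);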
+ */ +int +onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) +{ + int rv, len = 10, temp_base=1, local_id = 0; + char r_data[10] = {0}; + char fullpath[50] = {0}; + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + /* Set the onlp_oid_hdr_t and capabilities */ + *info = linfo[local_id]; + + /* get fullpath */ + snprintf(fullpath, sizeof(fullpath), "%s/%s", prefix_path, last_path[local_id]); + + rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + info->mcelsius = atoi(r_data) / temp_base; + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_config.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_config.c new file mode 100644 index 00000000..131dc7af --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_config.c @@ -0,0 +1,81 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* */ +#define __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(_x) #_x +#define __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(_x) __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(_x) +x86_64_mlnx_msn2100_config_settings_t x86_64_mlnx_msn2100_config_settings[] = +{ +#ifdef x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_INCLUDE_LOGGING(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_PORTING_STDLIB(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), 
__x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + { __x86_64_mlnx_msn2100_config_STRINGIFY_NAME(x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION), __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE(x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION) }, +#else +{ x86_64_mlnx_msn2100_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION(__x86_64_mlnx_msn2100_config_STRINGIFY_NAME), "__undefined__" }, +#endif + { NULL, NULL } +}; +#undef __x86_64_mlnx_msn2100_config_STRINGIFY_VALUE +#undef __x86_64_mlnx_msn2100_config_STRINGIFY_NAME + +const char* +x86_64_mlnx_msn2100_config_lookup(const char* setting) +{ + int i; + for(i = 0; x86_64_mlnx_msn2100_config_settings[i].name; i++) { + if(strcmp(x86_64_mlnx_msn2100_config_settings[i].name, setting)) { + return x86_64_mlnx_msn2100_config_settings[i].value; + } + } + return NULL; +} + +int +x86_64_mlnx_msn2100_config_show(struct aim_pvs_s* pvs) +{ + int i; + for(i = 0; x86_64_mlnx_msn2100_config_settings[i].name; i++) { + aim_printf(pvs, "%s = %s\n", x86_64_mlnx_msn2100_config_settings[i].name, x86_64_mlnx_msn2100_config_settings[i].value); + } + return i; +} + +/* */ + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_enums.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_enums.c new file mode 100644 index 00000000..f72c40c5 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_enums.c @@ -0,0 +1,10 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.enum(ALL).source> */ +/* */ + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_int.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_int.h new file mode 100644 index 00000000..51706942 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_int.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * x86_64_mlnx_msn2100 Internal Header + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2100_INT_H__ +#define __x86_64_mlnx_msn2100_INT_H__ + +#include + + +#endif /* __x86_64_mlnx_msn2100_INT_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.c new file mode 100644 index 00000000..20e39e8c --- /dev/null +++ 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.c @@ -0,0 +1,18 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_mlnx_msn2100_log.h" +/* + * x86_64_mlnx_msn2100 log struct. + */ +AIM_LOG_STRUCT_DEFINE( + x86_64_mlnx_msn2100_CONFIG_LOG_OPTIONS_DEFAULT, + x86_64_mlnx_msn2100_CONFIG_LOG_BITS_DEFAULT, + NULL, /* Custom log map */ + x86_64_mlnx_msn2100_CONFIG_LOG_CUSTOM_BITS_DEFAULT + ); + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.h new file mode 100644 index 00000000..34f2a98e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_log.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2100_LOG_H__ +#define __x86_64_mlnx_msn2100_LOG_H__ + +#define AIM_LOG_MODULE_NAME x86_64_mlnx_msn2100 +#include + +#endif /* __x86_64_mlnx_msn2100_LOG_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_module.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_module.c new file mode 100644 index 00000000..7c1ecf56 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_module.c @@ -0,0 +1,24 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_mlnx_msn2100_log.h" + +static int +datatypes_init__(void) +{ +#define x86_64_mlnx_msn2100_ENUMERATION_ENTRY(_enum_name, _desc) AIM_DATATYPE_MAP_REGISTER(_enum_name, _enum_name##_map, _desc, AIM_LOG_INTERNAL); +#include + return 0; +} + +void __x86_64_mlnx_msn2100_module_init__(void) +{ + AIM_LOG_STRUCT_REGISTER(); + datatypes_init__(); +} + +int __onlp_platform_version__ = 1; diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_ucli.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_ucli.c new file mode 100644 index 00000000..dd124ba3 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/x86_64_mlnx_msn2100_ucli.c @@ -0,0 +1,50 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#if x86_64_mlnx_msn2100_CONFIG_INCLUDE_UCLI == 1 + +#include +#include +#include + +static ucli_status_t +x86_64_mlnx_msn2100_ucli_ucli__config__(ucli_context_t* uc) +{ + UCLI_HANDLER_MACRO_MODULE_CONFIG(x86_64_mlnx_msn2100) +} + +/* */ +/* */ + +static ucli_module_t +x86_64_mlnx_msn2100_ucli_module__ = + { + "x86_64_mlnx_msn2100_ucli", + NULL, + x86_64_mlnx_msn2100_ucli_ucli_handlers__, + NULL, + NULL, + }; + +ucli_node_t* +x86_64_mlnx_msn2100_ucli_node_create(void) +{ + ucli_node_t* n; + 
ucli_module_init(&x86_64_mlnx_msn2100_ucli_module__); + n = ucli_node_create("x86_64_mlnx_msn2100", NULL, &x86_64_mlnx_msn2100_ucli_module__); + ucli_node_subnode_add(n, ucli_module_log_node_create("x86_64_mlnx_msn2100")); + return n; +} + +#else +void* +x86_64_mlnx_msn2100_ucli_node_create(void) +{ + return NULL; +} +#endif + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/x86_64_mlnx_msn2100.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/x86_64_mlnx_msn2100.mk new file mode 100644 index 00000000..88588977 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/x86_64_mlnx_msn2100.mk @@ -0,0 +1,13 @@ + +############################################################################### +# +# Inclusive Makefile for the x86_64_mlnx_msn2100 module. +# +# Autogenerated 2015-12-23 23:45:56.754200 +# +############################################################################### +x86_64_mlnx_msn2100_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +include $(x86_64_mlnx_msn2100_BASEDIR)/module/make.mk +include $(x86_64_mlnx_msn2100_BASEDIR)/module/auto/make.mk +include $(x86_64_mlnx_msn2100_BASEDIR)/module/src/make.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/PKG.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/PKG.yml new file mode 100644 index 00000000..eb6c9456 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=mellanox PLATFORM=x86-64-mlnx-msn2100-r0 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml new file mode 100644 index 00000000..b2a9eb99 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml @@ -0,0 +1,35 @@ +--- + +###################################################################### +# +# platform-config for Mellanox msn2100 +# +###################################################################### + +x86-64-mlnx-msn2100-r0: + + grub: + + serial: >- + --unit=0 + --speed=115200 + --word=8 + --parity=0 + --stop=1 + + kernel: + <<: *kernel-3-16 + + args: >- + nopat + console=ttyS0,115200n8 + rd_NO_MD + rd_NO_LUKS + acpi_enforce_resources=lax + acpi=noirq + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:14.0 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py new file mode 100644 index 00000000..07c284ea --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py @@ -0,0 +1,17 @@ +from onl.platform.base import * +from onl.platform.mellanox import * + +class OnlPlatform_x86_64_mlnx_msn2100_r0(OnlPlatformMellanox, + OnlPlatformPortConfig_32x100): + PLATFORM='x86-64-mlnx-msn2100-r0' + MODEL="SN2100" + SYS_OBJECT_ID=".2100.1" + + def baseconfig(self): + # load modules + import os + # necessary if there are issues with the install + # os.system("/usr/bin/apt-get install") + os.system("/etc/mlnx/mlnx-hw-management start") + + return True diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/PKG.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/PKG.yml new file mode 100644 index 00000000..962cf73b --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=x86-64-mlnx-msn2410 ARCH=amd64 TOOLCHAIN=x86_64-linux-gnu diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/Makefile new file mode 100644 index 00000000..e7437cb2 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/Makefile @@ -0,0 +1,2 @@ +FILTER=src +include $(ONL)/make/subdirs.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/Makefile new file mode 100644 index 00000000..375c940e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +MODULE := libonlp-x86-64-mlnx-msn2410 +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF x86_64_mlnx_msn2410 onlplib +DEPENDMODULE_HEADERS := sff + +include $(BUILDER)/dependmodules.mk + +SHAREDLIB := libonlp-x86-64-mlnx-msn2410.so +$(SHAREDLIB)_TARGETS := $(ALL_TARGETS) +include $(BUILDER)/so.mk +.DEFAULT_GOAL := $(SHAREDLIB) + +GLOBAL_CFLAGS += -I$(onlp_BASEDIR)/module/inc +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -fPIC +GLOBAL_LINK_LIBS += -lpthread + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410-r0.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410-r0.mk new file mode 100644 index 00000000..89d5639b --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410-r0.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the libonlp-x86-64-mlnx-msn2410-r0 module. +# +# Autogenerated 2015-12-23 23:45:22.249911 +# +############################################################################### +libonlp-x86-64-mlnx-msn2410-r0_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410.mk new file mode 100644 index 00000000..a83dfa57 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/libonlp-x86-64-mlnx-msn2410.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the libonlp-x86-64-mlnx-msn2410 module. +# +# Autogenerated 2016-10-13 22:58:39.095824 +# +############################################################################### +libonlp-x86-64-mlnx-msn2410_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/x86_64_mlnx_msn2410.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/x86_64_mlnx_msn2410.mk new file mode 100644 index 00000000..8588a42c --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/lib/x86_64_mlnx_msn2410.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the x86_64_mlnx_msn2410 module. +# +# Autogenerated 2015-12-23 23:45:22.262891 +# +############################################################################### +x86_64_mlnx_msn2410_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/Makefile new file mode 100644 index 00000000..d2523a26 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/Makefile @@ -0,0 +1,46 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. 
+# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +.DEFAULT_GOAL := onlpdump + +MODULE := onlpdump +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF onlp x86_64_mlnx_msn2410 onlplib onlp_platform_defaults sff cjson cjson_util timer_wheel OS + +include $(BUILDER)/dependmodules.mk + +BINARY := onlpdump +$(BINARY)_LIBRARIES := $(LIBRARY_TARGETS) +include $(BUILDER)/bin.mk + +GLOBAL_CFLAGS += -DAIM_CONFIG_AIM_MAIN_FUNCTION=onlpdump_main +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MAIN=1 +GLOBAL_LINK_LIBS += -lpthread -lm + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/onlpdump.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/onlpdump.mk new file mode 100644 index 00000000..77d7d005 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/onlpdump/onlpdump.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the onlpdump module. 
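+# onlpdump_BASEDIR below resolves to the directory that contains this
+# makefile ($(lastword $(MAKEFILE_LIST)) expands to this file's own path).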
+# +# Autogenerated 2016-10-13 22:58:37.393320 +# +############################################################################### +onlpdump_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/.module b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/.module new file mode 100644 index 00000000..e17f0ebc --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/.module @@ -0,0 +1 @@ +name: x86_64_mlnx_msn2410 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/Makefile new file mode 100644 index 00000000..27e2df8d --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### +include ../../init.mk +MODULE := x86_64_mlnx_msn2410 +AUTOMODULE := x86_64_mlnx_msn2410 +include $(BUILDER)/definemodule.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/README b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/README new file mode 100644 index 00000000..4de12853 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/README @@ -0,0 +1,6 @@ +############################################################################### +# +# x86_64_mlnx_msn2410 README +# +############################################################################### + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/make.mk new file mode 100644 index 00000000..9db9e207 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# x86_64_mlnx_msn2410 Autogeneration +# +############################################################################### +x86_64_mlnx_msn2410_AUTO_DEFS := module/auto/x86_64_mlnx_msn2410.yml +x86_64_mlnx_msn2410_AUTO_DIRS := module/inc/x86_64_mlnx_msn2410 module/src +include $(BUILDER)/auto.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/x86_64_mlnx_msn2410.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/x86_64_mlnx_msn2410.yml new file mode 100644 index 00000000..a897908e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/auto/x86_64_mlnx_msn2410.yml @@ -0,0 +1,50 @@ +############################################################################### +# +# x86_64_mlnx_msn2410 Autogeneration Definitions. +# +############################################################################### + +cdefs: &cdefs +- X86_64_MLNX_MSN2410_CONFIG_INCLUDE_LOGGING: + doc: "Include or exclude logging." + default: 1 +- X86_64_MLNX_MSN2410_CONFIG_LOG_OPTIONS_DEFAULT: + doc: "Default enabled log options." + default: AIM_LOG_OPTIONS_DEFAULT +- X86_64_MLNX_MSN2410_CONFIG_LOG_BITS_DEFAULT: + doc: "Default enabled log bits." 
+ default: AIM_LOG_BITS_DEFAULT +- X86_64_MLNX_MSN2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT: + doc: "Default enabled custom log bits." + default: 0 +- X86_64_MLNX_MSN2410_CONFIG_PORTING_STDLIB: + doc: "Default all porting macros to use the C standard libraries." + default: 1 +- X86_64_MLNX_MSN2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS: + doc: "Include standard library headers for stdlib porting macros." + default: x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB +- X86_64_MLNX_MSN2410_CONFIG_INCLUDE_UCLI: + doc: "Include generic uCli support." + default: 0 +- X86_64_MLNX_MSN2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION: + doc: "Assume chassis fan direction is the same as the PSU fan direction." + default: 0 + + +definitions: + cdefs: + X86_64_MLNX_MSN2410_CONFIG_HEADER: + defs: *cdefs + basename: x86_64_mlnx_msn2410_config + + portingmacro: + x86_64_mlnx_msn2410: + macros: + - malloc + - free + - memset + - memcpy + - strncpy + - vsnprintf + - snprintf + - strlen diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410.x b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410.x new file mode 100644 index 00000000..f4073e86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410.x @@ -0,0 +1,14 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.xmacro(ALL).define> */ +/* */ + +/* <--auto.start.xenum(ALL).define> */ +/* */ + + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_config.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_config.h new file mode 100644 index 00000000..ea70773d --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_config.h @@ -0,0 +1,137 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_mlnx_msn2410 Configuration Header + * + * @addtogroup x86_64_mlnx_msn2410-config + * @{ + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2410_CONFIG_H__ +#define __x86_64_mlnx_msn2410_CONFIG_H__ + +#ifdef GLOBAL_INCLUDE_CUSTOM_CONFIG +#include +#endif +#ifdef x86_64_mlnx_msn2410_INCLUDE_CUSTOM_CONFIG +#include +#endif + +/* */ +#include +/** + * x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING + * + * Include or exclude logging. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING +#define x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING 1 +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT + * + * Default enabled log options. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT +#define x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT AIM_LOG_OPTIONS_DEFAULT +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT + * + * Default enabled log bits. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT +#define x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT AIM_LOG_BITS_DEFAULT +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT + * + * Default enabled custom log bits. 
*/ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT +#define x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT 0 +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB + * + * Default all porting macros to use the C standard libraries. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB +#define x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB 1 +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + * + * Include standard library headers for stdlib porting macros. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS +#define x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI + * + * Include generic uCli support. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI +#define x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI 0 +#endif + +/** + * x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + * + * Assume chassis fan direction is the same as the PSU fan direction. */ + + +#ifndef x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION +#define x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION 0 +#endif + + + +/** + * All compile time options can be queried or displayed + */ + +/** Configuration settings structure. */ +typedef struct x86_64_mlnx_msn2410_config_settings_s { + /** name */ + const char* name; + /** value */ + const char* value; +} x86_64_mlnx_msn2410_config_settings_t; + +/** Configuration settings table. */ +/** x86_64_mlnx_msn2410_config_settings table. */ +extern x86_64_mlnx_msn2410_config_settings_t x86_64_mlnx_msn2410_config_settings[]; + +/** + * @brief Lookup a configuration setting. + * @param setting The name of the configuration option to lookup. + */ +const char* x86_64_mlnx_msn2410_config_lookup(const char* setting); + +/** + * @brief Show the compile-time configuration. + * @param pvs The output stream. + */ +int x86_64_mlnx_msn2410_config_show(struct aim_pvs_s* pvs); + +/* */ + +#include "x86_64_mlnx_msn2410_porting.h" + +#endif /* __x86_64_mlnx_msn2410_CONFIG_H__ */ +/* @} */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_dox.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_dox.h new file mode 100644 index 00000000..9a4c2246 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_dox.h @@ -0,0 +1,26 @@ +/**************************************************************************//** + * + * x86_64_mlnx_msn2410 Doxygen Header + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2410_DOX_H__ +#define __x86_64_mlnx_msn2410_DOX_H__ + +/** + * @defgroup x86_64_mlnx_msn2410 x86_64_mlnx_msn2410 - x86_64_mlnx_msn2410 Description + * + +The documentation overview for this module should go here. 
+ + * + * @{ + * + * @defgroup x86_64_mlnx_msn2410-x86_64_mlnx_msn2410 Public Interface + * @defgroup x86_64_mlnx_msn2410-config Compile Time Configuration + * @defgroup x86_64_mlnx_msn2410-porting Porting Macros + * + * @} + * + */ + +#endif /* __x86_64_mlnx_msn2410_DOX_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_porting.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_porting.h new file mode 100644 index 00000000..4ff0f886 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/inc/x86_64_mlnx_msn2410/x86_64_mlnx_msn2410_porting.h @@ -0,0 +1,107 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_mlnx_msn2410 Porting Macros. + * + * @addtogroup x86_64_mlnx_msn2410-porting + * @{ + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2410_PORTING_H__ +#define __x86_64_mlnx_msn2410_PORTING_H__ + + +/* */ +#if x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS == 1 +#include +#include +#include +#include +#include +#endif + +#ifndef x86_64_mlnx_msn2410_MALLOC + #if defined(GLOBAL_MALLOC) + #define x86_64_mlnx_msn2410_MALLOC GLOBAL_MALLOC + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_MALLOC malloc + #else + #error The macro x86_64_mlnx_msn2410_MALLOC is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2410_FREE + #if defined(GLOBAL_FREE) + #define x86_64_mlnx_msn2410_FREE GLOBAL_FREE + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_FREE free + #else + #error The macro x86_64_mlnx_msn2410_FREE is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2410_MEMSET + #if defined(GLOBAL_MEMSET) + #define x86_64_mlnx_msn2410_MEMSET GLOBAL_MEMSET + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_MEMSET memset + #else + #error The macro x86_64_mlnx_msn2410_MEMSET is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2410_MEMCPY + #if defined(GLOBAL_MEMCPY) + #define x86_64_mlnx_msn2410_MEMCPY GLOBAL_MEMCPY + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_MEMCPY memcpy + #else + #error The macro x86_64_mlnx_msn2410_MEMCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2410_STRNCPY + #if defined(GLOBAL_STRNCPY) + #define x86_64_mlnx_msn2410_STRNCPY GLOBAL_STRNCPY + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_STRNCPY strncpy + #else + #error The macro x86_64_mlnx_msn2410_STRNCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2410_VSNPRINTF + #if defined(GLOBAL_VSNPRINTF) + #define x86_64_mlnx_msn2410_VSNPRINTF GLOBAL_VSNPRINTF + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_VSNPRINTF vsnprintf + #else + #error The macro x86_64_mlnx_msn2410_VSNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2410_SNPRINTF + #if defined(GLOBAL_SNPRINTF) + #define x86_64_mlnx_msn2410_SNPRINTF GLOBAL_SNPRINTF + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_SNPRINTF snprintf + #else + #error The macro x86_64_mlnx_msn2410_SNPRINTF is required but cannot be defined. 
+ #endif +#endif + +#ifndef x86_64_mlnx_msn2410_STRLEN + #if defined(GLOBAL_STRLEN) + #define x86_64_mlnx_msn2410_STRLEN GLOBAL_STRLEN + #elif x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2410_STRLEN strlen + #else + #error The macro x86_64_mlnx_msn2410_STRLEN is required but cannot be defined. + #endif +#endif + +/* */ + + +#endif /* __x86_64_mlnx_msn2410_PORTING_H__ */ +/* @} */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/make.mk new file mode 100644 index 00000000..05c99981 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/make.mk @@ -0,0 +1,10 @@ +############################################################################### +# +# +# +############################################################################### +THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +x86_64_mlnx_msn2410_INCLUDES := -I $(THIS_DIR)inc +x86_64_mlnx_msn2410_INTERNAL_INCLUDES := -I $(THIS_DIR)src +x86_64_mlnx_msn2410_DEPENDMODULE_ENTRIES := init:x86_64_mlnx_msn2410 ucli:x86_64_mlnx_msn2410 + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/Makefile new file mode 100644 index 00000000..07be82d9 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# Local source generation targets. +# +############################################################################### + +ucli: + @../../../../tools/uclihandlers.py x86_64_mlnx_msn2410_ucli.c + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c new file mode 100644 index 00000000..dfcb39c9 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c @@ -0,0 +1,544 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Fan Platform Implementation Defaults. 
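+ *
+ * Chassis fan speeds are read from the attributes under /bsp/fan/ and fan
+ * module status from /bsp/module/; PSU fan speeds use the
+ * psu<N>_fan1_speed_get attributes under the same /bsp/fan/ prefix.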
+ * + ***********************************************************/ +#include +#include +#include +#include +#include "platform_lib.h" + +#define PREFIX_PATH "/bsp/fan/" +#define PREFIX_MODULE_PATH "/bsp/module/" + +#define FAN_STATUS_OK 1 + +#define PERCENTAGE_MIN 60.0 +#define PERCENTAGE_MAX 100.0 +#define RPM_MAGIC_MIN 153.0 +#define RPM_MAGIC_MAX 255.0 + +#define PSU_FAN_RPM_MIN 11700.0 +#define PSU_FAN_RPM_MAX 19500.0 + +#define PROJECT_NAME +#define LEN_FILE_NAME 80 + +#define FAN_RESERVED 0 +#define FAN_1_ON_MAIN_BOARD 1 +#define FAN_2_ON_MAIN_BOARD 2 +#define FAN_3_ON_MAIN_BOARD 3 +#define FAN_4_ON_MAIN_BOARD 4 +#define FAN_5_ON_MAIN_BOARD 5 +#define FAN_6_ON_MAIN_BOARD 6 +#define FAN_7_ON_MAIN_BOARD 7 +#define FAN_8_ON_MAIN_BOARD 8 +#define FAN_1_ON_PSU1 9 +#define FAN_1_ON_PSU2 10 + +static int min_fan_speed[CHASSIS_FAN_COUNT+1] = {0}; +static int max_fan_speed[CHASSIS_FAN_COUNT+1] = {0}; + +typedef struct fan_path_S +{ + char status[LEN_FILE_NAME]; + char r_speed_get[LEN_FILE_NAME]; + char r_speed_set[LEN_FILE_NAME]; + char min[LEN_FILE_NAME]; + char max[LEN_FILE_NAME]; +}fan_path_T; + +#define _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) \ + { #prj"fan"#id"_status", \ + #prj"fan"#id"_speed_get", \ + #prj"fan"#id"_speed_set", \ + #prj"fan"#id"_min", \ + #prj"fan"#id"_max" } + +#define MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) + +#define MAKE_FAN_PATH_ON_PSU(psu_id, fan_id) \ + {"psu"#psu_id"_status", \ + "psu"#psu_id"_fan"#fan_id"_speed_get", "", "", "",} + +static fan_path_T fan_path[] = /* must map with onlp_fan_id */ +{ + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_RESERVED), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_1_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_2_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_3_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_4_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_5_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_6_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_7_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_8_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_PSU(1 ,1), + MAKE_FAN_PATH_ON_PSU(2, 1) +}; + +#define MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(id) \ + { \ + { ONLP_FAN_ID_CREATE(FAN_##id##_ON_MAIN_BOARD), "Chassis Fan "#id, 0 }, \ + 0x0, \ + (ONLP_FAN_CAPS_SET_PERCENTAGE | ONLP_FAN_CAPS_GET_PERCENTAGE | \ + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_SET_RPM), \ + 0, \ + 0, \ + ONLP_FAN_MODE_INVALID, \ + } + +#define MAKE_FAN_INFO_NODE_ON_PSU(psu_id, fan_id) \ + { \ + { ONLP_FAN_ID_CREATE(FAN_##fan_id##_ON_PSU##psu_id), "Chassis PSU-"#psu_id" Fan "#fan_id, 0 }, \ + 0x0, \ + (ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE), \ + 0, \ + 0, \ + ONLP_FAN_MODE_INVALID, \ + } + +/* Static fan information */ +onlp_fan_info_t linfo[] = { + { }, /* Not used */ + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(1), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(2), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(3), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(4), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(5), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(6), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(7), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(8), + MAKE_FAN_INFO_NODE_ON_PSU(1,1), + MAKE_FAN_INFO_NODE_ON_PSU(2,1) +}; + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_FAN(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +#define OPEN_READ_FILE(fullpath, data, nbytes, len) \ + if (onlp_file_read((uint8_t*)data, nbytes, &len, fullpath) < 0) \ + return 
ONLP_STATUS_E_INTERNAL; \ + else \ + AIM_LOG_VERBOSE("read data: %s\n", r_data); \ + + +static int +_onlp_fani_read_fan_eeprom(int local_id, onlp_fan_info_t* info) +{ + const char sanity_checker[] = "MLNX"; + const uint8_t sanity_offset = 8; + const uint8_t sanity_len = 4; + const uint8_t block1_start = 12; + const uint8_t block1_type = 1; + const uint8_t block2_start = 14; + const uint8_t block2_type = 5; + const uint8_t serial_offset = 8; + const uint8_t serial_len = 24; + const uint8_t part_len = 20; + const uint8_t fan_offset = 14; + const uint8_t multiplier = 16; + uint8_t data[256] = {0}; + uint8_t offset = 0; + uint8_t temp = 0; + int rv = 0; + int len = 0; + char path[LEN_FILE_NAME] = {0}; + + /* We have 4 FRU with 2 fans(total 8 fans). + Eeprom is per FRU but not per fan. + So, need to convert fan ID to FRU ID.*/ + if (local_id % 2) { + local_id = local_id / 2 + 1; + } else { + local_id /= 2; + } + + /* Reading FRU eeprom. */ + snprintf(path, sizeof(path), IDPROM_PATH, "fan", local_id); + rv = onlp_file_read(data, sizeof(data), &len, path); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Sanity checker */ + if (strncmp(sanity_checker, (char*)&data[sanity_offset], sanity_len)) { + return ONLP_STATUS_E_INVALID; + } + + /* Checking eeprom block type with S/N and P/N */ + if (data[block1_start + 1] != block1_type) { + return ONLP_STATUS_E_INVALID; + } + + /* Reading serial number */ + offset = data[block1_start] * multiplier + serial_offset; + strncpy(info->serial, (char *)&data[offset], serial_len); + + /* Reading part number */ + offset += serial_len; + strncpy(info->model, (char *)&data[offset], part_len); + + /* Reading fan direction */ + if (data[block2_start + 1] != block2_type) { + return ONLP_STATUS_E_INVALID; + } + offset = data[block2_start] * multiplier + fan_offset; + temp = data[offset]; + switch (temp) { + case 1: + info->caps |= ONLP_FAN_CAPS_F2B; + break; + case 2: + info->caps |= ONLP_FAN_CAPS_B2F; + break; + default: + break; + } + + return ONLP_STATUS_OK; +} + +static int +_onlp_fani_info_get_fan(int local_id, onlp_fan_info_t* info) +{ + int len = 0, nbytes = 10; + float range = 0; + float temp = 0; + float fru_index = 0; + char r_data[10] = {0}; + char fullpath[65] = {0}; + + /* We have 4 FRU with 2 fans(total 8 fans). + Eeprom is per FRU but not per fan. 
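+       (for example, fans 1 and 2 map to FRU 1, fans 3 and 4 to FRU 2,
+       and fans 7 and 8 to FRU 4).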
+ So, need to convert fan ID to FRU ID.*/ + if (local_id % 2) { + fru_index = local_id / 2 + 1; + } else { + fru_index = local_id / 2; + } + + /* get fan status + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_MODULE_PATH, fan_path[(int)fru_index].status); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + if (atoi(r_data) != FAN_STATUS_OK) { + return ONLP_STATUS_OK; + } + info->status |= ONLP_FAN_STATUS_PRESENT; + + /* get fan speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].r_speed_get); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + info->rpm = atoi(r_data); + + /* check failure */ + if (info->rpm <= 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + if (ONLP_FAN_CAPS_GET_PERCENTAGE & info->caps) { + /* get fan min speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].min); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + min_fan_speed[local_id] = atoi(r_data); + + /* get fan max speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].max); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + max_fan_speed[local_id] = atoi(r_data); + + /* get speed percentage from rpm */ + range = max_fan_speed[local_id] - min_fan_speed[local_id]; + if (range > 0) { + temp = ((float)info->rpm - (float)min_fan_speed[local_id]) / range * 40.0 + 60.0; + if (temp < PERCENTAGE_MIN) { + temp = PERCENTAGE_MIN; + } + info->percentage = (int)temp; + } else { + return ONLP_STATUS_E_INTERNAL; + } + } + + return _onlp_fani_read_fan_eeprom(local_id, info); +} + +static int +_onlp_fani_info_get_fan_on_psu(int local_id, int psu_id, onlp_fan_info_t* info) +{ + int len = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[80] = {0}; + float rpms_per_perc = 0.0; + float temp = 0.0; + + /* get fan status + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_MODULE_PATH, fan_path[local_id].status); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + if (atoi(r_data) != FAN_STATUS_OK) { + return ONLP_STATUS_OK; + } + info->status |= ONLP_FAN_STATUS_PRESENT; + + /* get fan speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].r_speed_get); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + info->rpm = atoi(r_data); + + /* check failure */ + if (info->rpm <= 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + /* get speed percentage from rpm */ + rpms_per_perc = PSU_FAN_RPM_MIN / PERCENTAGE_MIN; + temp = (float)info->rpm / rpms_per_perc; + if (temp < PERCENTAGE_MIN) { + temp = PERCENTAGE_MIN; + } + info->percentage = (int)temp; + + /* Serial number and model for PSU fan is the same as for appropriate PSU */ + if (FAN_1_ON_PSU1 == local_id) { + if (0 != psu_read_eeprom(PSU1_ID, NULL, info)) + return ONLP_STATUS_E_INTERNAL; + } else if (FAN_1_ON_PSU2 == local_id) { + if (0 != psu_read_eeprom(PSU2_ID, NULL, info)) + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function will be called prior to all of onlp_fani_* functions. 
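+ * Note that the chassis fan min/max speed tables (min_fan_speed and
+ * max_fan_speed) are not populated here; _onlp_fani_info_get_fan() fills
+ * them when a fan is first queried, and onlp_fani_rpm_set() relies on them.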
+ */ +int +onlp_fani_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info) +{ + int rc = 0; + int local_id = 0; + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + *info = linfo[local_id]; + + switch (local_id) + { + case FAN_1_ON_PSU1: + rc = _onlp_fani_info_get_fan_on_psu(local_id, PSU1_ID, info); + break; + case FAN_1_ON_PSU2: + rc = _onlp_fani_info_get_fan_on_psu(local_id, PSU2_ID, info); + break; + case FAN_1_ON_MAIN_BOARD: + case FAN_2_ON_MAIN_BOARD: + case FAN_3_ON_MAIN_BOARD: + case FAN_4_ON_MAIN_BOARD: + case FAN_5_ON_MAIN_BOARD: + case FAN_6_ON_MAIN_BOARD: + case FAN_7_ON_MAIN_BOARD: + case FAN_8_ON_MAIN_BOARD: + rc =_onlp_fani_info_get_fan(local_id, info); + break; + default: + rc = ONLP_STATUS_E_INVALID; + break; + } + + return rc; +} + +/* + * This function sets the speed of the given fan in RPM. + * + * This function will only be called if the fan supprots the RPM_SET + * capability. + * + * It is optional if you have no fans at all with this feature. + */ +int +onlp_fani_rpm_set(onlp_oid_t id, int rpm) +{ + float temp = 0.0; + int rv = 0, local_id = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[LEN_FILE_NAME] = {0}; + onlp_fan_info_t* info = NULL; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + info = &linfo[local_id]; + + if (0 == (ONLP_FAN_CAPS_SET_RPM & info->caps)) { + return ONLP_STATUS_E_UNSUPPORTED; + } + + /* reject rpm=0% (rpm=0%, stop fan) */ + if (0 == rpm) { + return ONLP_STATUS_E_INVALID; + } + + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); + + /* Set fan speed + Converting percent to driver value. + Driver accept value in range between 153 and 255. + Value 153 is minimum rpm. + Value 255 is maximum rpm. + */ + if (local_id > sizeof(min_fan_speed)/sizeof(min_fan_speed[0])) { + return ONLP_STATUS_E_INTERNAL; + } + if (max_fan_speed[local_id] - min_fan_speed[local_id] < 0) { + return ONLP_STATUS_E_INTERNAL; + } + if (rpm < min_fan_speed[local_id] || rpm > max_fan_speed[local_id]) { + return ONLP_STATUS_E_PARAM; + } + + temp = (rpm - min_fan_speed[local_id]) * (RPM_MAGIC_MAX - RPM_MAGIC_MIN) / + (max_fan_speed[local_id] - min_fan_speed[local_id]) + RPM_MAGIC_MIN; + + snprintf(r_data, sizeof(r_data), "%d", (int)temp); + nbytes = strnlen(r_data, sizeof(r_data)); + rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function sets the fan speed of the given OID as a percentage. + * + * This will only be called if the OID has the PERCENTAGE_SET + * capability. + * + * It is optional if you have no fans at all with this feature. + */ +int +onlp_fani_percentage_set(onlp_oid_t id, int p) +{ + float temp = 0.0; + int rv = 0, local_id = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[LEN_FILE_NAME] = {0}; + onlp_fan_info_t* info = NULL; + + VALIDATE(id); + local_id = ONLP_OID_ID_GET(id); + info = &linfo[local_id]; + + if (0 == (ONLP_FAN_CAPS_SET_PERCENTAGE & info->caps)) { + return ONLP_STATUS_E_UNSUPPORTED; + } + + /* reject p=0% (p=0%, stop fan) */ + if (0 == p) { + return ONLP_STATUS_E_INVALID; + } + + if (p < PERCENTAGE_MIN || p > PERCENTAGE_MAX) { + return ONLP_STATUS_E_PARAM; + } + + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); + + /* Set fan speed + Converting percent to driver value. + Driver accept value in range between 153 and 255. + Value 153 is 60%. + Value 255 is 100%. 
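+       For example, p = 80 maps to
+       (80 - 60) * (255 - 153) / (100 - 60) + 153 = 204.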
+ */ + temp = (p - PERCENTAGE_MIN) * (RPM_MAGIC_MAX - RPM_MAGIC_MIN) / + (PERCENTAGE_MAX - PERCENTAGE_MIN) + RPM_MAGIC_MIN; + + snprintf(r_data, sizeof(r_data), "%d", (int)temp); + nbytes = strnlen(r_data, sizeof(r_data)); + rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function sets the fan speed of the given OID as per + * the predefined ONLP fan speed modes: off, slow, normal, fast, max. + * + * Interpretation of these modes is up to the platform. + * + */ +int +onlp_fani_mode_set(onlp_oid_t id, onlp_fan_mode_t mode) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function sets the fan direction of the given OID. + * + * This function is only relevant if the fan OID supports both direction + * capabilities. + * + * This function is optional unless the functionality is available. + */ +int +onlp_fani_dir_set(onlp_oid_t id, onlp_fan_dir_t dir) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * Generic fan ioctl. Optional. + */ +int +onlp_fani_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c new file mode 100644 index 00000000..27b39485 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c @@ -0,0 +1,301 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define prefix_path "/bsp/led/led_" +#define driver_value_len 50 + +#define LED_MODE_OFF "none" +#define LED_MODE_GREEN "green" +#define LED_MODE_RED "red" +#define LED_MODE_BLUE "blue" +#define LED_MODE_GREEN_BLINK "green_blink" +#define LED_MODE_RED_BLINK "red_blink" +#define LED_MODE_BLUE_BLINK "blue_blink" +#define LED_MODE_AUTO "cpld_control" + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_LED(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +/* LED related data + */ +enum onlp_led_id +{ + LED_RESERVED = 0, + LED_SYSTEM, + LED_FAN1, + LED_FAN2, + LED_FAN3, + LED_FAN4, + LED_PSU, +}; + +typedef struct led_light_mode_map { + enum onlp_led_id id; + char* driver_led_mode; + enum onlp_led_mode_e onlp_led_mode; +} led_light_mode_map_t; + +led_light_mode_map_t led_map[] = { +{LED_SYSTEM, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_SYSTEM, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_SYSTEM, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_SYSTEM, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_SYSTEM, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_SYSTEM, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN1, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN1, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN1, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN1, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN1, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN1, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN2, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN2, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN2, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN2, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN2, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN2, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN3, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN3, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN3, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN3, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN3, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN3, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN4, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN4, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN4, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN4, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN4, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN4, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_PSU, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_PSU, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_PSU, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_PSU, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_PSU, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_PSU, LED_MODE_AUTO, ONLP_LED_MODE_AUTO} +}; + +static char file_names[][10] = /* must map with onlp_led_id */ +{ + "reserved", + "status", + "fan1", + "fan2", + "fan3", + "fan4", + "psu" +}; + +/* + * Get the information for the given LED OID. 
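+ * Modes are read as strings from /bsp/led/led_<name>; for example,
+ * /bsp/led/led_status containing "green_blink" is translated to
+ * ONLP_LED_MODE_GREEN_BLINKING through led_map.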
+ */ +static onlp_led_info_t linfo[] = +{ + { }, /* Not used */ + { + { ONLP_LED_ID_CREATE(LED_SYSTEM), "Chassis LED 1 (SYSTEM LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN1), "Chassis LED 2 (FAN1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN2), "Chassis LED 3 (FAN2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN3), "Chassis LED 4 (FAN3 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN4), "Chassis LED 5 (FAN4 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_PSU), "Chassis LED 6 (PSU LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + } +}; + +static int driver_to_onlp_led_mode(enum onlp_led_id id, char* driver_led_mode) +{ + int i, nsize = sizeof(led_map)/sizeof(led_map[0]); + + for (i = 0; i < nsize; i++) + { + if (id == led_map[i].id && + !strncmp(led_map[i].driver_led_mode, driver_led_mode, driver_value_len)) + { + return led_map[i].onlp_led_mode; + } + } + + return 0; +} + +static char* onlp_to_driver_led_mode(enum onlp_led_id id, onlp_led_mode_t onlp_led_mode) +{ + int i, nsize = sizeof(led_map)/sizeof(led_map[0]); + + for (i = 0; i < nsize; i++) + { + if (id == led_map[i].id && onlp_led_mode == led_map[i].onlp_led_mode) + { + return led_map[i].driver_led_mode; + } + } + + return LED_MODE_OFF; +} + +/* + * This function will be called prior to any other onlp_ledi_* functions. + */ +int +onlp_ledi_init(void) +{ + /* + * TODO setting UI LED to off when it will be supported on MSN2410 + */ + + return ONLP_STATUS_OK; +} + +int +onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) +{ + int len, local_id = 0; + uint8_t data[driver_value_len] = {0}; + char fullpath[50] = {0}; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + /* get fullpath */ + snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); + + /* Set the onlp_oid_hdr_t and capabilities */ + *info = linfo[ONLP_OID_ID_GET(id)]; + + /* Get LED mode */ + if (onlp_file_read(data, sizeof(data), &len, fullpath) != 0) { + return ONLP_STATUS_E_INTERNAL; + } + + info->mode = driver_to_onlp_led_mode(local_id, (char*)data); + + /* Set the on/off status */ + if (info->mode != ONLP_LED_MODE_OFF) { + info->status |= ONLP_LED_STATUS_ON; + } + + return ONLP_STATUS_OK; +} + +/* + * Turn an LED on or off. + * + * This function will only be called if the LED OID supports the ONOFF + * capability. + * + * What 'on' means in terms of colors or modes for multimode LEDs is + * up to the platform to decide. This is intended as baseline toggle mechanism. 
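+ * In this implementation only the 'off' direction is handled:
+ * onlp_ledi_set(id, 0) delegates to onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF),
+ * while requesting 'on' returns ONLP_STATUS_E_UNSUPPORTED.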
+ */ +int +onlp_ledi_set(onlp_oid_t id, int on_or_off) +{ + VALIDATE(id); + + if (!on_or_off) { + return onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF); + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function puts the LED into the given mode. It is a more functional + * interface for multimode LEDs. + * + * Only modes reported in the LED's capabilities will be attempted. + */ +int +onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int local_id; + char fullpath[50] = {0}; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); + + if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, fullpath) != 0) + { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * Generic LED ioctl interface. + */ +int +onlp_ledi_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/make.mk new file mode 100644 index 00000000..b5b9d110 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### + +LIBRARY := x86_64_mlnx_msn2410 +$(LIBRARY)_SUBDIR := $(dir $(lastword $(MAKEFILE_LIST))) +include $(BUILDER)/lib.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c new file mode 100644 index 00000000..c0c1765e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c @@ -0,0 +1,80 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +int +psu_read_eeprom(int psu_index, onlp_psu_info_t* psu_info, onlp_fan_info_t* fan_info) +{ + char path[64] = {0}; + const char sanity_check[] = "MLNX"; + const uint8_t serial_len = 24; + char data[256] = {0}; + bool sanity_found = false; + int index = 0, rv = 0, len = 0; + + snprintf(path, sizeof(path), IDPROM_PATH, "psu", psu_index); + rv = onlp_file_read((uint8_t* )data, sizeof(data)-1, &len, path); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Looking for sanity checker */ + while (index < sizeof(data) - sizeof(sanity_check) - 1) { + if (!strncmp(&data[index], sanity_check, sizeof(sanity_check) - 1)) { + sanity_found = true; + break; + } + index++; + } + if (false == sanity_found) { + return ONLP_STATUS_E_INVALID; + } + + /* Serial number */ + index += strlen(sanity_check); + if (psu_info) { + strncpy(psu_info->serial, &data[index], sizeof(psu_info->serial)); + } else if (fan_info) { + strncpy(fan_info->serial, &data[index], sizeof(fan_info->serial)); + } + + /* Part number */ + index += serial_len; + if (psu_info) { + strncpy(psu_info->model, &data[index], sizeof(psu_info->model)); + } else if (fan_info) { + strncpy(fan_info->model, &data[index], sizeof(fan_info->model)); + } + + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.h new file mode 100644 index 00000000..68242891 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.h @@ -0,0 +1,56 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#ifndef __PLATFORM_LIB_H__ +#define __PLATFORM_LIB_H__ + +#include +#include +#include "x86_64_mlnx_msn2410_log.h" + +#define CHASSIS_PSU_COUNT 2 +#define CHASSIS_TOTAL_FAN_COUNT 10 +#define CHASSIS_TOTAL_THERMAL_COUNT 8 +#define CHASSIS_FAN_COUNT (CHASSIS_TOTAL_FAN_COUNT - CHASSIS_PSU_COUNT) +#define CHASSIS_THERMAL_COUNT (CHASSIS_TOTAL_THERMAL_COUNT - CHASSIS_PSU_COUNT) + +#define PSU1_ID 1 +#define PSU2_ID 2 + +#define PSU_MODULE_PREFIX "/bsp/module/psu%d_%s" +#define PSU_POWER_PREFIX "/bsp/power/psu%d_%s" +#define IDPROM_PATH "/bsp/eeprom/%s%d_info" + +typedef enum psu_type { + PSU_TYPE_UNKNOWN, + PSU_TYPE_AC_F2B, + PSU_TYPE_AC_B2F +} psu_type_t; + +psu_type_t get_psu_type(int id, char* modelname, int modelname_len); + +int psu_read_eeprom(int psu_index, onlp_psu_info_t* psu_info, + onlp_fan_info_t* fan_info); + +#endif /* __PLATFORM_LIB_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c new file mode 100644 index 00000000..610b034e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c @@ -0,0 +1,202 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define PSU_STATUS_PRESENT 1 +#define PSU_CABLE_PRESENT 1 + +#define PSU_NODE_MAX_INT_LEN 8 +#define PSU_NODE_MAX_PATH_LEN 64 + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_PSU(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +static int +psu_module_info_get(int id, char *node, int *value) +{ + int len, ret = 0; + char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; + char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; + + *value = 0; + + sprintf(node_path, PSU_MODULE_PREFIX, id, node); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + if (ret == 0) { + *value = atoi(buf); + } + + return ret; +} + +static int +psu_power_info_get(int id, char *node, int *value) +{ + int len, ret = 0; + char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; + char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; + + *value = 0; + + sprintf(node_path, PSU_POWER_PREFIX, id, node); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + if (ret == 0) { + *value = atoi(buf); + } + + return ret; +} + +int +onlp_psui_init(void) +{ + return ONLP_STATUS_OK; +} + +static int +_psu_info_get(onlp_psu_info_t* info) +{ + int val = 0; + int index = ONLP_OID_ID_GET(info->hdr.id); + + /* Set capability + */ + info->caps = ONLP_PSU_CAPS_AC; + + if (info->status & ONLP_PSU_STATUS_FAILED) { + return ONLP_STATUS_OK; + } + + /* Set the associated oid_table */ + info->hdr.coids[0] = ONLP_FAN_ID_CREATE(index + CHASSIS_FAN_COUNT); + info->hdr.coids[1] = ONLP_THERMAL_ID_CREATE(index + CHASSIS_THERMAL_COUNT); + + /* Read voltage, current and power */ + if (psu_power_info_get(index, "volt_in", &val) == 0 && + 0 != val) { + info->mvin = val; + info->caps |= ONLP_PSU_CAPS_VIN; + + if (psu_power_info_get(index, "volt", &val) == 0) { + info->mvout = val; + info->caps |= ONLP_PSU_CAPS_VOUT; + } + + if (psu_power_info_get(index, "curr_in", &val) == 0) { + info->miin = val; + info->caps |= ONLP_PSU_CAPS_IIN; + } + + if (psu_power_info_get(index, "curr", &val) == 0) { + info->miout = val; + info->caps |= ONLP_PSU_CAPS_IOUT; + } + + if (psu_power_info_get(index, "power_in", &val) == 0) { + info->mpin = val; + info->caps |= ONLP_PSU_CAPS_PIN; + } + + if (psu_power_info_get(index, "power", &val) == 0) { + info->mpout = val; + info->caps |= ONLP_PSU_CAPS_POUT; + } + } else { + info->status |= ONLP_PSU_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + return psu_read_eeprom(index, info, NULL); +} + +/* + * Get all information about the given PSU oid. 
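+ * Electrical readings come from /bsp/power; for example,
+ * psu_power_info_get(1, "volt_in", &val) reads /bsp/power/psu1_volt_in and,
+ * when the value is non-zero, stores it in info->mvin and adds
+ * ONLP_PSU_CAPS_VIN to the capability mask.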
+ */ +static onlp_psu_info_t pinfo[] = +{ + { }, /* Not used */ + { + { ONLP_PSU_ID_CREATE(PSU1_ID), "PSU-1", 0 }, + }, + { + { ONLP_PSU_ID_CREATE(PSU2_ID), "PSU-2", 0 }, + } +}; + +int +onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) +{ + int val = 0; + int ret = ONLP_STATUS_OK; + int index = ONLP_OID_ID_GET(id); + + VALIDATE(id); + + memset(info, 0, sizeof(onlp_psu_info_t)); + *info = pinfo[index]; /* Set the onlp_oid_hdr_t */ + + /* Get the present state */ + if (psu_module_info_get(index, "status", &val) != 0) { + AIM_LOG_ERROR("Unable to read PSU(%d) node(psu_present)\r\n", index); + } + + if (val != PSU_STATUS_PRESENT) { + info->status &= ~ONLP_PSU_STATUS_PRESENT; + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } + + /* Get the cable preset state */ + if (psu_module_info_get(index, "pwr_status", &val) != 0) { + AIM_LOG_ERROR("Unable to read PSU(%d) node(cable_present)\r\n", index); + } + + if (val != PSU_CABLE_PRESENT) { + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } + + info->status |= ONLP_PSU_STATUS_PRESENT; + + ret = _psu_info_get(info); + + return ret; +} + +int +onlp_psui_ioctl(onlp_oid_t pid, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sfpi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sfpi.c new file mode 100644 index 00000000..7ee8262a --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sfpi.c @@ -0,0 +1,200 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include + +#include /* For O_RDWR && open */ +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define MAX_SFP_PATH 64 +#define SFP_SYSFS_VALUE_LEN 20 +static char sfp_node_path[MAX_SFP_PATH] = {0}; +#define NUM_OF_SFP_PORT 56 +#define SFP_PRESENT_STATUS "good" +#define SFP_NOT_PRESENT_STATUS "not_connected" + +static int +msn2410_sfp_node_read_int(char *node_path, int *value) +{ + int data_len = 0, ret = 0; + char buf[SFP_SYSFS_VALUE_LEN] = {0}; + *value = -1; + + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &data_len, node_path); + + if (ret == 0) { + if (!strncmp(buf, SFP_PRESENT_STATUS, strlen(SFP_PRESENT_STATUS))) { + *value = 1; + } else if (!strncmp(buf, SFP_NOT_PRESENT_STATUS, strlen(SFP_NOT_PRESENT_STATUS))) { + *value = 0; + } + } + + return ret; +} + +static char* +msn2410_sfp_get_port_path(int port, char *node_name) +{ + sprintf(sfp_node_path, "/bsp/qsfp/qsfp%d%s", port, node_name); + return sfp_node_path; +} + +/************************************************************ + * + * SFPI Entry Points + * + ***********************************************************/ +int +onlp_sfpi_init(void) +{ + /* Called at initialization time */ + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) +{ + /* + * Ports {1, 32} + */ + int p = 1; + AIM_BITMAP_CLR_ALL(bmap); + + for (; p <= NUM_OF_SFP_PORT; p++) { + AIM_BITMAP_SET(bmap, p); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_is_present(int port) +{ + /* + * Return 1 if present. + * Return 0 if not present. + * Return < 0 if error. + */ + int present = -1; + char* path = msn2410_sfp_get_port_path(port, "_status"); + + if (msn2410_sfp_node_read_int(path, &present) != 0) { + AIM_LOG_ERROR("Unable to read present status from port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + return present; +} + +int +onlp_sfpi_presence_bitmap_get(onlp_sfp_bitmap_t* dst) +{ + int ii = 1; + int rc = 0; + + for (;ii <= NUM_OF_SFP_PORT; ii++) { + rc = onlp_sfpi_is_present(ii); + AIM_BITMAP_MOD(dst, ii, (1 == rc) ? 1 : 0); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_eeprom_read(int port, uint8_t data[256]) +{ + char* path = msn2410_sfp_get_port_path(port, ""); + + /* + * Read the SFP eeprom into data[] + * + * Return MISSING if SFP is missing. 
+ * Return OK if eeprom is read + */ + memset(data, 0, 256); + + if (onlplib_sfp_eeprom_read_file(path, data) != 0) { + AIM_LOG_ERROR("Unable to read eeprom from port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_supported(int port, onlp_sfp_control_t control, int* rv) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_get(int port, onlp_sfp_control_t control, int* value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_denit(void) +{ + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c new file mode 100644 index 00000000..5ad5efd1 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c @@ -0,0 +1,266 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" +#include "x86_64_mlnx_msn2410_int.h" +#include "x86_64_mlnx_msn2410_log.h" + +#define NUM_OF_THERMAL_ON_MAIN_BROAD CHASSIS_THERMAL_COUNT +#define NUM_OF_FAN_ON_MAIN_BROAD CHASSIS_FAN_COUNT +#define NUM_OF_PSU_ON_MAIN_BROAD 2 +#define NUM_OF_LED_ON_MAIN_BROAD 6 + +#define COMMAND_OUTPUT_BUFFER 256 + +#define PREFIX_PATH_ON_CPLD_DEV "/bsp/cpld" +#define NUM_OF_CPLD 3 +static char arr_cplddev_name[NUM_OF_CPLD][30] = +{ + "cpld_brd_version", + "cpld_mgmt_version", + "cpld_port_version" +}; + +static void +_onlp_sysi_execute_command(char *command, char buffer[COMMAND_OUTPUT_BUFFER]) +{ + FILE *fp = NULL; + + /* Open the command for reading. 
*/
+    fp = popen(command, "r");
+    if (NULL == fp) {
+        AIM_LOG_WARN("Failed to run command '%s'\n", command);
+        return;
+    }
+
+    /* Read the output */
+    if (fgets(buffer, COMMAND_OUTPUT_BUFFER-1, fp) == NULL) {
+        AIM_LOG_WARN("Failed to read output of command '%s'\n", command);
+        pclose(fp);
+        return;
+    }
+
+    /* The last symbol is '\n', so remove it */
+    buffer[strnlen(buffer, COMMAND_OUTPUT_BUFFER) - 1] = '\0';
+
+    /* close */
+    pclose(fp);
+}
+
+const char*
+onlp_sysi_platform_get(void)
+{
+    return "x86-64-mlnx-msn2410-r0";
+}
+
+int
+onlp_sysi_platform_info_get(onlp_platform_info_t* pi)
+{
+    int i, v[NUM_OF_CPLD]={0};
+
+    for (i=0; i < NUM_OF_CPLD; i++) {
+        v[i] = 0;
+        if(onlp_file_read_int(v+i, "%s/%s", PREFIX_PATH_ON_CPLD_DEV, arr_cplddev_name[i]) < 0) {
+            return ONLP_STATUS_E_INTERNAL;
+        }
+    }
+    pi->cpld_versions = aim_fstrdup("brd=%d, mgmt=%d, port=%d", v[0], v[1], v[2]);
+
+    return ONLP_STATUS_OK;
+}
+
+void
+onlp_sysi_platform_info_free(onlp_platform_info_t* pi)
+{
+    aim_free(pi->cpld_versions);
+}
+
+
+int
+onlp_sysi_oids_get(onlp_oid_t* table, int max)
+{
+    int i;
+    onlp_oid_t* e = table;
+    memset(table, 0, max*sizeof(onlp_oid_t));
+
+    /* 8 Thermal sensors on the chassis */
+    for (i = 1; i <= NUM_OF_THERMAL_ON_MAIN_BROAD; i++)
+    {
+        *e++ = ONLP_THERMAL_ID_CREATE(i);
+    }
+
+    /* 6 LEDs on the chassis */
+    for (i = 1; i <= NUM_OF_LED_ON_MAIN_BROAD; i++)
+    {
+        *e++ = ONLP_LED_ID_CREATE(i);
+    }
+
+    /* 2 PSUs on the chassis */
+    for (i = 1; i <= NUM_OF_PSU_ON_MAIN_BROAD; i++)
+    {
+        *e++ = ONLP_PSU_ID_CREATE(i);
+    }
+
+    /* 8 Fans on the chassis */
+    for (i = 1; i <= NUM_OF_FAN_ON_MAIN_BROAD; i++)
+    {
+        *e++ = ONLP_FAN_ID_CREATE(i);
+    }
+
+    return 0;
+}
+
+static int
+_onlp_sysi_grep_output(char value[256], const char *attr, const char *tmp_file)
+{
+    int value_offset = 30; /* value offset in onie-syseeprom */
+    char command[256] = {0};
+    char buffer[COMMAND_OUTPUT_BUFFER] = {0};
+    int v = 0;
+
+    snprintf(command, sizeof(command), "cat '%s' | grep '%s'", tmp_file, attr);
+    _onlp_sysi_execute_command(command, buffer);
+
+    /* Reading value from buffer with command output */
+    while (buffer[value_offset] != '\n' &&
+           buffer[value_offset] != '\r' &&
+           buffer[value_offset] != '\0') {
+        value[v] = buffer[value_offset];
+        v++;
+        value_offset++;
+    }
+    value[v] = '\0';
+
+    AIM_LOG_VERBOSE("Value for system attribute '%s' is '%s' \n", attr, value);
+
+    return ONLP_STATUS_OK;
+}
+
+int
+onlp_sysi_onie_info_get(onlp_onie_info_t* onie)
+{
+
+    const char onie_version_file[] = "/bsp/onie-version";
+    const char onie_version_command[] = "onie-shell -c 'onie-sysinfo -v' > /bsp/onie-version";
+    const char onie_syseeprom_file[] = "/bsp/onie-syseeprom";
+    const char onie_syseeprom_command[] = "onie-shell -c onie-syseeprom > /bsp/onie-syseeprom";
+    struct stat stat_buf;
+    char value[256] = {0};
+    char command[256] = {0};
+    int rc = 0;
+    int exit_status;
+
+    /* We must initialize this otherwise crash occurs while free memory */
+    list_init(&onie->vx_list);
+
+    /* Check if cache file exist */
+    rc = stat(onie_syseeprom_file, &stat_buf);
+    if (-1 == rc) {
+        rc = system(onie_syseeprom_command);
+        if (-1 == rc) {
+            return rc;
+        }
+        exit_status = WEXITSTATUS(rc);
+        if (EXIT_SUCCESS != exit_status) {
+            return ONLP_STATUS_E_GENERIC;
+        }
+    }
+
+    rc = _onlp_sysi_grep_output(value, "Product Name", onie_syseeprom_file);
+    if (ONLP_STATUS_OK != rc) {
+        return rc;
+    }
+    onie->product_name = aim_strdup(value);
+    rc = _onlp_sysi_grep_output(value, "Part Number", onie_syseeprom_file);
+    if (ONLP_STATUS_OK != rc) {
+        return rc;
+    }
+    
onie->part_number = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Serial Number", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->serial_number = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Base MAC Address", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + strncpy((char*)onie->mac, value, sizeof(onie->mac)); + rc = _onlp_sysi_grep_output(value, "Manufacture Date", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacture_date = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Device Version", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->device_version = atoi(value); + rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacturer = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacturer = aim_strdup(value); + onie->vendor = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "MAC Addresses", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->mac_range = atoi(value); + /* Check if onie version first run and cache file exist */ + rc = stat(onie_version_file, &stat_buf); + if (-1 == rc) + { + rc = system(onie_version_command); + if (-1 == rc) { + return rc; + } + exit_status = WEXITSTATUS(rc); + if (EXIT_SUCCESS != exit_status) { + return ONLP_STATUS_E_GENERIC; + }} + snprintf(command, sizeof(command), "cat '%s'", onie_version_file); + _onlp_sysi_execute_command(command, value); + /* ONIE version */ + onie->onie_version = aim_strdup(value); + + /* Platform name */ + onie->platform_name = aim_strdup("x86_64-mlnx_msn2410-r0"); + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c new file mode 100644 index 00000000..d2e1a533 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c @@ -0,0 +1,179 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Thermal Sensor Platform Implementation. + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define prefix_path "/bsp/thermal" + +/** CPU thermal_threshold */ +typedef enum cpu_thermal_threshold_e { + CPU_THERMAL_THRESHOLD_WARNING_DEFAULT = 87000, + CPU_THERMAL_THRESHOLD_ERROR_DEFAULT = 100000, + CPU_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT = 105000, +} cpu_thermal_threshold_t; + +/** + * Shortcut for CPU thermal threshold value. 
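+ * (Thresholds are expressed in milli-degrees Celsius, i.e. 87 C warning,
+ * 100 C error and 105 C shutdown for the CPU sensors.)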
+ */ +#define CPU_THERMAL_THRESHOLD_INIT_DEFAULTS \ + { CPU_THERMAL_THRESHOLD_WARNING_DEFAULT, \ + CPU_THERMAL_THRESHOLD_ERROR_DEFAULT, \ + CPU_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT } + +/** Asic thermal_threshold */ +typedef enum asic_thermal_threshold_e { + ASIC_THERMAL_THRESHOLD_WARNING_DEFAULT = 105000, + ASIC_THERMAL_THRESHOLD_ERROR_DEFAULT = 115000, + ASIC_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT = 120000, +} asic_thermal_threshold_t; + +/** + * Shortcut for CPU thermal threshold value. + */ +#define ASIC_THERMAL_THRESHOLD_INIT_DEFAULTS \ + { ASIC_THERMAL_THRESHOLD_WARNING_DEFAULT, \ + ASIC_THERMAL_THRESHOLD_ERROR_DEFAULT, \ + ASIC_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT } + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_THERMAL(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +enum onlp_thermal_id +{ + THERMAL_RESERVED = 0, + THERMAL_CPU_CORE_0, + THERMAL_CPU_CORE_1, + THERMAL_CPU_PACK, + THERMAL_ASIC, + THERMAL_BOAR_AMB, + THERMAL_PORT, + THERMAL_ON_PSU1, + THERMAL_ON_PSU2, +}; + +static char* last_path[] = /* must map with onlp_thermal_id */ +{ + "reserved", + "cpu_core0", + "cpu_core1", + "cpu_pack", + "asic", + "board_amb", + "port_amb", + "psu1", + "psu2" +}; + +/* Static values */ +static onlp_thermal_info_t linfo[] = { + { }, /* Not used */ + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_0), "CPU Core 0", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_1), "CPU Core 1", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_PACK), "CPU Pack", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ASIC), "Asic Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, ASIC_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_BOAR_AMB), "Board AMB Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_PORT), "Port AMB Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ON_PSU1), "PSU-1 Thermal Sensor 1", ONLP_PSU_ID_CREATE(PSU1_ID)}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ON_PSU2), "PSU-2 Thermal Sensor 1", ONLP_PSU_ID_CREATE(PSU2_ID)}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + } +}; + +/* + * This will be called to intiialize the thermali subsystem. + */ +int +onlp_thermali_init(void) +{ + return ONLP_STATUS_OK; +} + +/* + * Retrieve the information structure for the given thermal OID. + * + * If the OID is invalid, return ONLP_E_STATUS_INVALID. + * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. + * Otherwise, return ONLP_STATUS_OK with the OID's information. + * + * Note -- it is expected that you fill out the information + * structure even if the sensor described by the OID is not present. 
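+ *
+ * For example, if "/bsp/thermal/cpu_core0" reads back "45000", the result is
+ * info->mcelsius = 45000 (45 C) for THERMAL_CPU_CORE_0.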
+ */ +int +onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) +{ + int rv, len = 10, temp_base=1, local_id = 0; + char r_data[10] = {0}; + char fullpath[50] = {0}; + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + /* Set the onlp_oid_hdr_t and capabilities */ + *info = linfo[local_id]; + + /* get fullpath */ + snprintf(fullpath, sizeof(fullpath), "%s/%s", prefix_path, last_path[local_id]); + + rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + info->mcelsius = atoi(r_data) / temp_base; + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_config.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_config.c new file mode 100644 index 00000000..1bd4719c --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_config.c @@ -0,0 +1,81 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* */ +#define __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(_x) #_x +#define __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(_x) __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(_x) +x86_64_mlnx_msn2410_config_settings_t x86_64_mlnx_msn2410_config_settings[] = +{ +#ifdef x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_INCLUDE_LOGGING(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_PORTING_STDLIB(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), 
__x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + { __x86_64_mlnx_msn2410_config_STRINGIFY_NAME(x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION), __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE(x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION) }, +#else +{ x86_64_mlnx_msn2410_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION(__x86_64_mlnx_msn2410_config_STRINGIFY_NAME), "__undefined__" }, +#endif + { NULL, NULL } +}; +#undef __x86_64_mlnx_msn2410_config_STRINGIFY_VALUE +#undef __x86_64_mlnx_msn2410_config_STRINGIFY_NAME + +const char* +x86_64_mlnx_msn2410_config_lookup(const char* setting) +{ + int i; + for(i = 0; x86_64_mlnx_msn2410_config_settings[i].name; i++) { + if(strcmp(x86_64_mlnx_msn2410_config_settings[i].name, setting)) { + return x86_64_mlnx_msn2410_config_settings[i].value; + } + } + return NULL; +} + +int +x86_64_mlnx_msn2410_config_show(struct aim_pvs_s* pvs) +{ + int i; + for(i = 0; x86_64_mlnx_msn2410_config_settings[i].name; i++) { + aim_printf(pvs, "%s = %s\n", x86_64_mlnx_msn2410_config_settings[i].name, x86_64_mlnx_msn2410_config_settings[i].value); + } + return i; +} + +/* */ + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_enums.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_enums.c new file mode 100644 index 00000000..0d5a7094 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_enums.c @@ -0,0 +1,10 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.enum(ALL).source> */ +/* */ + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_int.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_int.h new file mode 100644 index 00000000..0c4cbfb1 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_int.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * x86_64_mlnx_msn2410 Internal Header + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2410_INT_H__ +#define __x86_64_mlnx_msn2410_INT_H__ + +#include + + +#endif /* __x86_64_mlnx_msn2410_INT_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.c new file mode 100644 index 00000000..613aba85 --- /dev/null +++ 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.c @@ -0,0 +1,18 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_mlnx_msn2410_log.h" +/* + * x86_64_mlnx_msn2410 log struct. + */ +AIM_LOG_STRUCT_DEFINE( + x86_64_mlnx_msn2410_CONFIG_LOG_OPTIONS_DEFAULT, + x86_64_mlnx_msn2410_CONFIG_LOG_BITS_DEFAULT, + NULL, /* Custom log map */ + x86_64_mlnx_msn2410_CONFIG_LOG_CUSTOM_BITS_DEFAULT + ); + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.h new file mode 100644 index 00000000..dd691aa5 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_log.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2410_LOG_H__ +#define __x86_64_mlnx_msn2410_LOG_H__ + +#define AIM_LOG_MODULE_NAME x86_64_mlnx_msn2410 +#include + +#endif /* __x86_64_mlnx_msn2410_LOG_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_module.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_module.c new file mode 100644 index 00000000..27ee0afc --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_module.c @@ -0,0 +1,24 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_mlnx_msn2410_log.h" + +static int +datatypes_init__(void) +{ +#define x86_64_mlnx_msn2410_ENUMERATION_ENTRY(_enum_name, _desc) AIM_DATATYPE_MAP_REGISTER(_enum_name, _enum_name##_map, _desc, AIM_LOG_INTERNAL); +#include + return 0; +} + +void __x86_64_mlnx_msn2410_module_init__(void) +{ + AIM_LOG_STRUCT_REGISTER(); + datatypes_init__(); +} + +int __onlp_platform_version__ = 1; diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_ucli.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_ucli.c new file mode 100644 index 00000000..9b39988a --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/x86_64_mlnx_msn2410_ucli.c @@ -0,0 +1,50 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#if x86_64_mlnx_msn2410_CONFIG_INCLUDE_UCLI == 1 + +#include +#include +#include + +static ucli_status_t +x86_64_mlnx_msn2410_ucli_ucli__config__(ucli_context_t* uc) +{ + UCLI_HANDLER_MACRO_MODULE_CONFIG(x86_64_mlnx_msn2410) +} + +/* */ +/* */ + +static ucli_module_t +x86_64_mlnx_msn2410_ucli_module__ = + { + "x86_64_mlnx_msn2410_ucli", + NULL, + x86_64_mlnx_msn2410_ucli_ucli_handlers__, + NULL, + NULL, + }; + +ucli_node_t* +x86_64_mlnx_msn2410_ucli_node_create(void) +{ + ucli_node_t* n; + 
ucli_module_init(&x86_64_mlnx_msn2410_ucli_module__); + n = ucli_node_create("x86_64_mlnx_msn2410", NULL, &x86_64_mlnx_msn2410_ucli_module__); + ucli_node_subnode_add(n, ucli_module_log_node_create("x86_64_mlnx_msn2410")); + return n; +} + +#else +void* +x86_64_mlnx_msn2410_ucli_node_create(void) +{ + return NULL; +} +#endif + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/x86_64_mlnx_msn2410.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/x86_64_mlnx_msn2410.mk new file mode 100644 index 00000000..c72d417f --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/x86_64_mlnx_msn2410.mk @@ -0,0 +1,13 @@ + +############################################################################### +# +# Inclusive Makefile for the x86_64_mlnx_msn2410 module. +# +# Autogenerated 2015-12-23 23:45:56.754200 +# +############################################################################### +x86_64_mlnx_msn2410_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +include $(x86_64_mlnx_msn2410_BASEDIR)/module/make.mk +include $(x86_64_mlnx_msn2410_BASEDIR)/module/auto/make.mk +include $(x86_64_mlnx_msn2410_BASEDIR)/module/src/make.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/PKG.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/PKG.yml new file mode 100644 index 00000000..49e15204 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=mellanox PLATFORM=x86-64-mlnx-msn2410-r0 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml new file mode 100644 index 00000000..11fc5728 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml @@ -0,0 +1,35 @@ +--- + +###################################################################### +# +# platform-config for Mellanox 2410 +# +###################################################################### + +x86-64-mlnx-msn2410-r0: + + grub: + + serial: >- + --unit=0 + --speed=115200 + --word=8 + --parity=0 + --stop=1 + + kernel: + <<: *kernel-3-16 + + args: >- + nopat + console=ttyS0,115200n8 + rd_NO_MD + rd_NO_LUKS + acpi_enforce_resources=lax + acpi=noirq + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:14.0 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py new file mode 100644 index 00000000..5fea9592 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py @@ -0,0 +1,17 @@ +from onl.platform.base import * +from onl.platform.mellanox import * + +class OnlPlatform_x86_64_mlnx_msn2410_r0(OnlPlatformMellanox, + OnlPlatformPortConfig_32x100): + PLATFORM='x86-64-mlnx-msn2410-r0' + MODEL="SN2410" + SYS_OBJECT_ID=".2410.1" + + def baseconfig(self): + # load modules + import os + # necessary if there are issues with the install + # os.system("/usr/bin/apt-get install") + os.system("/etc/mlnx/mlnx-hw-management start") + + return True diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/PKG.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/PKG.yml new file mode 100644 index 00000000..2b5bad2b --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=x86-64-mlnx-msn2700 ARCH=amd64 TOOLCHAIN=x86_64-linux-gnu diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/Makefile new file mode 100644 index 00000000..e7437cb2 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/Makefile @@ -0,0 +1,2 @@ +FILTER=src +include $(ONL)/make/subdirs.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/Makefile new file mode 100644 index 00000000..023532c4 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +MODULE := libonlp-x86-64-mlnx-msn2700 +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF x86_64_mlnx_msn2700 onlplib +DEPENDMODULE_HEADERS := sff + +include $(BUILDER)/dependmodules.mk + +SHAREDLIB := libonlp-x86-64-mlnx-msn2700.so +$(SHAREDLIB)_TARGETS := $(ALL_TARGETS) +include $(BUILDER)/so.mk +.DEFAULT_GOAL := $(SHAREDLIB) + +GLOBAL_CFLAGS += -I$(onlp_BASEDIR)/module/inc +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -fPIC +GLOBAL_LINK_LIBS += -lpthread + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700-r0.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700-r0.mk new file mode 100644 index 00000000..93b58313 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700-r0.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the libonlp-x86-64-mlnx-msn2700-r0 module. +# +# Autogenerated 2015-12-23 23:45:22.249911 +# +############################################################################### +libonlp-x86-64-mlnx-msn2700-r0_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700.mk new file mode 100644 index 00000000..c43ce7ea --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/libonlp-x86-64-mlnx-msn2700.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the libonlp-x86-64-mlnx-msn2700 module. +# +# Autogenerated 2016-10-13 22:58:39.095824 +# +############################################################################### +libonlp-x86-64-mlnx-msn2700_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/x86_64_mlnx_msn2700.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/x86_64_mlnx_msn2700.mk new file mode 100644 index 00000000..9d7abd9c --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/lib/x86_64_mlnx_msn2700.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the x86_64_mlnx_msn2700 module. +# +# Autogenerated 2015-12-23 23:45:22.262891 +# +############################################################################### +x86_64_mlnx_msn2700_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/Makefile new file mode 100644 index 00000000..7087c1be --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/Makefile @@ -0,0 +1,46 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. 
+# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +.DEFAULT_GOAL := onlpdump + +MODULE := onlpdump +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF onlp x86_64_mlnx_msn2700 onlplib onlp_platform_defaults sff cjson cjson_util timer_wheel OS + +include $(BUILDER)/dependmodules.mk + +BINARY := onlpdump +$(BINARY)_LIBRARIES := $(LIBRARY_TARGETS) +include $(BUILDER)/bin.mk + +GLOBAL_CFLAGS += -DAIM_CONFIG_AIM_MAIN_FUNCTION=onlpdump_main +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MAIN=1 +GLOBAL_LINK_LIBS += -lpthread -lm + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/onlpdump.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/onlpdump.mk new file mode 100644 index 00000000..77d7d005 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/onlpdump/onlpdump.mk @@ -0,0 +1,10 @@ + +############################################################################### +# +# Inclusive Makefile for the onlpdump module. 
+# +# Autogenerated 2016-10-13 22:58:37.393320 +# +############################################################################### +onlpdump_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/.module b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/.module new file mode 100644 index 00000000..17e81a31 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/.module @@ -0,0 +1 @@ +name: x86_64_mlnx_msn2700 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/Makefile new file mode 100644 index 00000000..c493c185 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### +include ../../init.mk +MODULE := x86_64_mlnx_msn2700 +AUTOMODULE := x86_64_mlnx_msn2700 +include $(BUILDER)/definemodule.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/README b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/README new file mode 100644 index 00000000..ab63cc6f --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/README @@ -0,0 +1,6 @@ +############################################################################### +# +# x86_64_mlnx_msn2700 README +# +############################################################################### + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/make.mk new file mode 100644 index 00000000..42443ccb --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# x86_64_mlnx_msn2700 Autogeneration +# +############################################################################### +x86_64_mlnx_msn2700_AUTO_DEFS := module/auto/x86_64_mlnx_msn2700.yml +x86_64_mlnx_msn2700_AUTO_DIRS := module/inc/x86_64_mlnx_msn2700 module/src +include $(BUILDER)/auto.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/x86_64_mlnx_msn2700.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/x86_64_mlnx_msn2700.yml new file mode 100644 index 00000000..6c492053 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/auto/x86_64_mlnx_msn2700.yml @@ -0,0 +1,50 @@ +############################################################################### +# +# x86_64_mlnx_msn2700 Autogeneration Definitions. +# +############################################################################### + +cdefs: &cdefs +- X86_64_MLNX_MSN2700_CONFIG_INCLUDE_LOGGING: + doc: "Include or exclude logging." + default: 1 +- X86_64_MLNX_MSN2700_CONFIG_LOG_OPTIONS_DEFAULT: + doc: "Default enabled log options." + default: AIM_LOG_OPTIONS_DEFAULT +- X86_64_MLNX_MSN2700_CONFIG_LOG_BITS_DEFAULT: + doc: "Default enabled log bits." 
+ default: AIM_LOG_BITS_DEFAULT +- X86_64_MLNX_MSN2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT: + doc: "Default enabled custom log bits." + default: 0 +- X86_64_MLNX_MSN2700_CONFIG_PORTING_STDLIB: + doc: "Default all porting macros to use the C standard libraries." + default: 1 +- X86_64_MLNX_MSN2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS: + doc: "Include standard library headers for stdlib porting macros." + default: x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB +- X86_64_MLNX_MSN2700_CONFIG_INCLUDE_UCLI: + doc: "Include generic uCli support." + default: 0 +- X86_64_MLNX_MSN2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION: + doc: "Assume chassis fan direction is the same as the PSU fan direction." + default: 0 + + +definitions: + cdefs: + X86_64_MLNX_MSN2700_CONFIG_HEADER: + defs: *cdefs + basename: x86_64_mlnx_msn2700_config + + portingmacro: + x86_64_mlnx_msn2700: + macros: + - malloc + - free + - memset + - memcpy + - strncpy + - vsnprintf + - snprintf + - strlen diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700.x b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700.x new file mode 100644 index 00000000..311e5a3e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700.x @@ -0,0 +1,14 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.xmacro(ALL).define> */ +/* */ + +/* <--auto.start.xenum(ALL).define> */ +/* */ + + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_config.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_config.h new file mode 100644 index 00000000..407b34cd --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_config.h @@ -0,0 +1,137 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_mlnx_msn2700 Configuration Header + * + * @addtogroup x86_64_mlnx_msn2700-config + * @{ + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2700_CONFIG_H__ +#define __x86_64_mlnx_msn2700_CONFIG_H__ + +#ifdef GLOBAL_INCLUDE_CUSTOM_CONFIG +#include +#endif +#ifdef x86_64_mlnx_msn2700_INCLUDE_CUSTOM_CONFIG +#include +#endif + +/* */ +#include +/** + * x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING + * + * Include or exclude logging. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING +#define x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING 1 +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT + * + * Default enabled log options. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT +#define x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT AIM_LOG_OPTIONS_DEFAULT +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT + * + * Default enabled log bits. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT +#define x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT AIM_LOG_BITS_DEFAULT +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT + * + * Default enabled custom log bits. 
*/ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT +#define x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT 0 +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB + * + * Default all porting macros to use the C standard libraries. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB +#define x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB 1 +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + * + * Include standard library headers for stdlib porting macros. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS +#define x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI + * + * Include generic uCli support. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI +#define x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI 0 +#endif + +/** + * x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + * + * Assume chassis fan direction is the same as the PSU fan direction. */ + + +#ifndef x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION +#define x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION 0 +#endif + + + +/** + * All compile time options can be queried or displayed + */ + +/** Configuration settings structure. */ +typedef struct x86_64_mlnx_msn2700_config_settings_s { + /** name */ + const char* name; + /** value */ + const char* value; +} x86_64_mlnx_msn2700_config_settings_t; + +/** Configuration settings table. */ +/** x86_64_mlnx_msn2700_config_settings table. */ +extern x86_64_mlnx_msn2700_config_settings_t x86_64_mlnx_msn2700_config_settings[]; + +/** + * @brief Lookup a configuration setting. + * @param setting The name of the configuration option to lookup. + */ +const char* x86_64_mlnx_msn2700_config_lookup(const char* setting); + +/** + * @brief Show the compile-time configuration. + * @param pvs The output stream. + */ +int x86_64_mlnx_msn2700_config_show(struct aim_pvs_s* pvs); + +/* */ + +#include "x86_64_mlnx_msn2700_porting.h" + +#endif /* __x86_64_mlnx_msn2700_CONFIG_H__ */ +/* @} */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_dox.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_dox.h new file mode 100644 index 00000000..ff0f08c1 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_dox.h @@ -0,0 +1,26 @@ +/**************************************************************************//** + * + * x86_64_mlnx_msn2700 Doxygen Header + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2700_DOX_H__ +#define __x86_64_mlnx_msn2700_DOX_H__ + +/** + * @defgroup x86_64_mlnx_msn2700 x86_64_mlnx_msn2700 - x86_64_mlnx_msn2700 Description + * + +The documentation overview for this module should go here. 
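The settings table declared here follows the same generated pattern as the msn2410 config source earlier in this series, pairing each option name with its stringified value so compile-time options can be inspected at run time. A short usage sketch; the aim_pvs_stdout stream is assumed from AIM's usual conventions and is not part of this patch:

    #include <AIM/aim.h>
    #include <AIM/aim_pvs.h>              /* assumed header paths */

    /* Print all compile-time settings, then query a single one by name. */
    void dump_msn2700_config(void)
    {
        const char* v;
        x86_64_mlnx_msn2700_config_show(&aim_pvs_stdout);
        v = x86_64_mlnx_msn2700_config_lookup("x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI");
        if (v) {
            aim_printf(&aim_pvs_stdout, "uCli support: %s\n", v);
        }
    }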
+ + * + * @{ + * + * @defgroup x86_64_mlnx_msn2700-x86_64_mlnx_msn2700 Public Interface + * @defgroup x86_64_mlnx_msn2700-config Compile Time Configuration + * @defgroup x86_64_mlnx_msn2700-porting Porting Macros + * + * @} + * + */ + +#endif /* __x86_64_mlnx_msn2700_DOX_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_porting.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_porting.h new file mode 100644 index 00000000..e295b799 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/inc/x86_64_mlnx_msn2700/x86_64_mlnx_msn2700_porting.h @@ -0,0 +1,107 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_mlnx_msn2700 Porting Macros. + * + * @addtogroup x86_64_mlnx_msn2700-porting + * @{ + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2700_PORTING_H__ +#define __x86_64_mlnx_msn2700_PORTING_H__ + + +/* */ +#if x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS == 1 +#include +#include +#include +#include +#include +#endif + +#ifndef x86_64_mlnx_msn2700_MALLOC + #if defined(GLOBAL_MALLOC) + #define x86_64_mlnx_msn2700_MALLOC GLOBAL_MALLOC + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_MALLOC malloc + #else + #error The macro x86_64_mlnx_msn2700_MALLOC is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2700_FREE + #if defined(GLOBAL_FREE) + #define x86_64_mlnx_msn2700_FREE GLOBAL_FREE + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_FREE free + #else + #error The macro x86_64_mlnx_msn2700_FREE is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2700_MEMSET + #if defined(GLOBAL_MEMSET) + #define x86_64_mlnx_msn2700_MEMSET GLOBAL_MEMSET + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_MEMSET memset + #else + #error The macro x86_64_mlnx_msn2700_MEMSET is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2700_MEMCPY + #if defined(GLOBAL_MEMCPY) + #define x86_64_mlnx_msn2700_MEMCPY GLOBAL_MEMCPY + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_MEMCPY memcpy + #else + #error The macro x86_64_mlnx_msn2700_MEMCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2700_STRNCPY + #if defined(GLOBAL_STRNCPY) + #define x86_64_mlnx_msn2700_STRNCPY GLOBAL_STRNCPY + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_STRNCPY strncpy + #else + #error The macro x86_64_mlnx_msn2700_STRNCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2700_VSNPRINTF + #if defined(GLOBAL_VSNPRINTF) + #define x86_64_mlnx_msn2700_VSNPRINTF GLOBAL_VSNPRINTF + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_VSNPRINTF vsnprintf + #else + #error The macro x86_64_mlnx_msn2700_VSNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_mlnx_msn2700_SNPRINTF + #if defined(GLOBAL_SNPRINTF) + #define x86_64_mlnx_msn2700_SNPRINTF GLOBAL_SNPRINTF + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_SNPRINTF snprintf + #else + #error The macro x86_64_mlnx_msn2700_SNPRINTF is required but cannot be defined. 
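Each porting macro in this header resolves in a fixed priority order: an explicit project definition, then a GLOBAL_* override, then the C standard library when x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB is 1, and otherwise a hard #error. A sketch of routing allocation through the GLOBAL_ hooks; the my_pool_* functions are hypothetical:

    /* Placed in a custom config header pulled in via GLOBAL_INCLUDE_CUSTOM_CONFIG
     * (see the top of x86_64_mlnx_msn2700_config.h). */
    #include <stddef.h>

    void* my_pool_alloc(size_t n);        /* hypothetical allocator */
    void  my_pool_free(void* p);

    #define GLOBAL_MALLOC my_pool_alloc
    #define GLOBAL_FREE   my_pool_free
    /* With these defined, x86_64_mlnx_msn2700_MALLOC/FREE expand to the pool
     * functions; without them the module falls back to malloc/free. */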
+ #endif +#endif + +#ifndef x86_64_mlnx_msn2700_STRLEN + #if defined(GLOBAL_STRLEN) + #define x86_64_mlnx_msn2700_STRLEN GLOBAL_STRLEN + #elif x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB == 1 + #define x86_64_mlnx_msn2700_STRLEN strlen + #else + #error The macro x86_64_mlnx_msn2700_STRLEN is required but cannot be defined. + #endif +#endif + +/* */ + + +#endif /* __x86_64_mlnx_msn2700_PORTING_H__ */ +/* @} */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/make.mk new file mode 100644 index 00000000..2bb27ef3 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/make.mk @@ -0,0 +1,10 @@ +############################################################################### +# +# +# +############################################################################### +THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +x86_64_mlnx_msn2700_INCLUDES := -I $(THIS_DIR)inc +x86_64_mlnx_msn2700_INTERNAL_INCLUDES := -I $(THIS_DIR)src +x86_64_mlnx_msn2700_DEPENDMODULE_ENTRIES := init:x86_64_mlnx_msn2700 ucli:x86_64_mlnx_msn2700 + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/Makefile new file mode 100644 index 00000000..b92cf7ab --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# Local source generation targets. +# +############################################################################### + +ucli: + @../../../../tools/uclihandlers.py x86_64_mlnx_msn2700_ucli.c + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c new file mode 100644 index 00000000..dfcb39c9 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c @@ -0,0 +1,544 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Fan Platform Implementation Defaults. 
+ * + ***********************************************************/ +#include +#include +#include +#include +#include "platform_lib.h" + +#define PREFIX_PATH "/bsp/fan/" +#define PREFIX_MODULE_PATH "/bsp/module/" + +#define FAN_STATUS_OK 1 + +#define PERCENTAGE_MIN 60.0 +#define PERCENTAGE_MAX 100.0 +#define RPM_MAGIC_MIN 153.0 +#define RPM_MAGIC_MAX 255.0 + +#define PSU_FAN_RPM_MIN 11700.0 +#define PSU_FAN_RPM_MAX 19500.0 + +#define PROJECT_NAME +#define LEN_FILE_NAME 80 + +#define FAN_RESERVED 0 +#define FAN_1_ON_MAIN_BOARD 1 +#define FAN_2_ON_MAIN_BOARD 2 +#define FAN_3_ON_MAIN_BOARD 3 +#define FAN_4_ON_MAIN_BOARD 4 +#define FAN_5_ON_MAIN_BOARD 5 +#define FAN_6_ON_MAIN_BOARD 6 +#define FAN_7_ON_MAIN_BOARD 7 +#define FAN_8_ON_MAIN_BOARD 8 +#define FAN_1_ON_PSU1 9 +#define FAN_1_ON_PSU2 10 + +static int min_fan_speed[CHASSIS_FAN_COUNT+1] = {0}; +static int max_fan_speed[CHASSIS_FAN_COUNT+1] = {0}; + +typedef struct fan_path_S +{ + char status[LEN_FILE_NAME]; + char r_speed_get[LEN_FILE_NAME]; + char r_speed_set[LEN_FILE_NAME]; + char min[LEN_FILE_NAME]; + char max[LEN_FILE_NAME]; +}fan_path_T; + +#define _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) \ + { #prj"fan"#id"_status", \ + #prj"fan"#id"_speed_get", \ + #prj"fan"#id"_speed_set", \ + #prj"fan"#id"_min", \ + #prj"fan"#id"_max" } + +#define MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) + +#define MAKE_FAN_PATH_ON_PSU(psu_id, fan_id) \ + {"psu"#psu_id"_status", \ + "psu"#psu_id"_fan"#fan_id"_speed_get", "", "", "",} + +static fan_path_T fan_path[] = /* must map with onlp_fan_id */ +{ + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_RESERVED), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_1_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_2_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_3_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_4_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_5_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_6_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_7_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_8_ON_MAIN_BOARD), + MAKE_FAN_PATH_ON_PSU(1 ,1), + MAKE_FAN_PATH_ON_PSU(2, 1) +}; + +#define MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(id) \ + { \ + { ONLP_FAN_ID_CREATE(FAN_##id##_ON_MAIN_BOARD), "Chassis Fan "#id, 0 }, \ + 0x0, \ + (ONLP_FAN_CAPS_SET_PERCENTAGE | ONLP_FAN_CAPS_GET_PERCENTAGE | \ + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_SET_RPM), \ + 0, \ + 0, \ + ONLP_FAN_MODE_INVALID, \ + } + +#define MAKE_FAN_INFO_NODE_ON_PSU(psu_id, fan_id) \ + { \ + { ONLP_FAN_ID_CREATE(FAN_##fan_id##_ON_PSU##psu_id), "Chassis PSU-"#psu_id" Fan "#fan_id, 0 }, \ + 0x0, \ + (ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE), \ + 0, \ + 0, \ + ONLP_FAN_MODE_INVALID, \ + } + +/* Static fan information */ +onlp_fan_info_t linfo[] = { + { }, /* Not used */ + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(1), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(2), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(3), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(4), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(5), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(6), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(7), + MAKE_FAN_INFO_NODE_ON_MAIN_BOARD(8), + MAKE_FAN_INFO_NODE_ON_PSU(1,1), + MAKE_FAN_INFO_NODE_ON_PSU(2,1) +}; + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_FAN(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +#define OPEN_READ_FILE(fullpath, data, nbytes, len) \ + if (onlp_file_read((uint8_t*)data, nbytes, &len, fullpath) < 0) \ + return 
ONLP_STATUS_E_INTERNAL; \ + else \ + AIM_LOG_VERBOSE("read data: %s\n", r_data); \ + + +static int +_onlp_fani_read_fan_eeprom(int local_id, onlp_fan_info_t* info) +{ + const char sanity_checker[] = "MLNX"; + const uint8_t sanity_offset = 8; + const uint8_t sanity_len = 4; + const uint8_t block1_start = 12; + const uint8_t block1_type = 1; + const uint8_t block2_start = 14; + const uint8_t block2_type = 5; + const uint8_t serial_offset = 8; + const uint8_t serial_len = 24; + const uint8_t part_len = 20; + const uint8_t fan_offset = 14; + const uint8_t multiplier = 16; + uint8_t data[256] = {0}; + uint8_t offset = 0; + uint8_t temp = 0; + int rv = 0; + int len = 0; + char path[LEN_FILE_NAME] = {0}; + + /* We have 4 FRU with 2 fans(total 8 fans). + Eeprom is per FRU but not per fan. + So, need to convert fan ID to FRU ID.*/ + if (local_id % 2) { + local_id = local_id / 2 + 1; + } else { + local_id /= 2; + } + + /* Reading FRU eeprom. */ + snprintf(path, sizeof(path), IDPROM_PATH, "fan", local_id); + rv = onlp_file_read(data, sizeof(data), &len, path); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Sanity checker */ + if (strncmp(sanity_checker, (char*)&data[sanity_offset], sanity_len)) { + return ONLP_STATUS_E_INVALID; + } + + /* Checking eeprom block type with S/N and P/N */ + if (data[block1_start + 1] != block1_type) { + return ONLP_STATUS_E_INVALID; + } + + /* Reading serial number */ + offset = data[block1_start] * multiplier + serial_offset; + strncpy(info->serial, (char *)&data[offset], serial_len); + + /* Reading part number */ + offset += serial_len; + strncpy(info->model, (char *)&data[offset], part_len); + + /* Reading fan direction */ + if (data[block2_start + 1] != block2_type) { + return ONLP_STATUS_E_INVALID; + } + offset = data[block2_start] * multiplier + fan_offset; + temp = data[offset]; + switch (temp) { + case 1: + info->caps |= ONLP_FAN_CAPS_F2B; + break; + case 2: + info->caps |= ONLP_FAN_CAPS_B2F; + break; + default: + break; + } + + return ONLP_STATUS_OK; +} + +static int +_onlp_fani_info_get_fan(int local_id, onlp_fan_info_t* info) +{ + int len = 0, nbytes = 10; + float range = 0; + float temp = 0; + float fru_index = 0; + char r_data[10] = {0}; + char fullpath[65] = {0}; + + /* We have 4 FRU with 2 fans(total 8 fans). + Eeprom is per FRU but not per fan. 
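As a reading aid for the fan_path table defined earlier in this file, the path macros reduce to plain sysfs basenames once PROJECT_NAME (defined empty) drops out:

    /* Expansion of MAKE_FAN_PATH_ON_MAIN_BOARD(PROJECT_NAME, FAN_1_ON_MAIN_BOARD):
     *
     *   { "fan1_status", "fan1_speed_get", "fan1_speed_set", "fan1_min", "fan1_max" }
     *
     * The status entry is read under PREFIX_MODULE_PATH ("/bsp/module/"),
     * indexed by FRU; the speed/min/max entries are read under PREFIX_PATH
     * ("/bsp/fan/"), indexed by fan id.
     */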
+ So, need to convert fan ID to FRU ID.*/ + if (local_id % 2) { + fru_index = local_id / 2 + 1; + } else { + fru_index = local_id / 2; + } + + /* get fan status + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_MODULE_PATH, fan_path[(int)fru_index].status); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + if (atoi(r_data) != FAN_STATUS_OK) { + return ONLP_STATUS_OK; + } + info->status |= ONLP_FAN_STATUS_PRESENT; + + /* get fan speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].r_speed_get); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + info->rpm = atoi(r_data); + + /* check failure */ + if (info->rpm <= 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + if (ONLP_FAN_CAPS_GET_PERCENTAGE & info->caps) { + /* get fan min speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].min); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + min_fan_speed[local_id] = atoi(r_data); + + /* get fan max speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].max); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + max_fan_speed[local_id] = atoi(r_data); + + /* get speed percentage from rpm */ + range = max_fan_speed[local_id] - min_fan_speed[local_id]; + if (range > 0) { + temp = ((float)info->rpm - (float)min_fan_speed[local_id]) / range * 40.0 + 60.0; + if (temp < PERCENTAGE_MIN) { + temp = PERCENTAGE_MIN; + } + info->percentage = (int)temp; + } else { + return ONLP_STATUS_E_INTERNAL; + } + } + + return _onlp_fani_read_fan_eeprom(local_id, info); +} + +static int +_onlp_fani_info_get_fan_on_psu(int local_id, int psu_id, onlp_fan_info_t* info) +{ + int len = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[80] = {0}; + float rpms_per_perc = 0.0; + float temp = 0.0; + + /* get fan status + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_MODULE_PATH, fan_path[local_id].status); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + if (atoi(r_data) != FAN_STATUS_OK) { + return ONLP_STATUS_OK; + } + info->status |= ONLP_FAN_STATUS_PRESENT; + + /* get fan speed + */ + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, fan_path[local_id].r_speed_get); + OPEN_READ_FILE(fullpath, r_data, nbytes, len); + info->rpm = atoi(r_data); + + /* check failure */ + if (info->rpm <= 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + /* get speed percentage from rpm */ + rpms_per_perc = PSU_FAN_RPM_MIN / PERCENTAGE_MIN; + temp = (float)info->rpm / rpms_per_perc; + if (temp < PERCENTAGE_MIN) { + temp = PERCENTAGE_MIN; + } + info->percentage = (int)temp; + + /* Serial number and model for PSU fan is the same as for appropriate PSU */ + if (FAN_1_ON_PSU1 == local_id) { + if (0 != psu_read_eeprom(PSU1_ID, NULL, info)) + return ONLP_STATUS_E_INTERNAL; + } else if (FAN_1_ON_PSU2 == local_id) { + if (0 != psu_read_eeprom(PSU2_ID, NULL, info)) + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function will be called prior to all of onlp_fani_* functions. 
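The percentage math in _onlp_fani_info_get_fan maps the fan's [min, max] RPM window linearly onto the 60-100% range and clamps at the 60% floor. A worked example with hypothetical limits read from the fanN_min/fanN_max files:

    /* Hypothetical: fan4_min = 5000 RPM, fan4_max = 11000 RPM, current rpm = 8000.
     *
     *   range      = 11000 - 5000                       = 6000
     *   percentage = (8000 - 5000) / 6000 * 40.0 + 60.0
     *              = 0.5 * 40.0 + 60.0                  = 80
     *
     * PSU fans use a simpler scale: percentage = rpm / (PSU_FAN_RPM_MIN / 60.0),
     * so 11700 RPM reads back as the 60% floor and 19500 RPM as 100%.
     */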
+ */ +int +onlp_fani_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info) +{ + int rc = 0; + int local_id = 0; + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + *info = linfo[local_id]; + + switch (local_id) + { + case FAN_1_ON_PSU1: + rc = _onlp_fani_info_get_fan_on_psu(local_id, PSU1_ID, info); + break; + case FAN_1_ON_PSU2: + rc = _onlp_fani_info_get_fan_on_psu(local_id, PSU2_ID, info); + break; + case FAN_1_ON_MAIN_BOARD: + case FAN_2_ON_MAIN_BOARD: + case FAN_3_ON_MAIN_BOARD: + case FAN_4_ON_MAIN_BOARD: + case FAN_5_ON_MAIN_BOARD: + case FAN_6_ON_MAIN_BOARD: + case FAN_7_ON_MAIN_BOARD: + case FAN_8_ON_MAIN_BOARD: + rc =_onlp_fani_info_get_fan(local_id, info); + break; + default: + rc = ONLP_STATUS_E_INVALID; + break; + } + + return rc; +} + +/* + * This function sets the speed of the given fan in RPM. + * + * This function will only be called if the fan supprots the RPM_SET + * capability. + * + * It is optional if you have no fans at all with this feature. + */ +int +onlp_fani_rpm_set(onlp_oid_t id, int rpm) +{ + float temp = 0.0; + int rv = 0, local_id = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[LEN_FILE_NAME] = {0}; + onlp_fan_info_t* info = NULL; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + info = &linfo[local_id]; + + if (0 == (ONLP_FAN_CAPS_SET_RPM & info->caps)) { + return ONLP_STATUS_E_UNSUPPORTED; + } + + /* reject rpm=0% (rpm=0%, stop fan) */ + if (0 == rpm) { + return ONLP_STATUS_E_INVALID; + } + + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); + + /* Set fan speed + Converting percent to driver value. + Driver accept value in range between 153 and 255. + Value 153 is minimum rpm. + Value 255 is maximum rpm. + */ + if (local_id > sizeof(min_fan_speed)/sizeof(min_fan_speed[0])) { + return ONLP_STATUS_E_INTERNAL; + } + if (max_fan_speed[local_id] - min_fan_speed[local_id] < 0) { + return ONLP_STATUS_E_INTERNAL; + } + if (rpm < min_fan_speed[local_id] || rpm > max_fan_speed[local_id]) { + return ONLP_STATUS_E_PARAM; + } + + temp = (rpm - min_fan_speed[local_id]) * (RPM_MAGIC_MAX - RPM_MAGIC_MIN) / + (max_fan_speed[local_id] - min_fan_speed[local_id]) + RPM_MAGIC_MIN; + + snprintf(r_data, sizeof(r_data), "%d", (int)temp); + nbytes = strnlen(r_data, sizeof(r_data)); + rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function sets the fan speed of the given OID as a percentage. + * + * This will only be called if the OID has the PERCENTAGE_SET + * capability. + * + * It is optional if you have no fans at all with this feature. + */ +int +onlp_fani_percentage_set(onlp_oid_t id, int p) +{ + float temp = 0.0; + int rv = 0, local_id = 0, nbytes = 10; + char r_data[10] = {0}; + char fullpath[LEN_FILE_NAME] = {0}; + onlp_fan_info_t* info = NULL; + + VALIDATE(id); + local_id = ONLP_OID_ID_GET(id); + info = &linfo[local_id]; + + if (0 == (ONLP_FAN_CAPS_SET_PERCENTAGE & info->caps)) { + return ONLP_STATUS_E_UNSUPPORTED; + } + + /* reject p=0% (p=0%, stop fan) */ + if (0 == p) { + return ONLP_STATUS_E_INVALID; + } + + if (p < PERCENTAGE_MIN || p > PERCENTAGE_MAX) { + return ONLP_STATUS_E_PARAM; + } + + snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); + + /* Set fan speed + Converting percent to driver value. + Driver accept value in range between 153 and 255. + Value 153 is 60%. + Value 255 is 100%. 
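onlp_fani_rpm_set converts the requested speed into the driver's 153-255 register range, and onlp_fani_percentage_set, defined next, applies the same linear map with fixed 60%/100% endpoints. A worked example of the percentage case:

    /* p = 80 (percent):
     *
     *   temp = (80 - 60) * (255 - 153) / (100 - 60) + 153
     *        = 20 * 102 / 40 + 153
     *        = 51 + 153 = 204
     *
     * so the string "204" is written to /bsp/fan/fanN_speed_set; p = 60 writes
     * 153 (the driver minimum) and p = 100 writes 255 (the driver maximum).
     */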
+ */ + temp = (p - PERCENTAGE_MIN) * (RPM_MAGIC_MAX - RPM_MAGIC_MIN) / + (PERCENTAGE_MAX - PERCENTAGE_MIN) + RPM_MAGIC_MIN; + + snprintf(r_data, sizeof(r_data), "%d", (int)temp); + nbytes = strnlen(r_data, sizeof(r_data)); + rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * This function sets the fan speed of the given OID as per + * the predefined ONLP fan speed modes: off, slow, normal, fast, max. + * + * Interpretation of these modes is up to the platform. + * + */ +int +onlp_fani_mode_set(onlp_oid_t id, onlp_fan_mode_t mode) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function sets the fan direction of the given OID. + * + * This function is only relevant if the fan OID supports both direction + * capabilities. + * + * This function is optional unless the functionality is available. + */ +int +onlp_fani_dir_set(onlp_oid_t id, onlp_fan_dir_t dir) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * Generic fan ioctl. Optional. + */ +int +onlp_fani_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c new file mode 100644 index 00000000..c8b71de3 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c @@ -0,0 +1,301 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define prefix_path "/bsp/led/led_" +#define driver_value_len 50 + +#define LED_MODE_OFF "none" +#define LED_MODE_GREEN "green" +#define LED_MODE_RED "red" +#define LED_MODE_BLUE "blue" +#define LED_MODE_GREEN_BLINK "green_blink" +#define LED_MODE_RED_BLINK "red_blink" +#define LED_MODE_BLUE_BLINK "blue_blink" +#define LED_MODE_AUTO "cpld_control" + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_LED(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +/* LED related data + */ +enum onlp_led_id +{ + LED_RESERVED = 0, + LED_SYSTEM, + LED_FAN1, + LED_FAN2, + LED_FAN3, + LED_FAN4, + LED_PSU, +}; + +typedef struct led_light_mode_map { + enum onlp_led_id id; + char* driver_led_mode; + enum onlp_led_mode_e onlp_led_mode; +} led_light_mode_map_t; + +led_light_mode_map_t led_map[] = { +{LED_SYSTEM, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_SYSTEM, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_SYSTEM, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_SYSTEM, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_SYSTEM, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_SYSTEM, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN1, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN1, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN1, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN1, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN1, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN1, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN2, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN2, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN2, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN2, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN2, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN2, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN3, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN3, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN3, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN3, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN3, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN3, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_FAN4, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_FAN4, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_FAN4, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_FAN4, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_FAN4, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_FAN4, LED_MODE_AUTO, ONLP_LED_MODE_AUTO}, + +{LED_PSU, LED_MODE_OFF, ONLP_LED_MODE_OFF}, +{LED_PSU, LED_MODE_GREEN, ONLP_LED_MODE_GREEN}, +{LED_PSU, LED_MODE_RED, ONLP_LED_MODE_RED}, +{LED_PSU, LED_MODE_RED_BLINK, ONLP_LED_MODE_RED_BLINKING}, +{LED_PSU, LED_MODE_GREEN_BLINK, ONLP_LED_MODE_GREEN_BLINKING}, +{LED_PSU, LED_MODE_AUTO, ONLP_LED_MODE_AUTO} +}; + +static char file_names[][10] = /* must map with onlp_led_id */ +{ + "reserved", + "status", + "fan1", + "fan2", + "fan3", + "fan4", + "psu" +}; + +/* + * Get the information for the given LED OID. 
+ */ +static onlp_led_info_t linfo[] = +{ + { }, /* Not used */ + { + { ONLP_LED_ID_CREATE(LED_SYSTEM), "Chassis LED 1 (SYSTEM LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN1), "Chassis LED 2 (FAN1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN2), "Chassis LED 3 (FAN2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN3), "Chassis LED 4 (FAN3 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_FAN4), "Chassis LED 5 (FAN4 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + }, + { + { ONLP_LED_ID_CREATE(LED_PSU), "Chassis LED 6 (PSU LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_GREEN_BLINKING | + ONLP_LED_CAPS_RED | ONLP_LED_CAPS_RED_BLINKING | ONLP_LED_CAPS_AUTO, + } +}; + +static int driver_to_onlp_led_mode(enum onlp_led_id id, char* driver_led_mode) +{ + int i, nsize = sizeof(led_map)/sizeof(led_map[0]); + + for (i = 0; i < nsize; i++) + { + if (id == led_map[i].id && + !strncmp(led_map[i].driver_led_mode, driver_led_mode, driver_value_len)) + { + return led_map[i].onlp_led_mode; + } + } + + return 0; +} + +static char* onlp_to_driver_led_mode(enum onlp_led_id id, onlp_led_mode_t onlp_led_mode) +{ + int i, nsize = sizeof(led_map)/sizeof(led_map[0]); + + for (i = 0; i < nsize; i++) + { + if (id == led_map[i].id && onlp_led_mode == led_map[i].onlp_led_mode) + { + return led_map[i].driver_led_mode; + } + } + + return LED_MODE_OFF; +} + +/* + * This function will be called prior to any other onlp_ledi_* functions. + */ +int +onlp_ledi_init(void) +{ + /* + * TODO setting UI LED to off when it will be supported on SN2700 + */ + + return ONLP_STATUS_OK; +} + +int +onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) +{ + int len, local_id = 0; + uint8_t data[driver_value_len] = {0}; + char fullpath[50] = {0}; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + /* get fullpath */ + snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); + + /* Set the onlp_oid_hdr_t and capabilities */ + *info = linfo[ONLP_OID_ID_GET(id)]; + + /* Get LED mode */ + if (onlp_file_read(data, sizeof(data), &len, fullpath) != 0) { + return ONLP_STATUS_E_INTERNAL; + } + + info->mode = driver_to_onlp_led_mode(local_id, (char*)data); + + /* Set the on/off status */ + if (info->mode != ONLP_LED_MODE_OFF) { + info->status |= ONLP_LED_STATUS_ON; + } + + return ONLP_STATUS_OK; +} + +/* + * Turn an LED on or off. + * + * This function will only be called if the LED OID supports the ONOFF + * capability. + * + * What 'on' means in terms of colors or modes for multimode LEDs is + * up to the platform to decide. This is intended as baseline toggle mechanism. 
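driver_to_onlp_led_mode and onlp_to_driver_led_mode translate between the strings stored under /bsp/led/led_* and the ONLP mode enums via led_map. A minimal sketch of reading an LED back through this module; the header path and wrapper function are assumptions, not part of this patch:

    #include <onlp/platformi/ledi.h>      /* assumed header path */

    /* Hypothetical: test whether the PSU LED is currently lit.
     * onlp_ledi_info_get() reads /bsp/led/led_psu and maps the string
     * ("green", "red_blink", ...) to an ONLP mode via driver_to_onlp_led_mode(). */
    static int psu_led_is_on(void)
    {
        onlp_led_info_t li;
        if (onlp_ledi_info_get(ONLP_LED_ID_CREATE(LED_PSU), &li) != ONLP_STATUS_OK) {
            return 0;
        }
        return (li.status & ONLP_LED_STATUS_ON) != 0;
    }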
+ */ +int +onlp_ledi_set(onlp_oid_t id, int on_or_off) +{ + VALIDATE(id); + + if (!on_or_off) { + return onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF); + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function puts the LED into the given mode. It is a more functional + * interface for multimode LEDs. + * + * Only modes reported in the LED's capabilities will be attempted. + */ +int +onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int local_id; + char fullpath[50] = {0}; + + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); + + if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, fullpath) != 0) + { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * Generic LED ioctl interface. + */ +int +onlp_ledi_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/make.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/make.mk new file mode 100644 index 00000000..3219cfff --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### + +LIBRARY := x86_64_mlnx_msn2700 +$(LIBRARY)_SUBDIR := $(dir $(lastword $(MAKEFILE_LIST))) +include $(BUILDER)/lib.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c new file mode 100644 index 00000000..c0c1765e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c @@ -0,0 +1,80 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +int +psu_read_eeprom(int psu_index, onlp_psu_info_t* psu_info, onlp_fan_info_t* fan_info) +{ + char path[64] = {0}; + const char sanity_check[] = "MLNX"; + const uint8_t serial_len = 24; + char data[256] = {0}; + bool sanity_found = false; + int index = 0, rv = 0, len = 0; + + snprintf(path, sizeof(path), IDPROM_PATH, "psu", psu_index); + rv = onlp_file_read((uint8_t* )data, sizeof(data)-1, &len, path); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Looking for sanity checker */ + while (index < sizeof(data) - sizeof(sanity_check) - 1) { + if (!strncmp(&data[index], sanity_check, sizeof(sanity_check) - 1)) { + sanity_found = true; + break; + } + index++; + } + if (false == sanity_found) { + return ONLP_STATUS_E_INVALID; + } + + /* Serial number */ + index += strlen(sanity_check); + if (psu_info) { + strncpy(psu_info->serial, &data[index], sizeof(psu_info->serial)); + } else if (fan_info) { + strncpy(fan_info->serial, &data[index], sizeof(fan_info->serial)); + } + + /* Part number */ + index += serial_len; + if (psu_info) { + strncpy(psu_info->model, &data[index], sizeof(psu_info->model)); + } else if (fan_info) { + strncpy(fan_info->model, &data[index], sizeof(fan_info->model)); + } + + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.h new file mode 100644 index 00000000..6169310f --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.h @@ -0,0 +1,58 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#ifndef __PLATFORM_LIB_H__ +#define __PLATFORM_LIB_H__ + +#include +#include +#include "x86_64_mlnx_msn2700_log.h" + +// ./sm/infra/modules/AIM/module/inc/AIM/aim_log.h + +#define CHASSIS_PSU_COUNT 2 +#define CHASSIS_TOTAL_FAN_COUNT 10 +#define CHASSIS_TOTAL_THERMAL_COUNT 8 +#define CHASSIS_FAN_COUNT (CHASSIS_TOTAL_FAN_COUNT - CHASSIS_PSU_COUNT) +#define CHASSIS_THERMAL_COUNT (CHASSIS_TOTAL_THERMAL_COUNT - CHASSIS_PSU_COUNT) + +#define PSU1_ID 1 +#define PSU2_ID 2 + +#define PSU_MODULE_PREFIX "/bsp/module/psu%d_%s" +#define PSU_POWER_PREFIX "/bsp/power/psu%d_%s" +#define IDPROM_PATH "/bsp/eeprom/%s%d_info" + +typedef enum psu_type { + PSU_TYPE_UNKNOWN, + PSU_TYPE_AC_F2B, + PSU_TYPE_AC_B2F +} psu_type_t; + +psu_type_t get_psu_type(int id, char* modelname, int modelname_len); + +int psu_read_eeprom(int psu_index, onlp_psu_info_t* psu_info, + onlp_fan_info_t* fan_info); + +#endif /* __PLATFORM_LIB_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c new file mode 100644 index 00000000..610b034e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c @@ -0,0 +1,202 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define PSU_STATUS_PRESENT 1 +#define PSU_CABLE_PRESENT 1 + +#define PSU_NODE_MAX_INT_LEN 8 +#define PSU_NODE_MAX_PATH_LEN 64 + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_PSU(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +static int +psu_module_info_get(int id, char *node, int *value) +{ + int len, ret = 0; + char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; + char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; + + *value = 0; + + sprintf(node_path, PSU_MODULE_PREFIX, id, node); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + if (ret == 0) { + *value = atoi(buf); + } + + return ret; +} + +static int +psu_power_info_get(int id, char *node, int *value) +{ + int len, ret = 0; + char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; + char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; + + *value = 0; + + sprintf(node_path, PSU_POWER_PREFIX, id, node); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + if (ret == 0) { + *value = atoi(buf); + } + + return ret; +} + +int +onlp_psui_init(void) +{ + return ONLP_STATUS_OK; +} + +static int +_psu_info_get(onlp_psu_info_t* info) +{ + int val = 0; + int index = ONLP_OID_ID_GET(info->hdr.id); + + /* Set capability + */ + info->caps = ONLP_PSU_CAPS_AC; + + if (info->status & ONLP_PSU_STATUS_FAILED) { + return ONLP_STATUS_OK; + } + + /* Set the associated oid_table */ + info->hdr.coids[0] = ONLP_FAN_ID_CREATE(index + CHASSIS_FAN_COUNT); + info->hdr.coids[1] = ONLP_THERMAL_ID_CREATE(index + CHASSIS_THERMAL_COUNT); + + /* Read voltage, current and power */ + if (psu_power_info_get(index, "volt_in", &val) == 0 && + 0 != val) { + info->mvin = val; + info->caps |= ONLP_PSU_CAPS_VIN; + + if (psu_power_info_get(index, "volt", &val) == 0) { + info->mvout = val; + info->caps |= ONLP_PSU_CAPS_VOUT; + } + + if (psu_power_info_get(index, "curr_in", &val) == 0) { + info->miin = val; + info->caps |= ONLP_PSU_CAPS_IIN; + } + + if (psu_power_info_get(index, "curr", &val) == 0) { + info->miout = val; + info->caps |= ONLP_PSU_CAPS_IOUT; + } + + if (psu_power_info_get(index, "power_in", &val) == 0) { + info->mpin = val; + info->caps |= ONLP_PSU_CAPS_PIN; + } + + if (psu_power_info_get(index, "power", &val) == 0) { + info->mpout = val; + info->caps |= ONLP_PSU_CAPS_POUT; + } + } else { + info->status |= ONLP_PSU_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + return psu_read_eeprom(index, info, NULL); +} + +/* + * Get all information about the given PSU oid. 
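+ *
+ * pinfo[] below is indexed by PSU ID (PSU1_ID/PSU2_ID; slot 0 is unused).
+ * Presence and cable state are read from the "/bsp/module/psu%d_%s" nodes,
+ * voltage/current/power telemetry from "/bsp/power/psu%d_%s", and the
+ * serial and model strings are parsed out of the PSU EEPROM by
+ * psu_read_eeprom().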
+ */ +static onlp_psu_info_t pinfo[] = +{ + { }, /* Not used */ + { + { ONLP_PSU_ID_CREATE(PSU1_ID), "PSU-1", 0 }, + }, + { + { ONLP_PSU_ID_CREATE(PSU2_ID), "PSU-2", 0 }, + } +}; + +int +onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) +{ + int val = 0; + int ret = ONLP_STATUS_OK; + int index = ONLP_OID_ID_GET(id); + + VALIDATE(id); + + memset(info, 0, sizeof(onlp_psu_info_t)); + *info = pinfo[index]; /* Set the onlp_oid_hdr_t */ + + /* Get the present state */ + if (psu_module_info_get(index, "status", &val) != 0) { + AIM_LOG_ERROR("Unable to read PSU(%d) node(psu_present)\r\n", index); + } + + if (val != PSU_STATUS_PRESENT) { + info->status &= ~ONLP_PSU_STATUS_PRESENT; + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } + + /* Get the cable preset state */ + if (psu_module_info_get(index, "pwr_status", &val) != 0) { + AIM_LOG_ERROR("Unable to read PSU(%d) node(cable_present)\r\n", index); + } + + if (val != PSU_CABLE_PRESENT) { + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } + + info->status |= ONLP_PSU_STATUS_PRESENT; + + ret = _psu_info_get(info); + + return ret; +} + +int +onlp_psui_ioctl(onlp_oid_t pid, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sfpi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sfpi.c new file mode 100644 index 00000000..4e9851f9 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sfpi.c @@ -0,0 +1,200 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include + +#include /* For O_RDWR && open */ +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define MAX_SFP_PATH 64 +#define SFP_SYSFS_VALUE_LEN 20 +static char sfp_node_path[MAX_SFP_PATH] = {0}; +#define NUM_OF_SFP_PORT 32 +#define SFP_PRESENT_STATUS "good" +#define SFP_NOT_PRESENT_STATUS "not_connected" + +static int +sn2700_sfp_node_read_int(char *node_path, int *value) +{ + int data_len = 0, ret = 0; + char buf[SFP_SYSFS_VALUE_LEN] = {0}; + *value = -1; + + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &data_len, node_path); + + if (ret == 0) { + if (!strncmp(buf, SFP_PRESENT_STATUS, strlen(SFP_PRESENT_STATUS))) { + *value = 1; + } else if (!strncmp(buf, SFP_NOT_PRESENT_STATUS, strlen(SFP_NOT_PRESENT_STATUS))) { + *value = 0; + } + } + + return ret; +} + +static char* +sn2700_sfp_get_port_path(int port, char *node_name) +{ + sprintf(sfp_node_path, "/bsp/qsfp/qsfp%d%s", port, node_name); + return sfp_node_path; +} + +/************************************************************ + * + * SFPI Entry Points + * + ***********************************************************/ +int +onlp_sfpi_init(void) +{ + /* Called at initialization time */ + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) +{ + /* + * Ports {1, 32} + */ + int p = 1; + AIM_BITMAP_CLR_ALL(bmap); + + for (; p <= NUM_OF_SFP_PORT; p++) { + AIM_BITMAP_SET(bmap, p); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_is_present(int port) +{ + /* + * Return 1 if present. + * Return 0 if not present. + * Return < 0 if error. + */ + int present = -1; + char* path = sn2700_sfp_get_port_path(port, "_status"); + + if (sn2700_sfp_node_read_int(path, &present) != 0) { + AIM_LOG_ERROR("Unable to read present status from port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + return present; +} + +int +onlp_sfpi_presence_bitmap_get(onlp_sfp_bitmap_t* dst) +{ + int ii = 1; + int rc = 0; + + for (;ii <= NUM_OF_SFP_PORT; ii++) { + rc = onlp_sfpi_is_present(ii); + AIM_BITMAP_MOD(dst, ii, (1 == rc) ? 1 : 0); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_eeprom_read(int port, uint8_t data[256]) +{ + char* path = sn2700_sfp_get_port_path(port, ""); + + /* + * Read the SFP eeprom into data[] + * + * Return MISSING if SFP is missing. 
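+ *     (presence itself is reported by the separate "_status" node as the
+ *     string "good" or "not_connected"; see onlp_sfpi_is_present() above)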
+ * Return OK if eeprom is read + */ + memset(data, 0, 256); + + if (onlplib_sfp_eeprom_read_file(path, data) != 0) { + AIM_LOG_ERROR("Unable to read eeprom from port(%d)\r\n", port); + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_supported(int port, onlp_sfp_control_t control, int* rv) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_get(int port, onlp_sfp_control_t control, int* value) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_denit(void) +{ + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c new file mode 100644 index 00000000..89b806be --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c @@ -0,0 +1,266 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" +#include "x86_64_mlnx_msn2700_int.h" +#include "x86_64_mlnx_msn2700_log.h" + +#define NUM_OF_THERMAL_ON_MAIN_BROAD CHASSIS_THERMAL_COUNT +#define NUM_OF_FAN_ON_MAIN_BROAD CHASSIS_FAN_COUNT +#define NUM_OF_PSU_ON_MAIN_BROAD 2 +#define NUM_OF_LED_ON_MAIN_BROAD 6 + +#define COMMAND_OUTPUT_BUFFER 256 + +#define PREFIX_PATH_ON_CPLD_DEV "/bsp/cpld" +#define NUM_OF_CPLD 3 +static char arr_cplddev_name[NUM_OF_CPLD][30] = +{ + "cpld_brd_version", + "cpld_mgmt_version", + "cpld_port_version" +}; + +static void +_onlp_sysi_execute_command(char *command, char buffer[COMMAND_OUTPUT_BUFFER]) +{ + FILE *fp = NULL; + + /* Open the command for reading. 
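+       popen() may return NULL (for example if /bin/sh cannot be started);
+       the code below only logs a warning in that case, so the caller is
+       expected to pass a command that exists on the system.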
*/ + fp = popen(command, "r"); + if (NULL == fp) { + AIM_LOG_WARN("Failed to run command '%s'\n", command); + } + + /* Read the output */ + if (fgets(buffer, COMMAND_OUTPUT_BUFFER-1, fp) == NULL) { + AIM_LOG_WARN("Failed to read output of command '%s'\n", command); + pclose(fp); + } + + /* The last symbol is '\n', so remote it */ + buffer[strnlen(buffer, COMMAND_OUTPUT_BUFFER) - 1] = '\0'; + + /* close */ + pclose(fp); +} + +const char* +onlp_sysi_platform_get(void) +{ + return "x86-64-mlnx-msn2700-r0"; +} + +int +onlp_sysi_platform_info_get(onlp_platform_info_t* pi) +{ + int i, v[NUM_OF_CPLD]={0}; + + for (i=0; i < NUM_OF_CPLD; i++) { + v[i] = 0; + if(onlp_file_read_int(v+i, "%s/%s", PREFIX_PATH_ON_CPLD_DEV, arr_cplddev_name[i]) < 0) { + return ONLP_STATUS_E_INTERNAL; + } + } + pi->cpld_versions = aim_fstrdup("brd=%d, mgmt=%d, port=%d", v[0], v[1], v[2]); + + return ONLP_STATUS_OK; +} + +void +onlp_sysi_platform_info_free(onlp_platform_info_t* pi) +{ + aim_free(pi->cpld_versions); +} + + +int +onlp_sysi_oids_get(onlp_oid_t* table, int max) +{ + int i; + onlp_oid_t* e = table; + memset(table, 0, max*sizeof(onlp_oid_t)); + + /* 8 Thermal sensors on the chassis */ + for (i = 1; i <= NUM_OF_THERMAL_ON_MAIN_BROAD; i++) + { + *e++ = ONLP_THERMAL_ID_CREATE(i); + } + + /* 6 LEDs on the chassis */ + for (i = 1; i <= NUM_OF_LED_ON_MAIN_BROAD; i++) + { + *e++ = ONLP_LED_ID_CREATE(i); + } + + /* 2 PSUs on the chassis */ + for (i = 1; i <= NUM_OF_PSU_ON_MAIN_BROAD; i++) + { + *e++ = ONLP_PSU_ID_CREATE(i); + } + + /* 8 Fans and 2 PSU fans on the chassis */ + for (i = 1; i <= NUM_OF_FAN_ON_MAIN_BROAD; i++) + { + *e++ = ONLP_FAN_ID_CREATE(i); + } + + return 0; +} + +static int +_onlp_sysi_grep_output(char value[256], const char *attr, const char *tmp_file) +{ + int value_offset = 30; /* value offset in onie-syseeprom */ + char command[256] = {0}; + char buffer[COMMAND_OUTPUT_BUFFER] = {0}; + int v = 0; + + snprintf(command, sizeof(command), "cat '%s' | grep '%s'", tmp_file, attr); + _onlp_sysi_execute_command(command, buffer); + + /* Reading value from buffer with command output */ + while (buffer[value_offset] != '\n' && + buffer[value_offset] != '\r' && + buffer[value_offset] != '\0') { + value[v] = buffer[value_offset]; + v++; + value_offset++; + } + value[v] = '\0'; + + AIM_LOG_VERBOSE("Value for sytem attribute '%s' is '%s' \n", attr, value); + + return ONLP_STATUS_OK; +} + +int +onlp_sysi_onie_info_get(onlp_onie_info_t* onie) +{ + + const char onie_version_file[] = "/bsp/onie-version"; + const char onie_version_command[] = "onie-shell -c 'onie-sysinfo -v' > /bsp/onie-version"; + const char onie_syseeprom_file[] = "/bsp/onie-syseeprom"; + const char onie_syseeprom_command[] = "onie-shell -c onie-syseeprom > /bsp/onie-syseeprom"; + struct stat stat_buf; + char value[256] = {0}; + char command[256] = {0}; + int rc = 0; + int exit_status; + + /* We must initialize this otherwise crash occurs while free memory */ + list_init(&onie->vx_list); + + /* Check if cache file exist */ + rc = stat(onie_syseeprom_file, &stat_buf); + if (-1 == rc) { + rc = system(onie_syseeprom_command); + if (-1 == rc) { + return rc; + } + exit_status = WEXITSTATUS(rc); + if (EXIT_SUCCESS != exit_status) { + return ONLP_STATUS_E_GENERIC; + } + } + + rc = _onlp_sysi_grep_output(value, "Product Name", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->product_name = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Part Number", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + 
onie->part_number = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Serial Number", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->serial_number = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Base MAC Address", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + strncpy((char*)onie->mac, value, sizeof(onie->mac)); + rc = _onlp_sysi_grep_output(value, "Manufacture Date", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacture_date = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Device Version", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->device_version = atoi(value); + rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacturer = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->manufacturer = aim_strdup(value); + onie->vendor = aim_strdup(value); + rc = _onlp_sysi_grep_output(value, "MAC Addresses", onie_syseeprom_file); + if (ONLP_STATUS_OK != rc) { + return rc; + } + onie->mac_range = atoi(value); + /* Check if onie version first run and cache file exist */ + rc = stat(onie_version_file, &stat_buf); + if (-1 == rc) + { + rc = system(onie_version_command); + if (-1 == rc) { + return rc; + } + exit_status = WEXITSTATUS(rc); + if (EXIT_SUCCESS != exit_status) { + return ONLP_STATUS_E_GENERIC; + }} + snprintf(command, sizeof(command), "cat '%s'", onie_version_file); + _onlp_sysi_execute_command(command, value); + /* ONIE version */ + onie->onie_version = aim_strdup(value); + + /* Platform name */ + onie->platform_name = aim_strdup("x86_64-mlnx_msn2700-r0"); + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c new file mode 100644 index 00000000..d2e1a533 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c @@ -0,0 +1,179 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Thermal Sensor Platform Implementation. + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include "platform_lib.h" + +#define prefix_path "/bsp/thermal" + +/** CPU thermal_threshold */ +typedef enum cpu_thermal_threshold_e { + CPU_THERMAL_THRESHOLD_WARNING_DEFAULT = 87000, + CPU_THERMAL_THRESHOLD_ERROR_DEFAULT = 100000, + CPU_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT = 105000, +} cpu_thermal_threshold_t; + +/** + * Shortcut for CPU thermal threshold value. 
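+ *
+ * Values are in millidegrees Celsius, matching the mcelsius field reported
+ * by onlp_thermali_info_get(): warning at 87000 (87 C), error at 100000
+ * (100 C) and shutdown at 105000 (105 C) for the CPU sensors.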
+ */ +#define CPU_THERMAL_THRESHOLD_INIT_DEFAULTS \ + { CPU_THERMAL_THRESHOLD_WARNING_DEFAULT, \ + CPU_THERMAL_THRESHOLD_ERROR_DEFAULT, \ + CPU_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT } + +/** Asic thermal_threshold */ +typedef enum asic_thermal_threshold_e { + ASIC_THERMAL_THRESHOLD_WARNING_DEFAULT = 105000, + ASIC_THERMAL_THRESHOLD_ERROR_DEFAULT = 115000, + ASIC_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT = 120000, +} asic_thermal_threshold_t; + +/** + * Shortcut for CPU thermal threshold value. + */ +#define ASIC_THERMAL_THRESHOLD_INIT_DEFAULTS \ + { ASIC_THERMAL_THRESHOLD_WARNING_DEFAULT, \ + ASIC_THERMAL_THRESHOLD_ERROR_DEFAULT, \ + ASIC_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT } + +#define VALIDATE(_id) \ + do { \ + if(!ONLP_OID_IS_THERMAL(_id)) { \ + return ONLP_STATUS_E_INVALID; \ + } \ + } while(0) + +enum onlp_thermal_id +{ + THERMAL_RESERVED = 0, + THERMAL_CPU_CORE_0, + THERMAL_CPU_CORE_1, + THERMAL_CPU_PACK, + THERMAL_ASIC, + THERMAL_BOAR_AMB, + THERMAL_PORT, + THERMAL_ON_PSU1, + THERMAL_ON_PSU2, +}; + +static char* last_path[] = /* must map with onlp_thermal_id */ +{ + "reserved", + "cpu_core0", + "cpu_core1", + "cpu_pack", + "asic", + "board_amb", + "port_amb", + "psu1", + "psu2" +}; + +/* Static values */ +static onlp_thermal_info_t linfo[] = { + { }, /* Not used */ + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_0), "CPU Core 0", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_CORE_1), "CPU Core 1", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_CPU_PACK), "CPU Pack", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, CPU_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ASIC), "Asic Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, ASIC_THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_BOAR_AMB), "Board AMB Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_PORT), "Port AMB Thermal Sensor", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ON_PSU1), "PSU-1 Thermal Sensor 1", ONLP_PSU_ID_CREATE(PSU1_ID)}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + }, + { { ONLP_THERMAL_ID_CREATE(THERMAL_ON_PSU2), "PSU-2 Thermal Sensor 1", ONLP_PSU_ID_CREATE(PSU2_ID)}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0, {0,0,0} + } +}; + +/* + * This will be called to intiialize the thermali subsystem. + */ +int +onlp_thermali_init(void) +{ + return ONLP_STATUS_OK; +} + +/* + * Retrieve the information structure for the given thermal OID. + * + * If the OID is invalid, return ONLP_E_STATUS_INVALID. + * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. + * Otherwise, return ONLP_STATUS_OK with the OID's information. + * + * Note -- it is expected that you fill out the information + * structure even if the sensor described by the OID is not present. 
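+ *
+ * On this platform the temperature is read as an ASCII integer from
+ * "/bsp/thermal/" followed by the last_path[] entry for the OID (e.g.
+ * "/bsp/thermal/cpu_core0") and stored unscaled in info->mcelsius
+ * (temp_base is 1), i.e. the sysfs value is taken to already be in
+ * millidegrees Celsius.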
+ */ +int +onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) +{ + int rv, len = 10, temp_base=1, local_id = 0; + char r_data[10] = {0}; + char fullpath[50] = {0}; + VALIDATE(id); + + local_id = ONLP_OID_ID_GET(id); + + /* Set the onlp_oid_hdr_t and capabilities */ + *info = linfo[local_id]; + + /* get fullpath */ + snprintf(fullpath, sizeof(fullpath), "%s/%s", prefix_path, last_path[local_id]); + + rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, fullpath); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + info->mcelsius = atoi(r_data) / temp_base; + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_config.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_config.c new file mode 100644 index 00000000..1b7081f7 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_config.c @@ -0,0 +1,81 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* */ +#define __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(_x) #_x +#define __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(_x) __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(_x) +x86_64_mlnx_msn2700_config_settings_t x86_64_mlnx_msn2700_config_settings[] = +{ +#ifdef x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_INCLUDE_LOGGING(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_PORTING_STDLIB(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), 
__x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + { __x86_64_mlnx_msn2700_config_STRINGIFY_NAME(x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION), __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE(x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION) }, +#else +{ x86_64_mlnx_msn2700_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION(__x86_64_mlnx_msn2700_config_STRINGIFY_NAME), "__undefined__" }, +#endif + { NULL, NULL } +}; +#undef __x86_64_mlnx_msn2700_config_STRINGIFY_VALUE +#undef __x86_64_mlnx_msn2700_config_STRINGIFY_NAME + +const char* +x86_64_mlnx_msn2700_config_lookup(const char* setting) +{ + int i; + for(i = 0; x86_64_mlnx_msn2700_config_settings[i].name; i++) { + if(strcmp(x86_64_mlnx_msn2700_config_settings[i].name, setting)) { + return x86_64_mlnx_msn2700_config_settings[i].value; + } + } + return NULL; +} + +int +x86_64_mlnx_msn2700_config_show(struct aim_pvs_s* pvs) +{ + int i; + for(i = 0; x86_64_mlnx_msn2700_config_settings[i].name; i++) { + aim_printf(pvs, "%s = %s\n", x86_64_mlnx_msn2700_config_settings[i].name, x86_64_mlnx_msn2700_config_settings[i].value); + } + return i; +} + +/* */ + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_enums.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_enums.c new file mode 100644 index 00000000..450f6975 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_enums.c @@ -0,0 +1,10 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.enum(ALL).source> */ +/* */ + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_int.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_int.h new file mode 100644 index 00000000..ab8e9e8e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_int.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * x86_64_mlnx_msn2700 Internal Header + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2700_INT_H__ +#define __x86_64_mlnx_msn2700_INT_H__ + +#include + + +#endif /* __x86_64_mlnx_msn2700_INT_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.c new file mode 100644 index 00000000..3eef11c3 --- /dev/null +++ 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.c @@ -0,0 +1,18 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_mlnx_msn2700_log.h" +/* + * x86_64_mlnx_msn2700 log struct. + */ +AIM_LOG_STRUCT_DEFINE( + x86_64_mlnx_msn2700_CONFIG_LOG_OPTIONS_DEFAULT, + x86_64_mlnx_msn2700_CONFIG_LOG_BITS_DEFAULT, + NULL, /* Custom log map */ + x86_64_mlnx_msn2700_CONFIG_LOG_CUSTOM_BITS_DEFAULT + ); + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.h b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.h new file mode 100644 index 00000000..eaca5871 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_log.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#ifndef __x86_64_mlnx_msn2700_LOG_H__ +#define __x86_64_mlnx_msn2700_LOG_H__ + +#define AIM_LOG_MODULE_NAME x86_64_mlnx_msn2700 +#include + +#endif /* __x86_64_mlnx_msn2700_LOG_H__ */ diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_module.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_module.c new file mode 100644 index 00000000..3586ef45 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_module.c @@ -0,0 +1,24 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_mlnx_msn2700_log.h" + +static int +datatypes_init__(void) +{ +#define x86_64_mlnx_msn2700_ENUMERATION_ENTRY(_enum_name, _desc) AIM_DATATYPE_MAP_REGISTER(_enum_name, _enum_name##_map, _desc, AIM_LOG_INTERNAL); +#include + return 0; +} + +void __x86_64_mlnx_msn2700_module_init__(void) +{ + AIM_LOG_STRUCT_REGISTER(); + datatypes_init__(); +} + +int __onlp_platform_version__ = 1; diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_ucli.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_ucli.c new file mode 100644 index 00000000..32374f62 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/x86_64_mlnx_msn2700_ucli.c @@ -0,0 +1,50 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#if x86_64_mlnx_msn2700_CONFIG_INCLUDE_UCLI == 1 + +#include +#include +#include + +static ucli_status_t +x86_64_mlnx_msn2700_ucli_ucli__config__(ucli_context_t* uc) +{ + UCLI_HANDLER_MACRO_MODULE_CONFIG(x86_64_mlnx_msn2700) +} + +/* */ +/* */ + +static ucli_module_t +x86_64_mlnx_msn2700_ucli_module__ = + { + "x86_64_mlnx_msn2700_ucli", + NULL, + x86_64_mlnx_msn2700_ucli_ucli_handlers__, + NULL, + NULL, + }; + +ucli_node_t* +x86_64_mlnx_msn2700_ucli_node_create(void) +{ + ucli_node_t* n; + 
ucli_module_init(&x86_64_mlnx_msn2700_ucli_module__); + n = ucli_node_create("x86_64_mlnx_msn2700", NULL, &x86_64_mlnx_msn2700_ucli_module__); + ucli_node_subnode_add(n, ucli_module_log_node_create("x86_64_mlnx_msn2700")); + return n; +} + +#else +void* +x86_64_mlnx_msn2700_ucli_node_create(void) +{ + return NULL; +} +#endif + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/x86_64_mlnx_msn2700.mk b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/x86_64_mlnx_msn2700.mk new file mode 100644 index 00000000..7d9819d9 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/x86_64_mlnx_msn2700.mk @@ -0,0 +1,13 @@ + +############################################################################### +# +# Inclusive Makefile for the x86_64_mlnx_msn2700 module. +# +# Autogenerated 2015-12-23 23:45:56.754200 +# +############################################################################### +x86_64_mlnx_msn2700_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +include $(x86_64_mlnx_msn2700_BASEDIR)/module/make.mk +include $(x86_64_mlnx_msn2700_BASEDIR)/module/auto/make.mk +include $(x86_64_mlnx_msn2700_BASEDIR)/module/src/make.mk + diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/Makefile b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/Makefile new file mode 100644 index 00000000..dc1e7b86 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/PKG.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/PKG.yml new file mode 100644 index 00000000..463fc056 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=mellanox PLATFORM=x86-64-mlnx-msn2700-r0 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml new file mode 100644 index 00000000..2cc6742e --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml @@ -0,0 +1,35 @@ +--- + +###################################################################### +# +# platform-config for Mellanox 2700 +# +###################################################################### + +x86-64-mlnx-msn2700-r0: + + grub: + + serial: >- + --unit=0 + --speed=115200 + --word=8 + --parity=0 + --stop=1 + + kernel: + <<: *kernel-3-16 + + args: >- + nopat + console=ttyS0,115200n8 + rd_NO_MD + rd_NO_LUKS + acpi_enforce_resources=lax + acpi=noirq + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:14.0 diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py 
b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py new file mode 100644 index 00000000..4e52aa01 --- /dev/null +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py @@ -0,0 +1,17 @@ +from onl.platform.base import * +from onl.platform.mellanox import * + +class OnlPlatform_x86_64_mlnx_msn2700_r0(OnlPlatformMellanox, + OnlPlatformPortConfig_32x100): + PLATFORM='x86-64-mlnx-msn2700-r0' + MODEL="SN2700" + SYS_OBJECT_ID=".2700.1" + + def baseconfig(self): + # load modules + import os + # necessary if there are issues with the install + # os.system("/usr/bin/apt-get install") + os.system("/etc/mlnx/mlnx-hw-management start") + + return True From 97f1dcbc58caa5085a6c038d2a093e290ed6010e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 12 Dec 2016 20:09:04 +0000 Subject: [PATCH 146/255] New operational status and OID Header APIs The following new APIs are introduced: onlp__status_get() - Retreives only the operational status (PRESENT, FAILED, UNPLUGGED) of the object Useful for improving the performance of the platform manager, whose primary concern is the operational status, not the object information itself. onlp__hdr_get() - Retrieves the OID header (description, parent, children) only. Improved performance of OID iteration by avoiding collection of all of the unused object information during execution. If a platform does not implement these functions they will be simulated using calls to the existing onlp__info_get(), which reduces to the original performance level. --- .../any/onlp/src/onlp/module/inc/onlp/fan.h | 16 ++++++++++ .../any/onlp/src/onlp/module/inc/onlp/led.h | 14 ++++++++ .../any/onlp/src/onlp/module/inc/onlp/onlp.h | 4 +++ .../src/onlp/module/inc/onlp/platformi/fani.h | 16 ++++++++++ .../src/onlp/module/inc/onlp/platformi/ledi.h | 14 ++++++++ .../src/onlp/module/inc/onlp/platformi/psui.h | 14 ++++++++ .../onlp/module/inc/onlp/platformi/thermali.h | 14 ++++++++ .../any/onlp/src/onlp/module/inc/onlp/psu.h | 14 ++++++++ .../onlp/src/onlp/module/inc/onlp/thermal.h | 13 ++++++++ .../base/any/onlp/src/onlp/module/src/fan.c | 31 ++++++++++++++++++ .../base/any/onlp/src/onlp/module/src/led.c | 32 +++++++++++++++++++ .../base/any/onlp/src/onlp/module/src/oids.c | 24 +++----------- .../base/any/onlp/src/onlp/module/src/psu.c | 31 ++++++++++++++++++ .../any/onlp/src/onlp/module/src/thermal.c | 31 ++++++++++++++++++ .../onlp_platform_defaults/module/src/fani.c | 2 ++ .../onlp_platform_defaults/module/src/ledi.c | 2 ++ .../onlp_platform_defaults/module/src/psui.c | 2 ++ .../module/src/thermali.c | 2 ++ 18 files changed, 256 insertions(+), 20 deletions(-) diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/fan.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/fan.h index 0c6680b7..bf750132 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/fan.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/fan.h @@ -113,6 +113,22 @@ int onlp_fan_init(void); */ int onlp_fan_info_get(onlp_oid_t id, onlp_fan_info_t* rv); +/** + * @brief Retrieve the fan's operational status. + * @param id The fan OID. + * @param rv [out] Receives the fan's operations status flags. + * @notes Only operational state needs to be returned - + * PRESENT/FAILED + */ +int onlp_fan_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Retrieve the fan's OID hdr. + * @param id The fan OID. + * @param rv [out] Receives the OID header. 
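+ * @notes If the platform driver does not implement onlp_fani_hdr_get(),
+ * the header is populated from a full onlp_fani_info_get() call instead
+ * (see fan.c), which reduces to the original performance level.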
+ */ +int onlp_fan_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* hdr); + /** * @brief Set the fan speed in RPMs. * @param id The fan OID. diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/led.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/led.h index 1c99843e..31503857 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/led.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/led.h @@ -112,6 +112,20 @@ int onlp_led_init(void); */ int onlp_led_info_get(onlp_oid_t id, onlp_led_info_t* rv); +/** + * @brief Get the LED operational status. + * @param id The LED OID + * @param rv [out] Receives the operational status. + */ +int onlp_led_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Get the LED header. + * @param id The LED OID + * @param rv [out] Receives the header. + */ +int onlp_led_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv); + /** * @brief Turn an LED on or off. * @param id The LED OID diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/onlp.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/onlp.h index d1e223a8..266bf3ee 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/onlp.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/onlp.h @@ -49,6 +49,10 @@ typedef enum onlp_status_e { } \ } while(0) +#define ONLP_FAILURE(_rv) ((_rv) < 0) +#define ONLP_SUCCESS(_rv) (!(ONLP_FAILURE(_rv))) +#define ONLP_UNSUPPORTED(_rv) \ + ((_rv) == ONLP_STATUS_E_UNSUPPORTED) /** * @brief Initialize all subsystems. diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/fani.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/fani.h index 76807822..53a2372f 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/fani.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/fani.h @@ -39,6 +39,22 @@ int onlp_fani_init(void); */ int onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* rv); +/** + * @brief Retrieve the fan's operational status. + * @param id The fan OID. + * @param rv [out] Receives the fan's operations status flags. + * @notes Only operational state needs to be returned - + * PRESENT/FAILED + */ +int onlp_fani_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Retrieve the fan's OID hdr. + * @param id The fan OID. + * @param rv [out] Receives the OID header. + */ +int onlp_fani_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* hdr); + /** * @brief Set the fan speed in RPM. * @param id The fan OID diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/ledi.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/ledi.h index 7d5bb545..c673ac32 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/ledi.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/ledi.h @@ -39,6 +39,20 @@ int onlp_ledi_init(void); */ int onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* rv); +/** + * @brief Get the LED operational status. + * @param id The LED OID + * @param rv [out] Receives the operational status. + */ +int onlp_ledi_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Get the LED header. + * @param id The LED OID + * @param rv [out] Receives the header. 
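+ * @note Platforms that do not provide this can leave the default
+ * implementation in place; onlp_led_hdr_get() then falls back to copying
+ * the header out of onlp_ledi_info_get().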
+ */ +int onlp_ledi_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv); + /** * @brief Turn an LED on or off * @param id The LED OID diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/psui.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/psui.h index e9f75da5..36a3a806 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/psui.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/psui.h @@ -39,6 +39,20 @@ int onlp_psui_init(void); */ int onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* rv); +/** + * @brief Get the PSU's operational status. + * @param id The PSU OID. + * @param rv [out] Receives the operational status. + */ +int onlp_psui_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Get the PSU's oid header. + * @param id The PSU OID. + * @param rv [out] Receives the header. + */ +int onlp_psui_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv); + /** * @brief Generic PSU ioctl * @param id The PSU OID diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/thermali.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/thermali.h index fd2a29af..53175346 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/thermali.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/platformi/thermali.h @@ -40,6 +40,20 @@ int onlp_thermali_init(void); */ int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* rv); +/** + * @brief Retrieve the thermal's operational status. + * @param id The thermal oid. + * @param rv [out] Receives the operational status. + */ +int onlp_thermali_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Retrieve the thermal's oid header. + * @param id The thermal oid. + * @param rv [out] Receives the header. + */ +int onlp_thermali_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv); + /** * @brief Generic ioctl. */ diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/psu.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/psu.h index 0095ccdb..e51bd354 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/psu.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/psu.h @@ -96,6 +96,20 @@ int onlp_psu_init(void); */ int onlp_psu_info_get(onlp_oid_t id, onlp_psu_info_t* rv); +/** + * @brief Get the PSU's operational status. + * @param id The PSU OID. + * @param rv [out] Receives the operational status. + */ +int onlp_psu_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Get the PSU's oid header. + * @param id The PSU OID. + * @param rv [out] Receives the header. + */ +int onlp_psu_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv); + /** * @brief Issue a PSU ioctl. * @param id The PSU OID diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/thermal.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/thermal.h index 6f73ab20..11453861 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/thermal.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/thermal.h @@ -115,6 +115,19 @@ int onlp_thermal_init(void); */ int onlp_thermal_info_get(onlp_oid_t id, onlp_thermal_info_t* rv); +/** + * @brief Retrieve the thermal's operational status. + * @param id The thermal oid. + * @param rv [out] Receives the operational status. + */ +int onlp_thermal_status_get(onlp_oid_t id, uint32_t* rv); + +/** + * @brief Retrieve the thermal's oid header. + * @param id The thermal oid. + * @param rv [out] Receives the header. + */ +int onlp_thermal_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv); /** * @brief Thermal driver ioctl. 
diff --git a/packages/base/any/onlp/src/onlp/module/src/fan.c b/packages/base/any/onlp/src/onlp/module/src/fan.c index f85a782a..3ef8a75e 100644 --- a/packages/base/any/onlp/src/onlp/module/src/fan.c +++ b/packages/base/any/onlp/src/onlp/module/src/fan.c @@ -119,6 +119,37 @@ onlp_fan_info_get_locked__(onlp_oid_t oid, onlp_fan_info_t* fip) } ONLP_LOCKED_API2(onlp_fan_info_get, onlp_oid_t, oid, onlp_fan_info_t*, fip); +static int +onlp_fan_status_get_locked__(onlp_oid_t oid, uint32_t* status) +{ + int rv = onlp_fani_status_get(oid, status); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_fan_info_t fi; + rv = onlp_fani_info_get(oid, &fi); + *status = fi.status; + } + return rv; +} +ONLP_LOCKED_API2(onlp_fan_status_get, onlp_oid_t, oid, uint32_t*, status); + +static int +onlp_fan_hdr_get_locked__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) +{ + int rv = onlp_fani_hdr_get(oid, hdr); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_fan_info_t fi; + rv = onlp_fani_info_get(oid, &fi); + memcpy(hdr, &fi.hdr, sizeof(fi.hdr)); + } + return rv; +} +ONLP_LOCKED_API2(onlp_fan_hdr_get, onlp_oid_t, oid, onlp_oid_hdr_t*, hdr); static int onlp_fan_present__(onlp_oid_t id, onlp_fan_info_t* info) diff --git a/packages/base/any/onlp/src/onlp/module/src/led.c b/packages/base/any/onlp/src/onlp/module/src/led.c index 50f85809..e914ee69 100644 --- a/packages/base/any/onlp/src/onlp/module/src/led.c +++ b/packages/base/any/onlp/src/onlp/module/src/led.c @@ -84,6 +84,38 @@ onlp_led_info_get_locked__(onlp_oid_t id, onlp_led_info_t* info) } ONLP_LOCKED_API2(onlp_led_info_get, onlp_oid_t, id, onlp_led_info_t*, info); +static int +onlp_led_status_get_locked__(onlp_oid_t id, uint32_t* status) +{ + int rv = onlp_ledi_status_get(id, status); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_led_info_t li; + rv = onlp_ledi_info_get(id, &li); + *status = li.status; + } + return rv; +} +ONLP_LOCKED_API2(onlp_led_status_get, onlp_oid_t, id, uint32_t*, status); + +static int +onlp_led_hdr_get_locked__(onlp_oid_t id, onlp_oid_hdr_t* hdr) +{ + int rv = onlp_ledi_hdr_get(id, hdr); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_led_info_t li; + rv = onlp_ledi_info_get(id, &li); + memcpy(hdr, &li.hdr, sizeof(li.hdr)); + } + return rv; +} +ONLP_LOCKED_API2(onlp_led_hdr_get, onlp_oid_t, id, onlp_oid_hdr_t*, hdr); + static int onlp_led_set_locked__(onlp_oid_t id, int on_or_off) { diff --git a/packages/base/any/onlp/src/onlp/module/src/oids.c b/packages/base/any/onlp/src/onlp/module/src/oids.c index f2f495f1..9bac8979 100644 --- a/packages/base/any/onlp/src/onlp/module/src/oids.c +++ b/packages/base/any/onlp/src/onlp/module/src/oids.c @@ -90,41 +90,25 @@ oid_type_SYS_hdr_get__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) static int oid_type_THERMAL_hdr_get__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) { - int rv; - onlp_thermal_info_t ti; - rv = onlp_thermal_info_get(oid, &ti); - memcpy(hdr, &ti.hdr, sizeof(ti.hdr)); - return rv; + return onlp_thermal_hdr_get(oid, hdr); } static int oid_type_FAN_hdr_get__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) { - int rv; - onlp_fan_info_t fi; - rv = onlp_fan_info_get(oid, &fi); - memcpy(hdr, &fi.hdr, sizeof(fi.hdr)); - return rv; + return onlp_fan_hdr_get(oid, hdr); } static int oid_type_LED_hdr_get__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) { - int rv; - onlp_led_info_t li; - rv = onlp_led_info_get(oid, &li); - memcpy(hdr, &li.hdr, sizeof(li.hdr)); - return rv; + return onlp_led_hdr_get(oid, hdr); } static int 
oid_type_PSU_hdr_get__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) { - int rv; - onlp_psu_info_t pi; - rv = onlp_psu_info_get(oid, &pi); - memcpy(hdr, &pi.hdr, sizeof(pi.hdr)); - return rv; + return onlp_psu_hdr_get(oid, hdr); } static int diff --git a/packages/base/any/onlp/src/onlp/module/src/psu.c b/packages/base/any/onlp/src/onlp/module/src/psu.c index 6b826111..76baf3fe 100644 --- a/packages/base/any/onlp/src/onlp/module/src/psu.c +++ b/packages/base/any/onlp/src/onlp/module/src/psu.c @@ -60,6 +60,37 @@ onlp_psu_info_get_locked__(onlp_oid_t id, onlp_psu_info_t* info) } ONLP_LOCKED_API2(onlp_psu_info_get, onlp_oid_t, id, onlp_psu_info_t*, info); +static int +onlp_psu_status_get_locked__(onlp_oid_t id, uint32_t* status) +{ + int rv = onlp_psui_status_get(id, status); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_psu_info_t pi; + rv = onlp_psu_info_get(id, &pi); + *status = pi.status; + } + return rv; +} +ONLP_LOCKED_API2(onlp_psu_status_get, onlp_oid_t, id, uint32_t*, status); + +static int +onlp_psu_hdr_get_locked__(onlp_oid_t id, onlp_oid_hdr_t* hdr) +{ + int rv = onlp_psui_hdr_get(id, hdr); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_psu_info_t pi; + rv = onlp_psui_info_get(id, &pi); + memcpy(hdr, &pi.hdr, sizeof(pi.hdr)); + } + return rv; +} +ONLP_LOCKED_API2(onlp_psu_hdr_get, onlp_oid_t, id, onlp_oid_hdr_t*, hdr); int onlp_psu_vioctl_locked__(onlp_oid_t id, va_list vargs) { diff --git a/packages/base/any/onlp/src/onlp/module/src/thermal.c b/packages/base/any/onlp/src/onlp/module/src/thermal.c index 10813a81..316f1280 100644 --- a/packages/base/any/onlp/src/onlp/module/src/thermal.c +++ b/packages/base/any/onlp/src/onlp/module/src/thermal.c @@ -96,6 +96,37 @@ onlp_thermal_info_get_locked__(onlp_oid_t oid, onlp_thermal_info_t* info) } ONLP_LOCKED_API2(onlp_thermal_info_get, onlp_oid_t, oid, onlp_thermal_info_t*, info); +static int +onlp_thermal_status_get_locked__(onlp_oid_t id, uint32_t* status) +{ + int rv = onlp_thermali_status_get(id, status); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_thermal_info_t ti; + rv = onlp_thermali_info_get(id, &ti); + *status = ti.status; + } + return rv; +} +ONLP_LOCKED_API2(onlp_thermal_status_get, onlp_oid_t, id, uint32_t*, status); + +static int +onlp_thermal_hdr_get_locked__(onlp_oid_t id, onlp_oid_hdr_t* hdr) +{ + int rv = onlp_thermali_hdr_get(id, hdr); + if(ONLP_SUCCESS(rv)) { + return rv; + } + if(ONLP_UNSUPPORTED(rv)) { + onlp_thermal_info_t ti; + rv = onlp_thermali_info_get(id, &ti); + memcpy(hdr, &ti.hdr, sizeof(ti.hdr)); + } + return rv; +} +ONLP_LOCKED_API2(onlp_thermal_hdr_get, onlp_oid_t, id, onlp_oid_hdr_t*, hdr); int onlp_thermal_ioctl(int code, ...) 
{ diff --git a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/fani.c b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/fani.c index 3ef9e111..715de62f 100644 --- a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/fani.c +++ b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/fani.c @@ -32,6 +32,8 @@ */ __ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_init(void)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_status_get(onlp_oid_t id, uint32_t* status)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* hdr)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_rpm_set(onlp_oid_t id, int rpm)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_percentage_set(onlp_oid_t id, int p)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_fani_mode_set(onlp_oid_t id, onlp_fan_mode_t mode)); diff --git a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/ledi.c b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/ledi.c index 461130b2..cb404ff3 100644 --- a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/ledi.c +++ b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/ledi.c @@ -27,6 +27,8 @@ __ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_init(void)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* rv)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_status_get(onlp_oid_t id, uint32_t* rv)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_set(onlp_oid_t id, int on_or_off)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_ioctl(onlp_oid_t id, va_list vargs)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode)); diff --git a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/psui.c b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/psui.c index ce534c88..5ef6d408 100644 --- a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/psui.c +++ b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/psui.c @@ -28,4 +28,6 @@ __ONLP_DEFAULTI_IMPLEMENTATION(onlp_psui_init(void)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* rv)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_psui_status_get(onlp_oid_t id, uint32_t* rv)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_psui_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_psui_ioctl(onlp_oid_t pid, va_list vargs)); diff --git a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/thermali.c b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/thermali.c index 3593961c..7117fbe9 100644 --- a/packages/base/any/onlp/src/onlp_platform_defaults/module/src/thermali.c +++ b/packages/base/any/onlp/src/onlp_platform_defaults/module/src/thermali.c @@ -28,4 +28,6 @@ __ONLP_DEFAULTI_IMPLEMENTATION(onlp_thermali_init(void)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* rv)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_thermali_status_get(onlp_oid_t id, uint32_t* rv)); +__ONLP_DEFAULTI_IMPLEMENTATION(onlp_thermali_hdr_get(onlp_oid_t id, onlp_oid_hdr_t* rv)); __ONLP_DEFAULTI_IMPLEMENTATION(onlp_thermali_ioctl(int code, va_list vargs)); From e5cb3f6ec125f81971c8068997bf0aab927a7aeb Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 12 Dec 2016 20:10:30 +0000 Subject: [PATCH 147/255] [from @kenchiang] Rework snmp oid registration so that 
the temperature, fan, and PSU oids are registered as netsnmp tables. When a sensor is added or deleted, the corresponding table is updated accordingly. --- .../src/onlp_snmp/module/auto/onlp_snmp.yml | 3 - .../module/inc/onlp_snmp/onlp_snmp_config.h | 10 - .../inc/onlp_snmp/onlp_snmp_sensor_oids.h | 58 +- .../onlp_snmp/module/src/onlp_snmp_config.c | 5 - .../onlp_snmp/module/src/onlp_snmp_enums.c | 128 +-- .../onlp_snmp/module/src/onlp_snmp_sensors.c | 1023 +++++++---------- 6 files changed, 485 insertions(+), 742 deletions(-) diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml index 7e71c183..6e62a16c 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml @@ -38,9 +38,6 @@ cdefs: &cdefs - ONLP_SNMP_CONFIG_DEV_BASE_INDEX: doc: "Base index." default: 1 -- ONLP_SNMP_CONFIG_DEV_MAX_INDEX: - doc: "Maximum index." - default: 100 - ONLP_SNMP_CONFIG_INCLUDE_THERMALS: doc: "Include Thermals." default: 1 diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h index 9818342a..383142b2 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h @@ -131,16 +131,6 @@ #define ONLP_SNMP_CONFIG_DEV_BASE_INDEX 1 #endif -/** - * ONLP_SNMP_CONFIG_DEV_MAX_INDEX - * - * Maximum index. */ - - -#ifndef ONLP_SNMP_CONFIG_DEV_MAX_INDEX -#define ONLP_SNMP_CONFIG_DEV_MAX_INDEX 100 -#endif - /** * ONLP_SNMP_CONFIG_INCLUDE_THERMALS * diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_sensor_oids.h b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_sensor_oids.h index 79a40ba9..625cf7a1 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_sensor_oids.h +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_sensor_oids.h @@ -79,14 +79,6 @@ typedef enum onlp_snmp_fan_flow_type_e { ONLP_SNMP_FAN_FLOW_TYPE_F2B = 2, } onlp_snmp_fan_flow_type_t; -/** onlp_snmp_sensor_status */ -typedef enum onlp_snmp_sensor_status_e { - ONLP_SNMP_SENSOR_STATUS_MISSING = 0, - ONLP_SNMP_SENSOR_STATUS_GOOD = 1, - ONLP_SNMP_SENSOR_STATUS_FAILED = 2, - ONLP_SNMP_SENSOR_STATUS_UNPLUGGED = 3, -} onlp_snmp_sensor_status_t; - /** onlp_snmp_psu_type */ typedef enum onlp_snmp_psu_type_e { ONLP_SNMP_PSU_TYPE_UNKNOWN = 0, @@ -95,6 +87,14 @@ typedef enum onlp_snmp_psu_type_e { ONLP_SNMP_PSU_TYPE_DC48 = 3, } onlp_snmp_psu_type_t; +/** onlp_snmp_sensor_status */ +typedef enum onlp_snmp_sensor_status_e { + ONLP_SNMP_SENSOR_STATUS_MISSING = 0, + ONLP_SNMP_SENSOR_STATUS_GOOD = 1, + ONLP_SNMP_SENSOR_STATUS_FAILED = 2, + ONLP_SNMP_SENSOR_STATUS_UNPLUGGED = 3, +} onlp_snmp_sensor_status_t; + /** onlp_snmp_sensor_type */ typedef enum onlp_snmp_sensor_type_e { ONLP_SNMP_SENSOR_TYPE_TEMP = 1, @@ -136,27 +136,6 @@ extern aim_map_si_t onlp_snmp_fan_flow_type_map[]; /** onlp_snmp_fan_flow_type_desc_map table. */ extern aim_map_si_t onlp_snmp_fan_flow_type_desc_map[]; -/** Enum names. */ -const char* onlp_snmp_sensor_status_name(onlp_snmp_sensor_status_t e); - -/** Enum values. 
*/ -int onlp_snmp_sensor_status_value(const char* str, onlp_snmp_sensor_status_t* e, int substr); - -/** Enum descriptions. */ -const char* onlp_snmp_sensor_status_desc(onlp_snmp_sensor_status_t e); - -/** Enum validator. */ -int onlp_snmp_sensor_status_valid(onlp_snmp_sensor_status_t e); - -/** validator */ -#define ONLP_SNMP_SENSOR_STATUS_VALID(_e) \ - (onlp_snmp_sensor_status_valid((_e))) - -/** onlp_snmp_sensor_status_map table. */ -extern aim_map_si_t onlp_snmp_sensor_status_map[]; -/** onlp_snmp_sensor_status_desc_map table. */ -extern aim_map_si_t onlp_snmp_sensor_status_desc_map[]; - /** Enum names. */ const char* onlp_snmp_psu_type_name(onlp_snmp_psu_type_t e); @@ -178,6 +157,27 @@ extern aim_map_si_t onlp_snmp_psu_type_map[]; /** onlp_snmp_psu_type_desc_map table. */ extern aim_map_si_t onlp_snmp_psu_type_desc_map[]; +/** Enum names. */ +const char* onlp_snmp_sensor_status_name(onlp_snmp_sensor_status_t e); + +/** Enum values. */ +int onlp_snmp_sensor_status_value(const char* str, onlp_snmp_sensor_status_t* e, int substr); + +/** Enum descriptions. */ +const char* onlp_snmp_sensor_status_desc(onlp_snmp_sensor_status_t e); + +/** Enum validator. */ +int onlp_snmp_sensor_status_valid(onlp_snmp_sensor_status_t e); + +/** validator */ +#define ONLP_SNMP_SENSOR_STATUS_VALID(_e) \ + (onlp_snmp_sensor_status_valid((_e))) + +/** onlp_snmp_sensor_status_map table. */ +extern aim_map_si_t onlp_snmp_sensor_status_map[]; +/** onlp_snmp_sensor_status_desc_map table. */ +extern aim_map_si_t onlp_snmp_sensor_status_desc_map[]; + /** Enum names. */ const char* onlp_snmp_sensor_type_name(onlp_snmp_sensor_type_t e); diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c index a6e12942..339c5f42 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c @@ -65,11 +65,6 @@ onlp_snmp_config_settings_t onlp_snmp_config_settings[] = #else { ONLP_SNMP_CONFIG_DEV_BASE_INDEX(__onlp_snmp_config_STRINGIFY_NAME), "__undefined__" }, #endif -#ifdef ONLP_SNMP_CONFIG_DEV_MAX_INDEX - { __onlp_snmp_config_STRINGIFY_NAME(ONLP_SNMP_CONFIG_DEV_MAX_INDEX), __onlp_snmp_config_STRINGIFY_VALUE(ONLP_SNMP_CONFIG_DEV_MAX_INDEX) }, -#else -{ ONLP_SNMP_CONFIG_DEV_MAX_INDEX(__onlp_snmp_config_STRINGIFY_NAME), "__undefined__" }, -#endif #ifdef ONLP_SNMP_CONFIG_INCLUDE_THERMALS { __onlp_snmp_config_STRINGIFY_NAME(ONLP_SNMP_CONFIG_INCLUDE_THERMALS), __onlp_snmp_config_STRINGIFY_VALUE(ONLP_SNMP_CONFIG_INCLUDE_THERMALS) }, #else diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_enums.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_enums.c index 2891364b..3114d686 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_enums.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_enums.c @@ -69,70 +69,6 @@ onlp_snmp_fan_flow_type_valid(onlp_snmp_fan_flow_type_t e) } -aim_map_si_t onlp_snmp_sensor_status_map[] = -{ - { "missing", ONLP_SNMP_SENSOR_STATUS_MISSING }, - { "good", ONLP_SNMP_SENSOR_STATUS_GOOD }, - { "failed", ONLP_SNMP_SENSOR_STATUS_FAILED }, - { "unplugged", ONLP_SNMP_SENSOR_STATUS_UNPLUGGED }, - { NULL, 0 } -}; - -aim_map_si_t onlp_snmp_sensor_status_desc_map[] = -{ - { "None", ONLP_SNMP_SENSOR_STATUS_MISSING }, - { "None", ONLP_SNMP_SENSOR_STATUS_GOOD }, - { "None", 
ONLP_SNMP_SENSOR_STATUS_FAILED }, - { "None", ONLP_SNMP_SENSOR_STATUS_UNPLUGGED }, - { NULL, 0 } -}; - -const char* -onlp_snmp_sensor_status_name(onlp_snmp_sensor_status_t e) -{ - const char* name; - if(aim_map_si_i(&name, e, onlp_snmp_sensor_status_map, 0)) { - return name; - } - else { - return "-invalid value for enum type 'onlp_snmp_sensor_status'"; - } -} - -int -onlp_snmp_sensor_status_value(const char* str, onlp_snmp_sensor_status_t* e, int substr) -{ - int i; - AIM_REFERENCE(substr); - if(aim_map_si_s(&i, str, onlp_snmp_sensor_status_map, 0)) { - /* Enum Found */ - *e = i; - return 0; - } - else { - return -1; - } -} - -const char* -onlp_snmp_sensor_status_desc(onlp_snmp_sensor_status_t e) -{ - const char* name; - if(aim_map_si_i(&name, e, onlp_snmp_sensor_status_desc_map, 0)) { - return name; - } - else { - return "-invalid value for enum type 'onlp_snmp_sensor_status'"; - } -} - -int -onlp_snmp_sensor_status_valid(onlp_snmp_sensor_status_t e) -{ - return aim_map_si_i(NULL, e, onlp_snmp_sensor_status_map, 0) ? 1 : 0; -} - - aim_map_si_t onlp_snmp_psu_type_map[] = { { "unknown", ONLP_SNMP_PSU_TYPE_UNKNOWN }, @@ -197,6 +133,70 @@ onlp_snmp_psu_type_valid(onlp_snmp_psu_type_t e) } +aim_map_si_t onlp_snmp_sensor_status_map[] = +{ + { "missing", ONLP_SNMP_SENSOR_STATUS_MISSING }, + { "good", ONLP_SNMP_SENSOR_STATUS_GOOD }, + { "failed", ONLP_SNMP_SENSOR_STATUS_FAILED }, + { "unplugged", ONLP_SNMP_SENSOR_STATUS_UNPLUGGED }, + { NULL, 0 } +}; + +aim_map_si_t onlp_snmp_sensor_status_desc_map[] = +{ + { "None", ONLP_SNMP_SENSOR_STATUS_MISSING }, + { "None", ONLP_SNMP_SENSOR_STATUS_GOOD }, + { "None", ONLP_SNMP_SENSOR_STATUS_FAILED }, + { "None", ONLP_SNMP_SENSOR_STATUS_UNPLUGGED }, + { NULL, 0 } +}; + +const char* +onlp_snmp_sensor_status_name(onlp_snmp_sensor_status_t e) +{ + const char* name; + if(aim_map_si_i(&name, e, onlp_snmp_sensor_status_map, 0)) { + return name; + } + else { + return "-invalid value for enum type 'onlp_snmp_sensor_status'"; + } +} + +int +onlp_snmp_sensor_status_value(const char* str, onlp_snmp_sensor_status_t* e, int substr) +{ + int i; + AIM_REFERENCE(substr); + if(aim_map_si_s(&i, str, onlp_snmp_sensor_status_map, 0)) { + /* Enum Found */ + *e = i; + return 0; + } + else { + return -1; + } +} + +const char* +onlp_snmp_sensor_status_desc(onlp_snmp_sensor_status_t e) +{ + const char* name; + if(aim_map_si_i(&name, e, onlp_snmp_sensor_status_desc_map, 0)) { + return name; + } + else { + return "-invalid value for enum type 'onlp_snmp_sensor_status'"; + } +} + +int +onlp_snmp_sensor_status_valid(onlp_snmp_sensor_status_t e) +{ + return aim_map_si_i(NULL, e, onlp_snmp_sensor_status_map, 0) ? 1 : 0; +} + + aim_map_si_t onlp_snmp_sensor_type_map[] = { { "temp", ONLP_SNMP_SENSOR_TYPE_TEMP }, diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c index 263f016f..ecae68b4 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c @@ -1,7 +1,7 @@ /************************************************************ * * - * Copyright 2015 Big Switch Networks, Inc. + * Copyright 2015-2016 Big Switch Networks, Inc. 
* * Licensed under the Eclipse Public License, Version 1.0 (the * "License"); you may not use this file except in compliance @@ -31,117 +31,217 @@ #include -#include #include #include #include -#include #include "onlp_snmp_log.h" + /** * Individual Sensor Control structure. */ typedef struct onlp_snmp_sensor_s { - int sensor_id; /* sensor identification */ + list_links_t links; /* for tracking sensors of the same type */ + int sensor_id; /* onlp_oid_t for this sensor */ char name[ONLP_SNMP_CONFIG_MAX_NAME_LENGTH]; char desc[ONLP_SNMP_CONFIG_MAX_DESC_LENGTH]; - int sensor_type; + onlp_snmp_sensor_type_t sensor_type; + uint32_t index; /* snmp table column */ union sensor_info { onlp_thermal_info_t ti; onlp_fan_info_t fi; onlp_psu_info_t pi; - onlp_led_info_t li; - uint32_t mi; /* this is for misc value */ } sensor_info; - bool info_valid; /* true if sensor_info is valid */ - uint64_t last_update_time; /* last time called */ + /* for snmp table maintenance: + * table row is added when previously invalid sensor is now valid, + * table row is deleted when previously valid sensor is now invalid */ + bool previously_valid; + bool now_valid; } onlp_snmp_sensor_t; /** - * NET SNMP Handler + * NET SNMP handler */ typedef void (*onlp_snmp_handler_fn)(netsnmp_request_info *req, uint32_t index, onlp_snmp_sensor_t *ss); +/** + * Update handler + */ +typedef int (*update_handler_fn)(onlp_snmp_sensor_t *ss); -/* index into sensor_handler_fn array */ -#define UPDATE_HANDLER_IDX 0 /* * Sensor Value Update period */ static uint32_t update_period__ = ONLP_SNMP_CONFIG_UPDATE_PERIOD; -#define SENSOR_NEEDS_UPDATE(_current_time, _sensor) \ - ((_current_time - _sensor->last_update_time) > update_period__ * 1000 * 1000) - -#define SENSOR_SET_VALIDITY(_rv, _current_time, _sensor) \ - do { \ - if (_rv < 0) { \ - _sensor->info_valid = false; \ - } else { \ - _sensor->info_valid = true; \ - _sensor->last_update_time = _current_time; \ - } \ - } while(0) - - +/* + * Sensor control block, one for each sensor type + */ typedef struct onlp_snmp_sensor_ctrl_s { char name[20]; - - /* Handle sensor OIDs */ - uint32_t handler_cnt; - onlp_snmp_handler_fn *handlers; - - uint32_t sensor_cnt; - /* - * Base index starts from 1, thus we add 1 - * Each sensor has a callback to get its value - */ - onlp_snmp_sensor_t *sensor_list[ONLP_SNMP_CONFIG_DEV_MAX_INDEX+1]; - + list_head_t sensors; } onlp_snmp_sensor_ctrl_t; static onlp_snmp_sensor_ctrl_t sensor_ctrls__[ONLP_SNMP_SENSOR_TYPE_MAX+1]; -static onlp_snmp_sensor_t* -get_sensor_reg__(onlp_snmp_sensor_ctrl_t *ss_type, int index) -{ - return ss_type->sensor_list[index]; -} static onlp_snmp_sensor_ctrl_t* get_sensor_ctrl__(int sensor_type) { - return sensor_ctrls__ + sensor_type; + return &sensor_ctrls__[sensor_type]; } +/* for accessing netsnmp table info */ +static netsnmp_tdata *sensor_table__[ONLP_SNMP_SENSOR_TYPE_MAX+1]; + + +static void * +delete_table_row__(netsnmp_tdata *table, uint32_t index) +{ + /* the search oid is the index */ + oid o[] = { index }; + netsnmp_tdata_row *row = netsnmp_tdata_row_get_byoid(table, + o, OID_LENGTH(o)); + void *data = netsnmp_tdata_remove_and_delete_row(table, row); + return data; +} + +/* returns 0 if row is successfully populated, -1 if not */ +static int +add_table_row__(netsnmp_tdata *table, onlp_snmp_sensor_t *ss) +{ + netsnmp_tdata_row *row = netsnmp_tdata_create_row(); + netsnmp_variable_list *varlist; + int rv; + + if (row == NULL) { + AIM_LOG_ERROR("failed to allocate table row"); + return -1; + } + + /* assign sensor info to row's 
private data pointer for later retrieval by + * the table's oid handlers */ + row->data = ss; + + varlist = netsnmp_tdata_row_add_index(row, ASN_INTEGER, &ss->index, + sizeof(ss->index)); + AIM_ASSERT(varlist != NULL); + rv = netsnmp_tdata_add_row(table, row); + AIM_ASSERT(rv == SNMPERR_SUCCESS); + + return 0; +} + +static int +table_handler__(netsnmp_mib_handler *handler, + netsnmp_handler_registration *reg_info, + netsnmp_agent_request_info *req_info, + netsnmp_request_info *requests, + onlp_snmp_handler_fn table_handler_fns[]) +{ + netsnmp_request_info *req; + + if (req_info->mode != MODE_GET && req_info->mode != MODE_GETNEXT) { + return SNMP_ERR_NOERROR; + } + + for (req = requests; req; req = req->next) { + onlp_snmp_sensor_t *ss = + (onlp_snmp_sensor_t *) netsnmp_tdata_extract_entry(req); + netsnmp_table_request_info *table_info = + netsnmp_extract_table_info(req); + if (ss == NULL) { + netsnmp_set_request_error(req_info, req, SNMP_NOSUCHINSTANCE); + continue; + } + + if (table_handler_fns[table_info->colnum]) { + (*table_handler_fns[table_info->colnum])(req, table_info->colnum, + ss); + } else { + netsnmp_set_request_error(req_info, req, SNMP_NOSUCHINSTANCE); + continue; + } + } + + if (handler->next && handler->next->access_method) { + return netsnmp_call_next_handler(handler, reg_info, req_info, requests); + } + + return SNMP_ERR_NOERROR; +} + + +typedef int (*table_handler_fn)(netsnmp_mib_handler *, + netsnmp_handler_registration *, + netsnmp_agent_request_info *, + netsnmp_request_info *); + +static netsnmp_tdata * +register_table__(char table_name[], oid table_oid[], size_t table_oid_len, + unsigned int min_col, unsigned int max_col, + table_handler_fn handler_fn) +{ + netsnmp_tdata *table = netsnmp_tdata_create_table(table_name, 0); + if (table == NULL) { + AIM_LOG_ERROR("failed to create table %s", table_name); + return NULL; + } + + netsnmp_table_registration_info *table_info = + SNMP_MALLOC_TYPEDEF(netsnmp_table_registration_info); + if (table_info == NULL) { + AIM_LOG_ERROR("failed to create table registration info for %s", + table_name); + return NULL; + } + + netsnmp_table_helper_add_indexes(table_info, ASN_INTEGER, 0); + + table_info->min_column = min_col; + table_info->max_column = max_col; + + netsnmp_handler_registration *reg = + netsnmp_create_handler_registration(table_name, handler_fn, + table_oid, table_oid_len, + HANDLER_CAN_RONLY); + if (reg == NULL) { + AIM_LOG_ERROR("failed to create handler registration for %s", + table_name); + return NULL; + } + + /* use lower priority to override default handler registered at + * DEFAULT_MIB_PRIORITY on this OID */ + reg->priority = DEFAULT_MIB_PRIORITY - 1; + + if (netsnmp_tdata_register(reg, table, table_info) != + MIB_REGISTERED_OK) { + AIM_LOG_ERROR("failed to register table %s", table_name); + return NULL; + } + + return table; +} /** * Thermal Sensor Handlers */ -static void -temp_update_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) +static int +temp_update_handler__(onlp_snmp_sensor_t *ss) { - uint64_t current = os_time_monotonic(); + onlp_thermal_info_t *ti = &ss->sensor_info.ti; + onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - if (SENSOR_NEEDS_UPDATE(current, ss)) { - onlp_thermal_info_t *ti = &ss->sensor_info.ti; - onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - - int rv = onlp_thermal_info_get(oid, ti); - SENSOR_SET_VALIDITY(rv, current, ss); - } - - /* else use the last update info */ + return onlp_thermal_info_get(oid, ti); } static void @@ -151,7 +251,7 @@ 
temp_index_handler__(netsnmp_request_info *req, { snmp_set_var_typed_integer(req->requestvb, ASN_INTEGER, - index); + ss->index); } static void @@ -178,7 +278,7 @@ temp_status_handler__(netsnmp_request_info *req, int value; onlp_thermal_info_t *ti = &ss->sensor_info.ti; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -203,7 +303,7 @@ temp_value_handler__(netsnmp_request_info *req, int value; onlp_thermal_info_t *ti = &ss->sensor_info.ti; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -220,37 +320,34 @@ temp_value_handler__(netsnmp_request_info *req, } static onlp_snmp_handler_fn temp_handler_fn__[] = { - temp_update_handler__, + NULL, temp_index_handler__, temp_devname_handler__, temp_status_handler__, temp_value_handler__, }; - - - +static int +temp_table_handler__(netsnmp_mib_handler *handler, + netsnmp_handler_registration *reg, + netsnmp_agent_request_info *agent_req, + netsnmp_request_info *requests) +{ + return table_handler__(handler, reg, agent_req, requests, + temp_handler_fn__); +} /** * Fan Sensor Handlers */ -static void -fan_update_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) +static int +fan_update_handler__(onlp_snmp_sensor_t *ss) { - uint64_t current = os_time_monotonic(); + onlp_fan_info_t *fi = &ss->sensor_info.fi; + onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - if (SENSOR_NEEDS_UPDATE(current, ss)) { - onlp_fan_info_t *fi = &ss->sensor_info.fi; - onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - - int rv = onlp_fan_info_get(oid, fi); - SENSOR_SET_VALIDITY(rv, current, ss); - } - - /* else use the last update info */ + return onlp_fan_info_get(oid, fi); } static void @@ -260,7 +357,7 @@ fan_index_handler__(netsnmp_request_info *req, { snmp_set_var_typed_integer(req->requestvb, ASN_INTEGER, - index); + ss->index); } static void @@ -287,7 +384,7 @@ fan_status_handler__(netsnmp_request_info *req, int value; onlp_fan_info_t *fi = &ss->sensor_info.fi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -312,7 +409,7 @@ fan_flow_type_handler__(netsnmp_request_info *req, int name_index; onlp_fan_info_t *fi = &ss->sensor_info.fi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -342,7 +439,7 @@ fan_rpm_handler__(netsnmp_request_info *req, int value; onlp_fan_info_t *fi = &ss->sensor_info.fi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -367,7 +464,7 @@ fan_pct_handler__(netsnmp_request_info *req, int value; onlp_fan_info_t *fi = &ss->sensor_info.fi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -390,7 +487,7 @@ fan_model_handler__(netsnmp_request_info *req, { onlp_fan_info_t *fi = &ss->sensor_info.fi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -417,7 +514,7 @@ fan_serial_handler__(netsnmp_request_info *req, { onlp_fan_info_t *fi = &ss->sensor_info.fi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -438,7 +535,7 @@ fan_serial_handler__(netsnmp_request_info *req, } static onlp_snmp_handler_fn fan_handler_fn__[] = { - fan_update_handler__, + NULL, fan_index_handler__, fan_devname_handler__, fan_status_handler__, @@ -449,27 +546,27 @@ static onlp_snmp_handler_fn fan_handler_fn__[] = { fan_serial_handler__ }; +static int +fan_table_handler__(netsnmp_mib_handler *handler, + netsnmp_handler_registration *reg, + netsnmp_agent_request_info *agent_req, + netsnmp_request_info *requests) +{ + return table_handler__(handler, reg, agent_req, requests, + fan_handler_fn__); +} /** * PSU Handlers */ -static void 
-psu_update_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) +static int +psu_update_handler__(onlp_snmp_sensor_t *ss) { - uint64_t current = os_time_monotonic(); + onlp_psu_info_t *pi = &ss->sensor_info.pi; + onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - if (SENSOR_NEEDS_UPDATE(current, ss)) { - onlp_psu_info_t *pi = &ss->sensor_info.pi; - onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - - int rv = onlp_psu_info_get(oid, pi); - SENSOR_SET_VALIDITY(rv, current, ss); - } - - /* else use the last update info */ + return onlp_psu_info_get(oid, pi); } static void @@ -479,7 +576,7 @@ psu_index_handler__(netsnmp_request_info *req, { snmp_set_var_typed_integer(req->requestvb, ASN_INTEGER, - index); + ss->index); } static void @@ -505,7 +602,7 @@ psu_status_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -538,7 +635,7 @@ psu_current_type_handler__(netsnmp_request_info *req, int name_index; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -573,7 +670,7 @@ psu_model_handler__(netsnmp_request_info *req, onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -601,7 +698,7 @@ psu_serial_handler__(netsnmp_request_info *req, onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -629,7 +726,7 @@ psu_vin_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -654,7 +751,7 @@ psu_vout_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -678,7 +775,7 @@ psu_iin_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -702,7 +799,7 @@ psu_iout_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -726,7 +823,7 @@ psu_pin_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -750,7 +847,7 @@ psu_pout_handler__(netsnmp_request_info *req, int value; onlp_psu_info_t *pi = &ss->sensor_info.pi; - if (!ss->info_valid) { + if (!ss->now_valid) { return; } @@ -768,7 +865,7 @@ psu_pout_handler__(netsnmp_request_info *req, static onlp_snmp_handler_fn psu_handler_fn__[] = { - psu_update_handler__, + NULL, psu_index_handler__, psu_devname_handler__, psu_status_handler__, @@ -783,449 +880,73 @@ static onlp_snmp_handler_fn psu_handler_fn__[] = { psu_serial_handler__ }; +static int +psu_table_handler__(netsnmp_mib_handler *handler, + netsnmp_handler_registration *reg, + netsnmp_agent_request_info *agent_req, + netsnmp_request_info *requests) +{ + return table_handler__(handler, reg, agent_req, requests, + psu_handler_fn__); +} -/** - * LED Handlers + +/* + * All update handlers */ -static void -led_update_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - uint64_t current = os_time_monotonic(); - - if (SENSOR_NEEDS_UPDATE(current, ss)) { - onlp_led_info_t *li = &ss->sensor_info.li; - onlp_oid_t oid = (onlp_oid_t) ss->sensor_id; - - int rv = onlp_led_info_get(oid, li); - SENSOR_SET_VALIDITY(rv, current, ss); - } - - /* 
else use the last update info */ -} - -static void -led_index_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - snmp_set_var_typed_integer(req->requestvb, - ASN_INTEGER, - index); -} - -static void -led_devname_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - char device_name[ONLP_SNMP_CONFIG_MAX_NAME_LENGTH+ONLP_SNMP_CONFIG_MAX_DESC_LENGTH]; - snprintf(device_name, sizeof(device_name), - "%s %s%s", "Led", ss->name, ss->desc); - - snmp_set_var_typed_value(req->requestvb, - ASN_OCTET_STR, - (u_char *) device_name, - strlen(device_name)); -} - -static void -led_status_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - int value; - onlp_led_info_t *li = &ss->sensor_info.li; - - if (!ss->info_valid) { - return; - } - - value = li->status; - - snmp_set_var_typed_value(req->requestvb, - ASN_GAUGE, - (u_char *) &value, - sizeof(value)); -} - -static void -led_value_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - int value; - onlp_led_info_t *li = &ss->sensor_info.li; - - if (!ss->info_valid) { - return; - } - - if (!(li->status & ONLP_LED_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - value = li->mode; - - snmp_set_var_typed_value(req->requestvb, - ASN_GAUGE, - (u_char *) &value, - sizeof(value)); -} - -static onlp_snmp_handler_fn led_handler_fn__[] = { - led_update_handler__, - led_index_handler__, - led_devname_handler__, - led_status_handler__, - led_value_handler__, -}; - - -/** - * Misc Handlers. - * Placeholder for unknown types - */ -static void -misc_update_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - ss->info_valid = 1; -} - -static void -misc_index_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - snmp_set_var_typed_integer(req->requestvb, - ASN_INTEGER, - index); -} - -static void -misc_devname_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - char device_name[ONLP_SNMP_CONFIG_MAX_NAME_LENGTH+ONLP_SNMP_CONFIG_MAX_DESC_LENGTH]; - snprintf(device_name, sizeof(device_name), - "%s %s%s", "Misc", ss->name, ss->desc); - - snmp_set_var_typed_value(req->requestvb, - ASN_OCTET_STR, - (u_char *) device_name, - strlen(device_name)); -} - -static void -misc_status_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - int value = 0; - snmp_set_var_typed_value(req->requestvb, - ASN_GAUGE, - (u_char *) &value, - sizeof(value)); -} - -static void -misc_value_handler__(netsnmp_request_info *req, - uint32_t index, - onlp_snmp_sensor_t *ss) -{ - int value = 0; - snmp_set_var_typed_value(req->requestvb, - ASN_GAUGE, - (u_char *) &value, - sizeof(value)); -} - -static onlp_snmp_handler_fn misc_handler_fn__[] = { - misc_update_handler__, - misc_index_handler__, - misc_devname_handler__, - misc_status_handler__, - misc_value_handler__, +static update_handler_fn all_update_handler_fns__[] = { + NULL, + temp_update_handler__, + fan_update_handler__, + psu_update_handler__, }; /* - * OID HANDLER for all sensor types - * This is registered to NETSNMP using agentX + * Add a sensor to the appropriate type-specific control structure. 
*/ -static int -onlp_snmp_sensor_handler__(netsnmp_mib_handler *handler, - netsnmp_handler_registration *reg, - netsnmp_agent_request_info *agent_req, - netsnmp_request_info *req) +static void +add_sensor__(int sensor_type, onlp_snmp_sensor_t *new_sensor) { - - int ret = SNMP_ERR_NOERROR; - onlp_snmp_sensor_t *sensor; - - int sensor_type = req->requestvb->name[req->requestvb->name_length - OID_SENSOR_TYPE_INDEX]; - onlp_snmp_sensor_ctrl_t *ss_type = get_sensor_ctrl__(sensor_type); - - /* This is for index / device / value handler */ - int column = req->requestvb->name[req->requestvb->name_length - OID_SENSOR_COL_INDEX]; - - /* This is device index */ - int index = req->requestvb->name[req->requestvb->name_length - OID_SENSOR_DEV_INDEX]; - - if (agent_req->mode != MODE_GET && agent_req->mode != MODE_GETNEXT) { - /* If happen, just return */ - return ret; - } - - if(!onlp_snmp_sensor_type_valid(sensor_type)) { - /* If happen, just return */ - return ret; - } - - if (column >= ss_type->handler_cnt) { - /* If happen, just return */ - return ret; - } - - /* index start from 1 and equal to sensor_cnt */ - if (index < ONLP_SNMP_CONFIG_DEV_BASE_INDEX || index > ss_type->sensor_cnt) { - /* If happen, just return */ - return ret; - } - - sensor = get_sensor_reg__(ss_type, index); - if (!sensor) { - /* If happen, just return */ - return ret; - } - - if (!ss_type->handlers[UPDATE_HANDLER_IDX]) { - snmp_log(LOG_ALERT, - "No update handler for type=%d column=%d, index=%d", - sensor_type, column, index); - return ret; - } - ss_type->handlers[UPDATE_HANDLER_IDX](req, index, sensor); - - /* We select index/devname/value to handle for each device */ - if (ss_type->handlers[column]) { - (*ss_type->handlers[column])(req, index, sensor); - } - - return ret; -} - - - - -/* Register OID handler for a sensor type */ -static int -reg_snmp_sensor_helper__(int sensor_type, - oid *reg_oid, - size_t oid_len, - int dev_idx) -{ - int ret = MIB_REGISTRATION_FAILED; - - Netsnmp_Node_Handler *handler = onlp_snmp_sensor_handler__; - onlp_snmp_sensor_ctrl_t *ss_type = get_sensor_ctrl__(sensor_type); - uint32_t col_cnt = ss_type->handler_cnt; - char *table_name = ss_type->name; - - /* Use this to increase the column index in oid */ - u_long *oid_col = ®_oid[oid_len - OID_SENSOR_COL_INDEX]; - - /* Use this to increase the dev index of oid */ - reg_oid[oid_len - OID_SENSOR_DEV_INDEX] = dev_idx; - - snmp_log(LOG_DEBUG, "oid registrations: %s for dev_idx=%d", - table_name, dev_idx); - - /* - * Caller makes sure that this loop is run - * since *oid_col starts as 1st col - */ - for (; *oid_col < col_cnt; (*oid_col)++) { - netsnmp_handler_registration *reg; - - if (!ss_type->handlers[*oid_col]) - continue; - - reg = netsnmp_create_handler_registration(table_name, - handler, - reg_oid, - oid_len, - HANDLER_CAN_RONLY); - - /* Ofad enables verbose/trace to see this */ - snmp_log(LOG_INFO, "registering handler for %s column %ld, index %d", - table_name, *oid_col, dev_idx); - - /* If reg is null, this returns error */ - ret = netsnmp_register_instance(reg); - if(ret) { - break; - } - } - - return ret; - -} - -/* - * Register a sensor - * Caller must make sure that 1 sensor registered only once - * If it calls this twice, it will get 2 oid entries - * for the same sensor - * - * We want to keep this snmp code as simple as possible - */ -static int -onlp_snmp_sensor_reg__(int sensor_type, - onlp_snmp_sensor_t *sensor) -{ - oid otemp[] = { ONLP_SNMP_SENSOR_TEMP_OID }; - oid ofan[] = { ONLP_SNMP_SENSOR_FAN_OID }; - oid opsu[] = { 
ONLP_SNMP_SENSOR_PSU_OID }; - oid oled[] = { ONLP_SNMP_SENSOR_LED_OID }; - oid omisc[] = { ONLP_SNMP_SENSOR_MISC_OID }; - oid *o; - u_long o_len; - int ret = MIB_REGISTRATION_FAILED; - - onlp_snmp_sensor_ctrl_t *ss_type = get_sensor_ctrl__(sensor_type); + onlp_snmp_sensor_ctrl_t *ctrl = get_sensor_ctrl__(sensor_type); + list_links_t *curr; + onlp_snmp_sensor_t *ss; /* We start with Base 1 */ AIM_TRUE_OR_DIE(onlp_snmp_sensor_type_valid(sensor_type)); - AIM_TRUE_OR_DIE(sensor); - AIM_TRUE_OR_DIE(ss_type); + AIM_TRUE_OR_DIE(new_sensor); + AIM_TRUE_OR_DIE(ctrl); - switch(sensor_type) - { - case ONLP_SNMP_SENSOR_TYPE_TEMP: - o = otemp; - o_len = OID_LENGTH(otemp); - - /* Not init yet, init oid table */ - if (!ss_type->handlers) { - ss_type->handler_cnt = sizeof(temp_handler_fn__) / sizeof(temp_handler_fn__[0]); - ss_type->handlers = temp_handler_fn__; - snprintf(ss_type->name, sizeof(ss_type->name), "%s", "temp_table"); - } - break; - - case ONLP_SNMP_SENSOR_TYPE_FAN: - o = ofan; - o_len = OID_LENGTH(ofan); - - /* Not init yet, init oid table */ - if (!ss_type->handlers) { - ss_type->handler_cnt = sizeof(fan_handler_fn__) / sizeof(fan_handler_fn__[0]); - ss_type->handlers = fan_handler_fn__; - snprintf(ss_type->name, sizeof(ss_type->name), "%s", "fan_table"); - } - break; - - case ONLP_SNMP_SENSOR_TYPE_PSU: - o = opsu; - o_len = OID_LENGTH(opsu); - - /* Not init yet, init oid table */ - if (!ss_type->handlers) { - ss_type->handler_cnt = sizeof(psu_handler_fn__) / sizeof(psu_handler_fn__[0]); - ss_type->handlers = psu_handler_fn__; - snprintf(ss_type->name, sizeof(ss_type->name), "%s", "psu_table"); - - } - break; - - case ONLP_SNMP_SENSOR_TYPE_LED: - o = oled; - o_len = OID_LENGTH(oled); - - /* Not init yet, init oid table */ - if (!ss_type->handlers) { - ss_type->handler_cnt = sizeof(led_handler_fn__) / sizeof(led_handler_fn__[0]); - ss_type->handlers = led_handler_fn__; - snprintf(ss_type->name, sizeof(ss_type->name), "%s", "led_table"); - } - break; - - case ONLP_SNMP_SENSOR_TYPE_MISC: - o = omisc; - o_len = OID_LENGTH(omisc); - - /* Not init yet, init oid table */ - if (!ss_type->handlers) { - ss_type->handler_cnt = sizeof(misc_handler_fn__) / sizeof(misc_handler_fn__[0]); - ss_type->handlers = misc_handler_fn__; - snprintf(ss_type->name, sizeof(ss_type->name), "%s", "misc_table"); - } - break; - - default: - AIM_DIE("Invalid sensor value."); - break; + /* check if the sensor already exists */ + LIST_FOREACH(&ctrl->sensors, curr) { + ss = container_of(curr, links, onlp_snmp_sensor_t); + if (new_sensor->sensor_id == ss->sensor_id) { + /* no need to add sensor */ + AIM_LOG_TRACE("skipping existing sensor %08x", ss->sensor_id); + ss->now_valid = true; + return; + } } - /* - * sensor_cnt original is 0 - * When sensor_cnt == ONLP_SNMP_CONFIG_DEV_MAX_INDEX - * We stop adding - */ - if (ss_type->sensor_cnt < ONLP_SNMP_CONFIG_DEV_MAX_INDEX) { - /* Device index equal to ss_type->sensor_cnt */ - ss_type->sensor_cnt++; + ss = AIM_MALLOC(sizeof(onlp_snmp_sensor_t)); + AIM_TRUE_OR_DIE(ss); + AIM_MEMCPY(ss, new_sensor, sizeof(*new_sensor)); + ss->sensor_type = sensor_type; + ss->now_valid = true; - /* This entry must be null */ - AIM_TRUE_OR_DIE(!ss_type->sensor_list[ss_type->sensor_cnt]); - - snmp_log(LOG_INFO, "init type=%d, index=%d, id=%d", - sensor_type, ss_type->sensor_cnt, sensor->sensor_id); - - onlp_snmp_sensor_t *ss = AIM_MALLOC(sizeof(onlp_snmp_sensor_t)); - AIM_TRUE_OR_DIE(ss); - AIM_MEMCPY(ss, sensor, sizeof(*sensor)); - ss->sensor_type = sensor_type; - ss->info_valid = 0; - 
ss->last_update_time = 0; - - /* Assign sensor to the list */ - ss_type->sensor_list[ss_type->sensor_cnt] = ss; - - } else { - snmp_log(LOG_ALERT, - "Failed to register sensor type=%d id=%d, resource limited", - sensor_type, sensor->sensor_id); - return ret; - } - - AIM_TRUE_OR_DIE(o_len == ONLP_SNMP_SENSOR_OID_LENGTH, - "invalid oid length=%d", o_len); - - ret = reg_snmp_sensor_helper__(sensor_type, o, o_len, - ss_type->sensor_cnt); - if (ret) { - snmp_log(LOG_ALERT, - "Failed to register sensor type=%d id=%d, MIB_ERROR=%d", - sensor_type, sensor->sensor_id, ret); - } - - return ret; + /* finally add sensor */ + list_push(&ctrl->sensors, &ss->links); } static int -onlp_snmp_sensor_register_oid__(onlp_oid_t oid, void* cookie) +collect_sensors__(onlp_oid_t oid, void* cookie) { onlp_oid_hdr_t hdr; onlp_snmp_sensor_t s; onlp_oid_hdr_get(oid, &hdr); + AIM_LOG_MSG("collect: %{onlp_oid}", oid); AIM_MEMSET(&s, 0x0, sizeof(onlp_snmp_sensor_t)); switch(ONLP_OID_TYPE_GET(oid)) @@ -1233,22 +954,20 @@ onlp_snmp_sensor_register_oid__(onlp_oid_t oid, void* cookie) case ONLP_OID_TYPE_THERMAL: #if ONLP_SNMP_CONFIG_INCLUDE_THERMALS == 1 s.sensor_id = oid; + s.index = ONLP_OID_ID_GET(oid); sprintf(s.name, "%d - ", ONLP_OID_ID_GET(oid)); aim_strlcpy(s.desc, hdr.description, sizeof(s.desc)); - if(onlp_snmp_sensor_reg__(ONLP_SNMP_SENSOR_TYPE_TEMP, &s) < 0) { - AIM_LOG_ERROR("onlp_snmp_sensor_reg for OID 0x%x failed.", oid); - } + add_sensor__(ONLP_SNMP_SENSOR_TYPE_TEMP, &s); #endif break; case ONLP_OID_TYPE_FAN: #if ONLP_SNMP_CONFIG_INCLUDE_FANS == 1 s.sensor_id = oid; + s.index = ONLP_OID_ID_GET(oid); sprintf(s.name, "%d - ", ONLP_OID_ID_GET(oid)); aim_strlcpy(s.desc, hdr.description, sizeof(s.desc)); - if(onlp_snmp_sensor_reg__(ONLP_SNMP_SENSOR_TYPE_FAN, &s) < 0) { - AIM_LOG_ERROR("onlp_snmp_sensor_reg for OID 0x%x failed.", oid); - } + add_sensor__(ONLP_SNMP_SENSOR_TYPE_FAN, &s); #endif break; @@ -1256,18 +975,17 @@ onlp_snmp_sensor_register_oid__(onlp_oid_t oid, void* cookie) #if ONLP_SNMP_CONFIG_INCLUDE_PSUS == 1 /* Register Sensors for VIN,VOUT,IIN,IOUT,PIN,POUT */ s.sensor_id = oid; + s.index = ONLP_OID_ID_GET(oid); sprintf(s.name, "%d - ", ONLP_OID_ID_GET(oid)); aim_strlcpy(s.desc, hdr.description, sizeof(s.desc)); - if(onlp_snmp_sensor_reg__(ONLP_SNMP_SENSOR_TYPE_PSU, &s) < 0) { - AIM_LOG_ERROR("onlp_snmp_sensor_reg for OID 0x%x failed.", oid); - } + add_sensor__(ONLP_SNMP_SENSOR_TYPE_PSU, &s); #endif break; default: - AIM_LOG_INFO("snmp type %s id %d unsupported", - onlp_oid_type_name(ONLP_OID_TYPE_GET(oid)), - ONLP_OID_ID_GET(oid)); + AIM_LOG_VERBOSE("snmp type %s id %d unsupported", + onlp_oid_type_name(ONLP_OID_TYPE_GET(oid)), + ONLP_OID_ID_GET(oid)); break; } @@ -1275,109 +993,152 @@ onlp_snmp_sensor_register_oid__(onlp_oid_t oid, void* cookie) } -/** - * Register Sensors - */ -void onlp_snmp_sensors_init(void) +static int +update_all_tables__(void) { - int rv; - AIM_LOG_MSG("%s", __FUNCTION__); + int i; + onlp_snmp_sensor_ctrl_t *ctrl; + list_links_t *curr; + list_links_t *next; + onlp_snmp_sensor_t *ss; - /* Register all sensor OIDs */ - rv = onlp_oid_iterate(ONLP_OID_SYS, 0, onlp_snmp_sensor_register_oid__, NULL); - if (rv != ONLP_STATUS_OK) { - AIM_LOG_ERROR("%s error %d", __FUNCTION__, rv); - } else { - AIM_LOG_MSG("%s succeeded.", __FUNCTION__); + /* for each table: save old state */ + for (i = ONLP_SNMP_SENSOR_TYPE_TEMP; i <= ONLP_SNMP_SENSOR_TYPE_MAX; i++) { + ctrl = get_sensor_ctrl__(i); + LIST_FOREACH(&ctrl->sensors, curr) { + ss = container_of(curr, links, onlp_snmp_sensor_t); + 
ss->previously_valid = ss->now_valid; + ss->now_valid = false; + } } + + /* discover new sensors for all tables */ + onlp_oid_iterate(ONLP_OID_SYS, 0, collect_sensors__, NULL); + + /* for each table: update all sensor info */ + for (i = ONLP_SNMP_SENSOR_TYPE_TEMP; i <= ONLP_SNMP_SENSOR_TYPE_MAX; i++) { + ctrl = get_sensor_ctrl__(i); + LIST_FOREACH(&ctrl->sensors, curr) { + ss = container_of(curr, links, onlp_snmp_sensor_t); + if (ss->now_valid) { + AIM_LOG_TRACE("update sensor %s%s", ss->name, ss->desc); + /* invoke update handler */ + if ((*all_update_handler_fns__[i])(ss) != ONLP_STATUS_OK) { + AIM_LOG_ERROR("failed to update %s%s", ss->name, ss->desc); + ss->now_valid = false; + } + } + } + } + + /* for each table: add or delete rows as necessary */ + for (i = ONLP_SNMP_SENSOR_TYPE_TEMP; i <= ONLP_SNMP_SENSOR_TYPE_MAX; i++) { + ctrl = get_sensor_ctrl__(i); + LIST_FOREACH_SAFE(&ctrl->sensors, curr, next) { + ss = container_of(curr, links, onlp_snmp_sensor_t); + if (!ss->previously_valid && ss->now_valid) { + snmp_log(LOG_INFO, "Adding %s%s, id=%08x", + ss->name, ss->desc, ss->sensor_id); + AIM_LOG_VERBOSE("add row %d to %s for %s%s", + ss->index, ctrl->name, ss->name, ss->desc); + add_table_row__(sensor_table__[i], ss); + } else if (ss->previously_valid && !ss->now_valid) { + snmp_log(LOG_INFO, "Deleting %s%s, id=%08x", + ss->name, ss->desc, ss->sensor_id); + AIM_LOG_VERBOSE("delete row %d from %s for %s%s", + ss->index, ctrl->name, ss->name, ss->desc); + delete_table_row__(sensor_table__[i], ss->index); + list_remove(curr); + aim_free(ss); + } + } + } + + return 0; } + +typedef struct table_cfg_s { + onlp_snmp_sensor_type_t type; + char name[32]; + unsigned int min_col; + unsigned int max_col; + table_handler_fn handler; +} table_cfg_t; + static void -platform_string_register(int index, const char* desc, char* value) +init_all_tables__(void) { - oid tree[] = { 1, 3, 6, 1, 4, 1, 42623, 1, 1, 1, 1, 1}; - tree[11] = index; + int i; - if(!value || !value[0]) { - return; + /* initialize control blocks */ + for (i = ONLP_SNMP_SENSOR_TYPE_TEMP; i <= ONLP_SNMP_SENSOR_TYPE_MAX; i++) { + onlp_snmp_sensor_ctrl_t *ctrl = get_sensor_ctrl__(i); + aim_strlcpy(ctrl->name, onlp_snmp_sensor_type_name(i), + sizeof(ctrl->name)); + list_init(&ctrl->sensors); } - char* s = aim_strdup(value); + /* register oids with netsnmp */ + table_cfg_t cfgs[] = { + { + .type = ONLP_SNMP_SENSOR_TYPE_TEMP, + .name = "onlTempTable", + .min_col = 1, + .max_col = AIM_ARRAYSIZE(temp_handler_fn__)-1, + .handler = temp_table_handler__, + }, + { + .type = ONLP_SNMP_SENSOR_TYPE_FAN, + .name = "onlFanTable", + .min_col = 1, + .max_col = AIM_ARRAYSIZE(fan_handler_fn__)-1, + .handler = fan_table_handler__, + }, + { + .type = ONLP_SNMP_SENSOR_TYPE_PSU, + .name = "onlPsuTable", + .min_col = 1, + .max_col = AIM_ARRAYSIZE(psu_handler_fn__)-1, + .handler = psu_table_handler__, + }, + }; - netsnmp_handler_registration *reg = - netsnmp_create_handler_registration( - desc, NULL, - tree, OID_LENGTH(tree), - HANDLER_CAN_RONLY); - netsnmp_watcher_info *winfo = - netsnmp_create_watcher_info( - s, strlen(s), - ASN_OCTET_STR, WATCHER_FIXED_SIZE); - netsnmp_register_watched_scalar( reg, winfo ); -} - -void -platform_int_register(int index, char* desc, int value) -{ - oid tree[] = { 1, 3, 6, 1, 4, 1, 42623, 1, 1, 1, 1, 1}; - tree[11] = index; - - int* v = aim_zmalloc(sizeof(value)); - *v = value; - - netsnmp_register_int_instance(desc, - tree, - OID_LENGTH(tree), - v, NULL); -} - - -void -onlp_snmp_platform_init(void) -{ - /** - * This is the base 
of the platform:general:system tree - */ - onlp_sys_info_t si; - if(onlp_sys_info_get(&si) >= 0) { - -#define REGISTER_STR(_index, _field) \ - do { \ - platform_string_register(_index, #_field, (char*)si.onie_info._field); \ - } while(0) - -#define REGISTER_INT(_index, _field) \ - do { \ - platform_int_register(_index, #_field, si.onie_info._field); \ - } while(0) - - REGISTER_STR(1, product_name); - REGISTER_STR(2, part_number); - REGISTER_STR(3, serial_number); - char* mstring = aim_fstrdup("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", - si.onie_info.mac[0], si.onie_info.mac[1], si.onie_info.mac[2], - si.onie_info.mac[3], si.onie_info.mac[4], si.onie_info.mac[5]); - platform_string_register(4, "mac", mstring); - aim_free(mstring); - - REGISTER_INT(5, mac_range); - REGISTER_STR(6, manufacturer); - REGISTER_STR(7, manufacture_date); - REGISTER_STR(8, vendor); - REGISTER_STR(9, platform_name); - REGISTER_INT(10, device_version); - REGISTER_STR(11, label_revision); - REGISTER_STR(12, country_code); - REGISTER_STR(13, diag_version); - REGISTER_STR(14, service_tag); - REGISTER_STR(15, onie_version); + for (i = 0; i < AIM_ARRAYSIZE(cfgs); i++) { + table_cfg_t *cfg = &cfgs[i]; + oid o[] = { ONLP_SNMP_SENSOR_OID, cfg->type }; + sensor_table__[cfg->type] = + register_table__(cfg->name, o, OID_LENGTH(o), + cfg->min_col, cfg->max_col, cfg->handler); + AIM_TRUE_OR_DIE(sensor_table__[cfg->type]); } } + +/* helper function to be registered with snmp_alarm_register; + * table updates happen within alarm handler, thus avoiding crashes + * when table is changed while handling snmp requests */ +static void +periodic_update__(unsigned int reg, void *clientarg) +{ + update_all_tables__(); +} + +/* populates initial stats and sets up periodic timer */ +static void +setup_alarm__(void) +{ + /* initial stats population */ + update_all_tables__(); + /* registration of periodic timer */ + snmp_alarm_register(update_period__, SA_REPEAT, periodic_update__, NULL); +} + + int onlp_snmp_sensors_client(int enable, void* cookie) { - onlp_snmp_sensors_init(); - onlp_snmp_platform_init(); + init_all_tables__(); + setup_alarm__(); return 0; } From 51511aa6214c70bc9e97b7368027387c8a3cf254 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 12 Dec 2016 20:12:39 +0000 Subject: [PATCH 148/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 935e30c6..87021ef3 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 935e30c6c8fcd48245ec9c2bbe75da563bf59b4c +Subproject commit 87021ef339b28f8556b9013722752ca0520b17f9 From d6c3ebb61f25c17f15d735f26be961234e04c84a Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 12 Dec 2016 20:18:51 +0000 Subject: [PATCH 149/255] Add sysstat packge. --- builds/any/rootfs/wheezy/common/all-base-packages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/builds/any/rootfs/wheezy/common/all-base-packages.yml b/builds/any/rootfs/wheezy/common/all-base-packages.yml index 3f74f183..eec465a1 100644 --- a/builds/any/rootfs/wheezy/common/all-base-packages.yml +++ b/builds/any/rootfs/wheezy/common/all-base-packages.yml @@ -76,3 +76,4 @@ - gdb - tcpdump - strace +- sysstat From 525e518716096f4a32fccc1b71b45f9e311c0f70 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 12 Dec 2016 22:01:27 +0000 Subject: [PATCH 150/255] Add new OCP-ONL-RESOURCE-MIB Used to define custom host resource objects. 
--- docs/mibs/OCP-ONL-RESOURCE-MIB.txt | 48 +++++++++++++++++++ packages/base/any/onlp-snmpd/bin/onl-snmpwalk | 2 +- 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 docs/mibs/OCP-ONL-RESOURCE-MIB.txt diff --git a/docs/mibs/OCP-ONL-RESOURCE-MIB.txt b/docs/mibs/OCP-ONL-RESOURCE-MIB.txt new file mode 100644 index 00000000..21da5843 --- /dev/null +++ b/docs/mibs/OCP-ONL-RESOURCE-MIB.txt @@ -0,0 +1,48 @@ +-- ---------------------------------------------------------------------- +-- Open Network Linux Resource MIB +-- ---------------------------------------------------------------------- + +OCP-ONL-RESOURCE-MIB DEFINITIONS ::= BEGIN + +IMPORTS + OBJECT-TYPE, MODULE-IDENTITY, Integer32, enterprises, Gauge32 FROM SNMPv2-SMI + DisplayString FROM SNMPv2-TC + ocp FROM OCP-MIB + OpenNetworkLinux FROM OCP-ONL-MIB; + +onlResource MODULE-IDENTITY + LAST-UPDATED "201612120000Z" + ORGANIZATION "Open Compute Project" + CONTACT-INFO "http://www.opencompute.org" + DESCRIPTION + "This MIB describes objects for host resources used in Open Network Linux." + REVISION "201612120000Z" + DESCRIPTION "Initial revision" + ::= { OpenNetworkLinux 3 } + + +-- +-- Basic Resource Objects +-- +-- These are simplified and useful version of common resource measurements. +-- + +Basic OBJECT IDENTIFIER ::= { onlResource 1 } + +CpuAllPercentUtilization OBJECT-TYPE + SYNTAX Gauge32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The average CPU utilization (in percent). Provided by mpstat." + ::= { Basic 1 } + +CpuAllPercentIdle OBJECT-TYPE + SYNTAX Gauge32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The average CPU idle time (in percent). Provided by mpstat." + ::= { Basic 2 } + +END diff --git a/packages/base/any/onlp-snmpd/bin/onl-snmpwalk b/packages/base/any/onlp-snmpd/bin/onl-snmpwalk index c88e606f..6e8b0026 100755 --- a/packages/base/any/onlp-snmpd/bin/onl-snmpwalk +++ b/packages/base/any/onlp-snmpd/bin/onl-snmpwalk @@ -2,7 +2,7 @@ ############################################################ SERVER=localhost ONL_TREE=.1.3.6.1.4.1.42623.1 -MIBS=OCP-ONL-PLATFORM-MIB:OCP-ONL-SENSOR-MIB +MIBS=OCP-ONL-PLATFORM-MIB:OCP-ONL-SENSOR-MIB:OCP-ONL-RESOURCE-MIB MIB_ARGS="-m $MIBS" COMMUNITY=public ECHO= From dd32721354412dd9ed4b2a4cbbe74c849153f3be Mon Sep 17 00:00:00 2001 From: Steven Noble Date: Tue, 13 Dec 2016 09:29:16 -0800 Subject: [PATCH 151/255] Remove ORC from supported --- docs/SupportedHardware.md | 70 +++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/docs/SupportedHardware.md b/docs/SupportedHardware.md index d8fa0b41..07a26204 100644 --- a/docs/SupportedHardware.md +++ b/docs/SupportedHardware.md @@ -9,12 +9,12 @@ Quanta - + - - - - + + + +
Device Ports CPU Forwarding ONL Certified In Lab ORC OF-DPA OpenNSL SAI
Device Ports CPU Forwarding ONL Certified In Lab OF-DPA OpenNSL SAI
QuantaMesh T1048-LB9 48x1G + 4x10G FreeScale P2020 Broadcom BCM56534 (Firebolt3) Yes Yes Yes No No No
QuantaMesh T3048-LY2 48x10G + 4x40G FreeScale P2020 Broadcom BCM56846 (Trident+) Yes Yes Yes Yes No No
QuantaMesh T3048-LY8 48x10G + 6x40G Intel Rangeley C2758 x86 Broadcom BCM56854 (Trident2) Yes* No No No No No
QuantaMesh T5032-LY6 32x40G Intel Rangeley C2758 x86 Broadcom BCM56850 (Trident2) Yes* No No No No No
QuantaMesh T1048-LB9 48x1G + 4x10G FreeScale P2020 Broadcom BCM56534 (Firebolt3) Yes Yes No No No
QuantaMesh T3048-LY2 48x10G + 4x40G FreeScale P2020 Broadcom BCM56846 (Trident+) Yes Yes Yes No No
QuantaMesh T3048-LY8 48x10G + 6x40G Intel Rangeley C2758 x86 Broadcom BCM56854 (Trident2) Yes* No No No No
QuantaMesh T5032-LY6 32x40G Intel Rangeley C2758 x86 Broadcom BCM56850 (Trident2) Yes* No No No No
@@ -23,24 +23,24 @@ Accton/Edge-Core - + - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + +
Device Ports CPU Forwarding ONL Certified In Lab ORC OF-DPA OpenNSL SAI
Device Ports CPU Forwarding ONL Certified In Lab OF-DPA OpenNSL SAI
Accton AS4600-54T 48x1G + 4x10G FreeScale P2020 Broadcom BCM56540 (Apollo2) Yes Yes Yes Yes*** Yes*** No
Accton AS4610-54P 48x1G + 4x10G + 2x20G Dual-core ARM Cortex A9 1GHz Broadcom BCM56340 (Helix4) Yes Yes No No No No
-Accton AS5600-52X 48x10G + 4x40G FreeScale P2020 Broadcom BCM56846 (Trident+) Yes Yes Yes No No No
-Accton AS5610-52X 48x10G + 4x40G FreeScale P2020 Broadcom BCM56846 (Trident+) Yes Yes Yes No No No
-Accton AS5710-54X 48x10G + 6x40G FreeScale P2041 Broadcom BCM56854 (Trident2) Yes Yes Yes Yes*** Yes*** No
-Accton AS6700-32X 32x40G FreeScale P2041 Broadcom BCM56850 (Trident2) Yes Yes Yes No No No
-Accton AS5512-54X 48x10G + 6x40G Intel Rangeley C2538 x86 MediaTek/Nephos MT3258 No No No No No No
-Accton AS5712-54X 48x10G + 6x40G Intel Rangeley C2538 x86 Broadcom BCM56854 (Trident2) Yes Yes Yes Yes*** Yes*** No
-Accton AS6712-32X 32x40G Intel Rangeley C2538 x86 Broadcom BCM56850 (Trident2) Yes Yes Yes Yes*** Yes*** No
-Accton AS5812-54T 48x10G + 6x40G Intel Rangeley C2538 x86 Broadcom BCM56864 (Trident2+) Yes Yes No No No No
-Accton AS5812-54X 48x10G + 6x40G Intel Rangeley C2538 x86 Broadcom BCM56864 (Trident2+) Yes Yes No Yes*** Yes*** No
-Accton AS6812-32X 32x40G Intel Rangeley C2538 x86 Broadcom BCM56864 (Trident2+) Yes Yes No Yes*** Yes*** No
-Accton AS7712-32X 32x100G Intel Rangeley C2538 x86 Broadcom BCM56960 (Tomahawk) Yes Yes Yes Yes*** Yes*** No
-Accton AS7716-32X 32x100G Intel Xeon D-1518 x86 Broadcom BCM56960 (Tomahawk) Yes Yes No Yes*** Yes*** No
-Accton Wedge-16X 16x40G Intel Rangeley C2550 x86 Broadcom BCM56864 (Trident2+) Work In Progress** Yes No No Yes No
-Accton (FB) Wedge 100 32x100G Intel Bay Trail E3845 x86 Broadcom BCM56960 (Tomahawk) Work In Progress** Yes No No Yes No
+Accton AS4600-54T 48x1G + 4x10G FreeScale P2020 Broadcom BCM56540 (Apollo2) Yes Yes Yes*** Yes*** No
+Accton AS4610-54P 48x1G + 4x10G + 2x20G Dual-core ARM Cortex A9 1GHz Broadcom BCM56340 (Helix4) Yes Yes No No No
+Accton AS5600-52X 48x10G + 4x40G FreeScale P2020 Broadcom BCM56846 (Trident+) Yes Yes No No No
+Accton AS5610-52X 48x10G + 4x40G FreeScale P2020 Broadcom BCM56846 (Trident+) Yes Yes No No No
+Accton AS5710-54X 48x10G + 6x40G FreeScale P2041 Broadcom BCM56854 (Trident2) Yes Yes Yes*** Yes*** No
+Accton AS6700-32X 32x40G FreeScale P2041 Broadcom BCM56850 (Trident2) Yes Yes No No No
+Accton AS5512-54X 48x10G + 6x40G Intel Rangeley C2538 x86 MediaTek/Nephos MT3258 No No No No No
+Accton AS5712-54X 48x10G + 6x40G Intel Rangeley C2538 x86 Broadcom BCM56854 (Trident2) Yes Yes Yes*** Yes*** No
+Accton AS6712-32X 32x40G Intel Rangeley C2538 x86 Broadcom BCM56850 (Trident2) Yes Yes Yes*** Yes*** No
+Accton AS5812-54T 48x10G + 6x40G Intel Rangeley C2538 x86 Broadcom BCM56864 (Trident2+) Yes Yes No No No
+Accton AS5812-54X 48x10G + 6x40G Intel Rangeley C2538 x86 Broadcom BCM56864 (Trident2+) Yes Yes Yes*** Yes*** No
+Accton AS6812-32X 32x40G Intel Rangeley C2538 x86 Broadcom BCM56864 (Trident2+) Yes Yes Yes*** Yes*** No
+Accton AS7712-32X 32x100G Intel Rangeley C2538 x86 Broadcom BCM56960 (Tomahawk) Yes Yes Yes*** Yes*** No
+Accton AS7716-32X 32x100G Intel Xeon D-1518 x86 Broadcom BCM56960 (Tomahawk) Yes Yes Yes*** Yes*** No
+Accton Wedge-16X 16x40G Intel Rangeley C2550 x86 Broadcom BCM56864 (Trident2+) Work In Progress** Yes No Yes No
+Accton (FB) Wedge 100 32x100G Intel Bay Trail E3845 x86 Broadcom BCM56960 (Tomahawk) Work In Progress** Yes No Yes No
 DNI/Agema
@@ -48,9 +48,9 @@ DNI/Agema
-Device Ports CPU Forwarding ONL Certified In Lab ORC OF-DPA OpenNSL SAI
+Device Ports CPU Forwarding ONL Certified In Lab OF-DPA OpenNSL SAI
-AG-7448CU 48x10G + 4x40G FreeScale P2020 Broadcom BCM56845 (Trident) Yes Yes Yes No No No
+AG-7448CU 48x10G + 4x40G FreeScale P2020 Broadcom BCM56845 (Trident) Yes Yes No No No
 Dell
@@ -58,12 +58,12 @@ Dell
-Device Ports CPU Forwarding ONL Certified In Lab ORC OF-DPA OpenNSL SAI
+Device Ports CPU Forwarding ONL Certified In Lab OF-DPA OpenNSL SAI
-S4810-ON 48x10G + 4x40G FreeScale P2020 Broadcom BCM56845 (Trident) Yes Yes Yes No No No
-S4048-ON 48x10G + 6x40G Intel Atom C2338 Broadcom BCM56854 (Trident2) Yes Yes Yes No No No
-S6000-ON 32x40G Intel Atom S1220 Broadcom BCM56850 (Trident2) Yes Yes Yes No No No
-Z9100-ON 32x100G Intel Atom C2538 Broadcom BCM56960 (Tomahawk) Yes Yes No No No No
+S4810-ON 48x10G + 4x40G FreeScale P2020 Broadcom BCM56845 (Trident) Yes Yes No No No
+S4048-ON 48x10G + 6x40G Intel Atom C2338 Broadcom BCM56854 (Trident2) Yes Yes No No No
+S6000-ON 32x40G Intel Atom S1220 Broadcom BCM56850 (Trident2) Yes Yes No No No
+Z9100-ON 32x100G Intel Atom C2538 Broadcom BCM56960 (Tomahawk) Yes Yes No No No
 Interface Masters Technologies, Inc.
@@ -71,13 +71,13 @@ Interface Masters Technologies, Inc.
-Device Ports CPU Forwarding ONL Certified In Lab ORC OF-DPA OpenNSL SAI
+Device Ports CPU Forwarding ONL Certified In Lab OF-DPA OpenNSL SAI
-Niagara 2948X12XLm 48x10G + 12x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No No Yes*** Yes*** No
-Niagara 2960X6XLm 60x10G + 6x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No No Yes*** Yes*** No
-Niagara 2972Xm 72x10G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** Yes No Yes*** Yes*** No
-Niagara 2932XL 32x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No No Yes*** Yes*** No
-Niagara 2948X6XL 48x10G + 6x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No No Yes*** Yes No
+Niagara 2948X12XLm 48x10G + 12x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No Yes*** Yes*** No
+Niagara 2960X6XLm 60x10G + 6x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No Yes*** Yes*** No
+Niagara 2972Xm 72x10G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** Yes Yes*** Yes*** No
+Niagara 2932XL 32x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No Yes*** Yes*** No
+Niagara 2948X6XL 48x10G + 6x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No Yes*** Yes No
Notes:

From aada217a4c7698f227f1abc384f935f6dc5b28fb Mon Sep 17 00:00:00 2001
From: Jeffrey Townsend
Date: Tue, 13 Dec 2016 21:31:41 +0000
Subject: [PATCH 152/255] Use /bin/sh instead of /bin/bash (for execution under the loader).

---
 packages/base/all/vendor-config-onl/src/bin/onlswi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/base/all/vendor-config-onl/src/bin/onlswi b/packages/base/all/vendor-config-onl/src/bin/onlswi
index e228dc40..c57d2c7b 100755
--- a/packages/base/all/vendor-config-onl/src/bin/onlswi
+++ b/packages/base/all/vendor-config-onl/src/bin/onlswi
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 ############################################################
 onlfs mount images --rw
 (cd /mnt/onl/images && rm -f *.swi && wget $1)

From 201832017430b427d11090a8037df48baa9a04f7 Mon Sep 17 00:00:00 2001
From: Jeffrey Townsend
Date: Tue, 13 Dec 2016 21:32:08 +0000
Subject: [PATCH 153/255] Except only if required.

---
 .../all/vendor-config-onl/src/python/onl/platform/base.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py
index 5d43ce15..68e7d0d9 100644
--- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py
+++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py
@@ -158,8 +158,12 @@ class OnlPlatformBase(object):
     def add_info_json(self, name, f, klass=None, required=True):
         if os.path.exists(f):
-            d = json.load(file(f))
-            self.add_info_dict(name, d, klass)
+            try:
+                d = json.load(file(f))
+                self.add_info_dict(name, d, klass)
+            except ValueError, e:
+                if required:
+                    raise e
         elif required:
             raise RuntimeError("A required system file (%s) is missing."
% f) From e458a648b5a3f98a46c74100d323d879116f7943 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 13 Dec 2016 22:01:37 +0000 Subject: [PATCH 154/255] latest --- sm/bigcode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sm/bigcode b/sm/bigcode index b5c5ef26..0b10e643 160000 --- a/sm/bigcode +++ b/sm/bigcode @@ -1 +1 @@ -Subproject commit b5c5ef26e618de765ecaf114984807172f3a500b +Subproject commit 0b10e643e80deb76b9ecb05a707c96818fd376be From 8db58bd0d8be7381d1c77357907e423a8fd78869 Mon Sep 17 00:00:00 2001 From: brandonchuang Date: Wed, 14 Dec 2016 11:06:39 +0800 Subject: [PATCH 155/255] [as5710] Support DC12V power supply(PSU-12V-650) --- .../onlp/builds/src/module/src/platform_lib.c | 49 +++++++++++++---- .../onlp/builds/src/module/src/platform_lib.h | 6 +- .../onlp/builds/src/module/src/psui.c | 55 +++++++++++++++++++ 3 files changed, 96 insertions(+), 14 deletions(-) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.c index a6c114be..95b10fa9 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.c @@ -121,26 +121,28 @@ int deviceNodeReadString(char *filename, char *buffer, int buf_size, int data_le } #define I2C_PSU_MODEL_NAME_LEN 13 +#define I2C_PSU_FAN_DIR_LEN 3 psu_type_t get_psu_type(int id, char* modelname, int modelname_len) { char *node = NULL; char model_name[I2C_PSU_MODEL_NAME_LEN + 1] = {0}; + char fan_dir[I2C_PSU_FAN_DIR_LEN + 1] = {0}; /* Check AC model name */ node = (id == PSU1_ID) ? PSU1_AC_HWMON_NODE(psu_model_name) : PSU2_AC_HWMON_NODE(psu_model_name); if (deviceNodeReadString(node, model_name, sizeof(model_name), 0) == 0) { if (strncmp(model_name, "CPR-4011-4M11", strlen("CPR-4011-4M11")) == 0) { - if (modelname) { - strncpy(modelname, model_name, modelname_len-1); - } + if (modelname) { + strncpy(modelname, model_name, 13); + } return PSU_TYPE_AC_F2B; } else if (strncmp(model_name, "CPR-4011-4M21", strlen("CPR-4011-4M21")) == 0) { - if (modelname) { - strncpy(modelname, model_name, modelname_len-1); - } + if (modelname) { + strncpy(modelname, model_name, 13); + } return PSU_TYPE_AC_B2F; } } @@ -151,17 +153,40 @@ psu_type_t get_psu_type(int id, char* modelname, int modelname_len) if (deviceNodeReadString(node, model_name, sizeof(model_name), 0) == 0) { if (strncmp(model_name, "um400d01G", strlen("um400d01G")) == 0) { - if (modelname) { - strncpy(modelname, model_name, modelname_len-1); - } + if (modelname) { + strncpy(modelname, model_name, 9); + } return PSU_TYPE_DC_48V_B2F; } else if (strncmp(model_name, "um400d01-01G", strlen("um400d01-01G")) == 0) { - if (modelname) { - strncpy(modelname, model_name, modelname_len-1); - } + if (modelname) { + strncpy(modelname, model_name, 12); + } return PSU_TYPE_DC_48V_F2B; } + + if (strncmp(model_name, "PSU-12V-650", 11) == 0) { + if (modelname) { + strncpy(modelname, model_name, 11); + } + + node = (id == PSU1_ID) ? 
PSU1_DC_HWMON_NODE(psu_fan_dir) : PSU2_DC_HWMON_NODE(psu_fan_dir); + if (deviceNodeReadString(node, fan_dir, sizeof(fan_dir), 0) != 0) { + return PSU_TYPE_UNKNOWN; + } + + if (strncmp(fan_dir, "F2B", 3) == 0) { + return PSU_TYPE_DC_12V_F2B; + } + + if (strncmp(fan_dir, "B2F", 3) == 0) { + return PSU_TYPE_DC_12V_B2F; + } + + if (strncmp(fan_dir, "NON", 3) == 0) { + return PSU_TYPE_DC_12V_FANLESS; + } + } } return PSU_TYPE_UNKNOWN; diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h index 9aa30654..396938d3 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h @@ -52,7 +52,6 @@ #define SFP_HWMON_NODE(node) SFP_HWMON_PREFIX#node #define SFP_HWMON_DOM_PREFIX "/sys/bus/i2c/devices/3-0051/" #define SFP_HWMON_DOM_NODE(node) SFP_HWMON_DOM_PREFIX#node -#define SFP_BUS 3 int deviceNodeWriteInt(char *filename, int value, int data_len); int deviceNodeReadBinary(char *filename, char *buffer, int buf_size, int data_len); @@ -63,7 +62,10 @@ typedef enum psu_type { PSU_TYPE_AC_F2B, PSU_TYPE_AC_B2F, PSU_TYPE_DC_48V_F2B, - PSU_TYPE_DC_48V_B2F + PSU_TYPE_DC_48V_B2F, + PSU_TYPE_DC_12V_F2B, + PSU_TYPE_DC_12V_B2F, + PSU_TYPE_DC_12V_FANLESS } psu_type_t; psu_type_t get_psu_type(int id, char* modelname, int modelname_len); diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/psui.c b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/psui.c index 58e2deba..97100e26 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/psui.c +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/psui.c @@ -169,6 +169,56 @@ psu_um400d_info_get(onlp_psu_info_t* info) return ONLP_STATUS_OK; } +#include +#define DC12V_650_REG_TO_CURRENT(low, high) (((low << 4 | high >> 4) * 20 * 1000) / 754) +#define DC12V_650_REG_TO_VOLTAGE(low, high) ((low << 4 | high >> 4) * 25) + +static int +psu_dc12v_650_info_get(onlp_psu_info_t* info) +{ + int pid = ONLP_OID_ID_GET(info->hdr.id); + int bus = (PSU1_ID == pid) ? 5 : 6; + int iout_low, iout_high; + int vout_low, vout_high; + + /* Set capability + */ + info->caps = ONLP_PSU_CAPS_DC12; + + if (info->status & ONLP_PSU_STATUS_FAILED) { + return ONLP_STATUS_OK; + } + + /* Get current + */ + iout_low = onlp_i2c_readb(bus, 0x6f, 0x0, ONLP_I2C_F_FORCE); + iout_high = onlp_i2c_readb(bus, 0x6f, 0x1, ONLP_I2C_F_FORCE); + + if ((iout_low >= 0) && (iout_high >= 0)) { + info->miout = DC12V_650_REG_TO_CURRENT(iout_low, iout_high); + info->caps |= ONLP_PSU_CAPS_IOUT; + } + + /* Get voltage + */ + vout_low = onlp_i2c_readb(bus, 0x6f, 0x2, ONLP_I2C_F_FORCE); + vout_high = onlp_i2c_readb(bus, 0x6f, 0x3, ONLP_I2C_F_FORCE); + + if ((vout_low >= 0) && (vout_high >= 0)) { + info->mvout = DC12V_650_REG_TO_VOLTAGE(vout_low, vout_high); + info->caps |= ONLP_PSU_CAPS_VOUT; + } + + /* Get power based on current and voltage + */ + if ((info->caps & ONLP_PSU_CAPS_IOUT) && (info->caps & ONLP_PSU_CAPS_VOUT)) { + info->mpout = (info->miout * info->mvout) / 1000; + info->caps |= ONLP_PSU_CAPS_POUT; + } + + return ONLP_STATUS_OK; +} + /* * Get all information about the given PSU oid. 
*/ @@ -231,6 +281,11 @@ onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) case PSU_TYPE_DC_48V_B2F: ret = psu_um400d_info_get(info); break; + case PSU_TYPE_DC_12V_F2B: + case PSU_TYPE_DC_12V_B2F: + case PSU_TYPE_DC_12V_FANLESS: + ret = psu_dc12v_650_info_get(info); + break; default: ret = ONLP_STATUS_E_UNSUPPORTED; break; From 721ad5758b4cc1c69571297eadb090dddc3cb377 Mon Sep 17 00:00:00 2001 From: brandonchuang Date: Wed, 14 Dec 2016 11:23:09 +0800 Subject: [PATCH 156/255] [as5710] Restore the define of SFP_BUS --- .../onlp/builds/src/module/src/platform_lib.h | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h index 396938d3..0ffcd9df 100644 --- a/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h +++ b/packages/platforms/accton/powerpc/powerpc-accton-as5710-54x/onlp/builds/src/module/src/platform_lib.h @@ -52,6 +52,7 @@ #define SFP_HWMON_NODE(node) SFP_HWMON_PREFIX#node #define SFP_HWMON_DOM_PREFIX "/sys/bus/i2c/devices/3-0051/" #define SFP_HWMON_DOM_NODE(node) SFP_HWMON_DOM_PREFIX#node +#define SFP_BUS 3 int deviceNodeWriteInt(char *filename, int value, int data_len); int deviceNodeReadBinary(char *filename, char *buffer, int buf_size, int data_len); From 1c7beb800513403f6f768e152567fcf116a3b948 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 14 Dec 2016 16:53:35 +0000 Subject: [PATCH 157/255] - Add platform objects These were accidentally overwritten by the sensor registration changes. They belong in their own file anyways so the code has been split and the registration moved to a common location. 
--- .../onlp_snmp/module/src/onlp_snmp_module.c | 20 ++- .../onlp_snmp/module/src/onlp_snmp_platform.c | 114 ++++++++++++++++++ .../onlp_snmp/module/src/onlp_snmp_sensors.c | 2 +- 3 files changed, 132 insertions(+), 4 deletions(-) create mode 100644 packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_module.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_module.c index 76b7ac4f..3b9adbbb 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_module.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_module.c @@ -34,16 +34,30 @@ void __onlp_snmp_module_init__(void) #include +static int +onlp_snmp_client__(int enable, void* cookie) +{ + /* onlp_snmp_sensors.c */ + extern int onlp_snmp_sensors_init(void); + /* onlp_snmp_platform.c */ + extern int onlp_snmp_platform_init(void); + + onlp_snmp_sensors_init(); + onlp_snmp_platform_init(); + + return 0; +} + int onlp_snmp_snmp_subagent_register(void) { - return snmp_subagent_client_register("onlp_snmp_sensors", - onlp_snmp_sensors_client, + return snmp_subagent_client_register("onlp_snmp_client", + onlp_snmp_client__, NULL); } int onlp_snmp_snmp_subagent_unregister(void) { - return snmp_subagent_client_unregister("onlp_snmp_sensors"); + return snmp_subagent_client_unregister("onlp_snmp_client"); } #endif /* DEPENDMODULE_INCLUDE_SNMP_SUBAGENT */ diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c new file mode 100644 index 00000000..1a6b3e55 --- /dev/null +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c @@ -0,0 +1,114 @@ +/************************************************************ + * + * + * Copyright 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include "onlp_snmp_log.h" + +#include +#include +#include +#include + +static void +platform_string_register(int index, const char* desc, char* value) +{ + oid tree[] = { 1, 3, 6, 1, 4, 1, 42623, 1, 1, 1, 1, 1}; + tree[11] = index; + + if(!value || !value[0]) { + return; + } + + char* s = aim_strdup(value); + + netsnmp_handler_registration *reg = + netsnmp_create_handler_registration( + desc, NULL, + tree, OID_LENGTH(tree), + HANDLER_CAN_RONLY); + netsnmp_watcher_info *winfo = + netsnmp_create_watcher_info( + s, strlen(s), + ASN_OCTET_STR, WATCHER_FIXED_SIZE); + netsnmp_register_watched_scalar( reg, winfo ); +} + +void +platform_int_register(int index, char* desc, int value) +{ + oid tree[] = { 1, 3, 6, 1, 4, 1, 42623, 1, 1, 1, 1, 1}; + tree[11] = index; + + int* v = aim_zmalloc(sizeof(value)); + *v = value; + + netsnmp_register_int_instance(desc, + tree, + OID_LENGTH(tree), + v, NULL); +} + + +void +onlp_snmp_platform_init(void) +{ + /** + * This is the base of the platform:general:system tree + */ + onlp_sys_info_t si; + if(onlp_sys_info_get(&si) >= 0) { + +#define REGISTER_STR(_index, _field) \ + do { \ + platform_string_register(_index, #_field, (char*)si.onie_info._field); \ + } while(0) + +#define REGISTER_INT(_index, _field) \ + do { \ + platform_int_register(_index, #_field, si.onie_info._field); \ + } while(0) + + REGISTER_STR(1, product_name); + REGISTER_STR(2, part_number); + REGISTER_STR(3, serial_number); + char* mstring = aim_fstrdup("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", + si.onie_info.mac[0], si.onie_info.mac[1], si.onie_info.mac[2], + si.onie_info.mac[3], si.onie_info.mac[4], si.onie_info.mac[5]); + platform_string_register(4, "mac", mstring); + aim_free(mstring); + + REGISTER_INT(5, mac_range); + REGISTER_STR(6, manufacturer); + REGISTER_STR(7, manufacture_date); + REGISTER_STR(8, vendor); + REGISTER_STR(9, platform_name); + REGISTER_INT(10, device_version); + REGISTER_STR(11, label_revision); + REGISTER_STR(12, country_code); + REGISTER_STR(13, diag_version); + REGISTER_STR(14, service_tag); + REGISTER_STR(15, onie_version); + } +} + diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c index ecae68b4..84b18308 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c @@ -1136,7 +1136,7 @@ setup_alarm__(void) int -onlp_snmp_sensors_client(int enable, void* cookie) +onlp_snmp_sensors_init(void) { init_all_tables__(); setup_alarm__(); From 7d6aeb18fd08c5c884d7d739bb8d37bcb41ef16b Mon Sep 17 00:00:00 2001 From: Ken Chiang Date: Wed, 14 Dec 2016 15:57:02 -0800 Subject: [PATCH 158/255] Always call snmp_set_var_typed_value so that snmpwalk does not prematurely terminate. If sensor is not present, return a value of zero or a zero-length string. 
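The change applies the same pattern to every sensor handler: when the device is absent, fall back to zero (for gauges) or a zero-length string (for octet strings), and still hand net-snmp a typed varbind so the walk can continue past the OID. A minimal sketch of the gauge case, assuming the request and thermal-info plumbing (req, ti) from the handlers in onlp_snmp_sensors.c; the trailing value/length arguments follow the usual net-snmp convention and are not visible in the hunks below.

    #include <net-snmp/net-snmp-config.h>
    #include <net-snmp/net-snmp-includes.h>
    #include <net-snmp/agent/net-snmp-agent-includes.h>
    #include <onlp/thermal.h>

    /* Sketch only: report 0 for an absent sensor instead of skipping the
     * varbind, so an snmpwalk over the sensor tables does not end early. */
    static void
    temp_value_sketch__(netsnmp_request_info *req, onlp_thermal_info_t *ti)
    {
        uint32_t value =
            (ti->status & ONLP_THERMAL_STATUS_PRESENT) ? ti->mcelsius : 0;

        snmp_set_var_typed_value(req->requestvb, ASN_GAUGE,
                                 &value, sizeof(value));
    }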
--- .../onlp_snmp/module/src/onlp_snmp_sensors.c | 101 +++--------------- 1 file changed, 13 insertions(+), 88 deletions(-) diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c index 84b18308..1737fd23 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_sensors.c @@ -307,11 +307,7 @@ temp_value_handler__(netsnmp_request_info *req, return; } - if (!(ti->status & ONLP_THERMAL_STATUS_PRESENT)) { - return; - } - - value = ti->mcelsius; + value = (ti->status & ONLP_THERMAL_STATUS_PRESENT)? ti->mcelsius: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -443,12 +439,7 @@ fan_rpm_handler__(netsnmp_request_info *req, return; } - if (!(fi->status & ONLP_FAN_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - - value = fi->rpm; + value = (fi->status & ONLP_FAN_STATUS_PRESENT)? fi->rpm: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -468,11 +459,7 @@ fan_pct_handler__(netsnmp_request_info *req, return; } - if (!(fi->status & ONLP_FAN_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - value = fi->percentage; + value = (fi->status & ONLP_FAN_STATUS_PRESENT)? fi->percentage: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -491,15 +478,7 @@ fan_model_handler__(netsnmp_request_info *req, return; } - if (!(fi->status & ONLP_FAN_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - - int len = strlen(fi->model); - if (len == 0) { - return; - } + int len = (fi->status & ONLP_FAN_STATUS_PRESENT)? strlen(fi->model): 0; snmp_set_var_typed_value(req->requestvb, ASN_OCTET_STR, @@ -518,15 +497,7 @@ fan_serial_handler__(netsnmp_request_info *req, return; } - if (!(fi->status & ONLP_FAN_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - - int len = strlen(fi->serial); - if (len == 0) { - return; - } + int len = (fi->status & ONLP_FAN_STATUS_PRESENT)? strlen(fi->serial): 0; snmp_set_var_typed_value(req->requestvb, ASN_OCTET_STR, @@ -639,11 +610,6 @@ psu_current_type_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - name_index = ONLP_SNMP_PSU_TYPE_UNKNOWN; /* These values are mutual exclusive */ if (pi->caps & ONLP_PSU_CAPS_AC) { @@ -674,15 +640,7 @@ psu_model_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - - int len = strlen(pi->model); - if (len == 0) { - return; - } + int len = (pi->status & ONLP_PSU_STATUS_PRESENT)? strlen(pi->model): 0; snmp_set_var_typed_value(req->requestvb, ASN_OCTET_STR, @@ -702,15 +660,7 @@ psu_serial_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - - int len = strlen(pi->serial); - if (len == 0) { - return; - } + int len = (pi->status & ONLP_PSU_STATUS_PRESENT)? 
strlen(pi->serial): 0; snmp_set_var_typed_value(req->requestvb, ASN_OCTET_STR, @@ -730,12 +680,7 @@ psu_vin_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - - value = pi->mvin; + value = (pi->status & ONLP_PSU_STATUS_PRESENT)? pi->mvin: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -755,11 +700,7 @@ psu_vout_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present */ - return; - } - value = pi->mvout; + value = (pi->status & ONLP_PSU_STATUS_PRESENT)? pi->mvout: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -779,11 +720,7 @@ psu_iin_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - value = pi->miin; + value = (pi->status & ONLP_PSU_STATUS_PRESENT)? pi->miin: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -803,11 +740,7 @@ psu_iout_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - value = pi->miout; + value = (pi->status & ONLP_PSU_STATUS_PRESENT)? pi->miout: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -827,11 +760,7 @@ psu_pin_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - value = pi->mpin; + value = (pi->status & ONLP_PSU_STATUS_PRESENT)? pi->mpin: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, @@ -851,11 +780,7 @@ psu_pout_handler__(netsnmp_request_info *req, return; } - if (!(pi->status & ONLP_PSU_STATUS_PRESENT)) { - /* Simply return if failed to get or not present*/ - return; - } - value = pi->mpout; + value = (pi->status & ONLP_PSU_STATUS_PRESENT)? pi->mpout: 0; snmp_set_var_typed_value(req->requestvb, ASN_GAUGE, From 799a402c37052a17c74ccfff8ac23a05141e057e Mon Sep 17 00:00:00 2001 From: Michael Shych Date: Thu, 15 Dec 2016 17:09:37 +0000 Subject: [PATCH 159/255] Changes in usage of onlp_file_read/onlp_file_write APIs. 
Signed-off-by: Michael Shych --- .../onlp/builds/src/module/src/fani.c | 14 ++++--------- .../onlp/builds/src/module/src/ledi.c | 13 ++++-------- .../onlp/builds/src/module/src/psui.c | 10 ++++------ .../onlp/builds/src/module/src/thermali.c | 7 ++----- .../onlp/builds/src/module/src/fani.c | 20 ++++++------------- .../onlp/builds/src/module/src/ledi.c | 12 ++++------- .../onlp/builds/src/module/src/platform_lib.c | 5 ++--- .../onlp/builds/src/module/src/psui.c | 10 ++++------ .../onlp/builds/src/module/src/thermali.c | 7 ++----- .../onlp/builds/src/module/src/fani.c | 19 +++++------------- .../onlp/builds/src/module/src/ledi.c | 12 ++++------- .../onlp/builds/src/module/src/platform_lib.c | 5 ++--- .../onlp/builds/src/module/src/psui.c | 10 ++++------ .../onlp/builds/src/module/src/thermali.c | 7 ++----- 14 files changed, 49 insertions(+), 102 deletions(-) diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c index 7dd59e0d..931d6b0d 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/fani.c @@ -213,7 +213,6 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) float temp = 0.0; int rv = 0, local_id = 0, nbytes = 10; char r_data[10] = {0}; - char fullpath[LEN_FILE_NAME] = {0}; onlp_fan_info_t* info = NULL; VALIDATE(id); @@ -230,9 +229,6 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) return ONLP_STATUS_E_INVALID; } - snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, - fan_path[local_id].r_speed_set); - /* Set fan speed Converting percent to driver value. Driver accept value in range between 153 and 255. @@ -254,7 +250,8 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) snprintf(r_data, sizeof(r_data), "%d", (int)temp); nbytes = strnlen(r_data, sizeof(r_data)); - rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + rv = onlp_file_write((uint8_t*)r_data, nbytes, "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } @@ -276,7 +273,6 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) float temp = 0.0; int rv = 0, local_id = 0, nbytes = 10; char r_data[10] = {0}; - char fullpath[LEN_FILE_NAME] = {0}; onlp_fan_info_t* info = NULL; VALIDATE(id); @@ -296,9 +292,6 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) return ONLP_STATUS_E_PARAM; } - snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, - fan_path[local_id].r_speed_set); - /* Set fan speed Converting percent to driver value. Driver accept value in range between 153 and 255. 
@@ -310,7 +303,8 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) snprintf(r_data, sizeof(r_data), "%d", (int)temp); nbytes = strnlen(r_data, sizeof(r_data)); - rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + rv = onlp_file_write((uint8_t*)r_data, nbytes, "%s%s", + PREFIX_PATH, fan_path[local_id].r_speed_set); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c index 459207e7..6966e6bd 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/ledi.c @@ -200,20 +200,17 @@ onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) { int len, local_id = 0; uint8_t data[driver_value_len] = {0}; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - /* get fullpath */ - snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); - /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[ONLP_OID_ID_GET(id)]; /* Get LED mode */ - if (onlp_file_read(data, sizeof(data), &len, fullpath) != 0) { + if (onlp_file_read(data, sizeof(data), &len, "%s%s", + prefix_path, file_names[local_id]) != 0) { return ONLP_STATUS_E_INTERNAL; } @@ -258,15 +255,13 @@ int onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) { int local_id; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); - if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, fullpath) != 0) - { + if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, + "%s%s", prefix_path, file_names[local_id]) != 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c index e48cee9f..8bfc5357 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/psui.c @@ -49,12 +49,11 @@ psu_module_info_get(int id, char *node, int *value) { int len, ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; - char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; *value = 0; - sprintf(node_path, PSU_MODULE_PREFIX, id, node); - ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, + PSU_MODULE_PREFIX, id, node); if (ret == 0) { *value = atoi(buf); } @@ -67,12 +66,11 @@ psu_power_info_get(int id, char *node, int *value) { int len, ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; - char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; *value = 0; - sprintf(node_path, PSU_POWER_PREFIX, id, node); - ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, + PSU_POWER_PREFIX, id, node); if (ret == 0) { *value = atoi(buf); } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c index 32b9d254..faaa08c8 100644 --- 
a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/thermali.c @@ -146,7 +146,6 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { int rv, len = 10, temp_base=1, local_id = 0; char r_data[10] = {0}; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); @@ -154,10 +153,8 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; - /* get fullpath */ - snprintf(fullpath, sizeof(fullpath), "%s/%s", prefix_path, last_path[local_id]); - - rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, fullpath); + rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, "%s/%s", + prefix_path, last_path[local_id]); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c index dfcb39c9..a1e23036 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/fani.c @@ -166,7 +166,6 @@ _onlp_fani_read_fan_eeprom(int local_id, onlp_fan_info_t* info) uint8_t temp = 0; int rv = 0; int len = 0; - char path[LEN_FILE_NAME] = {0}; /* We have 4 FRU with 2 fans(total 8 fans). Eeprom is per FRU but not per fan. @@ -177,9 +176,8 @@ _onlp_fani_read_fan_eeprom(int local_id, onlp_fan_info_t* info) local_id /= 2; } - /* Reading FRU eeprom. */ - snprintf(path, sizeof(path), IDPROM_PATH, "fan", local_id); - rv = onlp_file_read(data, sizeof(data), &len, path); + rv = onlp_file_read(data, sizeof(data), &len, + IDPROM_PATH, "fan", local_id); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } @@ -401,7 +399,6 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) float temp = 0.0; int rv = 0, local_id = 0, nbytes = 10; char r_data[10] = {0}; - char fullpath[LEN_FILE_NAME] = {0}; onlp_fan_info_t* info = NULL; VALIDATE(id); @@ -418,9 +415,6 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) return ONLP_STATUS_E_INVALID; } - snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, - fan_path[local_id].r_speed_set); - /* Set fan speed Converting percent to driver value. Driver accept value in range between 153 and 255. @@ -442,7 +436,8 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) snprintf(r_data, sizeof(r_data), "%d", (int)temp); nbytes = strnlen(r_data, sizeof(r_data)); - rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + rv = onlp_file_write((uint8_t*)r_data, nbytes, "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } @@ -464,7 +459,6 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) float temp = 0.0; int rv = 0, local_id = 0, nbytes = 10; char r_data[10] = {0}; - char fullpath[LEN_FILE_NAME] = {0}; onlp_fan_info_t* info = NULL; VALIDATE(id); @@ -484,9 +478,6 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) return ONLP_STATUS_E_PARAM; } - snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, - fan_path[local_id].r_speed_set); - /* Set fan speed Converting percent to driver value. Driver accept value in range between 153 and 255. 
@@ -498,7 +489,8 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) snprintf(r_data, sizeof(r_data), "%d", (int)temp); nbytes = strnlen(r_data, sizeof(r_data)); - rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + rv = onlp_file_write((uint8_t*)r_data, nbytes, "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c index 27b39485..592d58e1 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/ledi.c @@ -217,20 +217,17 @@ onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) { int len, local_id = 0; uint8_t data[driver_value_len] = {0}; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - /* get fullpath */ - snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); - /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[ONLP_OID_ID_GET(id)]; /* Get LED mode */ - if (onlp_file_read(data, sizeof(data), &len, fullpath) != 0) { + if (onlp_file_read(data, sizeof(data), &len, "%s%s", + prefix_path, file_names[local_id]) != 0) { return ONLP_STATUS_E_INTERNAL; } @@ -275,14 +272,13 @@ int onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) { int local_id; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); - if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, fullpath) != 0) + if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, + "%s%s", prefix_path, file_names[local_id]) != 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c index c0c1765e..5ee85a2a 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/platform_lib.c @@ -35,15 +35,14 @@ int psu_read_eeprom(int psu_index, onlp_psu_info_t* psu_info, onlp_fan_info_t* fan_info) { - char path[64] = {0}; const char sanity_check[] = "MLNX"; const uint8_t serial_len = 24; char data[256] = {0}; bool sanity_found = false; int index = 0, rv = 0, len = 0; - snprintf(path, sizeof(path), IDPROM_PATH, "psu", psu_index); - rv = onlp_file_read((uint8_t* )data, sizeof(data)-1, &len, path); + rv = onlp_file_read((uint8_t* )data, sizeof(data)-1, &len, + IDPROM_PATH, "psu", psu_index); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c index 610b034e..ff734bdc 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/psui.c @@ -47,12 +47,11 @@ psu_module_info_get(int id, char *node, int *value) { int len, ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; - char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; *value = 0; 
- sprintf(node_path, PSU_MODULE_PREFIX, id, node); - ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, + PSU_MODULE_PREFIX, id, node); if (ret == 0) { *value = atoi(buf); } @@ -65,12 +64,11 @@ psu_power_info_get(int id, char *node, int *value) { int len, ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; - char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; *value = 0; - sprintf(node_path, PSU_POWER_PREFIX, id, node); - ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, + PSU_POWER_PREFIX, id, node); if (ret == 0) { *value = atoi(buf); } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c index d2e1a533..7b563eec 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/thermali.c @@ -156,7 +156,6 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { int rv, len = 10, temp_base=1, local_id = 0; char r_data[10] = {0}; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); @@ -164,10 +163,8 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; - /* get fullpath */ - snprintf(fullpath, sizeof(fullpath), "%s/%s", prefix_path, last_path[local_id]); - - rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, fullpath); + rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, + "%s/%s", prefix_path, last_path[local_id]); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c index dfcb39c9..c5840918 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/fani.c @@ -166,7 +166,6 @@ _onlp_fani_read_fan_eeprom(int local_id, onlp_fan_info_t* info) uint8_t temp = 0; int rv = 0; int len = 0; - char path[LEN_FILE_NAME] = {0}; /* We have 4 FRU with 2 fans(total 8 fans). Eeprom is per FRU but not per fan. @@ -177,9 +176,7 @@ _onlp_fani_read_fan_eeprom(int local_id, onlp_fan_info_t* info) local_id /= 2; } - /* Reading FRU eeprom. */ - snprintf(path, sizeof(path), IDPROM_PATH, "fan", local_id); - rv = onlp_file_read(data, sizeof(data), &len, path); + rv = onlp_file_read(data, sizeof(data), &len, IDPROM_PATH, "fan", local_id); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } @@ -401,7 +398,6 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) float temp = 0.0; int rv = 0, local_id = 0, nbytes = 10; char r_data[10] = {0}; - char fullpath[LEN_FILE_NAME] = {0}; onlp_fan_info_t* info = NULL; VALIDATE(id); @@ -418,9 +414,6 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) return ONLP_STATUS_E_INVALID; } - snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, - fan_path[local_id].r_speed_set); - /* Set fan speed Converting percent to driver value. Driver accept value in range between 153 and 255. 
@@ -442,7 +435,8 @@ onlp_fani_rpm_set(onlp_oid_t id, int rpm) snprintf(r_data, sizeof(r_data), "%d", (int)temp); nbytes = strnlen(r_data, sizeof(r_data)); - rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + rv = onlp_file_write((uint8_t*)r_data, nbytes, "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } @@ -464,7 +458,6 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) float temp = 0.0; int rv = 0, local_id = 0, nbytes = 10; char r_data[10] = {0}; - char fullpath[LEN_FILE_NAME] = {0}; onlp_fan_info_t* info = NULL; VALIDATE(id); @@ -484,9 +477,6 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) return ONLP_STATUS_E_PARAM; } - snprintf(fullpath, sizeof(fullpath), "%s%s", PREFIX_PATH, - fan_path[local_id].r_speed_set); - /* Set fan speed Converting percent to driver value. Driver accept value in range between 153 and 255. @@ -498,7 +488,8 @@ onlp_fani_percentage_set(onlp_oid_t id, int p) snprintf(r_data, sizeof(r_data), "%d", (int)temp); nbytes = strnlen(r_data, sizeof(r_data)); - rv = onlp_file_write((uint8_t*)r_data, nbytes, fullpath); + rv = onlp_file_write((uint8_t*)r_data, nbytes, "%s%s", PREFIX_PATH, + fan_path[local_id].r_speed_set); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c index c8b71de3..bdc4d56a 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/ledi.c @@ -217,20 +217,17 @@ onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) { int len, local_id = 0; uint8_t data[driver_value_len] = {0}; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - /* get fullpath */ - snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); - /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[ONLP_OID_ID_GET(id)]; /* Get LED mode */ - if (onlp_file_read(data, sizeof(data), &len, fullpath) != 0) { + if (onlp_file_read(data, sizeof(data), &len, "%s%s", + prefix_path, file_names[local_id]) != 0) { return ONLP_STATUS_E_INTERNAL; } @@ -275,14 +272,13 @@ int onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) { int local_id; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - snprintf(fullpath, sizeof(fullpath), "%s%s", prefix_path, file_names[local_id]); - if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, fullpath) != 0) + if (onlp_file_write((uint8_t*)onlp_to_driver_led_mode(local_id, mode), driver_value_len, + "%s%s", prefix_path, file_names[local_id]) != 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c index c0c1765e..5ee85a2a 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/platform_lib.c @@ -35,15 +35,14 @@ int psu_read_eeprom(int psu_index, onlp_psu_info_t* psu_info, onlp_fan_info_t* fan_info) { - char path[64] = {0}; const char sanity_check[] = "MLNX"; const uint8_t serial_len = 24; char data[256] = {0}; bool sanity_found = false; int index = 0, rv = 
0, len = 0; - snprintf(path, sizeof(path), IDPROM_PATH, "psu", psu_index); - rv = onlp_file_read((uint8_t* )data, sizeof(data)-1, &len, path); + rv = onlp_file_read((uint8_t* )data, sizeof(data)-1, &len, + IDPROM_PATH, "psu", psu_index); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c index 610b034e..ff734bdc 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/psui.c @@ -47,12 +47,11 @@ psu_module_info_get(int id, char *node, int *value) { int len, ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; - char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; *value = 0; - sprintf(node_path, PSU_MODULE_PREFIX, id, node); - ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, + PSU_MODULE_PREFIX, id, node); if (ret == 0) { *value = atoi(buf); } @@ -65,12 +64,11 @@ psu_power_info_get(int id, char *node, int *value) { int len, ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; - char node_path[PSU_NODE_MAX_PATH_LEN] = {0}; *value = 0; - sprintf(node_path, PSU_POWER_PREFIX, id, node); - ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, node_path); + ret = onlp_file_read((uint8_t*)buf, sizeof(buf), &len, + PSU_POWER_PREFIX, id, node); if (ret == 0) { *value = atoi(buf); } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c index d2e1a533..698be008 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/thermali.c @@ -156,7 +156,6 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { int rv, len = 10, temp_base=1, local_id = 0; char r_data[10] = {0}; - char fullpath[50] = {0}; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); @@ -164,10 +163,8 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; - /* get fullpath */ - snprintf(fullpath, sizeof(fullpath), "%s/%s", prefix_path, last_path[local_id]); - - rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, fullpath); + rv = onlp_file_read((uint8_t*)r_data, sizeof(r_data), &len, "%s/%s", + prefix_path, last_path[local_id]); if (rv < 0) { return ONLP_STATUS_E_INTERNAL; } From d4d574e351331757e2aca729412ab5b8e965eafc Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 15 Dec 2016 16:09:50 +0000 Subject: [PATCH 160/255] Add subcommand "rw" This subcommand can be used to execute a shell command under read-write context. 
For example: #> onlfs rw images rm /mnt/onl/images/*.swi --- .../src/python/onl/mounts/__init__.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py index 3d0a2a6d..7f33e0c3 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py @@ -273,6 +273,17 @@ class OnlMountManager(object): o.init() o.mount(args.labels, mode=mode) + @staticmethod + def cmdRw(args, register=False): + if register: + p = args.add_parser('rw') + p.add_argument("label") + p.add_argument("cmd", nargs='+') + p.set_defaults(func=OnlMountManager.cmdRw) + else: + with OnlMountContextReadWrite(args.label, logger=None): + rc = subprocess.call(" ".join(args.cmd), shell=True) + sys.exit(rc) @staticmethod def cmdFsck(args, register=False): @@ -366,4 +377,3 @@ class OnlOnieBootContext(MountContext): if not os.path.exists(mdir): os.makedirs(mdir) MountContext.__init__(self, device, mdir, mode, logger) - From 49f8921a136446524fdcffdb2c5a1f7a857527ba Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 15 Dec 2016 19:31:42 +0000 Subject: [PATCH 161/255] - Improve ONIE TLV processing. Still WIP. - Read ONIE data from the platform's onie-syseeprom contents. --- .../any/onlp/src/onlp/module/src/onlp_main.c | 19 ++++- packages/base/any/onlp/src/onlplib/.module | 2 +- .../onlp/src/onlplib/module/auto/onlplib.yml | 21 ++++- .../src/onlplib/module/inc/onlplib/onie.h | 5 ++ .../src/onlplib/module/inc/onlplib/onlplib.x | 34 ++++++-- .../any/onlp/src/onlplib/module/src/onie.c | 82 +++++++++++++++++++ packages/base/any/onlp/src/onlplib/onlplib.mk | 2 +- 7 files changed, 152 insertions(+), 13 deletions(-) diff --git a/packages/base/any/onlp/src/onlp/module/src/onlp_main.c b/packages/base/any/onlp/src/onlp/module/src/onlp_main.c index 51cbb36d..6dd5ccfe 100644 --- a/packages/base/any/onlp/src/onlp/module/src/onlp_main.c +++ b/packages/base/any/onlp/src/onlp/module/src/onlp_main.c @@ -184,6 +184,7 @@ onlpdump_main(int argc, char* argv[]) char* pidfile = NULL; const char* O = NULL; const char* t = NULL; + const char* J = NULL; /** * debug trap @@ -198,7 +199,7 @@ onlpdump_main(int argc, char* argv[]) } } - while( (c = getopt(argc, argv, "srehdojmyM:ipxlSt:O:b")) != -1) { + while( (c = getopt(argc, argv, "srehdojmyM:ipxlSt:O:bJ:")) != -1) { switch(c) { case 's': show=1; break; @@ -218,6 +219,7 @@ onlpdump_main(int argc, char* argv[]) case 'S': S=1; break; case 'l': l=1; break; case 'b': b=1; break; + case 'J': J = optarg; break; case 'y': show=1; showflags |= ONLP_OID_SHOW_F_YAML; break; default: help=1; rv = 1; break; } @@ -242,9 +244,24 @@ onlpdump_main(int argc, char* argv[]) printf(" -S Decode SFP Inventory\n"); printf(" -b Decode SFP Inventory into SFF database entries.\n"); printf(" -l API Lock test.\n"); + printf(" -J Decode ONIE JSON data.\n"); return rv; } + if(J) { + int rv; + onlp_onie_info_t onie; + rv = onlp_onie_read_json(&onie, J); + if(rv < 0) { + fprintf(stderr, "onie read json failed: %d\n", rv); + return 1; + } + else { + onlp_onie_show(&onie, &aim_pvs_stdout); + onlp_onie_info_free(&onie); + return 0; + } + } if(t) { int rv; diff --git a/packages/base/any/onlp/src/onlplib/.module b/packages/base/any/onlp/src/onlplib/.module index 7cf3968c..f3d0bc49 100644 --- a/packages/base/any/onlp/src/onlplib/.module +++ b/packages/base/any/onlp/src/onlplib/.module @@ 
-1,2 +1,2 @@ name: onlplib -depends: cjson +depends: cjson_util diff --git a/packages/base/any/onlp/src/onlplib/module/auto/onlplib.yml b/packages/base/any/onlp/src/onlplib/module/auto/onlplib.yml index b13f63ab..bec06562 100644 --- a/packages/base/any/onlp/src/onlplib/module/auto/onlplib.yml +++ b/packages/base/any/onlp/src/onlplib/module/auto/onlplib.yml @@ -70,4 +70,23 @@ definitions: - vsnprintf - snprintf - strlen - - atoi \ No newline at end of file + - atoi + + xmacro: + ONIE_TLV_ENTRY: + members: + - product_name, Product Name, 0x21, str + - part_number, Part Number, 0x22, str + - serial_number, Serial Number, 0x23, str + - mac, MAC, 0x24, mac + - manufacture_date, Manufacture Date, 0x25, str + - device_version, Device Version, 0x26, byte + - label_revision, Label Revision, 0x27, str + - platform_name, Platform Name, 0x28, str + - onie_version, ONIE Version, 0x29, str + - mac_range, MAC Range, 0x2A, int16 + - manufacturer, Manufacturer, 0x2B, str + - country_code, Country Code, 0x2C, str + - vendor, Vendor, 0x2D, str + - diag_version, Diag Version, 0x2E, str + - service_tag, Service Tag, 0x2F, str diff --git a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onie.h b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onie.h index 89e9255a..e1b50894 100644 --- a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onie.h +++ b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onie.h @@ -100,4 +100,9 @@ void onlp_onie_show(onlp_onie_info_t* info, aim_pvs_t* pvs); */ void onlp_onie_show_json(onlp_onie_info_t* info, aim_pvs_t* pvs); +/** + * Read ONIE fields from a JSON file. + */ +int onlp_onie_read_json(onlp_onie_info_t* info, const char* fname); + #endif /* __ONLP_ONIE_H__ */ diff --git a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onlplib.x b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onlplib.x index f8b4ac4d..cd729e23 100644 --- a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onlplib.x +++ b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/onlplib.x @@ -1,21 +1,21 @@ /************************************************************ * - * - * Copyright 2014, 2015 Big Switch Networks, Inc. - * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * * Licensed under the Eclipse Public License, Version 1.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * + * * http://www.eclipse.org/legal/epl-v10.html - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. 
- * + * * ************************************************************ * @@ -23,9 +23,25 @@ * ***********************************************************/ -#include - -/* <--auto.start.xmacro(ALL).define> */ +/* */ +#ifdef ONIE_TLV_ENTRY +ONIE_TLV_ENTRY(product_name, Product Name, 0x21, str) +ONIE_TLV_ENTRY(part_number, Part Number, 0x22, str) +ONIE_TLV_ENTRY(serial_number, Serial Number, 0x23, str) +ONIE_TLV_ENTRY(mac, MAC, 0x24, mac) +ONIE_TLV_ENTRY(manufacture_date, Manufacture Date, 0x25, str) +ONIE_TLV_ENTRY(device_version, Device Version, 0x26, byte) +ONIE_TLV_ENTRY(label_revision, Label Revision, 0x27, str) +ONIE_TLV_ENTRY(platform_name, Platform Name, 0x28, str) +ONIE_TLV_ENTRY(onie_version, ONIE Version, 0x29, str) +ONIE_TLV_ENTRY(mac_range, MAC Range, 0x2A, int16) +ONIE_TLV_ENTRY(manufacturer, Manufacturer, 0x2B, str) +ONIE_TLV_ENTRY(country_code, Country Code, 0x2C, str) +ONIE_TLV_ENTRY(vendor, Vendor, 0x2D, str) +ONIE_TLV_ENTRY(diag_version, Diag Version, 0x2E, str) +ONIE_TLV_ENTRY(service_tag, Service Tag, 0x2F, str) +#undef ONIE_TLV_ENTRY +#endif /* */ /* <--auto.start.xenum(ALL).define> */ diff --git a/packages/base/any/onlp/src/onlplib/module/src/onie.c b/packages/base/any/onlp/src/onlplib/module/src/onie.c index eecd5feb..2060e2e3 100644 --- a/packages/base/any/onlp/src/onlplib/module/src/onie.c +++ b/packages/base/any/onlp/src/onlplib/module/src/onie.c @@ -396,6 +396,7 @@ onlp_onie_show(onlp_onie_info_t* info, aim_pvs_t* pvs) } #include +#include void onlp_onie_show_json(onlp_onie_info_t* info, aim_pvs_t* pvs) @@ -444,4 +445,85 @@ onlp_onie_show_json(onlp_onie_info_t* info, aim_pvs_t* pvs) cJSON_Delete(cj); } +static char* +lookup_entry__(cJSON* cj, const char* name, int code) +{ + char* str = NULL; + int rv = cjson_util_lookup_string(cj, &str, "0x%x", code); + if(rv < 0) { + rv = cjson_util_lookup_string(cj, &str, name); + } + if(rv < 0) { + return NULL; + } + else { + return aim_strdup(str); + } +} + +int +onlp_onie_read_json(onlp_onie_info_t* info, const char* fname) +{ + cJSON* cj; + + memset(info, 0, sizeof(*info)); + + list_init(&info->vx_list); + + int rv = cjson_util_parse_file(fname, &cj); + if(rv < 0) { + AIM_LOG_ERROR("Could not parse ONIE JSON file '%s' rv=%{aim_error}", + fname, rv); + return rv; + } + +#define ONIE_TLV_ENTRY_str(_member, _name, _code) \ + do { \ + info->_member = lookup_entry__(cj, #_name, _code); \ + } while(0) + +#define ONIE_TLV_ENTRY_mac(_member, _name, _code) \ + do { \ + char* str = lookup_entry__(cj, #_name, _code); \ + int mac[6] = {0}; \ + if(str) { \ + int i; \ + sscanf(str, "%x:%x:%x:%x:%x:%x", \ + mac+0, mac+1, mac+2, mac+3, mac+4, mac+5); \ + for(i = 0; i < 6; i++) info->mac[i] = mac[i]; \ + aim_free(str); \ + } \ + } while(0) + +#define ONIE_TLV_ENTRY_byte(_member, _name, _code) \ + do { \ + char* v = lookup_entry__(cj, #_name, _code); \ + if(v) { \ + info->_member = atoi(v); \ + aim_free(v); \ + } \ + } while(0) + +#define ONIE_TLV_ENTRY_int16(_member, _name, _code) \ + do { \ + char* v = lookup_entry__(cj, #_name, _code); \ + if(v) { \ + info->_member = atoi(v); \ + aim_free(v); \ + } \ + } while(0) + +#define ONIE_TLV_ENTRY(_member, _name, _code, _type) \ + ONIE_TLV_ENTRY_##_type(_member, _name, _code); + + #include + + + cJSON_Delete(cj); + return 0; +} + + + + diff --git a/packages/base/any/onlp/src/onlplib/onlplib.mk b/packages/base/any/onlp/src/onlplib/onlplib.mk index c33c4de3..0f69b303 100644 --- a/packages/base/any/onlp/src/onlplib/onlplib.mk +++ b/packages/base/any/onlp/src/onlplib/onlplib.mk @@ -3,7 +3,7 @@ # # 
Inclusive Makefile for the onlplib module. # -# Autogenerated 2016-05-17 17:43:05.779760 +# Autogenerated 2016-12-15 17:09:12.738344 # ############################################################################### onlplib_BASEDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) From 0427357fca30aa9fb7906627dfcccc880f156ac9 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 15 Dec 2016 11:56:54 -0800 Subject: [PATCH 162/255] Initial. --- packages/platforms/mellanox/Makefile | 1 + packages/platforms/mellanox/x86-64/Makefile | 1 + 2 files changed, 2 insertions(+) create mode 100644 packages/platforms/mellanox/Makefile create mode 100644 packages/platforms/mellanox/x86-64/Makefile diff --git a/packages/platforms/mellanox/Makefile b/packages/platforms/mellanox/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/mellanox/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/mellanox/x86-64/Makefile b/packages/platforms/mellanox/x86-64/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/mellanox/x86-64/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file From fa2bf42480421eeada39a29129c02edcd6f036a6 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 15 Dec 2016 13:31:11 -0800 Subject: [PATCH 163/255] Add method to rewrite eeprom.json --- .../src/python/onl/platform/base.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index 68e7d0d9..e98f56b4 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -210,8 +210,10 @@ class OnlPlatformBase(object): return data + ONIE_EEPROM_JSON='etc/onie/eeprom.json' + def onie_syseeprom_get(self): - se = self.basedir_onl("etc/onie/eeprom.json") + se = self.basedir_onl(self.ONIE_EEPROM_JSON) if not os.path.exists(se): data = {} extensions = [] @@ -229,15 +231,20 @@ class OnlPlatformBase(object): if len(extensions): data['0xfd'] = extensions - if not os.path.exists(os.path.dirname(se)): - os.makedirs(os.path.dirname(se)) - - with open(se, "w") as f: - f.write(json.dumps(data, indent=2)) + self.onie_syseeprom_set(data) else: data = json.load(open(se)) return data + def onie_syseeprom_set(self, data): + se = self.basedir_onl(self.ONIE_EEPROM_JSON) + if not os.path.exists(os.path.dirname(se)): + os.makedirs(os.path.dirname(se)) + + with open(se, "w") as f: + f.write(json.dumps(data, indent=2)) + + def platform(self): return self.PLATFORM From bfe8894c25ae464f774b4f3e899e727270baafa4 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 15 Dec 2016 14:35:56 -0800 Subject: [PATCH 164/255] Use the new common ONIE syseeprom and machine.conf infrastructure. The ONL platform base class supports extracting, parsing, and caching the contents of the ONIE machine.conf file and the TLVs in the ONIE system eeprom. The mellanox platforms that rely on this information now request it from the common baseclass and use the common ONIE JSON parsing infrastructure to populate the onie_info structure in the SYSI implementation. 
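In practice this means the per-platform sysi.c no longer shells out to onie-syseeprom and greps its output; it reads the eeprom.json cache exported by the Python base class and lets onlplib decode the fields. A sketch of the consuming side under those assumptions: onlp_sysi_onie_info_get() and onlp_onie_read_json() are names from this series, while the cache path shown is illustrative rather than the literal path used by the patch.

    #include <onlplib/onie.h>

    int
    onlp_sysi_onie_info_get(onlp_onie_info_t* onie)
    {
        /* Illustrative location of the JSON cache written by
         * onie_syseeprom_set(); the real path is platform-specific. */
        return onlp_onie_read_json(onie, "/lib/platform-config/current/onl/etc/onie/eeprom.json");
    }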
--- .../src/python/mellanox/__init__.py | 12 ++ .../onlp/builds/src/module/src/sysi.c | 147 +---------------- .../python/x86_64_mlnx_msn2100_r0/__init__.py | 4 +- .../onlp/builds/src/module/src/sysi.c | 147 +---------------- .../python/x86_64_mlnx_msn2410_r0/__init__.py | 4 +- .../onlp/builds/src/module/src/sysi.c | 150 ------------------ .../python/x86_64_mlnx_msn2700_r0/__init__.py | 4 +- 7 files changed, 23 insertions(+), 445 deletions(-) diff --git a/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py b/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py index 2d089e30..56cd4d71 100644 --- a/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py +++ b/packages/platforms/mellanox/vendor-config/src/python/mellanox/__init__.py @@ -5,3 +5,15 @@ from onl.platform.base import * class OnlPlatformMellanox(OnlPlatformBase): MANUFACTURER='Mellanox' PRIVATE_ENTERPRISE_NUMBER=33049 + + # + # Some platforms rely on the output of the onie-syseeprom tool + # and the machine.conf file to implement parts of ONLP. + # + def syseeprom_export(self): + print "Caching ONIE System EEPROM..." + onie = self.onie_syseeprom_get() + mc = self.onie_machine_get() + if 'onie_version' in mc: + onie['0x29'] = mc['onie_version'] + self.onie_syseeprom_set(onie) diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c index 74b17de7..e104052c 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c @@ -48,30 +48,6 @@ static char arr_cplddev_name[NUM_OF_CPLD][30] = "cpld_mgmt_version" }; -static void -_onlp_sysi_execute_command(char *command, char buffer[COMMAND_OUTPUT_BUFFER]) -{ - FILE *fp = NULL; - - /* Open the command for reading. 
*/ - fp = popen(command, "r"); - if (NULL == fp) { - AIM_LOG_WARN("Failed to run command '%s'\n", command); - } - - /* Read the output */ - if (fgets(buffer, COMMAND_OUTPUT_BUFFER-1, fp) == NULL) { - AIM_LOG_WARN("Failed to read output of command '%s'\n", command); - pclose(fp); - } - - /* The last symbol is '\n', so remote it */ - buffer[strnlen(buffer, COMMAND_OUTPUT_BUFFER) - 1] = '\0'; - - /* close */ - pclose(fp); -} - const char* onlp_sysi_platform_get(void) { @@ -131,128 +107,11 @@ onlp_sysi_oids_get(onlp_oid_t* table, int max) return 0; } -static int -_onlp_sysi_grep_output(char value[256], const char *attr, const char *tmp_file) -{ - int value_offset = 30; /* value offset in onie-syseeprom */ - char command[256] = {0}; - char buffer[COMMAND_OUTPUT_BUFFER] = {0}; - int v = 0; - - snprintf(command, sizeof(command), "cat '%s' | grep '%s'", tmp_file, attr); - _onlp_sysi_execute_command(command, buffer); - - /* Reading value from buffer with command output */ - while (buffer[value_offset] != '\n' && - buffer[value_offset] != '\r' && - buffer[value_offset] != '\0') { - value[v] = buffer[value_offset]; - v++; - value_offset++; - } - value[v] = '\0'; - - AIM_LOG_VERBOSE("Value for sytem attribute '%s' is '%s' \n", attr, value); - - return ONLP_STATUS_OK; -} +#include int onlp_sysi_onie_info_get(onlp_onie_info_t* onie) { - - const char onie_version_file[] = "/bsp/onie-version"; - const char onie_version_command[] = "onie-shell -c 'onie-sysinfo -v' > /bsp/onie-version"; - const char onie_syseeprom_file[] = "/bsp/onie-syseeprom"; - const char onie_syseeprom_command[] = "onie-shell -c onie-syseeprom > /bsp/onie-syseeprom"; - struct stat stat_buf; - char value[256] = {0}; - char command[256] = {0}; - int rc = 0; - int exit_status; - - /* We must initialize this otherwise crash occurs while free memory */ - list_init(&onie->vx_list); - - /* Check if cache file exist */ - rc = stat(onie_syseeprom_file, &stat_buf); - if (-1 == rc) { - rc = system(onie_syseeprom_command); - if (-1 == rc) { - return rc; - } - exit_status = WEXITSTATUS(rc); - if (EXIT_SUCCESS != exit_status) { - return ONLP_STATUS_E_GENERIC; - } - } - - rc = _onlp_sysi_grep_output(value, "Product Name", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->product_name = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Part Number", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->part_number = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Serial Number", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->serial_number = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Base MAC Address", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - strncpy((char*)onie->mac, value, sizeof(onie->mac)); - rc = _onlp_sysi_grep_output(value, "Manufacture Date", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacture_date = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Device Version", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->device_version = atoi(value); - rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacturer = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacturer = aim_strdup(value); - onie->vendor = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, 
"MAC Addresses", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->mac_range = atoi(value); - /* Check if onie version first run and cache file exist */ - rc = stat(onie_version_file, &stat_buf); - if (-1 == rc) - { - rc = system(onie_version_command); - if (-1 == rc) { - return rc; - } - exit_status = WEXITSTATUS(rc); - if (EXIT_SUCCESS != exit_status) { - return ONLP_STATUS_E_GENERIC; - }} - snprintf(command, sizeof(command), "cat '%s'", onie_version_file); - _onlp_sysi_execute_command(command, value); - /* ONIE version */ - onie->onie_version = aim_strdup(value); - - /* Platform name */ - onie->platform_name = aim_strdup("x86_64-mlnx_msn2100-r0"); - - return ONLP_STATUS_OK; + return onlp_onie_read_json(onie, + "/lib/platform-config/current/onl/etc/onie/eeprom.json"); } - diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py index 07c284ea..f7e82a25 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/python/x86_64_mlnx_msn2100_r0/__init__.py @@ -10,8 +10,8 @@ class OnlPlatform_x86_64_mlnx_msn2100_r0(OnlPlatformMellanox, def baseconfig(self): # load modules import os - # necessary if there are issues with the install + # necessary if there are issues with the install # os.system("/usr/bin/apt-get install") os.system("/etc/mlnx/mlnx-hw-management start") - + self.syseeprom_export(); return True diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c index 5ad5efd1..157396a6 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c @@ -52,30 +52,6 @@ static char arr_cplddev_name[NUM_OF_CPLD][30] = "cpld_port_version" }; -static void -_onlp_sysi_execute_command(char *command, char buffer[COMMAND_OUTPUT_BUFFER]) -{ - FILE *fp = NULL; - - /* Open the command for reading. 
*/ - fp = popen(command, "r"); - if (NULL == fp) { - AIM_LOG_WARN("Failed to run command '%s'\n", command); - } - - /* Read the output */ - if (fgets(buffer, COMMAND_OUTPUT_BUFFER-1, fp) == NULL) { - AIM_LOG_WARN("Failed to read output of command '%s'\n", command); - pclose(fp); - } - - /* The last symbol is '\n', so remote it */ - buffer[strnlen(buffer, COMMAND_OUTPUT_BUFFER) - 1] = '\0'; - - /* close */ - pclose(fp); -} - const char* onlp_sysi_platform_get(void) { @@ -139,128 +115,9 @@ onlp_sysi_oids_get(onlp_oid_t* table, int max) return 0; } -static int -_onlp_sysi_grep_output(char value[256], const char *attr, const char *tmp_file) -{ - int value_offset = 30; /* value offset in onie-syseeprom */ - char command[256] = {0}; - char buffer[COMMAND_OUTPUT_BUFFER] = {0}; - int v = 0; - - snprintf(command, sizeof(command), "cat '%s' | grep '%s'", tmp_file, attr); - _onlp_sysi_execute_command(command, buffer); - - /* Reading value from buffer with command output */ - while (buffer[value_offset] != '\n' && - buffer[value_offset] != '\r' && - buffer[value_offset] != '\0') { - value[v] = buffer[value_offset]; - v++; - value_offset++; - } - value[v] = '\0'; - - AIM_LOG_VERBOSE("Value for sytem attribute '%s' is '%s' \n", attr, value); - - return ONLP_STATUS_OK; -} - int onlp_sysi_onie_info_get(onlp_onie_info_t* onie) { - - const char onie_version_file[] = "/bsp/onie-version"; - const char onie_version_command[] = "onie-shell -c 'onie-sysinfo -v' > /bsp/onie-version"; - const char onie_syseeprom_file[] = "/bsp/onie-syseeprom"; - const char onie_syseeprom_command[] = "onie-shell -c onie-syseeprom > /bsp/onie-syseeprom"; - struct stat stat_buf; - char value[256] = {0}; - char command[256] = {0}; - int rc = 0; - int exit_status; - - /* We must initialize this otherwise crash occurs while free memory */ - list_init(&onie->vx_list); - - /* Check if cache file exist */ - rc = stat(onie_syseeprom_file, &stat_buf); - if (-1 == rc) { - rc = system(onie_syseeprom_command); - if (-1 == rc) { - return rc; - } - exit_status = WEXITSTATUS(rc); - if (EXIT_SUCCESS != exit_status) { - return ONLP_STATUS_E_GENERIC; - } - } - - rc = _onlp_sysi_grep_output(value, "Product Name", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->product_name = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Part Number", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->part_number = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Serial Number", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->serial_number = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Base MAC Address", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - strncpy((char*)onie->mac, value, sizeof(onie->mac)); - rc = _onlp_sysi_grep_output(value, "Manufacture Date", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacture_date = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Device Version", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->device_version = atoi(value); - rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacturer = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacturer = aim_strdup(value); - onie->vendor = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "MAC 
Addresses", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->mac_range = atoi(value); - /* Check if onie version first run and cache file exist */ - rc = stat(onie_version_file, &stat_buf); - if (-1 == rc) - { - rc = system(onie_version_command); - if (-1 == rc) { - return rc; - } - exit_status = WEXITSTATUS(rc); - if (EXIT_SUCCESS != exit_status) { - return ONLP_STATUS_E_GENERIC; - }} - snprintf(command, sizeof(command), "cat '%s'", onie_version_file); - _onlp_sysi_execute_command(command, value); - /* ONIE version */ - onie->onie_version = aim_strdup(value); - - /* Platform name */ - onie->platform_name = aim_strdup("x86_64-mlnx_msn2410-r0"); - - return ONLP_STATUS_OK; + return onlp_onie_read_json(onie, + "/lib/platform-config/current/onl/etc/onie/eeprom.json"); } - diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py index 5fea9592..88e38724 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/python/x86_64_mlnx_msn2410_r0/__init__.py @@ -10,8 +10,8 @@ class OnlPlatform_x86_64_mlnx_msn2410_r0(OnlPlatformMellanox, def baseconfig(self): # load modules import os - # necessary if there are issues with the install + # necessary if there are issues with the install # os.system("/usr/bin/apt-get install") os.system("/etc/mlnx/mlnx-hw-management start") - + self.syseeprom_export(); return True diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c index 89b806be..b9b22ede 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c @@ -52,30 +52,6 @@ static char arr_cplddev_name[NUM_OF_CPLD][30] = "cpld_port_version" }; -static void -_onlp_sysi_execute_command(char *command, char buffer[COMMAND_OUTPUT_BUFFER]) -{ - FILE *fp = NULL; - - /* Open the command for reading. 
*/ - fp = popen(command, "r"); - if (NULL == fp) { - AIM_LOG_WARN("Failed to run command '%s'\n", command); - } - - /* Read the output */ - if (fgets(buffer, COMMAND_OUTPUT_BUFFER-1, fp) == NULL) { - AIM_LOG_WARN("Failed to read output of command '%s'\n", command); - pclose(fp); - } - - /* The last symbol is '\n', so remote it */ - buffer[strnlen(buffer, COMMAND_OUTPUT_BUFFER) - 1] = '\0'; - - /* close */ - pclose(fp); -} - const char* onlp_sysi_platform_get(void) { @@ -138,129 +114,3 @@ onlp_sysi_oids_get(onlp_oid_t* table, int max) return 0; } - -static int -_onlp_sysi_grep_output(char value[256], const char *attr, const char *tmp_file) -{ - int value_offset = 30; /* value offset in onie-syseeprom */ - char command[256] = {0}; - char buffer[COMMAND_OUTPUT_BUFFER] = {0}; - int v = 0; - - snprintf(command, sizeof(command), "cat '%s' | grep '%s'", tmp_file, attr); - _onlp_sysi_execute_command(command, buffer); - - /* Reading value from buffer with command output */ - while (buffer[value_offset] != '\n' && - buffer[value_offset] != '\r' && - buffer[value_offset] != '\0') { - value[v] = buffer[value_offset]; - v++; - value_offset++; - } - value[v] = '\0'; - - AIM_LOG_VERBOSE("Value for sytem attribute '%s' is '%s' \n", attr, value); - - return ONLP_STATUS_OK; -} - -int -onlp_sysi_onie_info_get(onlp_onie_info_t* onie) -{ - - const char onie_version_file[] = "/bsp/onie-version"; - const char onie_version_command[] = "onie-shell -c 'onie-sysinfo -v' > /bsp/onie-version"; - const char onie_syseeprom_file[] = "/bsp/onie-syseeprom"; - const char onie_syseeprom_command[] = "onie-shell -c onie-syseeprom > /bsp/onie-syseeprom"; - struct stat stat_buf; - char value[256] = {0}; - char command[256] = {0}; - int rc = 0; - int exit_status; - - /* We must initialize this otherwise crash occurs while free memory */ - list_init(&onie->vx_list); - - /* Check if cache file exist */ - rc = stat(onie_syseeprom_file, &stat_buf); - if (-1 == rc) { - rc = system(onie_syseeprom_command); - if (-1 == rc) { - return rc; - } - exit_status = WEXITSTATUS(rc); - if (EXIT_SUCCESS != exit_status) { - return ONLP_STATUS_E_GENERIC; - } - } - - rc = _onlp_sysi_grep_output(value, "Product Name", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->product_name = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Part Number", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->part_number = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Serial Number", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->serial_number = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Base MAC Address", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - strncpy((char*)onie->mac, value, sizeof(onie->mac)); - rc = _onlp_sysi_grep_output(value, "Manufacture Date", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacture_date = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Device Version", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->device_version = atoi(value); - rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacturer = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "Manufacturer", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->manufacturer = aim_strdup(value); - onie->vendor = aim_strdup(value); - rc = _onlp_sysi_grep_output(value, "MAC 
Addresses", onie_syseeprom_file); - if (ONLP_STATUS_OK != rc) { - return rc; - } - onie->mac_range = atoi(value); - /* Check if onie version first run and cache file exist */ - rc = stat(onie_version_file, &stat_buf); - if (-1 == rc) - { - rc = system(onie_version_command); - if (-1 == rc) { - return rc; - } - exit_status = WEXITSTATUS(rc); - if (EXIT_SUCCESS != exit_status) { - return ONLP_STATUS_E_GENERIC; - }} - snprintf(command, sizeof(command), "cat '%s'", onie_version_file); - _onlp_sysi_execute_command(command, value); - /* ONIE version */ - onie->onie_version = aim_strdup(value); - - /* Platform name */ - onie->platform_name = aim_strdup("x86_64-mlnx_msn2700-r0"); - - return ONLP_STATUS_OK; -} - diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py index 4e52aa01..c12120d1 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/python/x86_64_mlnx_msn2700_r0/__init__.py @@ -10,8 +10,8 @@ class OnlPlatform_x86_64_mlnx_msn2700_r0(OnlPlatformMellanox, def baseconfig(self): # load modules import os - # necessary if there are issues with the install + # necessary if there are issues with the install # os.system("/usr/bin/apt-get install") os.system("/etc/mlnx/mlnx-hw-management start") - + self.syseeprom_export(); return True From f2f35df0ec304e1b9a97c5b049e02849e09ddb99 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 16 Dec 2016 22:03:34 +0000 Subject: [PATCH 165/255] Add new onlp_sys_hdr_get() wrapper around onlp_sysi_oids_get() to improve OID iteration. We don't need the full system information (ONIE and platform) retrieved each time we want to iterate the OIDs. --- .../any/onlp/src/onlp/module/inc/onlp/sys.h | 5 +++++ .../base/any/onlp/src/onlp/module/src/oids.c | 18 +++++++----------- .../base/any/onlp/src/onlp/module/src/sys.c | 9 +++++++++ 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/packages/base/any/onlp/src/onlp/module/inc/onlp/sys.h b/packages/base/any/onlp/src/onlp/module/inc/onlp/sys.h index 655f1d57..ce2c98a3 100644 --- a/packages/base/any/onlp/src/onlp/module/inc/onlp/sys.h +++ b/packages/base/any/onlp/src/onlp/module/inc/onlp/sys.h @@ -62,6 +62,11 @@ int onlp_sys_info_get(onlp_sys_info_t* rv); */ void onlp_sys_info_free(onlp_sys_info_t* info); +/** + * @brief Get the system header. + */ +int onlp_sys_hdr_get(onlp_oid_hdr_t* hdr); + /** * @brief SYS OID debug dump. * @param id The SYS OID. diff --git a/packages/base/any/onlp/src/onlp/module/src/oids.c b/packages/base/any/onlp/src/onlp/module/src/oids.c index 9bac8979..baea7610 100644 --- a/packages/base/any/onlp/src/onlp/module/src/oids.c +++ b/packages/base/any/onlp/src/onlp/module/src/oids.c @@ -1,21 +1,21 @@ /************************************************************ * - * - * Copyright 2014, 2015 Big Switch Networks, Inc. - * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * * Licensed under the Eclipse Public License, Version 1.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at - * + * * http://www.eclipse.org/legal/epl-v10.html - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. - * + * * ************************************************************ * @@ -80,11 +80,7 @@ oid_type_unknown_dump__(onlp_oid_t oid, aim_pvs_t* pvs, uint32_t flags) static int oid_type_SYS_hdr_get__(onlp_oid_t oid, onlp_oid_hdr_t* hdr) { - int rv; - onlp_sys_info_t si; - rv = onlp_sys_info_get(&si); - memcpy(hdr, &si.hdr, sizeof(si.hdr)); - return rv; + return onlp_sys_hdr_get(hdr); } static int diff --git a/packages/base/any/onlp/src/onlp/module/src/sys.c b/packages/base/any/onlp/src/onlp/module/src/sys.c index 22b366d2..c8090476 100644 --- a/packages/base/any/onlp/src/onlp/module/src/sys.c +++ b/packages/base/any/onlp/src/onlp/module/src/sys.c @@ -168,6 +168,15 @@ onlp_sys_info_free(onlp_sys_info_t* info) onlp_sysi_platform_info_free(&info->platform_info); } +static int +onlp_sys_hdr_get_locked__(onlp_oid_hdr_t* hdr) +{ + memset(hdr, 0, sizeof(*hdr)); + return onlp_sysi_oids_get(hdr->coids, AIM_ARRAYSIZE(hdr->coids)); +} +ONLP_LOCKED_API1(onlp_sys_hdr_get, onlp_oid_hdr_t*, hdr); + + void onlp_sys_dump(onlp_oid_t id, aim_pvs_t* pvs, uint32_t flags) { From 40e85b57199f7098e47ba0e3a1517d753e0a855c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 19 Dec 2016 14:46:48 +0000 Subject: [PATCH 166/255] The default build is now Debian 8. --- docker/tools/onlbuilder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/tools/onlbuilder b/docker/tools/onlbuilder index 8e5e8c47..265e3349 100755 --- a/docker/tools/onlbuilder +++ b/docker/tools/onlbuilder @@ -20,7 +20,7 @@ g_timestamp = datetime.datetime.now().strftime("%Y-%m-%d.%H%M%S") g_builder7_image_name="opennetworklinux/builder7:1.2" g_builder8_image_name="opennetworklinux/builder8:1.5" -g_default_image_name=g_builder7_image_name +g_default_image_name=g_builder8_image_name g_default_container_name = "%s_%s" % (g_current_user, g_timestamp) g_default_user="%s:%s" % (g_current_user, g_current_uid) From d713874b113382b9c15c4b259e42f4688dde330d Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 19 Dec 2016 06:53:00 -0800 Subject: [PATCH 167/255] Update version. --- docker/tools/PKG.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/tools/PKG.yml b/docker/tools/PKG.yml index 9a7db5b2..e763aff6 100644 --- a/docker/tools/PKG.yml +++ b/docker/tools/PKG.yml @@ -1,6 +1,6 @@ common: arch: all - version: 1.1.0 + version: 1.3.0 copyright: Copyright 2013, 2014, 2015 Big Switch Networks maintainer: support@bigswitch.com support: opennetworklinux@googlegroups.com From 2449ac19c5ad68f106261bae7a5edd990647efc8 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 19 Dec 2016 15:59:57 +0000 Subject: [PATCH 168/255] Set platform_name to correct value. 
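The same small pattern is applied to all three Mellanox platforms below; in outline (ONIE_PLATFORM_NAME is the per-platform define added by this patch, error handling kept minimal):

    int
    onlp_sysi_onie_info_get(onlp_onie_info_t* onie)
    {
        int rv = onlp_onie_read_json(onie,
            "/lib/platform-config/current/onl/etc/onie/eeprom.json");
        if(rv >= 0) {
            /* Report the canonical ONIE platform name regardless of
             * what the cached JSON contains. */
            if(onie->platform_name) {
                aim_free(onie->platform_name);
            }
            onie->platform_name = aim_strdup(ONIE_PLATFORM_NAME);
        }
        return rv;
    }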
--- .../onlp/builds/src/module/src/sysi.c | 18 ++++++++++--- .../onlp/builds/src/module/src/sysi.c | 17 +++++++++--- .../onlp/builds/src/module/src/sysi.c | 27 ++++++++++++++++--- 3 files changed, 51 insertions(+), 11 deletions(-) diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c index e104052c..621e6525 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/onlp/builds/src/module/src/sysi.c @@ -36,7 +36,8 @@ #include "x86_64_mlnx_msn2100_int.h" #include "x86_64_mlnx_msn2100_log.h" - +#define ONL_PLATFORM_NAME "x86-64-mlnx-msn2100-r0" +#define ONIE_PLATFORM_NAME "x86_64-mlnx_msn2100-r0" #define COMMAND_OUTPUT_BUFFER 256 @@ -51,7 +52,7 @@ static char arr_cplddev_name[NUM_OF_CPLD][30] = const char* onlp_sysi_platform_get(void) { - return "x86-64-mlnx-msn2100-r0"; + return ONL_PLATFORM_NAME; } int @@ -109,9 +110,18 @@ onlp_sysi_oids_get(onlp_oid_t* table, int max) #include + int onlp_sysi_onie_info_get(onlp_onie_info_t* onie) { - return onlp_onie_read_json(onie, - "/lib/platform-config/current/onl/etc/onie/eeprom.json"); + int rv = onlp_onie_read_json(onie, + "/lib/platform-config/current/onl/etc/onie/eeprom.json"); + if(rv >= 0) { + if(onie->platform_name) { + aim_free(onie->platform_name); + } + onie->platform_name = aim_strdup(ONIE_PLATFORM_NAME); + } + + return rv; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c index 157396a6..ff753835 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/onlp/builds/src/module/src/sysi.c @@ -36,6 +36,9 @@ #include "x86_64_mlnx_msn2410_int.h" #include "x86_64_mlnx_msn2410_log.h" +#define ONL_PLATFORM_NAME "x86-64-mlnx-msn2410-r0" +#define ONIE_PLATFORM_NAME "x86-64-mlnx_msn2410-r0" + #define NUM_OF_THERMAL_ON_MAIN_BROAD CHASSIS_THERMAL_COUNT #define NUM_OF_FAN_ON_MAIN_BROAD CHASSIS_FAN_COUNT #define NUM_OF_PSU_ON_MAIN_BROAD 2 @@ -55,7 +58,7 @@ static char arr_cplddev_name[NUM_OF_CPLD][30] = const char* onlp_sysi_platform_get(void) { - return "x86-64-mlnx-msn2410-r0"; + return ONL_PLATFORM_NAME; } int @@ -118,6 +121,14 @@ onlp_sysi_oids_get(onlp_oid_t* table, int max) int onlp_sysi_onie_info_get(onlp_onie_info_t* onie) { - return onlp_onie_read_json(onie, - "/lib/platform-config/current/onl/etc/onie/eeprom.json"); + int rv = onlp_onie_read_json(onie, + "/lib/platform-config/current/onl/etc/onie/eeprom.json"); + if(rv >= 0) { + if(onie->platform_name) { + aim_free(onie->platform_name); + } + onie->platform_name = aim_strdup(ONIE_PLATFORM_NAME); + } + + return rv; } diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c index b9b22ede..c9225e19 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/onlp/builds/src/module/src/sysi.c @@ -36,15 +36,19 @@ #include "x86_64_mlnx_msn2700_int.h" #include "x86_64_mlnx_msn2700_log.h" +#define ONL_PLATFORM_NAME "x86-64-mlnx-msn2700-r0" +#define ONIE_PLATFORM_NAME 
"x86_64-mlnx_msn2700-r0" + #define NUM_OF_THERMAL_ON_MAIN_BROAD CHASSIS_THERMAL_COUNT #define NUM_OF_FAN_ON_MAIN_BROAD CHASSIS_FAN_COUNT #define NUM_OF_PSU_ON_MAIN_BROAD 2 #define NUM_OF_LED_ON_MAIN_BROAD 6 -#define COMMAND_OUTPUT_BUFFER 256 +#define COMMAND_OUTPUT_BUFFER 256 + +#define PREFIX_PATH_ON_CPLD_DEV "/bsp/cpld" +#define NUM_OF_CPLD 3 -#define PREFIX_PATH_ON_CPLD_DEV "/bsp/cpld" -#define NUM_OF_CPLD 3 static char arr_cplddev_name[NUM_OF_CPLD][30] = { "cpld_brd_version", @@ -55,7 +59,7 @@ static char arr_cplddev_name[NUM_OF_CPLD][30] = const char* onlp_sysi_platform_get(void) { - return "x86-64-mlnx-msn2700-r0"; + return ONL_PLATFORM_NAME; } int @@ -114,3 +118,18 @@ onlp_sysi_oids_get(onlp_oid_t* table, int max) return 0; } + +int +onlp_sysi_onie_info_get(onlp_onie_info_t* onie) +{ + int rv = onlp_onie_read_json(onie, + "/lib/platform-config/current/onl/etc/onie/eeprom.json"); + if(rv >= 0) { + if(onie->platform_name) { + aim_free(onie->platform_name); + } + onie->platform_name = aim_strdup(ONIE_PLATFORM_NAME); + } + + return rv; +} From 68da278bed1f56b7a887cd4f66949a805fdbe08a Mon Sep 17 00:00:00 2001 From: Ken Chiang Date: Mon, 19 Dec 2016 11:23:56 -0800 Subject: [PATCH 169/255] Add CpuAllPercentUtilization and CpuAllPercentIdle. --- packages/base/any/onlp-snmpd/APKG.yml | 1 + .../base/any/onlp-snmpd/bin/onl-snmp-mpstat | 33 ++++++ packages/base/any/onlp-snmpd/builds/Makefile | 2 +- .../onlp_snmp/module/src/onlp_snmp_platform.c | 110 +++++++++++++++++- 4 files changed, 144 insertions(+), 2 deletions(-) create mode 100755 packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat diff --git a/packages/base/any/onlp-snmpd/APKG.yml b/packages/base/any/onlp-snmpd/APKG.yml index 76214600..bb37f217 100644 --- a/packages/base/any/onlp-snmpd/APKG.yml +++ b/packages/base/any/onlp-snmpd/APKG.yml @@ -16,6 +16,7 @@ packages: files: builds/$BUILD_DIR/${TOOLCHAIN}/bin/onlp-snmpd: /usr/bin/onlp-snmpd ${ONL}/packages/base/any/onlp-snmpd/bin/onl-snmpwalk : /usr/bin/onl-snmpwalk + ${ONL}/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat : /usr/bin/onl-snmp-mpstat init: ${ONL}/packages/base/any/onlp-snmpd/onlp-snmpd.init diff --git a/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat b/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat new file mode 100755 index 00000000..4b41d1b8 --- /dev/null +++ b/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat @@ -0,0 +1,33 @@ +#!/usr/bin/python + +# call mpstat and generate json output containing stats for all cpus + +""" +sample output from "mpstat 1 1": +# mpstat 1 1 +Linux 3.8.13-OpenNetworkLinux-e500mc-1.5 (as6700-3) 2016-12-15 _ppc_(4 CPU) + +04:59:31 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle +04:59:32 PM all 5.17 0.00 2.07 0.00 0.00 0.00 0.00 0.00 92.76 +Average: all 5.17 0.00 2.07 0.00 0.00 0.00 0.00 0.00 92.76 +""" + +import subprocess +import json + +stats = {} + +# 1 second interval, 1 count +out = subprocess.check_output(['mpstat','1','1']) + +for line in out.split('\n'): + if "%idle" in line: + # extract keys from header line, skipping over time and AM/PM + keys = line.split()[2:] + + if "Average" in line: + vals = line.split()[1:] + stats[vals[0]] = { k:int(round(float(v))) \ + for (k,v) in zip(keys[1:],vals[1:]) } + +print json.dumps(stats) diff --git a/packages/base/any/onlp-snmpd/builds/Makefile b/packages/base/any/onlp-snmpd/builds/Makefile index f423035f..c57a98a8 100644 --- a/packages/base/any/onlp-snmpd/builds/Makefile +++ b/packages/base/any/onlp-snmpd/builds/Makefile @@ -3,7 +3,7 @@ include $(ONL)/make/any.mk MODULE := onlp-snmpd 
include $(BUILDER)/standardinit.mk -DEPENDMODULES := onlp_snmp AIM OS snmp_subagent IOF onlplib +DEPENDMODULES := onlp_snmp AIM OS snmp_subagent IOF onlplib OS cjson cjson_util DEPENDMODULE_HEADERS := onlp include $(BUILDER)/dependmodules.mk diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c index 1a6b3e55..f0e83ab6 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c @@ -25,6 +25,9 @@ #include #include "onlp_snmp_log.h" +#include +#include +#include #include #include #include @@ -54,7 +57,7 @@ platform_string_register(int index, const char* desc, char* value) netsnmp_register_watched_scalar( reg, winfo ); } -void +static void platform_int_register(int index, char* desc, int value) { oid tree[] = { 1, 3, 6, 1, 4, 1, 42623, 1, 1, 1, 1, 1}; @@ -69,6 +72,108 @@ platform_int_register(int index, char* desc, int value) v, NULL); } +static void +resource_int_register(int index, const char* desc, + Netsnmp_Node_Handler *handler) +{ + oid tree[] = { 1, 3, 6, 1, 4, 1, 42623, 1, 3, 1, 1 }; + tree[10] = index; + + netsnmp_handler_registration *reg = + netsnmp_create_handler_registration(desc, handler, + tree, OID_LENGTH(tree), + HANDLER_CAN_RONLY); + if (netsnmp_register_scalar(reg) != MIB_REGISTERED_OK) { + AIM_LOG_ERROR("registering handler for %s failed", desc); + } +} + + +/* resource objects refreshed with this period; units in seconds */ +#define RESOURCE_UPDATE_PERIOD 5 + +/* resource objects */ +typedef struct { + uint32_t utilization_percent; + uint32_t idle_percent; +} resources_t; + +static resources_t resources; +static uint64_t resource_update_time; + +void resource_update(void) +{ + uint64_t now = os_time_monotonic(); + if (now - resource_update_time > RESOURCE_UPDATE_PERIOD * 1000 * 1000) { + resource_update_time = now; + AIM_LOG_INFO("update resource objects"); + + /* invoke mpstat collection script for json output */ + FILE *fp = popen("/usr/bin/onl-snmp-mpstat", "r"); + if (fp == NULL) { + AIM_LOG_ERROR("failed invoking onl-snmp-mpstat"); + return; + } + + /* parse json output */ + char line[1024]; + while (fgets(line, sizeof(line), fp) != NULL) { + cJSON *root = cJSON_Parse(line); + int result; + int rv = cjson_util_lookup_int(root, &result, "all.%%idle"); + if (rv == 0) { + /* save it */ + resources.idle_percent = result; + resources.utilization_percent = 100 - result; + } + cJSON_Delete(root); + } + } +} + +static int +utilization_handler(netsnmp_mib_handler *handler, + netsnmp_handler_registration *reginfo, + netsnmp_agent_request_info *reqinfo, + netsnmp_request_info *requests) +{ + if (MODE_GET == reqinfo->mode) { + resource_update(); + snmp_set_var_typed_value(requests->requestvb, ASN_GAUGE, + (u_char *) &resources.utilization_percent, + sizeof(resources.utilization_percent)); + } else { + netsnmp_assert("bad mode in RO handler"); + } + + if (handler->next && handler->next->access_method) { + return netsnmp_call_next_handler(handler, reginfo, reqinfo, requests); + } + + return SNMP_ERR_NOERROR; +} + +static int +idle_handler(netsnmp_mib_handler *handler, + netsnmp_handler_registration *reginfo, + netsnmp_agent_request_info *reqinfo, + netsnmp_request_info *requests) +{ + if (MODE_GET == reqinfo->mode) { + resource_update(); + snmp_set_var_typed_value(requests->requestvb, ASN_GAUGE, + (u_char *) &resources.idle_percent, + 
sizeof(resources.idle_percent)); + } else { + netsnmp_assert("bad mode in RO handler"); + } + + if (handler->next && handler->next->access_method) { + return netsnmp_call_next_handler(handler, reginfo, reqinfo, requests); + } + + return SNMP_ERR_NOERROR; +} void onlp_snmp_platform_init(void) @@ -110,5 +215,8 @@ onlp_snmp_platform_init(void) REGISTER_STR(14, service_tag); REGISTER_STR(15, onie_version); } + + resource_int_register(1, "CpuAllPercentUtilization", utilization_handler); + resource_int_register(2, "CpuAllPercentIdle", idle_handler); } From e85b4d45f00537a68f9790cdf611f8e5e32c0ba4 Mon Sep 17 00:00:00 2001 From: Ken Chiang Date: Mon, 19 Dec 2016 13:33:55 -0800 Subject: [PATCH 170/255] Redefine CpuAllPercentUtilization and CpuAllPercentIdle to be 100 times the percentage rounded to the nearest integer. Use aim_time_monotonic. Add ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS. --- docs/mibs/OCP-ONL-RESOURCE-MIB.txt | 4 ++-- packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat | 2 +- packages/base/any/onlp-snmpd/builds/Makefile | 2 +- .../builds/src/onlp_snmp/module/auto/onlp_snmp.yml | 3 +++ .../module/inc/onlp_snmp/onlp_snmp_config.h | 10 ++++++++++ .../src/onlp_snmp/module/src/onlp_snmp_config.c | 5 +++++ .../src/onlp_snmp/module/src/onlp_snmp_platform.c | 12 +++++------- 7 files changed, 27 insertions(+), 11 deletions(-) diff --git a/docs/mibs/OCP-ONL-RESOURCE-MIB.txt b/docs/mibs/OCP-ONL-RESOURCE-MIB.txt index 21da5843..3167c31b 100644 --- a/docs/mibs/OCP-ONL-RESOURCE-MIB.txt +++ b/docs/mibs/OCP-ONL-RESOURCE-MIB.txt @@ -34,7 +34,7 @@ CpuAllPercentUtilization OBJECT-TYPE MAX-ACCESS read-only STATUS current DESCRIPTION - "The average CPU utilization (in percent). Provided by mpstat." + "The average CPU utilization in percent, multiplied by 100 and rounded to the nearest integer. Provided by mpstat." ::= { Basic 1 } CpuAllPercentIdle OBJECT-TYPE @@ -42,7 +42,7 @@ CpuAllPercentIdle OBJECT-TYPE MAX-ACCESS read-only STATUS current DESCRIPTION - "The average CPU idle time (in percent). Provided by mpstat." + "The average CPU idle time in percent, multiplied by 100 and rounded to the nearest integer. Provided by mpstat." 
::= { Basic 2 } END diff --git a/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat b/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat index 4b41d1b8..e554a223 100755 --- a/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat +++ b/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat @@ -27,7 +27,7 @@ for line in out.split('\n'): if "Average" in line: vals = line.split()[1:] - stats[vals[0]] = { k:int(round(float(v))) \ + stats[vals[0]] = { k:int(round(100*float(v))) \ for (k,v) in zip(keys[1:],vals[1:]) } print json.dumps(stats) diff --git a/packages/base/any/onlp-snmpd/builds/Makefile b/packages/base/any/onlp-snmpd/builds/Makefile index c57a98a8..6a5f08da 100644 --- a/packages/base/any/onlp-snmpd/builds/Makefile +++ b/packages/base/any/onlp-snmpd/builds/Makefile @@ -3,7 +3,7 @@ include $(ONL)/make/any.mk MODULE := onlp-snmpd include $(BUILDER)/standardinit.mk -DEPENDMODULES := onlp_snmp AIM OS snmp_subagent IOF onlplib OS cjson cjson_util +DEPENDMODULES := onlp_snmp AIM OS snmp_subagent IOF onlplib cjson cjson_util DEPENDMODULE_HEADERS := onlp include $(BUILDER)/dependmodules.mk diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml index 6e62a16c..0feaa662 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/auto/onlp_snmp.yml @@ -56,6 +56,9 @@ cdefs: &cdefs - ONLP_SNMP_CONFIG_AS_SUBAGENT: doc: "Configure as an snmp_subagent client." default: 0 +- ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS: + doc: "Resource object update period in seconds." + default: 5 definitions: cdefs: diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h index 383142b2..ef7bf971 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/inc/onlp_snmp/onlp_snmp_config.h @@ -191,6 +191,16 @@ #define ONLP_SNMP_CONFIG_AS_SUBAGENT 0 #endif +/** + * ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS + * + * Resource object update period in seconds. 
*/ + + +#ifndef ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS +#define ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS 5 +#endif + /** diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c index 339c5f42..418b89f1 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_config.c @@ -94,6 +94,11 @@ onlp_snmp_config_settings_t onlp_snmp_config_settings[] = { __onlp_snmp_config_STRINGIFY_NAME(ONLP_SNMP_CONFIG_AS_SUBAGENT), __onlp_snmp_config_STRINGIFY_VALUE(ONLP_SNMP_CONFIG_AS_SUBAGENT) }, #else { ONLP_SNMP_CONFIG_AS_SUBAGENT(__onlp_snmp_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS + { __onlp_snmp_config_STRINGIFY_NAME(ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS), __onlp_snmp_config_STRINGIFY_VALUE(ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS) }, +#else +{ ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS(__onlp_snmp_config_STRINGIFY_NAME), "__undefined__" }, #endif { NULL, NULL } }; diff --git a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c index f0e83ab6..88a5ef52 100644 --- a/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c +++ b/packages/base/any/onlp-snmpd/builds/src/onlp_snmp/module/src/onlp_snmp_platform.c @@ -25,7 +25,7 @@ #include #include "onlp_snmp_log.h" -#include +#include #include #include #include @@ -89,9 +89,6 @@ resource_int_register(int index, const char* desc, } -/* resource objects refreshed with this period; units in seconds */ -#define RESOURCE_UPDATE_PERIOD 5 - /* resource objects */ typedef struct { uint32_t utilization_percent; @@ -103,8 +100,9 @@ static uint64_t resource_update_time; void resource_update(void) { - uint64_t now = os_time_monotonic(); - if (now - resource_update_time > RESOURCE_UPDATE_PERIOD * 1000 * 1000) { + uint64_t now = aim_time_monotonic(); + if (now - resource_update_time > + (ONLP_SNMP_CONFIG_RESOURCE_UPDATE_SECONDS * 1000 * 1000)) { resource_update_time = now; AIM_LOG_INFO("update resource objects"); @@ -124,7 +122,7 @@ void resource_update(void) if (rv == 0) { /* save it */ resources.idle_percent = result; - resources.utilization_percent = 100 - result; + resources.utilization_percent = 100*100 - result; } cJSON_Delete(root); } From 2b4a25a7c61480bc439c973800706b534f1c97cb Mon Sep 17 00:00:00 2001 From: Steven Noble Date: Mon, 19 Dec 2016 19:20:00 -0800 Subject: [PATCH 171/255] Added Mellanox Switches @micshy please confirm the data is correct --- docs/SupportedHardware.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/SupportedHardware.md b/docs/SupportedHardware.md index 07a26204..225d22c4 100644 --- a/docs/SupportedHardware.md +++ b/docs/SupportedHardware.md @@ -80,6 +80,21 @@ Interface Masters Technologies, Inc. Niagara 2948X6XL 48x10G + 6x40G Intel/AMD x86 Broadcom BCM56850 (Trident2) Work In Progress** No Yes*** Yes No +Mellanox +--- + + + + + + + + + + + +
Device  | Ports           | CPU                    | Forwarding        | ONL Certified | In Lab | SAI
SN2100  | 16x100G         | Intel Rangeley C2558   | Mellanox Spectrum | Yes           | Yes    | Yes
SN2100B | 16x40G          | Intel Rangeley C2558   | Mellanox Spectrum | Yes           | No     | Yes
SN2410  | 48x25G + 8x100G | Intel Ivybridge 1047UE | Mellanox Spectrum | Yes           | Yes    | Yes
SN2410B | 48x10G + 8x100G | Intel Ivybridge 1047UE | Mellanox Spectrum | Yes           | No     | Yes
SN2700  | 32x100G         | Intel Ivybridge 1047UE | Mellanox Spectrum | Yes           | Yes    | Yes
SN2700B | 32x40G          | Intel Ivybridge 1047UE | Mellanox Spectrum | Yes           | No     | Yes
+ Notes: --- From ff67e4623b68eb7eca3baf3a06ed1f5399f7f75d Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 20 Dec 2016 18:22:47 +0000 Subject: [PATCH 172/255] Add sysstat to jessie builds. --- builds/any/rootfs/jessie/common/all-base-packages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/builds/any/rootfs/jessie/common/all-base-packages.yml b/builds/any/rootfs/jessie/common/all-base-packages.yml index 6e75713d..c33d5e8e 100644 --- a/builds/any/rootfs/jessie/common/all-base-packages.yml +++ b/builds/any/rootfs/jessie/common/all-base-packages.yml @@ -77,3 +77,4 @@ - gdb - tcpdump - strace +- sysstat From 6ab5f38d33716b5856c4747ecfd02b68895d875b Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 20 Dec 2016 21:10:08 +0000 Subject: [PATCH 173/255] The column output may differ based on the locale settings. When run from init the mpstat output is in 24 time and does not have the AM/PM column. This change handles both possibilities. --- packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat b/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat index e554a223..d7247f94 100755 --- a/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat +++ b/packages/base/any/onlp-snmpd/bin/onl-snmp-mpstat @@ -22,8 +22,9 @@ out = subprocess.check_output(['mpstat','1','1']) for line in out.split('\n'): if "%idle" in line: - # extract keys from header line, skipping over time and AM/PM - keys = line.split()[2:] + # extract keys from header line, skipping over time and AM/PM if present + tokens = line.split() + keys = tokens[tokens.index('CPU'):] if "Average" in line: vals = line.split()[1:] From bb5c865b97e1cfac862733526fc9be3b8643cf4e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 15:28:35 +0000 Subject: [PATCH 174/255] Enable/Disable ISMT driver through module parameter. 
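Usage note: because the switch is declared with module_param(), it can be passed as enable=0 when the driver is loaded as a module, or as i2c-ismt.enable=0 on the kernel command line when the driver is built in. The built-in, command-line form is what the Mellanox platform configs adopt two patches below, e.g.:

    i2c-ismt.enable=0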
--- ...iver-i2c-bus-intel-ismt-enable-param.patch | 27 +++++++++++++++++++ .../base/any/kernels/3.16+deb8/patches/series | 1 + 2 files changed, 28 insertions(+) create mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch new file mode 100644 index 00000000..612b02db --- /dev/null +++ b/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch @@ -0,0 +1,27 @@ +diff -urpN a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +--- a/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:12:49.589201206 +0000 ++++ b/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:15:03.973204122 +0000 +@@ -200,6 +200,11 @@ MODULE_PARM_DESC(bus_speed, "Bus Speed i + module_param(delay, uint, S_IRUGO); + MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)"); + ++/* Enable/Disable driver */ ++static unsigned int enable = 1; ++module_param(enable, uint, S_IRUGO); ++MODULE_PARM_DESC(enable, "Enable or disable the ISMT driver (enabled by default)"); ++ + /** + * __ismt_desc_dump() - dump the contents of a specific descriptor + */ +@@ -852,6 +857,11 @@ ismt_probe(struct pci_dev *pdev, const s + struct ismt_priv *priv; + unsigned long start, len; + ++ if(!enable) { ++ dev_warn(&pdev->dev, "module is disabled.\n"); ++ return -ENODEV; ++ } ++ + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; diff --git a/packages/base/any/kernels/3.16+deb8/patches/series b/packages/base/any/kernels/3.16+deb8/patches/series index f1785767..24a54939 100644 --- a/packages/base/any/kernels/3.16+deb8/patches/series +++ b/packages/base/any/kernels/3.16+deb8/patches/series @@ -13,3 +13,4 @@ driver-arista-piix4-mux-patch.patch 3.16-fs-overlayfs.patch driver-igb-version-5.3.54.patch driver-support-intel-igb-bcm5461X-phy.patch +driver-i2c-bus-intel-ismt-enable-param.patch From 77bbf186196c3cbd490358733596aee6a6ab83eb Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 15:29:46 +0000 Subject: [PATCH 175/255] Re-enable ISMT support. --- .../any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config index d0d25a42..94506877 100644 --- a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config @@ -1877,7 +1877,7 @@ CONFIG_I2C_ALGOPCA=y # CONFIG_I2C_AMD8111 is not set CONFIG_I2C_I801=y CONFIG_I2C_ISCH=y -# CONFIG_I2C_ISMT is not set +CONFIG_I2C_ISMT=y # CONFIG_I2C_PIIX4 is not set # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_SIS5595 is not set From cd552a0a2ba469100c8e4c86574b5440b3903f01 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 15:30:16 +0000 Subject: [PATCH 176/255] The ISMT driver should be disabled on these platforms. 
--- .../platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml | 3 ++- .../platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml | 3 ++- .../platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml index b2a9eb99..6cf8f529 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2100/platform-config/r0/src/lib/x86-64-mlnx-msn2100-r0.yml @@ -17,7 +17,7 @@ x86-64-mlnx-msn2100-r0: --parity=0 --stop=1 - kernel: + kernel: <<: *kernel-3-16 args: >- @@ -27,6 +27,7 @@ x86-64-mlnx-msn2100-r0: rd_NO_LUKS acpi_enforce_resources=lax acpi=noirq + i2c-ismt.enable=0 ##network ## interfaces: diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml index 11fc5728..88a54b31 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2410/platform-config/r0/src/lib/x86-64-mlnx-msn2410-r0.yml @@ -17,7 +17,7 @@ x86-64-mlnx-msn2410-r0: --parity=0 --stop=1 - kernel: + kernel: <<: *kernel-3-16 args: >- @@ -27,6 +27,7 @@ x86-64-mlnx-msn2410-r0: rd_NO_LUKS acpi_enforce_resources=lax acpi=noirq + i2c-ismt.enable=0 ##network ## interfaces: diff --git a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml index 2cc6742e..ebe3cb48 100644 --- a/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml +++ b/packages/platforms/mellanox/x86-64/x86-64-mlnx-msn2700/platform-config/r0/src/lib/x86-64-mlnx-msn2700-r0.yml @@ -17,7 +17,7 @@ x86-64-mlnx-msn2700-r0: --parity=0 --stop=1 - kernel: + kernel: <<: *kernel-3-16 args: >- @@ -27,6 +27,7 @@ x86-64-mlnx-msn2700-r0: rd_NO_LUKS acpi_enforce_resources=lax acpi=noirq + i2c-ismt.enable=0 ##network ## interfaces: From 35f6859251ff8528f88cbc33470b932f9460ff88 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 22:12:39 +0000 Subject: [PATCH 177/255] - Allow comments - Add patch separator --- tools/scripts/apply-patches.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/scripts/apply-patches.sh b/tools/scripts/apply-patches.sh index 5ec1d439..cb921001 100755 --- a/tools/scripts/apply-patches.sh +++ b/tools/scripts/apply-patches.sh @@ -1,4 +1,4 @@ -#! /bin/sh +#! /bin/bash ############################################################ # # @@ -34,7 +34,10 @@ if [ -f "${PATCH_SERIES}" ]; then # The series file contains the patch order. # for p in `cat ${PATCH_SERIES}`; do - echo "Appying ${p}..." + if [[ $p = \#* ]]; then + continue; + fi + echo "*** Appying ${p}..." 
if [ -x "${PATCHDIR}/${p}" ]; then "${PATCHDIR}/${p}" "${KERNDIR}" else From a7a195e2f50f6991d9f011e7534bca8b0c9c8fe1 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 22:42:50 +0000 Subject: [PATCH 178/255] 3.16 LTS --- .../3.16-lts/configs/x86_64-all/.gitignore | 3 + .../3.16-lts/configs/x86_64-all/Makefile | 37 + .../configs/x86_64-all/x86_64-all.config | 3567 ++ packages/base/any/kernels/3.16-lts/kconfig.mk | 30 + .../kernels/3.16-lts/patches/changelog.patch | 18 + .../driver-arista-piix4-mux-patch.patch | 146 + ...-at24-fix-odd-length-two-byte-access.patch | 34 + .../driver-hwmon-max6620-fix-rpm-calc.patch | 196 + .../patches/driver-hwmon-max6620-update.patch | 113 + .../patches/driver-hwmon-max6620.patch | 753 + ...river-hwmon-pmbus-add-dps460-support.patch | 78 + ...n-pmbus-dni_dps460-update-pmbus-core.patch | 96 + .../driver-hwmon-pmbus-dni_dps460.patch | 304 + .../driver-hwmon-pmbus-ucd9200-mlnx.patch | 89 + ...r-i2c-bus-intel-ismt-add-delay-param.patch | 57 + ...iver-i2c-bus-intel-ismt-enable-param.patch | 27 + .../patches/driver-igb-version-5.3.54.patch | 48795 ++++++++++++++++ ...river-support-intel-igb-bcm5461X-phy.patch | 242 + ...river-support-sff-8436-eeprom-update.patch | 141 + .../driver-support-sff-8436-eeprom.patch | 1086 + .../kernels/3.16-lts/patches/overlayfs.patch | 4309 ++ .../base/any/kernels/3.16-lts/patches/series | 16 + 22 files changed, 60137 insertions(+) create mode 100644 packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore create mode 100644 packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile create mode 100644 packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config create mode 100644 packages/base/any/kernels/3.16-lts/kconfig.mk create mode 100644 packages/base/any/kernels/3.16-lts/patches/changelog.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-support-intel-igb-bcm5461X-phy.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom-update.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/overlayfs.patch create mode 100644 packages/base/any/kernels/3.16-lts/patches/series diff --git 
a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore new file mode 100644 index 00000000..5dbdc5b9 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore @@ -0,0 +1,3 @@ +kernel-3.16* +linux-* + diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile new file mode 100644 index 00000000..2936639a --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile @@ -0,0 +1,37 @@ +############################################################ +# +# +# Copyright 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) +include $(ONL)/make/config.mk + +export ARCH := x86_64 +ifndef K_TARGET_DIR +K_TARGET_DIR := $(THIS_DIR) +endif + +include ../../kconfig.mk +K_CONFIG := x86_64-all.config +K_BUILD_TARGET := bzImage +K_COPY_SRC := arch/x86/boot/bzImage +ifndef K_COPY_DST +K_COPY_DST := kernel-3.16-lts-x86_64-all +endif + +include $(ONL)/make/kbuild.mk diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config new file mode 100644 index 00000000..94506877 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config @@ -0,0 +1,3567 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86_64 3.16.7-ckt25 Kernel Configuration +# +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_MMU=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_X86_64_SMP=y +CONFIG_X86_HT=y +CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-OpenNetworkLinux" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_BZIP2=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_IRQ_DOMAIN=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_PREEMPT_RCU is not set +CONFIG_RCU_STALL_COMMON=y +# CONFIG_RCU_USER_QS is not set +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_FANOUT_LEAF=16 +# CONFIG_RCU_FANOUT_EXACT is not set 
+CONFIG_RCU_FAST_NO_HZ=y +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_MEMCG=y +# CONFIG_MEMCG_DISABLED is not set +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +# CONFIG_CGROUP_HUGETLB is not set +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_PCI_QUIRKS=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +# CONFIG_KPROBES is not set +# CONFIG_JUMP_LABEL is not set +# CONFIG_UPROBES is not set +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_CC_STACKPROTECTOR_NONE=y +# CONFIG_CC_STACKPROTECTOR_REGULAR is not set +# CONFIG_CC_STACKPROTECTOR_STRONG is not set +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y 
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +CONFIG_STOP_MACHINE=y +CONFIG_BLOCK=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_THROTTLING is not set +# CONFIG_BLK_CMDLINE_PARSER is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +CONFIG_ACORN_PARTITION_CUMANA=y +CONFIG_ACORN_PARTITION_EESOX=y +CONFIG_ACORN_PARTITION_ICS=y +CONFIG_ACORN_PARTITION_ADFS=y +CONFIG_ACORN_PARTITION_POWERTEC=y +CONFIG_ACORN_PARTITION_RISCIX=y +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUE_RWLOCK=y +CONFIG_QUEUE_RWLOCK=y +CONFIG_FREEZER=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_MPPARSE=y +# CONFIG_X86_EXTENDED_PLATFORM is not set +# CONFIG_X86_INTEL_LPSS is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +# CONFIG_HYPERVISOR_GUEST is not set +CONFIG_NO_BOOTMEM=y +CONFIG_MEMTEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +CONFIG_CALGARY_IOMMU=y +CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS=512 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_X86_UP_APIC_MSI=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y 
+CONFIG_X86_MCE_THRESHOLD=y +# CONFIG_X86_MCE_INJECT is not set +CONFIG_X86_THERMAL_VECTOR=y +# CONFIG_X86_16BIT is not set +# CONFIG_I8K is not set +# CONFIG_MICROCODE is not set +# CONFIG_MICROCODE_INTEL_EARLY is not set +# CONFIG_MICROCODE_AMD_EARLY is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DIRECT_GBPAGES=y +# CONFIG_NUMA is not set +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_ZONE_DMA_FLAG=1 +CONFIG_BOUNCE=y +CONFIG_NEED_BOUNCE_POOL=y +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=y +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +# CONFIG_EFI is not set +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y + +# +# Power management and ACPI options +# +# CONFIG_SUSPEND is not set +# CONFIG_HIBERNATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_ACPI=y +# CONFIG_ACPI_PROCFS_POWER is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set +CONFIG_ACPI_THERMAL=y +# CONFIG_ACPI_CUSTOM_DSDT is not set +# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +CONFIG_X86_PM_TIMER=y +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_HOTPLUG_MEMORY is not set +# CONFIG_ACPI_SBS is not set +# CONFIG_ACPI_HED is not set +CONFIG_ACPI_CUSTOM_METHOD=y +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not 
set +# CONFIG_ACPI_APEI is not set +# CONFIG_ACPI_EXTLOG is not set +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_STAT_DETAILS is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y + +# +# x86 CPU frequency scaling drivers +# +# CONFIG_X86_INTEL_PSTATE is not set +# CONFIG_X86_PCC_CPUFREQ is not set +# CONFIG_X86_ACPI_CPUFREQ is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=y + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=y + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_INTEL_IDLE=y + +# +# Memory power savings +# +CONFIG_I7300_IDLE_IOAT_CHANNEL=y +CONFIG_I7300_IDLE=y + +# +# Bus options (PCI etc.) +# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIEAER_INJECT=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCI_MSI=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +CONFIG_HT_IRQ=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_IOAPIC=y +CONFIG_PCI_LABEL=y + +# +# PCI host controller drivers +# +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +CONFIG_PCCARD=y +CONFIG_PCMCIA=y +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +# CONFIG_YENTA is not set +CONFIG_PD6729=y +CONFIG_I82092=y +CONFIG_PCCARD_NONSTATIC=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_ACPI is not set +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=y +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=y +CONFIG_HOTPLUG_PCI_SHPC=y +# CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Executable file formats / Emulations +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +CONFIG_IA32_EMULATION=y +CONFIG_IA32_AOUT=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_KEYS_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_IOSF_MBI=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +# 
CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_NET_IP_TUNNEL=y +# CONFIG_IP_MROUTE is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_LRO=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=y +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=y +CONFIG_TCP_CONG_HTCP=y +CONFIG_TCP_CONG_HSTCP=y +CONFIG_TCP_CONG_HYBLA=y +CONFIG_TCP_CONG_VEGAS=y +CONFIG_TCP_CONG_SCALABLE=y +CONFIG_TCP_CONG_LP=y +CONFIG_TCP_CONG_VENO=y +CONFIG_TCP_CONG_YEAH=y +CONFIG_TCP_CONG_ILLINOIS=y +# CONFIG_DEFAULT_BIC is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_HYBLA is not set +# CONFIG_DEFAULT_VEGAS is not set +# CONFIG_DEFAULT_VENO is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=y +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +# CONFIG_IPV6_GRE is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NF_CT_NETLINK_HELPER=y +CONFIG_NETFILTER_NETLINK_QUEUE_CT=y +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_SIP=y +CONFIG_NF_NAT_TFTP=y +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y +CONFIG_NETFILTER_XT_SET=y + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=y 
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_CT=y +CONFIG_NETFILTER_XT_TARGET_DSCP=y +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_HMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +# CONFIG_NETFILTER_XT_TARGET_LED is not set +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NETMAP=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_RATEEST=y +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CLUSTER=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_CPU=y +CONFIG_NETFILTER_XT_MATCH_DCCP=y +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y +CONFIG_NETFILTER_XT_MATCH_L2TP=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_NFACCT=y +CONFIG_NETFILTER_XT_MATCH_OSF=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_RATEEST=y +CONFIG_NETFILTER_XT_MATCH_REALM=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SCTP=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_SET=y +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=y +CONFIG_IP_SET_BITMAP_IPMAC=y +CONFIG_IP_SET_BITMAP_PORT=y +CONFIG_IP_SET_HASH_IP=y +# CONFIG_IP_SET_HASH_IPMARK is not set +CONFIG_IP_SET_HASH_IPPORT=y +CONFIG_IP_SET_HASH_IPPORTIP=y +CONFIG_IP_SET_HASH_IPPORTNET=y +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=y +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=y +CONFIG_IP_SET_HASH_NETIFACE=y +CONFIG_IP_SET_LIST_SET=y +CONFIG_IP_VS=y +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=y +CONFIG_IP_VS_WRR=y +CONFIG_IP_VS_LC=y +CONFIG_IP_VS_WLC=y +CONFIG_IP_VS_LBLC=y +CONFIG_IP_VS_LBLCR=y +CONFIG_IP_VS_DH=y +CONFIG_IP_VS_SH=y 
+CONFIG_IP_VS_SED=y +CONFIG_IP_VS_NQ=y + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS application helper +# +# CONFIG_IP_VS_FTP is not set +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=y + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +# CONFIG_IP_NF_MATCH_RPFILTER is not set +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT_IPV4=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_REDIRECT is not set +CONFIG_NF_NAT_SNMP_BASIC=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_CLUSTERIP=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +# CONFIG_IP6_NF_MATCH_RPFILTER is not set +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_NF_NAT_IPV6 is not set +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_802_3=y +CONFIG_BRIDGE_EBT_AMONG=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_LIMIT=y +CONFIG_BRIDGE_EBT_MARK=y +CONFIG_BRIDGE_EBT_PKTTYPE=y +CONFIG_BRIDGE_EBT_STP=y +CONFIG_BRIDGE_EBT_VLAN=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_MARK_T=y +CONFIG_BRIDGE_EBT_REDIRECT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE_EBT_LOG=y +# CONFIG_BRIDGE_EBT_ULOG is not set +CONFIG_BRIDGE_EBT_NFLOG=y +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=y +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +CONFIG_VLAN_8021Q=y +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +CONFIG_LLC2=y +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +CONFIG_NETLINK_MMAP=y +CONFIG_NETLINK_DIAG=y +# CONFIG_NET_MPLS_GSO is not set +# CONFIG_HSR is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +# CONFIG_BPF_JIT is not set 
+CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=y +CONFIG_NET_DROP_MONITOR=y +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_AF_RXRPC=y +# CONFIG_AF_RXRPC_DEBUG is not set +# CONFIG_RXKAD is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=y +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +CONFIG_HAVE_BPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +# CONFIG_DMA_SHARED_BUFFER is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_MTD is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_OSD is not set +CONFIG_BLK_DEV_SX8=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=65536 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_BLK_DEV_HD is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +CONFIG_DUMMY_IRQ=y +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_TI_DAC7512=y +# CONFIG_BMP085_I2C is not set +# CONFIG_BMP085_SPI is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=y +CONFIG_EEPROM_AT25=y +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +CONFIG_EEPROM_93CX6=y +# CONFIG_EEPROM_93XX46 is not set +CONFIG_EEPROM_SFF_8436=y +CONFIG_CB710_CORE=y +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments 
shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set + +# +# Intel MIC Host Driver +# +# CONFIG_INTEL_MIC_HOST is not set + +# +# Intel MIC Card Driver +# +# CONFIG_INTEL_MIC_CARD is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_TGT is not set +CONFIG_SCSI_NETLINK=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=y +CONFIG_SCSI_CXGB4_ISCSI=y +CONFIG_SCSI_BNX2_ISCSI=y +CONFIG_SCSI_BNX2X_FCOE=y +CONFIG_BE2ISCSI=y +CONFIG_BLK_DEV_3W_XXXX_RAID=y +CONFIG_SCSI_HPSA=y +CONFIG_SCSI_3W_9XXX=y +CONFIG_SCSI_3W_SAS=y +CONFIG_SCSI_ACARD=y +CONFIG_SCSI_AACRAID=y +CONFIG_SCSI_AIC7XXX=y +CONFIG_AIC7XXX_CMDS_PER_DEVICE=8 +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=y +CONFIG_AIC79XX_CMDS_PER_DEVICE=32 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +CONFIG_AIC79XX_DEBUG_ENABLE=y +CONFIG_AIC79XX_DEBUG_MASK=0 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC94XX=y +# CONFIG_AIC94XX_DEBUG is not set +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +# CONFIG_SCSI_MVSAS_TASKLET is not set +CONFIG_SCSI_MVUMI=y +CONFIG_SCSI_DPT_I2O=y +CONFIG_SCSI_ADVANSYS=y +CONFIG_SCSI_ARCMSR=y +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT2SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS_LOGGING is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_SCSI_HPTIOP=y +CONFIG_SCSI_BUSLOGIC=y +# CONFIG_SCSI_FLASHPOINT is not set +CONFIG_VMWARE_PVSCSI=y +CONFIG_LIBFC=y +CONFIG_LIBFCOE=y +CONFIG_FCOE=y +CONFIG_FCOE_FNIC=y +CONFIG_SCSI_DMX3191D=y +CONFIG_SCSI_EATA=y +CONFIG_SCSI_EATA_TAGGED_QUEUE=y +CONFIG_SCSI_EATA_LINKED_COMMANDS=y +CONFIG_SCSI_EATA_MAX_TAGS=16 +CONFIG_SCSI_FUTURE_DOMAIN=y +CONFIG_SCSI_GDTH=y +CONFIG_SCSI_ISCI=y +CONFIG_SCSI_IPS=y +CONFIG_SCSI_INITIO=y +CONFIG_SCSI_INIA100=y +CONFIG_SCSI_STEX=y +CONFIG_SCSI_SYM53C8XX_2=y +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +CONFIG_SCSI_IPR=y +# CONFIG_SCSI_IPR_TRACE is not set +# CONFIG_SCSI_IPR_DUMP is not set +CONFIG_SCSI_QLOGIC_1280=y +CONFIG_SCSI_QLA_FC=y +CONFIG_SCSI_QLA_ISCSI=y +CONFIG_SCSI_LPFC=y +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_DC395x=y +CONFIG_SCSI_DC390T=y +CONFIG_SCSI_DEBUG=y +CONFIG_SCSI_PMCRAID=y +CONFIG_SCSI_PM8001=y +# CONFIG_SCSI_SRP is not set 
+CONFIG_SCSI_BFA_FC=y +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_CHELSIO_FCOE is not set +CONFIG_SCSI_LOWLEVEL_PCMCIA=y +# CONFIG_PCMCIA_AHA152X is not set +# CONFIG_PCMCIA_FDOMAIN is not set +# CONFIG_PCMCIA_QLOGIC is not set +# CONFIG_PCMCIA_SYM53C500 is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_SCSI_OSD_INITIATOR=y +CONFIG_SCSI_OSD_ULD=y +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +# CONFIG_SCSI_OSD_DEBUG is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_SATA_INIC162X is not set +CONFIG_SATA_ACARD_AHCI=y +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=y +CONFIG_SATA_QSTOR=y +CONFIG_SATA_SX4=y +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +CONFIG_SATA_MV=y +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_SVW=y +CONFIG_SATA_ULI=y +CONFIG_SATA_VIA=y +CONFIG_SATA_VITESSE=y + +# +# PATA SFF controllers with BMDMA +# +CONFIG_PATA_ALI=y +CONFIG_PATA_AMD=y +CONFIG_PATA_ARTOP=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_ATP867X=y +CONFIG_PATA_CMD64X=y +# CONFIG_PATA_CYPRESS is not set +CONFIG_PATA_EFAR=y +CONFIG_PATA_HPT366=y +CONFIG_PATA_HPT37X=y +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +CONFIG_PATA_IT8213=y +CONFIG_PATA_IT821X=y +CONFIG_PATA_JMICRON=y +CONFIG_PATA_MARVELL=y +CONFIG_PATA_NETCELL=y +CONFIG_PATA_NINJA32=y +CONFIG_PATA_NS87415=y +CONFIG_PATA_OLDPIIX=y +# CONFIG_PATA_OPTIDMA is not set +CONFIG_PATA_PDC2027X=y +CONFIG_PATA_PDC_OLD=y +# CONFIG_PATA_RADISYS is not set +CONFIG_PATA_RDC=y +CONFIG_PATA_SCH=y +CONFIG_PATA_SERVERWORKS=y +CONFIG_PATA_SIL680=y +CONFIG_PATA_SIS=y +CONFIG_PATA_TOSHIBA=y +CONFIG_PATA_TRIFLEX=y +CONFIG_PATA_VIA=y +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +CONFIG_PATA_MPIIX=y +CONFIG_PATA_NS87410=y +# CONFIG_PATA_OPTI is not set +CONFIG_PATA_PCMCIA=y +CONFIG_PATA_PLATFORM=y +CONFIG_PATA_RZ1000=y + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=y +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +# CONFIG_MD_AUTODETECT is not set +# CONFIG_MD_LINEAR is not set +# CONFIG_MD_RAID0 is not set +# CONFIG_MD_RAID1 is not set +# CONFIG_MD_RAID10 is not set +# CONFIG_MD_RAID456 is not set +# CONFIG_MD_MULTIPATH is not set +# CONFIG_MD_FAULTY is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_CACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=y +CONFIG_FIREWIRE_OHCI=y +CONFIG_FIREWIRE_SBP2=y +CONFIG_FIREWIRE_NET=y +CONFIG_FIREWIRE_NOSY=y +# CONFIG_I2O is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is 
not set +CONFIG_DUMMY=y +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y +# CONFIG_VXLAN is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_TUN=y +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set +# CONFIG_NET_DSA_MV88E6131 is not set +# CONFIG_NET_DSA_MV88E6123_61_65 is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_XGENE is not set +CONFIG_NET_VENDOR_ARC=y +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_B44=y +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_BNX2=y +CONFIG_CNIC=y +CONFIG_TIGON3=y +CONFIG_BNX2X=y +CONFIG_BNX2X_SRIOV=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_CALXEDA_XGMAC is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3=y +CONFIG_CHELSIO_T4=y +CONFIG_CHELSIO_T4VF=y +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_FUJITSU is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=y +CONFIG_IXGB=y +CONFIG_IXGBE=y +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBEVF=y +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +CONFIG_NET_VENDOR_I825XX=y +# CONFIG_IP1000 is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLX4_EN is not set +# CONFIG_MLX4_CORE is not set +# CONFIG_MLX5_CORE is not set +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +CONFIG_ENC28J60=y +CONFIG_ENC28J60_WRITEVERIFY=y +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +CONFIG_R8169=y +# CONFIG_SH_ETH is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_SFC is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WIZNET=y +# CONFIG_WIZNET_W5100 is not set +# CONFIG_WIZNET_W5300 is not set +# CONFIG_NET_VENDOR_XIRCOM is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_AT803X_PHY is not set +# CONFIG_AMD_PHY is not set 
+CONFIG_MARVELL_PHY=y +CONFIG_DAVICOM_PHY=y +CONFIG_QSEMI_PHY=y +CONFIG_LXT_PHY=y +CONFIG_CICADA_PHY=y +CONFIG_VITESSE_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_BROADCOM_PHY=y +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_ICPLUS_PHY is not set +CONFIG_REALTEK_PHY=y +CONFIG_NATIONAL_PHY=y +CONFIG_STE10XP=y +CONFIG_LSI_ET1011C_PHY=y +CONFIG_MICREL_PHY=y +CONFIG_FIXED_PHY=y +CONFIG_MDIO_BITBANG=y +# CONFIG_MDIO_GPIO is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=y +# CONFIG_PPP_BSDCOMP is not set +# CONFIG_PPP_DEFLATE is not set +# CONFIG_PPP_FILTER is not set +# CONFIG_PPP_MPPE is not set +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPPOE is not set +# CONFIG_PPP_ASYNC is not set +# CONFIG_PPP_SYNC_TTY is not set +# CONFIG_SLIP is not set +CONFIG_SLHC=y + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=y +# CONFIG_USB_NET_CDC_EEM is not set +CONFIG_USB_NET_CDC_NCM=y +# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set +# CONFIG_USB_NET_CDC_MBIM is not set +# CONFIG_USB_NET_DM9601 is not set +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +# CONFIG_USB_NET_SMSC75XX is not set +# CONFIG_USB_NET_SMSC95XX is not set +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=y +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +CONFIG_USB_NET_CDC_SUBSET=y +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_BELKIN=y +# CONFIG_USB_ARMLINUX is not set +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +CONFIG_USB_NET_ZAURUS=y +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_SIERRA_NET is not set +# CONFIG_USB_VL600 is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +# CONFIG_SYNCLINK is not set +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is 
not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_CS=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +# CONFIG_SERIAL_8250_DW is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +CONFIG_SERIAL_MFD_HSU=y +# CONFIG_SERIAL_MFD_HSU_CONSOLE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=y +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=y +CONFIG_HW_RANDOM_INTEL=y +CONFIG_HW_RANDOM_AMD=y +CONFIG_HW_RANDOM_VIA=y +# CONFIG_HW_RANDOM_VIRTIO is not set +CONFIG_NVRAM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +CONFIG_SYNCLINK_CS=y +CONFIG_CARDMAN_4000=y +CONFIG_CARDMAN_4040=y +CONFIG_IPWIRELESS=y +# CONFIG_MWAVE is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_HPET is not set +# CONFIG_HANGCHECK_TIMER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_TELCLOCK is not set +CONFIG_DEVPORT=y +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_MUX_GPIO=y +CONFIG_I2C_MUX_PCA9541=y +CONFIG_I2C_MUX_PCA954x=y +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +CONFIG_I2C_I801=y +CONFIG_I2C_ISCH=y +CONFIG_I2C_ISMT=y +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=y +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# 
CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set + +# +# PPS support +# +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_DEVRES=y +CONFIG_GPIO_ACPI=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y +CONFIG_GPIO_MAX730X=y + +# +# Memory mapped GPIO drivers: +# +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_IT8761E is not set +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_SCH311X is not set +CONFIG_GPIO_SCH=y +# CONFIG_GPIO_ICH is not set +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_LYNXPOINT is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=y +# CONFIG_GPIO_PCA953X_IRQ is not set +CONFIG_GPIO_PCF857X=y +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_INTEL_MID is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders: +# +CONFIG_GPIO_MAX7301=y +CONFIG_GPIO_MC33880=y + +# +# AC97 GPIO expanders: +# + +# +# LPC GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# + +# +# USB GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_POWER_RESET is not set +# CONFIG_POWER_AVS is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +CONFIG_SENSORS_ADM1021=y +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# 
CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +CONFIG_SENSORS_GPIO_FAN=y +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_CORETEMP=y +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +CONFIG_SENSORS_LTC4151=y +CONFIG_SENSORS_LTC4215=y +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=y +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=y +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +CONFIG_SENSORS_MAX6650=y +CONFIG_SENSORS_MAX6620=y +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_HTU21 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +CONFIG_SENSORS_LM75=y +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +CONFIG_SENSORS_LM85=y +# CONFIG_SENSORS_LM87 is not set +CONFIG_SENSORS_LM90=y +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=y +CONFIG_SENSORS_PMBUS=y +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LTC2978 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX34440 is not set +CONFIG_SENSORS_DNI_DPS460=y +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_UCD9000 is not set +CONFIG_SENSORS_UCD9200=y +# CONFIG_SENSORS_ZL6100 is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not 
set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +CONFIG_SENSORS_W83781D=y +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_INTEL_POWERCLAMP is not set +CONFIG_X86_PKG_TEMP_THERMAL=m +# CONFIG_ACPI_INT3403_THERMAL is not set +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# Texas Instruments thermal drivers +# +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +CONFIG_SSB=y +CONFIG_SSB_SPROM=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +# CONFIG_SSB_B43_PCI_BRIDGE is not set +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y +CONFIG_SSB_PCMCIAHOST=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_GPIO is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +CONFIG_BCMA=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +# CONFIG_BCMA_DRIVER_GMAC_CMN is not set +# CONFIG_BCMA_DRIVER_GPIO is not set +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_CS5535 is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_AXP20X is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=y +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RTSX_USB is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_PALMAS is not set +# 
CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +CONFIG_MFD_WL1273_CORE=y +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TIMBERDALE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_AGP is not set +# CONFIG_VGA_ARB is not set +# CONFIG_VGA_SWITCHEROO is not set + +# +# Direct Rendering Manager +# +# CONFIG_DRM is not set + +# +# Frame buffer Devices +# +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set +# CONFIG_VGASTATE is not set + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +# CONFIG_VGACON_SOFT_SCROLLBACK is not set +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_HUION is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO_TPKBD is not set +# CONFIG_HID_LOGITECH is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set + +# +# USB HID 
support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_OTG_FSM is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_FUSBH200_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_SSB is not set +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +# CONFIG_USB_SERIAL_GENERIC is not set +# CONFIG_USB_SERIAL_SIMPLE is not set +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# CONFIG_USB_SERIAL_FTDI_SIO is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set 
+# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MXUPORT is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +# CONFIG_USB_SERIAL_PL2303 is not set +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_XSENS_MT is not set +# CONFIG_USB_SERIAL_WISHBONE is not set +# CONFIG_USB_SERIAL_ZTE is not set +# CONFIG_USB_SERIAL_SSU100 is not set +# CONFIG_USB_SERIAL_QT2 is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HSIC_USB3503 is not set + +# +# USB Physical Layer drivers +# +# CONFIG_USB_PHY is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_SAMSUNG_USB2PHY is not set +# CONFIG_SAMSUNG_USB3PHY is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_GADGET is not set +# CONFIG_UWB is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_CLKGATE is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PCI=y +# CONFIG_MMC_RICOH_MMC is not set +# CONFIG_MMC_SDHCI_ACPI is not set +CONFIG_MMC_SDHCI_PLTFM=y +# CONFIG_MMC_WBSD is not set +# CONFIG_MMC_TIFM_SD is not set +CONFIG_MMC_SPI=y +# CONFIG_MMC_SDRICOH_CS is not set +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_INTEL_SS4200 is not set +# 
CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +CONFIG_LEDS_TRIGGER_GPIO=y +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +# CONFIG_EDAC is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_DS1307=y +CONFIG_RTC_DRV_DS1374=y +CONFIG_RTC_DRV_DS1672=y +CONFIG_RTC_DRV_DS3232=y +CONFIG_RTC_DRV_MAX6900=y +CONFIG_RTC_DRV_RS5C372=y +CONFIG_RTC_DRV_ISL1208=y +CONFIG_RTC_DRV_ISL12022=y +# CONFIG_RTC_DRV_ISL12057 is not set +CONFIG_RTC_DRV_X1205=y +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +CONFIG_RTC_DRV_PCF8563=y +CONFIG_RTC_DRV_PCF8583=y +CONFIG_RTC_DRV_M41T80=y +# CONFIG_RTC_DRV_M41T80_WDT is not set +CONFIG_RTC_DRV_BQ32K=y +CONFIG_RTC_DRV_S35390A=y +CONFIG_RTC_DRV_FM3130=y +CONFIG_RTC_DRV_RX8581=y +CONFIG_RTC_DRV_RX8025=y +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_MCP795 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=y +CONFIG_RTC_DRV_DS1511=y +CONFIG_RTC_DRV_DS1553=y +CONFIG_RTC_DRV_DS1742=y +CONFIG_RTC_DRV_STK17TA8=y +CONFIG_RTC_DRV_M48T86=y +CONFIG_RTC_DRV_M48T35=y +CONFIG_RTC_DRV_M48T59=y +CONFIG_RTC_DRV_MSM6242=y +CONFIG_RTC_DRV_BQ4802=y +CONFIG_RTC_DRV_RP5C01=y +CONFIG_RTC_DRV_V3020=y +# CONFIG_RTC_DRV_DS2404 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_MOXART is not set +# CONFIG_RTC_DRV_XGENE is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=y +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRTIO=y + +# +# Virtio drivers +# +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACERHDF is not set +# CONFIG_ASUS_LAPTOP is not set +# CONFIG_DELL_SMO8800 is not set 
+# CONFIG_FUJITSU_TABLET is not set +# CONFIG_HP_ACCEL is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_THINKPAD_ACPI is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_EEEPC_LAPTOP is not set +# CONFIG_ACPI_WMI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_ACPI_CMPC is not set +# CONFIG_INTEL_IPS is not set +# CONFIG_IBM_RTL is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_INTEL_RST is not set +# CONFIG_INTEL_SMARTCONNECT is not set +# CONFIG_PVPANIC is not set +# CONFIG_CHROME_PLATFORMS is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Hardware Spinlock drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_SUPPORT=y +# CONFIG_AMD_IOMMU is not set +# CONFIG_INTEL_IOMMU is not set +# CONFIG_IRQ_REMAP is not set + +# +# Remoteproc drivers +# +# CONFIG_STE_MODEM_RPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set +# CONFIG_THUNDERBOLT is not set + +# +# Firmware Drivers +# +CONFIG_EDD=y +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DELL_RBU=y +CONFIG_DCDBAS=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_DEFAULTS_TO_ORDERED=y +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set +CONFIG_OVERLAYFS_FS=y + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=y +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT 
Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EXOFS_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_AUFS_FS=y +CONFIG_AUFS_BRANCH_MAX_127=y +# CONFIG_AUFS_BRANCH_MAX_511 is not set +# CONFIG_AUFS_BRANCH_MAX_1023 is not set +# CONFIG_AUFS_BRANCH_MAX_32767 is not set +CONFIG_AUFS_SBILIST=y +# CONFIG_AUFS_HNOTIFY is not set +# CONFIG_AUFS_EXPORT is not set +# CONFIG_AUFS_FHSM is not set +# CONFIG_AUFS_RDU is not set +# CONFIG_AUFS_SHWH is not set +# CONFIG_AUFS_BR_RAMFS is not set +CONFIG_AUFS_BDEV_LOOP=y +# CONFIG_AUFS_DEBUG is not set +CONFIG_ORE=y +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +# CONFIG_NFS_V4_2 is not set +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=y +CONFIG_PNFS_OBJLAYOUT=y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +# CONFIG_NFS_FSCACHE is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFSD=y +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SUNRPC_DEBUG is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# 
CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 +CONFIG_BOOT_PRINTK_DELAY=y +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y +# CONFIG_FRAME_POINTER is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KMEMCHECK=y +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +CONFIG_TIMER_STATS=y + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_CPU_STALL_INFO is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y +# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_FTRACE_SYSCALLS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +CONFIG_BLK_DEV_IO_TRACE=y +# CONFIG_UPROBE_EVENT is not set +# CONFIG_PROBE_EVENTS is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_TEST_MODULE is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_STRICT_DEVMEM=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +# CONFIG_EARLY_PRINTK_DBGP is not set +# CONFIG_X86_PTDUMP is not set +CONFIG_DEBUG_RODATA=y +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_SET_MODULE_RONX is not set +# CONFIG_DEBUG_NX_TEST is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +# CONFIG_IOMMU_STRESS is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 
+CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +# CONFIG_DEBUG_BOOT_PARAMS is not set +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_ABLK_HELPER=y +CONFIG_CRYPTO_GLUE_HELPER_X86=y + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +CONFIG_CRYPTO_PCBC=y +CONFIG_CRYPTO_XTS=y + +# +# Hash modes +# +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_VMAC=y + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=y +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_CRC32_PCLMUL is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=y +CONFIG_CRYPTO_RMD128=y +CONFIG_CRYPTO_RMD160=y +CONFIG_CRYPTO_RMD256=y +CONFIG_CRYPTO_RMD320=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=y +# CONFIG_CRYPTO_SHA256_SSSE3 is not set +# CONFIG_CRYPTO_SHA512_SSSE3 is not set +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_TGR192=y +CONFIG_CRYPTO_WP512=y +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_ANUBIS=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_BLOWFISH_COMMON=y +CONFIG_CRYPTO_BLOWFISH_X86_64=y +CONFIG_CRYPTO_CAMELLIA=y +# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set +CONFIG_CRYPTO_CAST_COMMON=y +CONFIG_CRYPTO_CAST5=y +# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set +CONFIG_CRYPTO_CAST6=y +# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=y +CONFIG_CRYPTO_KHAZAD=y +CONFIG_CRYPTO_SALSA20=y +CONFIG_CRYPTO_SALSA20_X86_64=y +CONFIG_CRYPTO_SEED=y +CONFIG_CRYPTO_SERPENT=y +# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set 
+CONFIG_CRYPTO_TEA=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y +CONFIG_CRYPTO_TWOFISH_X86_64=y +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y +# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_ZLIB=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=y +CONFIG_CRYPTO_DEV_PADLOCK_AES=y +CONFIG_CRYPTO_DEV_PADLOCK_SHA=y +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set +CONFIG_HAVE_KVM=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC7=y +CONFIG_LIBCRC32C=y +CONFIG_CRC8=y +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_NLATTR=y +CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_AVERAGE=y +CONFIG_CORDIC=y +# CONFIG_DDR is not set +CONFIG_OID_REGISTRY=y diff --git a/packages/base/any/kernels/3.16-lts/kconfig.mk b/packages/base/any/kernels/3.16-lts/kconfig.mk new file mode 100644 index 00000000..ddd9b87e --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/kconfig.mk @@ -0,0 +1,30 @@ +############################################################ +# +# +# Copyright 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# 3.16 Kernel Builds +# +############################################################ +THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) +K_MAJOR_VERSION := 3 +K_PATCH_LEVEL := 16 +K_SUB_LEVEL := 39 +K_SUFFIX := +K_PATCH_DIR := $(THIS_DIR)/patches diff --git a/packages/base/any/kernels/3.16-lts/patches/changelog.patch b/packages/base/any/kernels/3.16-lts/patches/changelog.patch new file mode 100644 index 00000000..99ef7c6f --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/changelog.patch @@ -0,0 +1,18 @@ +--- debian/changelog 2015-08-04 00:50:04.000000000 +0000 ++++ changelog 2015-12-20 04:20:25.032779900 +0000 +@@ -1,3 +1,15 @@ ++linux (3.16.7-ckt11-2+acs8u2) acs; urgency=high ++ ++ * add driver patches for MLNX SN2700 ++ ++ -- Guohan Lu Sun, 19 Dec 2015 01:50:04 +0100 ++ ++linux (3.16.7-ckt11-2+acs8u1) acs; urgency=high ++ ++ * add support for S6000 ++ ++ -- Shuotian Cheng Sun, 19 Dec 2015 01:50:04 +0100 ++ + linux (3.16.7-ckt11-1+deb8u3) jessie-security; urgency=high + + * path_openat(): fix double fput() (CVE-2015-5706) diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch b/packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch new file mode 100644 index 00000000..040d6b88 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch @@ -0,0 +1,146 @@ +From f75a16bc0dfc83cf3df1db7ede4d7357e7be5952 Mon Sep 17 00:00:00 2001 +From: Chulei Wu +Date: Wed, 2 Mar 2016 04:09:53 +0000 +Subject: [PATCH] arista piix4 mux patch + +--- + drivers/i2c/busses/i2c-piix4.c | 63 +++++++++++++++++++++++++++++++++++++----- + 1 file changed, 56 insertions(+), 7 deletions(-) + +diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c +index a6f54ba..eafc035 100644 +--- a/drivers/i2c/busses/i2c-piix4.c ++++ b/drivers/i2c/busses/i2c-piix4.c +@@ -128,6 +128,7 @@ static const struct dmi_system_id piix4_dmi_ibm[] = { + + struct i2c_piix4_adapdata { + unsigned short smba; ++ int mux; + }; + + static int piix4_setup(struct pci_dev *PIIX4_dev, +@@ -528,6 +529,43 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, + return 0; + } + ++static s32 piix4_access_mux(struct i2c_adapter * adap, u16 addr, ++ unsigned short flags, char read_write, ++ u8 command, int size, union i2c_smbus_data * data) ++{ ++ static DEFINE_MUTEX(mux_mutex); ++ struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); ++ int piix4_mux = adapdata->mux; ++ static int last_mux = -1; ++ s32 ret; ++ unsigned short smba_idx = 0xcd6; ++ u8 smb_en = 0x2c; ++ u8 val; ++ ++ if ( piix4_mux == -1 ) { ++ return piix4_access(adap, addr, flags, read_write, command, size, data); ++ } ++ ++ mutex_lock(&mux_mutex); ++ ++ if ( last_mux != piix4_mux ) { ++ /* Select the correct bus mux*/ ++ outb_p(smb_en, smba_idx); ++ val = inb_p(smba_idx + 1); ++ val = (val & 0xf9) | (piix4_mux << 1); ++ outb_p(val, smba_idx + 1); ++ ++ last_mux = piix4_mux; ++ dev_dbg(&adap->dev, "set mux to 0x%02x\n", piix4_mux); ++ } ++ ++ ret = piix4_access(adap, addr, flags, read_write, command, size, data); ++ ++ mutex_unlock(&mux_mutex); ++ ++ return ret; ++} ++ + static u32 piix4_func(struct i2c_adapter *adapter) + { + return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | +@@ -536,7 +574,7 @@ static u32 piix4_func(struct i2c_adapter *adapter) + } + + static const struct i2c_algorithm smbus_algorithm = { +- .smbus_xfer = piix4_access, ++ .smbus_xfer = 
piix4_access_mux, + .functionality = piix4_func, + }; + +@@ -569,7 +607,7 @@ static struct i2c_adapter *piix4_main_adapter; + static struct i2c_adapter *piix4_aux_adapter; + + static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, +- struct i2c_adapter **padap) ++ struct i2c_adapter **padap, int mux) + { + struct i2c_adapter *adap; + struct i2c_piix4_adapdata *adapdata; +@@ -593,6 +631,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, + } + + adapdata->smba = smba; ++ adapdata->mux = mux; + + /* set up the sysfs linkage to our parent device */ + adap->dev.parent = &dev->dev; +@@ -618,6 +657,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, + static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) + { + int retval; ++ int mux = -1; ++ int aux_smba; + + if ((dev->vendor == PCI_VENDOR_ID_ATI && + dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && +@@ -633,7 +674,14 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) + return retval; + + /* Try to register main SMBus adapter, give up if we can't */ +- retval = piix4_add_adapter(dev, retval, &piix4_main_adapter); ++ aux_smba = retval; ++ if (dev->vendor == PCI_VENDOR_ID_AMD && ++ dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { ++ mux = -1; ++ } else { ++ mux = 0; ++ } ++ retval = piix4_add_adapter(dev, retval, &piix4_main_adapter, mux); + if (retval < 0) + return retval; + +@@ -644,21 +692,22 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) + dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) { + if (dev->revision < 0x40) { + retval = piix4_setup_aux(dev, id, 0x58); ++ mux = -1; + } else { +- /* SB800 added aux bus too */ +- retval = piix4_setup_sb800(dev, id, 1); ++ retval = aux_smba; ++ mux = 1; + } + } + + if (dev->vendor == PCI_VENDOR_ID_AMD && + dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { + retval = piix4_setup_sb800(dev, id, 1); ++ mux = -1; + } +- + if (retval > 0) { + /* Try to add the aux adapter if it exists, + * piix4_add_adapter will clean up if this fails */ +- piix4_add_adapter(dev, retval, &piix4_aux_adapter); ++ piix4_add_adapter(dev, retval, &piix4_aux_adapter, mux); + } + + return 0; +-- +2.1.4 + diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch b/packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch new file mode 100644 index 00000000..6060b15e --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch @@ -0,0 +1,34 @@ +--- a/drivers/misc/eeprom/at24.c 2016-10-06 12:45:49.290365545 +0000 ++++ b/drivers/misc/eeprom/at24.c 2016-10-06 12:47:08.630368526 +0000 +@@ -84,9 +84,9 @@ + * + * This value is forced to be a power of two so that writes align on pages. + */ +-static unsigned io_limit = 128; ++static unsigned io_limit = 32; + module_param(io_limit, uint, 0); +-MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); ++MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 32)"); + + /* + * Specs often allow 5 msec for a page write, sometimes 20 msec; +@@ -192,7 +192,8 @@ + count = I2C_SMBUS_BLOCK_MAX; + break; + case I2C_SMBUS_WORD_DATA: +- count = 2; ++ /* Check for odd length transaction */ ++ count = (count == 1) ? 
1 : 2; + break; + case I2C_SMBUS_BYTE_DATA: + count = 1; +@@ -237,7 +238,8 @@ + status = i2c_smbus_read_word_data(client, offset); + if (status >= 0) { + buf[0] = status & 0xff; +- buf[1] = status >> 8; ++ if (count == 2) ++ buf[1] = status >> 8; + status = count; + } + break; diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch new file mode 100644 index 00000000..e5401626 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch @@ -0,0 +1,196 @@ +MAX6620 fix rpm calculation accuracy + +From: Cumulus Networks + +The driver only fills the most significant 8 bits of the fan tach +count (11 bit value). Fixing the driver to use all of 11 bits for +more accuracy. +--- + drivers/hwmon/max6620.c | 105 +++++++++++++++++++++-------------------------- + 1 file changed, 46 insertions(+), 59 deletions(-) + +diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c +index 3c337c7..76c1f7f 100644 +--- a/drivers/hwmon/max6620.c ++++ b/drivers/hwmon/max6620.c +@@ -46,6 +46,8 @@ + + /* clock: The clock frequency of the chip the driver should assume */ + static int clock = 8192; ++static u32 sr = 2; ++static u32 np = 2; + + module_param(clock, int, S_IRUGO); + +@@ -213,22 +215,22 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, cha + + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct max6620_data *data = max6620_update_device(dev); +- int rpm; +- +- /* +- * Calculation details: +- * +- * Each tachometer counts over an interval given by the "count" +- * register (0.25, 0.5, 1 or 2 seconds). This module assumes +- * that the fans produce two pulses per revolution (this seems +- * to be the most common). +- */ +- if(data->tach[attr->index] == 0 || data->tach[attr->index] == 255) { ++ struct i2c_client *client = to_i2c_client(dev); ++ u32 rpm = 0; ++ u32 tach = 0; ++ u32 tach1 = 0; ++ u32 tach2 = 0; ++ ++ tach1 = i2c_smbus_read_byte_data(client, tach_reg[attr->index]); ++ tach1 = (tach1 << 3) & 0x7f8; ++ tach2 = i2c_smbus_read_byte_data(client, tach_reg[attr->index] + 1); ++ tach2 = (tach2 >> 5) & 0x7; ++ tach = tach1 | tach2; ++ if (tach == 0) { + rpm = 0; + } else { +- rpm = ((clock / (data->tach[attr->index] << 3)) * 30 * DIV_FROM_REG(data->fandyn[attr->index])); ++ rpm = (60 * sr * clock)/(tach * np); + } +- + return sprintf(buf, "%d\n", rpm); + } + +@@ -236,22 +238,21 @@ static ssize_t get_target(struct device *dev, struct device_attribute *devattr, + + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct max6620_data *data = max6620_update_device(dev); +- int kscale, ktach, rpm; +- +- /* +- * Use the datasheet equation: +- * +- * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)] +- * +- * then multiply by 60 to give rpm. 
+- */ +- +- kscale = DIV_FROM_REG(data->fandyn[attr->index]); +- ktach = data->target[attr->index]; +- if(ktach == 0) { ++ struct i2c_client *client = to_i2c_client(dev); ++ u32 rpm; ++ u32 target; ++ u32 target1; ++ u32 target2; ++ ++ target1 = i2c_smbus_read_byte_data(client, target_reg[attr->index]); ++ target1 = (target1 << 3) & 0x7f8; ++ target2 = i2c_smbus_read_byte_data(client, target_reg[attr->index] + 1); ++ target2 = (target2 >> 5) & 0x7; ++ target = target1 | target2; ++ if (target == 0) { + rpm = 0; + } else { +- rpm = ((60 * kscale * clock) / (ktach << 3)); ++ rpm = (60 * sr * clock)/(target * np); + } + return sprintf(buf, "%d\n", rpm); + } +@@ -261,9 +262,11 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, + struct i2c_client *client = to_i2c_client(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct max6620_data *data = i2c_get_clientdata(client); +- int kscale, ktach; +- unsigned long rpm; ++ u32 rpm; + int err; ++ u32 target; ++ u32 target1; ++ u32 target2; + + err = kstrtoul(buf, 10, &rpm); + if (err) +@@ -271,25 +274,13 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, + + rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); + +- /* +- * Divide the required speed by 60 to get from rpm to rps, then +- * use the datasheet equation: +- * +- * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1 +- */ +- + mutex_lock(&data->update_lock); + +- kscale = DIV_FROM_REG(data->fandyn[attr->index]); +- ktach = ((60 * kscale * clock) / rpm); +- if (ktach < 0) +- ktach = 0; +- if (ktach > 255) +- ktach = 255; +- data->target[attr->index] = ktach; +- +- i2c_smbus_write_byte_data(client, target_reg[attr->index], data->target[attr->index]); +- i2c_smbus_write_byte_data(client, target_reg[attr->index]+0x01, 0x00); ++ target = (60 * sr * 8192)/(rpm * np); ++ target1 = (target >> 3) & 0xff; ++ target2 = (target << 5) & 0xe0; ++ i2c_smbus_write_byte_data(client, target_reg[attr->index], target1); ++ i2c_smbus_write_byte_data(client, target_reg[attr->index] + 1, target2); + + mutex_unlock(&data->update_lock); + +@@ -609,8 +600,11 @@ static int max6620_init_client(struct i2c_client *client) { + } + + +- +- if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config)) { ++ /* ++ * Set bit 4, disable other fans from going full speed on a fail ++ * failure. 
++ */ ++ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config | 0x10)) { + dev_err(&client->dev, "Config write error, aborting.\n"); + return err; + } +@@ -618,28 +612,21 @@ static int max6620_init_client(struct i2c_client *client) { + data->config = config; + for (i = 0; i < 4; i++) { + data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); +- data->fancfg[i] |= 0x80; // enable TACH monitoring ++ data->fancfg[i] |= 0xa8; // enable TACH monitoring + i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); + data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); +- data-> fandyn[i] |= 0x1C; ++ /* 2 counts (001) and Rate change 100 (0.125 secs) */ ++ data-> fandyn[i] = 0x30; + i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); + data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); + data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); + data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); + data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); + +- +- + } +- +- +- + return 0; + } + +- +- +- + static struct max6620_data *max6620_update_device(struct device *dev) + { + int i; +@@ -678,7 +665,7 @@ static struct max6620_data *max6620_update_device(struct device *dev) + return data; + } + +-module_i2c_driver(max6620_driver); ++// module_i2c_driver(max6620_driver); + + static int __init max6620_init(void) + { diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch new file mode 100644 index 00000000..b4cfe0cf --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch @@ -0,0 +1,113 @@ +Update MAX6620 driver to support newer kernel version + +From: Shuotian Cheng + + +--- + drivers/hwmon/max6620.c | 25 +++++++++++-------------- + 1 file changed, 11 insertions(+), 14 deletions(-) + +diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c +index 76c1f7f..fb49195 100644 +--- a/drivers/hwmon/max6620.c ++++ b/drivers/hwmon/max6620.c +@@ -183,7 +183,7 @@ static struct i2c_driver max6620_driver = { + .name = "max6620", + }, + .probe = max6620_probe, +- .remove = __devexit_p(max6620_remove), ++ .remove = max6620_remove, + .id_table = max6620_id, + .address_list = normal_i2c, + }; +@@ -231,6 +231,7 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, cha + } else { + rpm = (60 * sr * clock)/(tach * np); + } ++ + return sprintf(buf, "%d\n", rpm); + } + +@@ -262,17 +263,17 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, + struct i2c_client *client = to_i2c_client(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct max6620_data *data = i2c_get_clientdata(client); +- u32 rpm; ++ unsigned long rpm; + int err; +- u32 target; +- u32 target1; +- u32 target2; ++ unsigned long target; ++ unsigned long target1; ++ unsigned long target2; + + err = kstrtoul(buf, 10, &rpm); + if (err) + return err; + +- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); ++ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX); + + mutex_lock(&data->update_lock); + +@@ -326,7 +327,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, con + if (err) + return err; + +- pwm = SENSORS_LIMIT(pwm, 0, 255); ++ pwm = clamp_val(pwm, 0, 255); + + mutex_lock(&data->update_lock); + +@@ -534,7 +535,7 @@ static struct attribute_group max6620_attr_grp = { + * Real code + */ + +-static int 
__devinit max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { ++static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { + + struct max6620_data *data; + int err; +@@ -575,7 +576,7 @@ dev_info(&client->dev, "Sysfs entries created\n"); + return err; + } + +-static int __devexit max6620_remove(struct i2c_client *client) { ++static int max6620_remove(struct i2c_client *client) { + + struct max6620_data *data = i2c_get_clientdata(client); + +@@ -599,7 +600,6 @@ static int max6620_init_client(struct i2c_client *client) { + return err; + } + +- + /* + * Set bit 4, disable other fans from going full speed on a fail + * failure. +@@ -615,14 +615,13 @@ static int max6620_init_client(struct i2c_client *client) { + data->fancfg[i] |= 0xa8; // enable TACH monitoring + i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); + data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); +- /* 2 counts (001) and Rate change 100 (0.125 secs) */ ++ /* 2 counts (001) and Rate change 100 (0.125 secs) */ + data-> fandyn[i] = 0x30; + i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); + data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); + data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); + data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); + data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); +- + } + return 0; + } +@@ -665,8 +664,6 @@ static struct max6620_data *max6620_update_device(struct device *dev) + return data; + } + +-// module_i2c_driver(max6620_driver); +- + static int __init max6620_init(void) + { + return i2c_add_driver(&max6620_driver); diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch new file mode 100644 index 00000000..119c12ee --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch @@ -0,0 +1,753 @@ +Driver for MAX6620 Fan sensor + +From: Cumulus Networks + + +--- + drivers/hwmon/Kconfig | 10 + + drivers/hwmon/Makefile | 1 + drivers/hwmon/max6620.c | 702 +++++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 713 insertions(+) + create mode 100644 drivers/hwmon/max6620.c + +diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig +index 02d3d85..ca38e05 100644 +--- a/drivers/hwmon/Kconfig ++++ b/drivers/hwmon/Kconfig +@@ -784,6 +784,16 @@ config SENSORS_MAX6650 + This driver can also be built as a module. If so, the module + will be called max6650. + ++config SENSORS_MAX6620 ++ tristate "Maxim MAX6620 sensor chip" ++ depends on I2C ++ help ++ If you say yes here you get support for the MAX6620 ++ sensor chips. ++ ++ This driver can also be built as a module. If so, the module ++ will be called max6620. 
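The MAX6620 rpm-calculation fix above reassembles the full 11-bit tachometer count from two registers (TACHn supplies bits [10:3], TACHn+1 supplies bits [2:0] in its upper three bits) and converts it with rpm = (60 * sr * clock) / (tach * np), using the driver defaults clock = 8192, sr = 2 and np = 2. The following standalone C sketch only illustrates that arithmetic; the helper name max6620_rpm and the sample register values are hypothetical and are not part of the patches themselves.

#include <stdio.h>
#include <stdint.h>

/* Rebuild the 11-bit tach count and convert it to RPM, mirroring the
 * get_fan() logic added by driver-hwmon-max6620-fix-rpm-calc.patch. */
static uint32_t max6620_rpm(uint8_t tach_msb, uint8_t tach_lsb)
{
    const uint32_t clock = 8192, sr = 2, np = 2;   /* driver defaults */
    uint32_t tach = ((uint32_t)tach_msb << 3) | (tach_lsb >> 5);

    if (tach == 0)
        return 0;                  /* no valid reading, report 0 rpm */
    return (60 * sr * clock) / (tach * np);
}

int main(void)
{
    /* hypothetical register readings: 0x20 / 0x00 -> tach count 256 */
    printf("%u rpm\n", (unsigned)max6620_rpm(0x20, 0x00));
    return 0;
}

With those defaults the conversion reduces to rpm = 491520 / tach, so a tach count of 256 corresponds to 1920 rpm.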
++ + config SENSORS_MAX6697 + tristate "Maxim MAX6697 and compatibles" + depends on I2C +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile +index 3dc0f02..8837a7b 100644 +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -111,6 +111,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o + obj-$(CONFIG_SENSORS_MAX6639) += max6639.o + obj-$(CONFIG_SENSORS_MAX6642) += max6642.o + obj-$(CONFIG_SENSORS_MAX6650) += max6650.o ++obj-$(CONFIG_SENSORS_MAX6620) += max6620.o + obj-$(CONFIG_SENSORS_MAX6697) += max6697.o + obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o + obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o +diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c +new file mode 100644 +index 0000000..3c337c7 +--- /dev/null ++++ b/drivers/hwmon/max6620.c +@@ -0,0 +1,702 @@ ++/* ++ * max6620.c - Linux Kernel module for hardware monitoring. ++ * ++ * (C) 2012 by L. Grunenberg ++ * ++ * based on code written by : ++ * 2007 by Hans J. Koch ++ * John Morris ++ * Copyright (c) 2003 Spirent Communications ++ * and Claus Gindhart ++ * ++ * This module has only been tested with the MAX6620 chip. ++ * ++ * The datasheet was last seen at: ++ * ++ * http://pdfserv.maxim-ic.com/en/ds/MAX6620.pdf ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * Insmod parameters ++ */ ++ ++ ++/* clock: The clock frequency of the chip the driver should assume */ ++static int clock = 8192; ++ ++module_param(clock, int, S_IRUGO); ++ ++static const unsigned short normal_i2c[] = {0x0a, 0x1a, 0x2a, I2C_CLIENT_END}; ++ ++/* ++ * MAX 6620 registers ++ */ ++ ++#define MAX6620_REG_CONFIG 0x00 ++#define MAX6620_REG_FAULT 0x01 ++#define MAX6620_REG_CONF_FAN0 0x02 ++#define MAX6620_REG_CONF_FAN1 0x03 ++#define MAX6620_REG_CONF_FAN2 0x04 ++#define MAX6620_REG_CONF_FAN3 0x05 ++#define MAX6620_REG_DYN_FAN0 0x06 ++#define MAX6620_REG_DYN_FAN1 0x07 ++#define MAX6620_REG_DYN_FAN2 0x08 ++#define MAX6620_REG_DYN_FAN3 0x09 ++#define MAX6620_REG_TACH0 0x10 ++#define MAX6620_REG_TACH1 0x12 ++#define MAX6620_REG_TACH2 0x14 ++#define MAX6620_REG_TACH3 0x16 ++#define MAX6620_REG_VOLT0 0x18 ++#define MAX6620_REG_VOLT1 0x1A ++#define MAX6620_REG_VOLT2 0x1C ++#define MAX6620_REG_VOLT3 0x1E ++#define MAX6620_REG_TAR0 0x20 ++#define MAX6620_REG_TAR1 0x22 ++#define MAX6620_REG_TAR2 0x24 ++#define MAX6620_REG_TAR3 0x26 ++#define MAX6620_REG_DAC0 0x28 ++#define MAX6620_REG_DAC1 0x2A ++#define MAX6620_REG_DAC2 0x2C ++#define MAX6620_REG_DAC3 0x2E ++ ++/* ++ * Config register bits ++ */ ++ ++#define MAX6620_CFG_RUN 0x80 ++#define MAX6620_CFG_POR 0x40 ++#define MAX6620_CFG_TIMEOUT 0x20 ++#define MAX6620_CFG_FULLFAN 0x10 ++#define MAX6620_CFG_OSC 0x08 ++#define MAX6620_CFG_WD_MASK 0x06 ++#define MAX6620_CFG_WD_2 0x02 ++#define MAX6620_CFG_WD_6 0x04 ++#define MAX6620_CFG_WD10 0x06 ++#define MAX6620_CFG_WD 0x01 ++ ++ ++/* ++ * Failure status register bits ++ */ ++ ++#define MAX6620_FAIL_TACH0 0x10 ++#define MAX6620_FAIL_TACH1 0x20 ++#define MAX6620_FAIL_TACH2 0x40 ++#define MAX6620_FAIL_TACH3 0x80 ++#define MAX6620_FAIL_MASK0 0x01 ++#define MAX6620_FAIL_MASK1 0x02 ++#define MAX6620_FAIL_MASK2 0x04 ++#define MAX6620_FAIL_MASK3 0x08 ++ ++ ++/* Minimum and maximum values of the FAN-RPM */ ++#define FAN_RPM_MIN 240 ++#define FAN_RPM_MAX 30000 ++ ++#define DIV_FROM_REG(reg) (1 << ((reg & 0xE0) >> 5)) ++ ++static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id); ++static int max6620_init_client(struct i2c_client *client); ++static int max6620_remove(struct i2c_client *client); ++static struct max6620_data *max6620_update_device(struct device *dev); ++ ++static const u8 config_reg[] = { ++ MAX6620_REG_CONF_FAN0, ++ MAX6620_REG_CONF_FAN1, ++ MAX6620_REG_CONF_FAN2, ++ MAX6620_REG_CONF_FAN3, ++}; ++ ++static const u8 dyn_reg[] = { ++ MAX6620_REG_DYN_FAN0, ++ MAX6620_REG_DYN_FAN1, ++ MAX6620_REG_DYN_FAN2, ++ MAX6620_REG_DYN_FAN3, ++}; ++ ++static const u8 tach_reg[] = { ++ MAX6620_REG_TACH0, ++ MAX6620_REG_TACH1, ++ MAX6620_REG_TACH2, ++ MAX6620_REG_TACH3, ++}; ++ ++static const u8 volt_reg[] = { ++ MAX6620_REG_VOLT0, ++ MAX6620_REG_VOLT1, ++ MAX6620_REG_VOLT2, ++ MAX6620_REG_VOLT3, ++}; ++ ++static const u8 target_reg[] = { ++ MAX6620_REG_TAR0, ++ MAX6620_REG_TAR1, ++ MAX6620_REG_TAR2, ++ MAX6620_REG_TAR3, ++}; ++ ++static const u8 dac_reg[] = { ++ MAX6620_REG_DAC0, ++ MAX6620_REG_DAC1, ++ MAX6620_REG_DAC2, ++ MAX6620_REG_DAC3, ++}; ++ ++/* ++ * Driver data (common to all clients) ++ */ ++ ++static const struct i2c_device_id max6620_id[] = { ++ { "max6620", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, max6620_id); ++ ++static struct i2c_driver max6620_driver = { ++ .class = I2C_CLASS_HWMON, ++ .driver = { ++ .name = "max6620", ++ }, ++ .probe = max6620_probe, ++ 
.remove = __devexit_p(max6620_remove), ++ .id_table = max6620_id, ++ .address_list = normal_i2c, ++}; ++ ++/* ++ * Client data (each client gets its own) ++ */ ++ ++struct max6620_data { ++ struct device *hwmon_dev; ++ struct mutex update_lock; ++ int nr_fans; ++ char valid; /* zero until following fields are valid */ ++ unsigned long last_updated; /* in jiffies */ ++ ++ /* register values */ ++ u8 speed[4]; ++ u8 config; ++ u8 fancfg[4]; ++ u8 fandyn[4]; ++ u8 tach[4]; ++ u8 volt[4]; ++ u8 target[4]; ++ u8 dac[4]; ++ u8 fault; ++}; ++ ++static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ int rpm; ++ ++ /* ++ * Calculation details: ++ * ++ * Each tachometer counts over an interval given by the "count" ++ * register (0.25, 0.5, 1 or 2 seconds). This module assumes ++ * that the fans produce two pulses per revolution (this seems ++ * to be the most common). ++ */ ++ if(data->tach[attr->index] == 0 || data->tach[attr->index] == 255) { ++ rpm = 0; ++ } else { ++ rpm = ((clock / (data->tach[attr->index] << 3)) * 30 * DIV_FROM_REG(data->fandyn[attr->index])); ++ } ++ ++ return sprintf(buf, "%d\n", rpm); ++} ++ ++static ssize_t get_target(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ int kscale, ktach, rpm; ++ ++ /* ++ * Use the datasheet equation: ++ * ++ * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)] ++ * ++ * then multiply by 60 to give rpm. ++ */ ++ ++ kscale = DIV_FROM_REG(data->fandyn[attr->index]); ++ ktach = data->target[attr->index]; ++ if(ktach == 0) { ++ rpm = 0; ++ } else { ++ rpm = ((60 * kscale * clock) / (ktach << 3)); ++ } ++ return sprintf(buf, "%d\n", rpm); ++} ++ ++static ssize_t set_target(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ int kscale, ktach; ++ unsigned long rpm; ++ int err; ++ ++ err = kstrtoul(buf, 10, &rpm); ++ if (err) ++ return err; ++ ++ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); ++ ++ /* ++ * Divide the required speed by 60 to get from rpm to rps, then ++ * use the datasheet equation: ++ * ++ * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1 ++ */ ++ ++ mutex_lock(&data->update_lock); ++ ++ kscale = DIV_FROM_REG(data->fandyn[attr->index]); ++ ktach = ((60 * kscale * clock) / rpm); ++ if (ktach < 0) ++ ktach = 0; ++ if (ktach > 255) ++ ktach = 255; ++ data->target[attr->index] = ktach; ++ ++ i2c_smbus_write_byte_data(client, target_reg[attr->index], data->target[attr->index]); ++ i2c_smbus_write_byte_data(client, target_reg[attr->index]+0x01, 0x00); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Get/set the fan speed in open loop mode using pwm1 sysfs file. ++ * Speed is given as a relative value from 0 to 255, where 255 is maximum ++ * speed. Note that this is done by writing directly to the chip's DAC, ++ * it won't change the closed loop speed set by fan1_target. ++ * Also note that due to rounding errors it is possible that you don't read ++ * back exactly the value you have set. 
++ */ ++ ++static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ int pwm; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ ++ /* ++ * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans. ++ * Lower DAC values mean higher speeds. ++ */ ++ pwm = ((int)data->volt[attr->index]); ++ ++ if (pwm < 0) ++ pwm = 0; ++ ++ return sprintf(buf, "%d\n", pwm); ++} ++ ++static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ unsigned long pwm; ++ int err; ++ ++ err = kstrtoul(buf, 10, &pwm); ++ if (err) ++ return err; ++ ++ pwm = SENSORS_LIMIT(pwm, 0, 255); ++ ++ mutex_lock(&data->update_lock); ++ ++ data->dac[attr->index] = pwm; ++ ++ ++ i2c_smbus_write_byte_data(client, dac_reg[attr->index], data->dac[attr->index]); ++ i2c_smbus_write_byte_data(client, dac_reg[attr->index]+1, 0x00); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Get/Set controller mode: ++ * Possible values: ++ * 0 = Fan always on ++ * 1 = Open loop, Voltage is set according to speed, not regulated. ++ * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer ++ */ ++ ++static ssize_t get_enable(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct max6620_data *data = max6620_update_device(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ int mode = (data->fancfg[attr->index] & 0x80 ) >> 7; ++ int sysfs_modes[2] = {1, 2}; ++ ++ return sprintf(buf, "%d\n", sysfs_modes[mode]); ++} ++ ++static ssize_t set_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ int max6620_modes[3] = {0, 1, 0}; ++ unsigned long mode; ++ int err; ++ ++ err = kstrtoul(buf, 10, &mode); ++ if (err) ++ return err; ++ ++ if (mode > 2) ++ return -EINVAL; ++ ++ mutex_lock(&data->update_lock); ++ ++ data->fancfg[attr->index] = i2c_smbus_read_byte_data(client, config_reg[attr->index]); ++ data->fancfg[attr->index] = (data->fancfg[attr->index] & ~0x80) ++ | (max6620_modes[mode] << 7); ++ ++ i2c_smbus_write_byte_data(client, config_reg[attr->index], data->fancfg[attr->index]); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Read/write functions for fan1_div sysfs file. The MAX6620 has no such ++ * divider. We handle this by converting between divider and counttime: ++ * ++ * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, 3, 4 or 5 ++ * ++ * Lower values of k allow to connect a faster fan without the risk of ++ * counter overflow. The price is lower resolution. You can also set counttime ++ * using the module parameter. Note that the module parameter "prescaler" also ++ * influences the behaviour. Unfortunately, there's no sysfs attribute ++ * defined for that. See the data sheet for details. 
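As a rough worked example of the conversions described above (a stand-alone user-space sketch compiled outside the kernel, not part of the driver; the clock value of 8192 is the module-parameter default, while the tach count of 35 and dynamics register value of 0x40 are made up for illustration):

#include <stdio.h>

/* Mirrors DIV_FROM_REG(): bits 7:5 of the fan dynamics register select
 * the count time, giving divider = 2^k for k = 0..5. */
static int div_from_reg(unsigned char reg)
{
	return 1 << ((reg & 0xE0) >> 5);
}

/* Tach count to RPM, following get_fan() (two pulses per revolution). */
static int tach_to_rpm(int clock, unsigned char tach, unsigned char fandyn)
{
	if (tach == 0 || tach == 255)
		return 0;
	return (clock / (tach << 3)) * 30 * div_from_reg(fandyn);
}

int main(void)
{
	/* clock = 8192 (module default), tach count 35, divider 4 (k = 2):
	 * prints 3480 rpm */
	printf("%d rpm\n", tach_to_rpm(8192, 35, 0x40));
	return 0;
}

Since the reported RPM scales directly with the divider, doubling the divider doubles the value read back for a given tach count; this is also why set_div below only rewrites bits 7:5 of the dynamics register and leaves the low bits untouched.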
++ */ ++ ++static ssize_t get_div(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct max6620_data *data = max6620_update_device(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ ++ return sprintf(buf, "%d\n", DIV_FROM_REG(data->fandyn[attr->index])); ++} ++ ++static ssize_t set_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ unsigned long div; ++ int err; ++ u8 div_bin; ++ ++ err = kstrtoul(buf, 10, &div); ++ if (err) ++ return err; ++ ++ mutex_lock(&data->update_lock); ++ switch (div) { ++ case 1: ++ div_bin = 0; ++ break; ++ case 2: ++ div_bin = 1; ++ break; ++ case 4: ++ div_bin = 2; ++ break; ++ case 8: ++ div_bin = 3; ++ break; ++ case 16: ++ div_bin = 4; ++ break; ++ case 32: ++ div_bin = 5; ++ break; ++ default: ++ mutex_unlock(&data->update_lock); ++ return -EINVAL; ++ } ++ data->fandyn[attr->index] &= 0x1F; ++ data->fandyn[attr->index] |= div_bin << 5; ++ i2c_smbus_write_byte_data(client, dyn_reg[attr->index], data->fandyn[attr->index]); ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Get alarm stati: ++ * Possible values: ++ * 0 = no alarm ++ * 1 = alarm ++ */ ++ ++static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ struct i2c_client *client = to_i2c_client(dev); ++ int alarm = 0; ++ ++ if (data->fault & (1 << attr->index)) { ++ mutex_lock(&data->update_lock); ++ alarm = 1; ++ data->fault &= ~(1 << attr->index); ++ data->fault |= i2c_smbus_read_byte_data(client, ++ MAX6620_REG_FAULT); ++ mutex_unlock(&data->update_lock); ++ } ++ ++ return sprintf(buf, "%d\n", alarm); ++} ++ ++static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0); ++static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1); ++static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2); ++static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3); ++static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target, 0); ++static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div, 0); ++// static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 0); ++static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0); ++static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO, get_target, set_target, 1); ++static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, get_div, set_div, 1); ++// static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 1); ++static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1); ++static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO, get_target, set_target, 2); ++static SENSOR_DEVICE_ATTR(fan3_div, S_IWUSR | S_IRUGO, get_div, set_div, 2); ++// static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 2); ++static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2); ++static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO, get_target, set_target, 3); ++static SENSOR_DEVICE_ATTR(fan4_div, S_IWUSR | S_IRUGO, get_div, set_div, 3); ++// static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 3); ++static 
SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3); ++ ++static struct attribute *max6620_attrs[] = { ++ &sensor_dev_attr_fan1_input.dev_attr.attr, ++ &sensor_dev_attr_fan2_input.dev_attr.attr, ++ &sensor_dev_attr_fan3_input.dev_attr.attr, ++ &sensor_dev_attr_fan4_input.dev_attr.attr, ++ &sensor_dev_attr_fan1_target.dev_attr.attr, ++ &sensor_dev_attr_fan1_div.dev_attr.attr, ++// &sensor_dev_attr_pwm1_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm1.dev_attr.attr, ++ &sensor_dev_attr_fan2_target.dev_attr.attr, ++ &sensor_dev_attr_fan2_div.dev_attr.attr, ++// &sensor_dev_attr_pwm2_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm2.dev_attr.attr, ++ &sensor_dev_attr_fan3_target.dev_attr.attr, ++ &sensor_dev_attr_fan3_div.dev_attr.attr, ++// &sensor_dev_attr_pwm3_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm3.dev_attr.attr, ++ &sensor_dev_attr_fan4_target.dev_attr.attr, ++ &sensor_dev_attr_fan4_div.dev_attr.attr, ++// &sensor_dev_attr_pwm4_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm4.dev_attr.attr, ++ NULL ++}; ++ ++static struct attribute_group max6620_attr_grp = { ++ .attrs = max6620_attrs, ++}; ++ ++ ++/* ++ * Real code ++ */ ++ ++static int __devinit max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { ++ ++ struct max6620_data *data; ++ int err; ++ ++ data = devm_kzalloc(&client->dev, sizeof(struct max6620_data), GFP_KERNEL); ++ if (!data) { ++ dev_err(&client->dev, "out of memory.\n"); ++ return -ENOMEM; ++ } ++ ++ i2c_set_clientdata(client, data); ++ mutex_init(&data->update_lock); ++ data->nr_fans = id->driver_data; ++ ++ /* ++ * Initialize the max6620 chip ++ */ ++ dev_info(&client->dev, "About to initialize module\n"); ++ ++ err = max6620_init_client(client); ++ if (err) ++ return err; ++ dev_info(&client->dev, "Module initialized\n"); ++ ++ err = sysfs_create_group(&client->dev.kobj, &max6620_attr_grp); ++ if (err) ++ return err; ++dev_info(&client->dev, "Sysfs entries created\n"); ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (!IS_ERR(data->hwmon_dev)) ++ return 0; ++ ++ err = PTR_ERR(data->hwmon_dev); ++ dev_err(&client->dev, "error registering hwmon device.\n"); ++ ++ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp); ++ return err; ++} ++ ++static int __devexit max6620_remove(struct i2c_client *client) { ++ ++ struct max6620_data *data = i2c_get_clientdata(client); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ ++ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp); ++ return 0; ++} ++ ++static int max6620_init_client(struct i2c_client *client) { ++ ++ struct max6620_data *data = i2c_get_clientdata(client); ++ int config; ++ int err = -EIO; ++ int i; ++ ++ config = i2c_smbus_read_byte_data(client, MAX6620_REG_CONFIG); ++ ++ if (config < 0) { ++ dev_err(&client->dev, "Error reading config, aborting.\n"); ++ return err; ++ } ++ ++ ++ ++ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config)) { ++ dev_err(&client->dev, "Config write error, aborting.\n"); ++ return err; ++ } ++ ++ data->config = config; ++ for (i = 0; i < 4; i++) { ++ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); ++ data->fancfg[i] |= 0x80; // enable TACH monitoring ++ i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); ++ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); ++ data-> fandyn[i] |= 0x1C; ++ i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); ++ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); ++ data->volt[i] = i2c_smbus_read_byte_data(client, 
volt_reg[i]); ++ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); ++ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); ++ ++ ++ ++ } ++ ++ ++ ++ return 0; ++} ++ ++ ++ ++ ++static struct max6620_data *max6620_update_device(struct device *dev) ++{ ++ int i; ++ struct i2c_client *client = to_i2c_client(dev); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { ++ ++ for (i = 0; i < 4; i++) { ++ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); ++ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); ++ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); ++ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); ++ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); ++ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); ++ } ++ ++ ++ /* ++ * Alarms are cleared on read in case the condition that ++ * caused the alarm is removed. Keep the value latched here ++ * for providing the register through different alarm files. ++ */ ++ u8 fault_reg; ++ fault_reg = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT); ++ data->fault |= (fault_reg >> 4) & (fault_reg & 0x0F); ++ ++ data->last_updated = jiffies; ++ data->valid = 1; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ ++ return data; ++} ++ ++module_i2c_driver(max6620_driver); ++ ++static int __init max6620_init(void) ++{ ++ return i2c_add_driver(&max6620_driver); ++} ++module_init(max6620_init); ++ ++/** ++ * sht21_init() - clean up driver ++ * ++ * Called when module is removed. ++ */ ++static void __exit max6620_exit(void) ++{ ++ i2c_del_driver(&max6620_driver); ++} ++module_exit(max6620_exit); ++ ++MODULE_AUTHOR("Lucas Grunenberg"); ++MODULE_DESCRIPTION("MAX6620 sensor driver"); ++MODULE_LICENSE("GPL"); diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch new file mode 100644 index 00000000..812f619a --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch @@ -0,0 +1,78 @@ +enable PMBUS_SKIP_STATUS_CHECK for dps460 + +From: Vadim Pasternak + +Patch for pmbus - includes disabling of PMBus status check through platform data structure. +This is due to some PMBus don't support the STATUS_CML register, or report communication errors +for no explicable reason. For such chips, checking the status register must be disabled. 
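The mechanism the description above relies on is the generic core reading the client's platform data: pmbus_do_probe() picks up dev_get_platdata() and copies pdata->flags into its private data, so setting PMBUS_SKIP_STATUS_CHECK before probing disables the status-register check for that client. A minimal sketch of a front-end driver using the same hook (the function name is illustrative, and devm_kzalloc is chosen here only so that no explicit kfree is needed on remove):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/pmbus.h>
#include <linux/slab.h>

static int example_pmbus_probe(struct i2c_client *client,
			       const struct i2c_device_id *id)
{
	struct pmbus_platform_data *pdata;

	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Skip the status-register checks, as described above */
	pdata->flags = PMBUS_SKIP_STATUS_CHECK;
	client->dev.platform_data = pdata;

	/* ... build the pmbus_driver_info and hand off to pmbus_do_probe() ... */
	return 0;
}

The patch itself allocates the structure with kzalloc() and frees it in pmbus_do_remove(); either approach works as long as the platform data stays allocated for the lifetime of the client.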
+--- + drivers/hwmon/pmbus/pmbus.c | 14 ++++++++++++++ + drivers/hwmon/pmbus/pmbus_core.c | 3 +++ + 2 files changed, 17 insertions(+) + +diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c +index 7e91700..6dd75fb 100644 +--- a/drivers/hwmon/pmbus/pmbus.c ++++ b/drivers/hwmon/pmbus/pmbus.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include "pmbus.h" + + /* +@@ -166,14 +167,26 @@ static int pmbus_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { + struct pmbus_driver_info *info; ++ struct pmbus_platform_data *pdata = NULL; ++ struct device *dev = &client->dev; + + info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + ++ if (!strncmp(id->name, "dps460", sizeof("dps460"))) { ++ pdata = kzalloc(sizeof(struct pmbus_platform_data), GFP_KERNEL); ++ if (!pdata) { ++ kfree(info); ++ return -ENOMEM; ++ } ++ pdata->flags = PMBUS_SKIP_STATUS_CHECK; ++ } ++ + info->pages = id->driver_data; + info->identify = pmbus_identify; ++ dev->platform_data = pdata; + + return pmbus_do_probe(client, id, info); + } +@@ -195,6 +208,7 @@ static const struct i2c_device_id pmbus_id[] = { + {"tps40400", 1}, + {"tps40422", 2}, + {"udt020", 1}, ++ {"dps460", 1}, + {} + }; + +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c +index 291d11f..09b123f 100644 +--- a/drivers/hwmon/pmbus/pmbus_core.c ++++ b/drivers/hwmon/pmbus/pmbus_core.c +@@ -1792,8 +1792,11 @@ EXPORT_SYMBOL_GPL(pmbus_do_probe); + int pmbus_do_remove(struct i2c_client *client) + { + struct pmbus_data *data = i2c_get_clientdata(client); ++ const struct pmbus_platform_data *pdata = dev_get_platdata(&client->dev); + hwmon_device_unregister(data->hwmon_dev); + kfree(data->group.attrs); ++ if (pdata) ++ kfree(pdata); + return 0; + } + EXPORT_SYMBOL_GPL(pmbus_do_remove); +-- +2.1.4 + diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch new file mode 100644 index 00000000..38550707 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch @@ -0,0 +1,96 @@ +Update pmbus_data data structure to meet kernel implementation + +From: Shuotian Cheng + +The pmbus_data data structure is pasted in the driver. +Cumulus patch is for kernel 3.2.x. +Update this data structure to meet current kernel (3.16.x) implementation. +--- + drivers/hwmon/pmbus/dni_dps460.c | 42 +++++++++++++++----------------------- + 1 file changed, 17 insertions(+), 25 deletions(-) + +diff --git a/drivers/hwmon/pmbus/dni_dps460.c b/drivers/hwmon/pmbus/dni_dps460.c +index c687217..1607b65 100644 +--- a/drivers/hwmon/pmbus/dni_dps460.c ++++ b/drivers/hwmon/pmbus/dni_dps460.c +@@ -39,41 +39,32 @@ enum chips { dni_dps460 }; + #define FAN_VALUE_MAX 0x64 + + /* Needed to access the mutex. 
Copied from pmbus_core.c */ +-#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) ++#define PB_STATUS_BASE 0 ++#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES) ++#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES) ++#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES) ++#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES) ++#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) ++#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES) ++#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1) ++#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1) + struct pmbus_data { ++ struct device *dev; + struct device *hwmon_dev; + + u32 flags; /* from platform data */ + +- int exponent; /* linear mode: exponent for output voltages */ ++ int exponent[PMBUS_PAGES]; ++ /* linear mode: exponent for output voltages */ + + const struct pmbus_driver_info *info; + + int max_attributes; + int num_attributes; +- struct attribute **attributes; + struct attribute_group group; ++ const struct attribute_group *groups[2]; + +- /* +- * Sensors cover both sensor and limit registers. +- */ +- int max_sensors; +- int num_sensors; + struct pmbus_sensor *sensors; +- /* +- * Booleans are used for alarms. +- * Values are determined from status registers. +- */ +- int max_booleans; +- int num_booleans; +- struct pmbus_boolean *booleans; +- /* +- * Labels are used to map generic names (e.g., "in1") +- * to PMBus specific names (e.g., "vin" or "vout1"). +- */ +- int max_labels; +- int num_labels; +- struct pmbus_label *labels; + + struct mutex update_lock; + bool valid; +@@ -84,6 +75,7 @@ struct pmbus_data { + * so we keep them all together. + */ + u8 status[PB_NUM_STATUS_REG]; ++ u8 status_register; + + u8 currpage; + }; +@@ -123,14 +115,14 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, + struct i2c_client *client = to_i2c_client(dev); + struct pmbus_data *data = i2c_get_clientdata(client); + int err; +- unsigned int val; +- unsigned int rpm; ++ unsigned long val; ++ unsigned long rpm; + + err = kstrtol(buf, 10, &rpm); + if (err) + return err; + +- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); ++ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX); + + mutex_lock(&data->update_lock); + diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch new file mode 100644 index 00000000..8d93c157 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch @@ -0,0 +1,304 @@ +Add PMBUS driver for DNI DPS460 Power Supply + +From: Cumulus Networks + + +--- + drivers/hwmon/pmbus/Kconfig | 10 ++ + drivers/hwmon/pmbus/Makefile | 1 + drivers/hwmon/pmbus/dni_dps460.c | 253 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 264 insertions(+) + create mode 100644 drivers/hwmon/pmbus/dni_dps460.c + +diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig +index ec48945..7d3b1aa 100644 +--- a/drivers/hwmon/pmbus/Kconfig ++++ b/drivers/hwmon/pmbus/Kconfig +@@ -77,6 +77,16 @@ config SENSORS_MAX34440 + This driver can also be built as a module. If so, the module will + be called max34440. + ++config SENSORS_DNI_DPS460 ++ tristate "Delta DPS460" ++ default n ++ help ++ If you say yes here you get hardware monitoring support for Delta ++ DPS460. ++ ++ This driver can also be built as a module. If so, the module will ++ be called dni_dps460. 
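With the Kconfig and Makefile hooks above in place, the driver is selected like the other pmbus front ends; a configuration fragment along these lines should be enough (the Kconfig hunk above is truncated, so the exact "depends on" wiring is an assumption based on how the neighbouring pmbus drivers are set up):

CONFIG_HWMON=y
CONFIG_PMBUS=m
CONFIG_SENSORS_DNI_DPS460=m

Because the chip is matched by the "dni_dps460" device name rather than auto-detected, the power supply still has to be declared by platform code, or instantiated from user space through the i2c adapter's new_device interface, at whatever address the PSU answers on.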
++ + config SENSORS_MAX8688 + tristate "Maxim MAX8688" + default n +diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile +index 5e6c316..767d086 100644 +--- a/drivers/hwmon/pmbus/Makefile ++++ b/drivers/hwmon/pmbus/Makefile +@@ -9,6 +9,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o + obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o + obj-$(CONFIG_SENSORS_MAX16064) += max16064.o + obj-$(CONFIG_SENSORS_MAX34440) += max34440.o ++obj-$(CONFIG_SENSORS_DNI_DPS460) += dni_dps460.o + obj-$(CONFIG_SENSORS_MAX8688) += max8688.o + obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o + obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o +diff --git a/drivers/hwmon/pmbus/dni_dps460.c b/drivers/hwmon/pmbus/dni_dps460.c +new file mode 100644 +index 0000000..c687217 +--- /dev/null ++++ b/drivers/hwmon/pmbus/dni_dps460.c +@@ -0,0 +1,253 @@ ++/* ++ * Hardware monitoring driver for Delta DPS460 ++ * ++ * Copyright (C) 2014 Cumulus Networks, LLC ++ * Author: Puneet Shenoy ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pmbus.h" ++ ++enum chips { dni_dps460 }; ++ ++/* Data provided by DELL Inc */ ++#define FAN_RPM_MIN 7200 ++#define FAN_RPM_MAX 18000 ++#define FAN_VALUE_MIN 0x28 ++#define FAN_VALUE_MAX 0x64 ++ ++/* Needed to access the mutex. Copied from pmbus_core.c */ ++#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) ++struct pmbus_data { ++ struct device *hwmon_dev; ++ ++ u32 flags; /* from platform data */ ++ ++ int exponent; /* linear mode: exponent for output voltages */ ++ ++ const struct pmbus_driver_info *info; ++ ++ int max_attributes; ++ int num_attributes; ++ struct attribute **attributes; ++ struct attribute_group group; ++ ++ /* ++ * Sensors cover both sensor and limit registers. ++ */ ++ int max_sensors; ++ int num_sensors; ++ struct pmbus_sensor *sensors; ++ /* ++ * Booleans are used for alarms. ++ * Values are determined from status registers. ++ */ ++ int max_booleans; ++ int num_booleans; ++ struct pmbus_boolean *booleans; ++ /* ++ * Labels are used to map generic names (e.g., "in1") ++ * to PMBus specific names (e.g., "vin" or "vout1"). ++ */ ++ int max_labels; ++ int num_labels; ++ struct pmbus_label *labels; ++ ++ struct mutex update_lock; ++ bool valid; ++ unsigned long last_updated; /* in jiffies */ ++ ++ /* ++ * A single status register covers multiple attributes, ++ * so we keep them all together. ++ */ ++ u8 status[PB_NUM_STATUS_REG]; ++ ++ u8 currpage; ++}; ++ ++/* ++ * We are only concerned with the first fan. The get_target and set_target are ++ * are written accordingly. 
++ */ ++static ssize_t get_target(struct device *dev, struct device_attribute *devattr, ++ char *buf) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct pmbus_data *data = i2c_get_clientdata(client); ++ int val; ++ u32 rpm; ++ ++ /* ++ * The FAN_COMMAND_n takes a value which is not the RPM. ++ * The value and RPM have a liner relation. ++ * rpm = (FAN_RPM_MIN/FAN_VALUE_MIN) * val ++ * The slope is (FAN_RPM_MIN/FAN_VALUE_MIN) = 180 ++ */ ++ mutex_lock(&data->update_lock); ++ val = pmbus_read_word_data(client, 0, PMBUS_FAN_COMMAND_1); ++ pmbus_clear_faults(client); ++ mutex_unlock(&data->update_lock); ++ if (val < 0) { ++ return val; ++ } ++ rpm = val * (FAN_RPM_MIN/FAN_VALUE_MIN); ++ return sprintf(buf, "%d\n", rpm); ++} ++ ++static ssize_t set_target(struct device *dev, struct device_attribute *devattr, ++ const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct pmbus_data *data = i2c_get_clientdata(client); ++ int err; ++ unsigned int val; ++ unsigned int rpm; ++ ++ err = kstrtol(buf, 10, &rpm); ++ if (err) ++ return err; ++ ++ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); ++ ++ mutex_lock(&data->update_lock); ++ ++ val = FAN_VALUE_MIN * rpm; ++ val /= FAN_RPM_MIN; ++ pmbus_write_word_data(client, 0, PMBUS_FAN_COMMAND_1, (u16)val); ++ pmbus_clear_faults(client); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++static ssize_t show_pec(struct device *dev, struct device_attribute *dummy, ++ char *buf) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC)); ++} ++ ++static ssize_t set_pec(struct device *dev, struct device_attribute *dummy, ++ const char *buf, size_t count) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ long val; ++ int err; ++ ++ err = strict_strtol(buf, 10, &val); ++ if (err < 0) ++ return err; ++ ++ if (val != 0) ++ client->flags |= I2C_CLIENT_PEC; ++ else ++ client->flags &= ~I2C_CLIENT_PEC; ++ ++ return count; ++} ++ ++static SENSOR_DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec, 0); ++static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, ++ set_target, 0); ++ ++static struct attribute *dni_dps460_attrs[] = { ++ &sensor_dev_attr_fan1_target.dev_attr.attr, ++ &sensor_dev_attr_pec.dev_attr.attr, ++ NULL ++}; ++static struct attribute_group dni_dps460_attr_grp = { ++ .attrs = dni_dps460_attrs, ++}; ++ ++static int dni_dps460_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ struct pmbus_driver_info *info; ++ int ret; ++ ++ if (!i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_BYTE_DATA | ++ I2C_FUNC_SMBUS_WORD_DATA | ++ I2C_FUNC_SMBUS_PEC)) ++ return -ENODEV; ++ ++ /* Needs PEC(PACKET ERROR CODE). Writes wont work without this. */ ++ client->flags = I2C_CLIENT_PEC; ++ ++ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL); ++ if (!info) ++ return -ENOMEM; ++ ++ /* Use only 1 page with 1 Fan, 2 Temps. 
*/ ++ info->pages = 1; ++ info->func[0] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | ++ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP; ++ ++ ret = pmbus_do_probe(client, id, info); ++ if (ret < 0) ++ goto out; ++ ++ ret = sysfs_create_group(&client->dev.kobj, &dni_dps460_attr_grp); ++ if (ret) ++ goto out; ++ return 0; ++out: ++ kfree(info); ++ return ret; ++} ++ ++static int dni_dps460_remove(struct i2c_client *client) ++{ ++ struct pmbus_data *data = i2c_get_clientdata(client); ++ ++ sysfs_remove_group(&client->dev.kobj, &dni_dps460_attr_grp); ++ if (data->info) ++ kfree(data->info); ++ pmbus_do_remove(client); ++ return 0; ++} ++ ++static const struct i2c_device_id dni_dps460_id[] = { ++ {"dni_dps460", dni_dps460}, ++ {} ++}; ++MODULE_DEVICE_TABLE(i2c, dni_dps460_id); ++ ++static struct i2c_driver dni_dps460_driver = { ++ .driver = { ++ .name = "dni_dps460", ++ }, ++ .probe = dni_dps460_probe, ++ .remove = dni_dps460_remove, ++ .id_table = dni_dps460_id, ++}; ++ ++module_i2c_driver(dni_dps460_driver); ++ ++MODULE_AUTHOR("Puneet Shenoy"); ++MODULE_DESCRIPTION("PMBus driver for Delta DPS460"); ++MODULE_LICENSE("GPL"); diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch new file mode 100644 index 00000000..5d948675 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch @@ -0,0 +1,89 @@ +mlnx patch for UCD9200 + +From: Vadim Pasternak + +Patch replaces in device probing routine (ucd9000_probe) call +i2c_smbus_read_block_data with i2c_smbus_read_i2c_block_data. + +The first call executes the SMBus "block read" protocol. +Using this function requires that the client's adapter support +the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter +drivers support this. In particular Mellanox i2c controller doesn't +support it. API i2c_smbus_read_i2c_block_data is supposed to be +more generic and be supported by all i2c client adapters. 
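The practical difference between the two block-read helpers, sketched with the stock i2c.h API (this helper is illustrative, not code from the patch): i2c_smbus_read_block_data() runs the SMBus Block Read protocol, in which the slave announces the byte count, while i2c_smbus_read_i2c_block_data() runs an I2C-style block read whose length the host supplies, which is the variant more adapters can support.

#include <linux/i2c.h>

static int example_read_device_id(struct i2c_client *client, u8 cmd, u8 *buf)
{
	int ret;

	if (i2c_check_functionality(client->adapter,
				    I2C_FUNC_SMBUS_READ_BLOCK_DATA))
		/* SMBus Block Read: the slave sends the length first */
		ret = i2c_smbus_read_block_data(client, cmd, buf);
	else
		/* I2C block read: the host fixes the length (8 bytes here,
		 * matching what the patch requests for UCD9200_DEVICE_ID) */
		ret = i2c_smbus_read_i2c_block_data(client, cmd, 8, buf);

	if (ret < 0)
		return ret;

	buf[ret] = '\0';	/* both helpers return the number of bytes read */
	return ret;
}

Both helpers expect buf to hold at least I2C_SMBUS_BLOCK_MAX + 1 bytes, which is why the probe routine sizes its block_buffer that way.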
+--- + drivers/hwmon/pmbus/ucd9200.c | 26 +++++++++++++++++++++----- + 1 files changed, 21 insertions(+), 5 deletions(-) + +diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c +index 033d6ac..119130c 100644 +--- a/drivers/hwmon/pmbus/ucd9200.c ++++ b/drivers/hwmon/pmbus/ucd9200.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include "pmbus.h" + + #define UCD9200_PHASE_INFO 0xd2 +@@ -52,14 +53,15 @@ static int ucd9200_probe(struct i2c_client *client, + u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; + struct pmbus_driver_info *info; + const struct i2c_device_id *mid; +- int i, j, ret; ++ int i, j, ret, n, len; ++ u8* buff; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + +- ret = i2c_smbus_read_block_data(client, UCD9200_DEVICE_ID, ++ ret = i2c_smbus_read_i2c_block_data(client, UCD9200_DEVICE_ID, 8, + block_buffer); + if (ret < 0) { + dev_err(&client->dev, "Failed to read device ID\n"); +@@ -68,8 +70,22 @@ static int ucd9200_probe(struct i2c_client *client, + block_buffer[ret] = '\0'; + dev_info(&client->dev, "Device ID %s\n", block_buffer); + ++ len = strlen(block_buffer); ++ for (n=0; n < len; n++) { ++ if (isalnum(block_buffer[n])) ++ break; ++ } ++ if (n >= len) { ++ dev_err(&client->dev, "Incorrect device name\n"); ++ return -ENODEV; ++ } ++ buff = &block_buffer[n]; ++ len = strlen(buff); ++ + for (mid = ucd9200_id; mid->name[0]; mid++) { +- if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) ++ if (len != strlen(mid->name)) ++ continue; ++ if (!strncasecmp(mid->name, buff, strlen(mid->name))) + break; + } + if (!mid->name[0]) { +@@ -86,7 +102,7 @@ static int ucd9200_probe(struct i2c_client *client, + if (!info) + return -ENOMEM; + +- ret = i2c_smbus_read_block_data(client, UCD9200_PHASE_INFO, ++ ret = i2c_smbus_read_i2c_block_data(client, UCD9200_PHASE_INFO, 4, + block_buffer); + if (ret < 0) { + dev_err(&client->dev, "Failed to read phase information\n"); +@@ -100,7 +116,7 @@ static int ucd9200_probe(struct i2c_client *client, + * the first unconfigured rail. + */ + info->pages = 0; +- for (i = 0; i < ret; i++) { ++ for (i = 1; i < ret; i++) { + if (!block_buffer[i]) + break; + info->pages++; diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch new file mode 100644 index 00000000..bf6c4fc7 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch @@ -0,0 +1,57 @@ +Add 'delay' module param to the driver. + +From: Cumulus Networks + +This is needed on S6000 for safe PMBUS access. +Without setting the 'delay', the ismt driver throws 'completion wait +timed out' error message. 
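When the bus driver is built as a module, the new knob can be set at load time, for example with a modprobe.d options line (values illustrative; 1000 matches the default the patch installs):

options i2c-ismt delay=1000

For a built-in driver the same parameter is reachable from the kernel command line as i2c_ismt.delay=1000, and the current value can be read back from /sys/module/i2c_ismt/parameters/delay since the parameter is declared with S_IRUGO.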
+--- + drivers/i2c/busses/i2c-ismt.c | 13 ++++++++++--- + 1 file changed, 10 insertions(+), 3 deletions(-) + +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index d9ee43c..b2b3856 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -70,6 +70,7 @@ + #include + #include + #include ++#include + + #include + +@@ -192,9 +193,12 @@ static const struct pci_device_id ismt_ids[] = { + MODULE_DEVICE_TABLE(pci, ismt_ids); + + /* Bus speed control bits for slow debuggers - refer to the docs for usage */ +-static unsigned int bus_speed; ++static unsigned int bus_speed = 100; ++static unsigned int delay = 1000; + module_param(bus_speed, uint, S_IRUGO); +-MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)"); ++MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (1000 by default)"); ++module_param(delay, uint, S_IRUGO); ++MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)"); + + /** + * __ismt_desc_dump() - dump the contents of a specific descriptor +@@ -391,6 +395,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, + struct ismt_priv *priv = i2c_get_adapdata(adap); + struct device *dev = &priv->pci_dev->dev; + ++ if (delay > 0) ++ udelay(delay); ++ + desc = &priv->hw[priv->head]; + + /* Initialize the DMA buffer */ +@@ -756,7 +763,7 @@ static void ismt_hw_init(struct ismt_priv *priv) + bus_speed = 1000; + break; + } +- dev_dbg(dev, "SMBus clock is running at %d kHz\n", bus_speed); ++ dev_info(dev, "SMBus clock is running at %d kHz with delay %d us\n", bus_speed, delay); + } + + /** diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch new file mode 100644 index 00000000..612b02db --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch @@ -0,0 +1,27 @@ +diff -urpN a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +--- a/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:12:49.589201206 +0000 ++++ b/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:15:03.973204122 +0000 +@@ -200,6 +200,11 @@ MODULE_PARM_DESC(bus_speed, "Bus Speed i + module_param(delay, uint, S_IRUGO); + MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)"); + ++/* Enable/Disable driver */ ++static unsigned int enable = 1; ++module_param(enable, uint, S_IRUGO); ++MODULE_PARM_DESC(enable, "Enable or disable the ISMT driver (enabled by default)"); ++ + /** + * __ismt_desc_dump() - dump the contents of a specific descriptor + */ +@@ -852,6 +857,11 @@ ismt_probe(struct pci_dev *pdev, const s + struct ismt_priv *priv; + unsigned long start, len; + ++ if(!enable) { ++ dev_warn(&pdev->dev, "module is disabled.\n"); ++ return -ENODEV; ++ } ++ + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch b/packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch new file mode 100644 index 00000000..a3134c43 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch @@ -0,0 +1,48795 @@ +diff -Nu a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile +--- a/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 09:20:24.786171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 10:43:55.318238134 +0000 +@@ -32,5 +32,7 @@ + 
obj-$(CONFIG_IGB) += igb.o + + igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ +- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ +- e1000_i210.o igb_ptp.o igb_hwmon.o ++ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ ++ e1000_i210.o igb_ptp.o igb_hwmon.o \ ++ e1000_manage.o igb_param.o kcompat.o e1000_api.o \ ++ igb_vmdq.o igb_procfs.o igb_debugfs.o +diff -Nu a/drivers/net/ethernet/intel/igb/Module.supported b/drivers/net/ethernet/intel/igb/Module.supported +--- a/drivers/net/ethernet/intel/igb/Module.supported 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/Module.supported 2016-11-13 10:27:24.246224975 +0000 +@@ -0,0 +1 @@ ++igb.ko external +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c +--- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 14:32:08.575567168 +0000 +@@ -1,94 +1,134 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ +- +-/* e1000_82575 +- * e1000_82576 +- */ ++/******************************************************************************* + +-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. + +-#include +-#include +-#include ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ++ * 82575EB Gigabit Network Connection ++ * 82575EB Gigabit Backplane Connection ++ * 82575GB Gigabit Network Connection ++ * 82576 Gigabit Network Connection ++ * 82576 Quad Port Gigabit Mezzanine Adapter ++ * 82580 Gigabit Network Connection ++ * I350 Gigabit Network Connection ++ */ + +-#include "e1000_mac.h" +-#include "e1000_82575.h" ++#include "e1000_api.h" + #include "e1000_i210.h" + +-static s32 igb_get_invariants_82575(struct e1000_hw *); +-static s32 igb_acquire_phy_82575(struct e1000_hw *); +-static void igb_release_phy_82575(struct e1000_hw *); +-static s32 igb_acquire_nvm_82575(struct e1000_hw *); +-static void igb_release_nvm_82575(struct e1000_hw *); +-static s32 igb_check_for_link_82575(struct e1000_hw *); +-static s32 igb_get_cfg_done_82575(struct e1000_hw *); +-static s32 igb_init_hw_82575(struct e1000_hw *); +-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +-static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); +-static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); +-static s32 igb_reset_hw_82575(struct e1000_hw *); +-static s32 igb_reset_hw_82580(struct e1000_hw *); +-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +-static s32 igb_setup_copper_link_82575(struct e1000_hw *); +-static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +-static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, +- u16 *); +-static s32 igb_get_phy_id_82575(struct e1000_hw *); +-static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +-static bool igb_sgmii_active_82575(struct e1000_hw *); +-static s32 igb_reset_init_script_82575(struct e1000_hw *); +-static s32 igb_read_mac_addr_82575(struct e1000_hw *); +-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); ++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); ++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); ++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); ++static void e1000_release_phy_82575(struct e1000_hw *hw); ++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); ++static void e1000_release_nvm_82575(struct e1000_hw *hw); ++static s32 e1000_check_for_link_82575(struct e1000_hw *hw); ++static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); ++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); ++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, ++ u16 *duplex); ++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); ++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, ++ u16 *data); ++static s32 e1000_reset_hw_82575(struct e1000_hw *hw); ++static s32 e1000_reset_hw_82580(struct e1000_hw 
*hw); ++static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, ++ u32 offset, u16 *data); ++static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, ++ u32 offset, u16 data); ++static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, ++ bool active); ++static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, ++ bool active); ++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, ++ bool active); ++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); ++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); ++static s32 e1000_get_media_type_82575(struct e1000_hw *hw); ++static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); ++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); ++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, ++ u32 offset, u16 data); ++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); ++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); ++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, ++ u16 *speed, u16 *duplex); ++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); ++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); ++static bool e1000_sgmii_active_82575(struct e1000_hw *hw); ++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); ++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); ++static void e1000_config_collision_dist_82575(struct e1000_hw *hw); ++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); ++static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); ++static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); ++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); ++static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); ++static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); ++static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); ++static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, ++ u16 offset); ++static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, ++ u16 offset); ++static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); ++static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); ++static void e1000_clear_vfta_i350(struct e1000_hw *hw); ++ ++static void e1000_i2c_start(struct e1000_hw *hw); ++static void e1000_i2c_stop(struct e1000_hw *hw); ++static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); ++static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); ++static s32 e1000_get_i2c_ack(struct e1000_hw *hw); ++static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); ++static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); ++static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); ++static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); ++static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); ++static bool e1000_get_i2c_data(u32 *i2cctl); ++ + static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; ++#define E1000_82580_RXPBS_TABLE_SIZE \ ++ (sizeof(e1000_82580_rxpbs_table) / \ ++ sizeof(e1000_82580_rxpbs_table[0])) + + /** +- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO ++ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the 
two options are mutually exclusive. + **/ +-static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) ++static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) + { + u32 reg = 0; + bool ext_mdio = false; + ++ DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); ++ + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: +- reg = rd32(E1000_MDIC); ++ reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: +@@ -96,7 +136,7 @@ + case e1000_i354: + case e1000_i210: + case e1000_i211: +- reg = rd32(E1000_MDICNFG); ++ reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: +@@ -106,135 +146,98 @@ + } + + /** +- * igb_check_for_link_media_swap - Check which M88E1112 interface linked +- * @hw: pointer to the HW structure +- * +- * Poll the M88E1112 interfaces to see which interface achieved link. +- */ +-static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +-{ +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; +- u16 data; +- u8 port = 0; +- +- /* Check the copper medium. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); +- if (ret_val) +- return ret_val; +- +- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); +- if (ret_val) +- return ret_val; +- +- if (data & E1000_M88E1112_STATUS_LINK) +- port = E1000_MEDIA_PORT_COPPER; +- +- /* Check the other medium. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); +- if (ret_val) +- return ret_val; +- +- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); +- if (ret_val) +- return ret_val; +- +- /* reset page to 0 */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); +- if (ret_val) +- return ret_val; +- +- if (data & E1000_M88E1112_STATUS_LINK) +- port = E1000_MEDIA_PORT_OTHER; +- +- /* Determine if a swap needs to happen. */ +- if (port && (hw->dev_spec._82575.media_port != port)) { +- hw->dev_spec._82575.media_port = port; +- hw->dev_spec._82575.media_changed = true; +- } else { +- ret_val = igb_check_for_link_82575(hw); +- } +- +- return 0; +-} +- +-/** +- * igb_init_phy_params_82575 - Init PHY func ptrs. ++ * e1000_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +-static s32 igb_init_phy_params_82575(struct e1000_hw *hw) ++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; + ++ DEBUGFUNC("e1000_init_phy_params_82575"); ++ ++ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; ++ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; ++ + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + ++ phy->ops.power_up = igb_e1000_power_up_phy_copper; ++ phy->ops.power_down = e1000_power_down_phy_copper_82575; ++ + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ phy->ops.acquire = e1000_acquire_phy_82575; ++ phy->ops.check_reset_block = e1000_check_reset_block_generic; ++ phy->ops.commit = e1000_phy_sw_reset_generic; ++ phy->ops.get_cfg_done = e1000_get_cfg_done_82575; ++ phy->ops.release = e1000_release_phy_82575; + +- if (igb_sgmii_active_82575(hw)) { +- phy->ops.reset = igb_phy_hw_reset_sgmii_82575; ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ++ if (e1000_sgmii_active_82575(hw)) { ++ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { +- phy->ops.reset = igb_phy_hw_reset; ++ phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + +- wr32(E1000_CTRL_EXT, ctrl_ext); +- igb_reset_mdicnfg_82580(hw); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ e1000_reset_mdicnfg_82580(hw); + +- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { +- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; +- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; ++ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { ++ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; ++ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: +- phy->ops.read_reg = igb_read_phy_reg_82580; +- phy->ops.write_reg = igb_write_phy_reg_82580; ++ phy->ops.read_reg = e1000_read_phy_reg_82580; ++ phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: +- phy->ops.read_reg = igb_read_phy_reg_gs40g; +- phy->ops.write_reg = igb_write_phy_reg_gs40g; ++ phy->ops.read_reg = e1000_read_phy_reg_gs40g; ++ phy->ops.write_reg = e1000_write_phy_reg_gs40g; + break; + default: +- phy->ops.read_reg = igb_read_phy_reg_igp; +- phy->ops.write_reg = igb_write_phy_reg_igp; ++ phy->ops.read_reg = e1000_read_phy_reg_igp; ++ phy->ops.write_reg = e1000_write_phy_reg_igp; + } + } + +- /* set lan id */ +- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> +- E1000_STATUS_FUNC_SHIFT; +- + /* Set phy->phy_addr and phy->id. 
*/ +- ret_val = igb_get_phy_id_82575(hw); +- if (ret_val) +- return ret_val; ++ ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; +- phy->ops.check_polarity = igb_check_polarity_m88; +- phy->ops.get_phy_info = igb_get_phy_info_m88; +- if (phy->id != M88E1111_I_PHY_ID) ++ phy->ops.check_polarity = igb_e1000_check_polarity_m88; ++ phy->ops.get_info = e1000_get_phy_info_m88; ++ if (phy->id == I347AT4_E_PHY_ID || ++ phy->id == M88E1112_E_PHY_ID || ++ phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = +- igb_get_cable_length_m88_gen2; ++ e1000_get_cable_length_m88_gen2; ++ else if (phy->id == M88E1543_E_PHY_ID || ++ phy->id == M88E1512_E_PHY_ID) ++ phy->ops.get_cable_length = ++ e1000_get_cable_length_m88_gen2; + else +- phy->ops.get_cable_length = igb_get_cable_length_m88; +- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; ++ phy->ops.get_cable_length = e1000_get_cable_length_m88; ++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. */ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; +@@ -256,35 +259,48 @@ + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = +- igb_check_for_link_media_swap; ++ e1000_check_for_link_media_swap; ++ } ++ if (phy->id == M88E1512_E_PHY_ID) { ++ ret_val = e1000_initialize_M88E1512_phy(hw); ++ if (ret_val) ++ goto out; ++ } ++ if (phy->id == M88E1543_E_PHY_ID) { ++ ret_val = e1000_initialize_M88E1543_phy(hw); ++ if (ret_val) ++ goto out; + } + break; + case IGP03E1000_E_PHY_ID: ++ case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; +- phy->ops.get_phy_info = igb_get_phy_info_igp; +- phy->ops.get_cable_length = igb_get_cable_length_igp_2; +- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; +- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; +- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; ++ phy->ops.check_polarity = igb_e1000_check_polarity_igp; ++ phy->ops.get_info = e1000_get_phy_info_igp; ++ phy->ops.get_cable_length = e1000_get_cable_length_igp_2; ++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; ++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; ++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; ++ phy->ops.check_polarity = igb_e1000_check_polarity_82577; + phy->ops.force_speed_duplex = +- igb_phy_force_speed_duplex_82580; +- phy->ops.get_cable_length = igb_get_cable_length_82580; +- phy->ops.get_phy_info = igb_get_phy_info_82580; +- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; +- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; ++ igb_e1000_phy_force_speed_duplex_82577; ++ phy->ops.get_cable_length = igb_e1000_get_cable_length_82577; ++ phy->ops.get_info = igb_e1000_get_phy_info_82577; ++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; ++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; +- phy->ops.check_polarity = igb_check_polarity_m88; +- phy->ops.get_phy_info = igb_get_phy_info_m88; +- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; +- phy->ops.set_d0_lplu_state = 
igb_set_d0_lplu_state_82580; +- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; +- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; ++ phy->ops.check_polarity = igb_e1000_check_polarity_m88; ++ phy->ops.get_info = e1000_get_phy_info_m88; ++ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; ++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; ++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; ++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; +@@ -296,19 +312,21 @@ + } + + /** +- * igb_init_nvm_params_82575 - Init NVM func ptrs. ++ * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +-static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) ++s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + ++ DEBUGFUNC("e1000_init_nvm_params_82575"); ++ + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); +- +- /* Added to a constant, "size" becomes the left-shift value ++ /* ++ * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; +@@ -320,433 +338,272 @@ + size = 15; + + nvm->word_size = 1 << size; +- nvm->opcode_bits = 8; +- nvm->delay_usec = 1; ++ if (hw->mac.type < e1000_i210) { ++ nvm->opcode_bits = 8; ++ nvm->delay_usec = 1; ++ ++ switch (nvm->override) { ++ case e1000_nvm_override_spi_large: ++ nvm->page_size = 32; ++ nvm->address_bits = 16; ++ break; ++ case e1000_nvm_override_spi_small: ++ nvm->page_size = 8; ++ nvm->address_bits = 8; ++ break; ++ default: ++ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; ++ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? ++ 16 : 8; ++ break; ++ } ++ if (nvm->word_size == (1 << 15)) ++ nvm->page_size = 128; + +- switch (nvm->override) { +- case e1000_nvm_override_spi_large: +- nvm->page_size = 32; +- nvm->address_bits = 16; +- break; +- case e1000_nvm_override_spi_small: +- nvm->page_size = 8; +- nvm->address_bits = 8; +- break; +- default: +- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; +- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+- 16 : 8; +- break; +- } +- if (nvm->word_size == (1 << 15)) +- nvm->page_size = 128; +- +- nvm->type = e1000_nvm_eeprom_spi; +- +- /* NVM Function Pointers */ +- nvm->ops.acquire = igb_acquire_nvm_82575; +- nvm->ops.release = igb_release_nvm_82575; +- nvm->ops.write = igb_write_nvm_spi; +- nvm->ops.validate = igb_validate_nvm_checksum; +- nvm->ops.update = igb_update_nvm_checksum; ++ nvm->type = e1000_nvm_eeprom_spi; ++ } else { ++ nvm->type = e1000_nvm_flash_hw; ++ } ++ ++ /* Function Pointers */ ++ nvm->ops.acquire = e1000_acquire_nvm_82575; ++ nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) +- nvm->ops.read = igb_read_nvm_eerd; ++ nvm->ops.read = e1000_read_nvm_eerd; + else +- nvm->ops.read = igb_read_nvm_spi; ++ nvm->ops.read = e1000_read_nvm_spi; ++ ++ nvm->ops.write = e1000_write_nvm_spi; ++ nvm->ops.validate = e1000_validate_nvm_checksum_generic; ++ nvm->ops.update = e1000_update_nvm_checksum_generic; ++ nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: +- nvm->ops.validate = igb_validate_nvm_checksum_82580; +- nvm->ops.update = igb_update_nvm_checksum_82580; ++ nvm->ops.validate = e1000_validate_nvm_checksum_82580; ++ nvm->ops.update = e1000_update_nvm_checksum_82580; + break; +- case e1000_i354: + case e1000_i350: +- nvm->ops.validate = igb_validate_nvm_checksum_i350; +- nvm->ops.update = igb_update_nvm_checksum_i350; ++ case e1000_i354: ++ nvm->ops.validate = e1000_validate_nvm_checksum_i350; ++ nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_init_mac_params_82575 - Init MAC func ptrs. ++ * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +-static s32 igb_init_mac_params_82575(struct e1000_hw *hw) ++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + ++ DEBUGFUNC("e1000_init_mac_params_82575"); ++ ++ /* Derives media type */ ++ e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; ++ /* Set uta register count */ ++ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 
0 : 128; + /* Set rar entry count */ +- switch (mac->type) { +- case e1000_82576: ++ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; ++ if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; +- break; +- case e1000_82580: ++ if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; +- break; +- case e1000_i350: +- case e1000_i354: ++ if (mac->type == e1000_i350 || mac->type == e1000_i354) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; +- break; +- default: +- mac->rar_entry_count = E1000_RAR_ENTRIES_82575; +- break; +- } +- /* reset */ +- if (mac->type >= e1000_82580) +- mac->ops.reset_hw = igb_reset_hw_82580; +- else +- mac->ops.reset_hw = igb_reset_hw_82575; + +- if (mac->type >= e1000_i210) { +- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; +- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; ++ /* Enable EEE default settings for EEE supported devices */ ++ if (mac->type >= e1000_i350) ++ dev_spec->eee_disable = false; + +- } else { +- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; +- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; +- } ++ /* Allow a single clear of the SW semaphore on I210 and newer */ ++ if (mac->type >= e1000_i210) ++ dev_spec->clear_semaphore_once = true; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; +- /* Set if manageability features are enabled. */ ++ /* FWSM register */ ++ mac->has_fwsm = true; ++ /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = +- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) +- ? true : false; +- /* enable EEE on i350 parts and later parts */ +- if (mac->type >= e1000_i350) +- dev_spec->eee_disable = false; ++ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); ++ ++ /* Function pointers */ ++ ++ /* bus type/speed/width */ ++ mac->ops.get_bus_info = igb_e1000_get_bus_info_pcie_generic; ++ /* reset */ ++ if (mac->type >= e1000_82580) ++ mac->ops.reset_hw = e1000_reset_hw_82580; + else +- dev_spec->eee_disable = true; +- /* Allow a single clear of the SW semaphore on I210 and newer */ +- if (mac->type >= e1000_i210) +- dev_spec->clear_semaphore_once = true; ++ mac->ops.reset_hw = e1000_reset_hw_82575; ++ /* hw initialization */ ++ if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) ++ mac->ops.init_hw = e1000_init_hw_i210; ++ else ++ mac->ops.init_hw = e1000_init_hw_82575; ++ /* link setup */ ++ mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) +- ? igb_setup_copper_link_82575 +- : igb_setup_serdes_link_82575; +- +- if (mac->type == e1000_82580) { +- switch (hw->device_id) { +- /* feature not supported on these id's */ +- case E1000_DEV_ID_DH89XXCC_SGMII: +- case E1000_DEV_ID_DH89XXCC_SERDES: +- case E1000_DEV_ID_DH89XXCC_BACKPLANE: +- case E1000_DEV_ID_DH89XXCC_SFP: +- break; +- default: +- hw->dev_spec._82575.mas_capable = true; +- break; +- } ++ ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; ++ /* physical interface shutdown */ ++ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; ++ /* physical interface power up */ ++ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; ++ /* check for link */ ++ mac->ops.check_for_link = e1000_check_for_link_82575; ++ /* read mac address */ ++ mac->ops.read_mac_addr = e1000_read_mac_addr_82575; ++ /* configure collision distance */ ++ mac->ops.config_collision_dist = e1000_config_collision_dist_82575; ++ /* multicast address update */ ++ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; ++ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { ++ /* writing VFTA */ ++ mac->ops.write_vfta = e1000_write_vfta_i350; ++ /* clearing VFTA */ ++ mac->ops.clear_vfta = e1000_clear_vfta_i350; ++ } else { ++ /* writing VFTA */ ++ mac->ops.write_vfta = igb_e1000_write_vfta_generic; ++ /* clearing VFTA */ ++ mac->ops.clear_vfta = igb_e1000_clear_vfta_generic; ++ } ++ if (hw->mac.type >= e1000_82580) ++ mac->ops.validate_mdi_setting = ++ e1000_validate_mdi_setting_crossover_generic; ++ /* ID LED init */ ++ mac->ops.id_led_init = e1000_id_led_init_generic; ++ /* blink LED */ ++ mac->ops.blink_led = e1000_blink_led_generic; ++ /* setup LED */ ++ mac->ops.setup_led = e1000_setup_led_generic; ++ /* cleanup LED */ ++ mac->ops.cleanup_led = e1000_cleanup_led_generic; ++ /* turn on/off LED */ ++ mac->ops.led_on = e1000_led_on_generic; ++ mac->ops.led_off = e1000_led_off_generic; ++ /* clear hardware counters */ ++ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; ++ /* link info */ ++ mac->ops.get_link_up_info = e1000_get_link_up_info_82575; ++ /* get thermal sensor data */ ++ mac->ops.get_thermal_sensor_data = ++ e1000_get_thermal_sensor_data_generic; ++ mac->ops.init_thermal_sensor_thresh = ++ e1000_init_thermal_sensor_thresh_generic; ++ /* acquire SW_FW sync */ ++ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; ++ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; ++ if (mac->type >= e1000_i210) { ++ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; ++ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } +- return 0; ++ ++ /* set lan id for port to determine which phy lock to use */ ++ hw->mac.ops.set_lan_id(hw); ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_set_sfp_media_type_82575 - derives SFP module media type. ++ * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * +- * The media type is chosen based on SFP module. +- * compatibility flags retrieved from SFP ID EEPROM. ++ * Called to initialize all function pointers and parameters. 
+ **/ +-static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) ++void e1000_init_function_pointers_82575(struct e1000_hw *hw) + { +- s32 ret_val = E1000_ERR_CONFIG; +- u32 ctrl_ext = 0; +- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; +- u8 tranceiver_type = 0; +- s32 timeout = 3; ++ DEBUGFUNC("e1000_init_function_pointers_82575"); + +- /* Turn I2C interface ON and power on sfp cage */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; +- wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); ++ hw->mac.ops.init_params = e1000_init_mac_params_82575; ++ hw->nvm.ops.init_params = e1000_init_nvm_params_82575; ++ hw->phy.ops.init_params = e1000_init_phy_params_82575; ++ hw->mbx.ops.init_params = e1000_init_mbx_params_pf; ++} + +- wrfl(); ++/** ++ * e1000_acquire_phy_82575 - Acquire rights to access PHY ++ * @hw: pointer to the HW structure ++ * ++ * Acquire access rights to the correct PHY. ++ **/ ++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) ++{ ++ u16 mask = E1000_SWFW_PHY0_SM; + +- /* Read SFP module data */ +- while (timeout) { +- ret_val = igb_read_sfp_data_byte(hw, +- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), +- &tranceiver_type); +- if (ret_val == 0) +- break; +- msleep(100); +- timeout--; +- } +- if (ret_val != 0) +- goto out; ++ DEBUGFUNC("e1000_acquire_phy_82575"); + +- ret_val = igb_read_sfp_data_byte(hw, +- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), +- (u8 *)eth_flags); +- if (ret_val != 0) +- goto out; ++ if (hw->bus.func == E1000_FUNC_1) ++ mask = E1000_SWFW_PHY1_SM; ++ else if (hw->bus.func == E1000_FUNC_2) ++ mask = E1000_SWFW_PHY2_SM; ++ else if (hw->bus.func == E1000_FUNC_3) ++ mask = E1000_SWFW_PHY3_SM; + +- /* Check if there is some SFP module plugged and powered */ +- if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || +- (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { +- dev_spec->module_plugged = true; +- if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { +- hw->phy.media_type = e1000_media_type_internal_serdes; +- } else if (eth_flags->e100_base_fx) { +- dev_spec->sgmii_active = true; +- hw->phy.media_type = e1000_media_type_internal_serdes; +- } else if (eth_flags->e1000_base_t) { +- dev_spec->sgmii_active = true; +- hw->phy.media_type = e1000_media_type_copper; +- } else { +- hw->phy.media_type = e1000_media_type_unknown; +- hw_dbg("PHY module has not been recognized\n"); +- goto out; +- } +- } else { +- hw->phy.media_type = e1000_media_type_unknown; +- } +- ret_val = 0; +-out: +- /* Restore I2C interface setting */ +- wr32(E1000_CTRL_EXT, ctrl_ext); +- return ret_val; ++ return hw->mac.ops.acquire_swfw_sync(hw, mask); + } + +-static s32 igb_get_invariants_82575(struct e1000_hw *hw) ++/** ++ * e1000_release_phy_82575 - Release rights to access PHY ++ * @hw: pointer to the HW structure ++ * ++ * A wrapper to release access rights to the correct PHY. 
++ **/ ++static void e1000_release_phy_82575(struct e1000_hw *hw) + { +- struct e1000_mac_info *mac = &hw->mac; +- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- s32 ret_val; +- u32 ctrl_ext = 0; +- u32 link_mode = 0; ++ u16 mask = E1000_SWFW_PHY0_SM; + +- switch (hw->device_id) { +- case E1000_DEV_ID_82575EB_COPPER: +- case E1000_DEV_ID_82575EB_FIBER_SERDES: +- case E1000_DEV_ID_82575GB_QUAD_COPPER: +- mac->type = e1000_82575; +- break; +- case E1000_DEV_ID_82576: +- case E1000_DEV_ID_82576_NS: +- case E1000_DEV_ID_82576_NS_SERDES: +- case E1000_DEV_ID_82576_FIBER: +- case E1000_DEV_ID_82576_SERDES: +- case E1000_DEV_ID_82576_QUAD_COPPER: +- case E1000_DEV_ID_82576_QUAD_COPPER_ET2: +- case E1000_DEV_ID_82576_SERDES_QUAD: +- mac->type = e1000_82576; +- break; +- case E1000_DEV_ID_82580_COPPER: +- case E1000_DEV_ID_82580_FIBER: +- case E1000_DEV_ID_82580_QUAD_FIBER: +- case E1000_DEV_ID_82580_SERDES: +- case E1000_DEV_ID_82580_SGMII: +- case E1000_DEV_ID_82580_COPPER_DUAL: +- case E1000_DEV_ID_DH89XXCC_SGMII: +- case E1000_DEV_ID_DH89XXCC_SERDES: +- case E1000_DEV_ID_DH89XXCC_BACKPLANE: +- case E1000_DEV_ID_DH89XXCC_SFP: +- mac->type = e1000_82580; +- break; +- case E1000_DEV_ID_I350_COPPER: +- case E1000_DEV_ID_I350_FIBER: +- case E1000_DEV_ID_I350_SERDES: +- case E1000_DEV_ID_I350_SGMII: +- mac->type = e1000_i350; +- break; +- case E1000_DEV_ID_I210_COPPER: +- case E1000_DEV_ID_I210_FIBER: +- case E1000_DEV_ID_I210_SERDES: +- case E1000_DEV_ID_I210_SGMII: +- case E1000_DEV_ID_I210_COPPER_FLASHLESS: +- case E1000_DEV_ID_I210_SERDES_FLASHLESS: +- mac->type = e1000_i210; +- break; +- case E1000_DEV_ID_I211_COPPER: +- mac->type = e1000_i211; +- break; +- case E1000_DEV_ID_I354_BACKPLANE_1GBPS: +- case E1000_DEV_ID_I354_SGMII: +- case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: +- mac->type = e1000_i354; +- break; +- default: +- return -E1000_ERR_MAC_INIT; +- break; +- } ++ DEBUGFUNC("e1000_release_phy_82575"); + +- /* Set media type */ +- /* The 82575 uses bits 22:23 for link mode. The mode can be changed +- * based on the EEPROM. We cannot rely upon device ID. There +- * is no distinguishable difference between fiber and internal +- * SerDes mode on the 82575. There can be an external PHY attached +- * on the SGMII interface. For this, we'll set sgmii_active to true. +- */ +- hw->phy.media_type = e1000_media_type_copper; +- dev_spec->sgmii_active = false; +- dev_spec->module_plugged = false; ++ if (hw->bus.func == E1000_FUNC_1) ++ mask = E1000_SWFW_PHY1_SM; ++ else if (hw->bus.func == E1000_FUNC_2) ++ mask = E1000_SWFW_PHY2_SM; ++ else if (hw->bus.func == E1000_FUNC_3) ++ mask = E1000_SWFW_PHY3_SM; + +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ hw->mac.ops.release_swfw_sync(hw, mask); ++} + +- link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; +- switch (link_mode) { +- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: +- hw->phy.media_type = e1000_media_type_internal_serdes; +- break; +- case E1000_CTRL_EXT_LINK_MODE_SGMII: +- /* Get phy control interface type set (MDIO vs. I2C)*/ +- if (igb_sgmii_uses_mdio_82575(hw)) { +- hw->phy.media_type = e1000_media_type_copper; +- dev_spec->sgmii_active = true; +- break; +- } +- /* fall through for I2C based SGMII */ +- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: +- /* read media type from SFP EEPROM */ +- ret_val = igb_set_sfp_media_type_82575(hw); +- if ((ret_val != 0) || +- (hw->phy.media_type == e1000_media_type_unknown)) { +- /* If media type was not identified then return media +- * type defined by the CTRL_EXT settings. 
+- */ +- hw->phy.media_type = e1000_media_type_internal_serdes; ++/** ++ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Reads the PHY register at offset using the serial gigabit media independent ++ * interface and stores the retrieved information in data. ++ **/ ++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, ++ u16 *data) ++{ ++ s32 ret_val = -E1000_ERR_PARAM; + +- if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { +- hw->phy.media_type = e1000_media_type_copper; +- dev_spec->sgmii_active = true; +- } ++ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + +- break; +- } ++ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { ++ DEBUGOUT1("PHY Address %u is out of range\n", offset); ++ goto out; ++ } + +- /* do not change link mode for 100BaseFX */ +- if (dev_spec->eth_flags.e100_base_fx) +- break; ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ goto out; + +- /* change current link mode setting */ +- ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; +- +- if (hw->phy.media_type == e1000_media_type_copper) +- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; +- else +- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; +- +- wr32(E1000_CTRL_EXT, ctrl_ext); +- +- break; +- default: +- break; +- } +- +- /* mac initialization and operations */ +- ret_val = igb_init_mac_params_82575(hw); +- if (ret_val) +- goto out; +- +- /* NVM initialization */ +- ret_val = igb_init_nvm_params_82575(hw); +- switch (hw->mac.type) { +- case e1000_i210: +- case e1000_i211: +- ret_val = igb_init_nvm_params_i210(hw); +- break; +- default: +- break; +- } +- +- if (ret_val) +- goto out; +- +- /* if part supports SR-IOV then initialize mailbox parameters */ +- switch (mac->type) { +- case e1000_82576: +- case e1000_i350: +- igb_init_mbx_params_pf(hw); +- break; +- default: +- break; +- } +- +- /* setup PHY parameters */ +- ret_val = igb_init_phy_params_82575(hw); +- +-out: +- return ret_val; +-} +- +-/** +- * igb_acquire_phy_82575 - Acquire rights to access PHY +- * @hw: pointer to the HW structure +- * +- * Acquire access rights to the correct PHY. This is a +- * function pointer entry point called by the api module. +- **/ +-static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +-{ +- u16 mask = E1000_SWFW_PHY0_SM; +- +- if (hw->bus.func == E1000_FUNC_1) +- mask = E1000_SWFW_PHY1_SM; +- else if (hw->bus.func == E1000_FUNC_2) +- mask = E1000_SWFW_PHY2_SM; +- else if (hw->bus.func == E1000_FUNC_3) +- mask = E1000_SWFW_PHY3_SM; +- +- return hw->mac.ops.acquire_swfw_sync(hw, mask); +-} +- +-/** +- * igb_release_phy_82575 - Release rights to access PHY +- * @hw: pointer to the HW structure +- * +- * A wrapper to release access rights to the correct PHY. This is a +- * function pointer entry point called by the api module. 
+- **/ +-static void igb_release_phy_82575(struct e1000_hw *hw) +-{ +- u16 mask = E1000_SWFW_PHY0_SM; +- +- if (hw->bus.func == E1000_FUNC_1) +- mask = E1000_SWFW_PHY1_SM; +- else if (hw->bus.func == E1000_FUNC_2) +- mask = E1000_SWFW_PHY2_SM; +- else if (hw->bus.func == E1000_FUNC_3) +- mask = E1000_SWFW_PHY3_SM; +- +- hw->mac.ops.release_swfw_sync(hw, mask); +-} +- +-/** +- * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii +- * @hw: pointer to the HW structure +- * @offset: register offset to be read +- * @data: pointer to the read data +- * +- * Reads the PHY register at offset using the serial gigabit media independent +- * interface and stores the retrieved information in data. +- **/ +-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, +- u16 *data) +-{ +- s32 ret_val = -E1000_ERR_PARAM; +- +- if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { +- hw_dbg("PHY Address %u is out of range\n", offset); +- goto out; +- } +- +- ret_val = hw->phy.ops.acquire(hw); +- if (ret_val) +- goto out; +- +- ret_val = igb_read_phy_reg_i2c(hw, offset, data); ++ ret_val = e1000_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -755,7 +612,7 @@ + } + + /** +- * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii ++ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset +@@ -763,14 +620,15 @@ + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, ++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) + { + s32 ret_val = -E1000_ERR_PARAM; + ++ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { +- hw_dbg("PHY Address %d is out of range\n", offset); ++ DEBUGOUT1("PHY Address %d is out of range\n", offset); + goto out; + } + +@@ -778,7 +636,7 @@ + if (ret_val) + goto out; + +- ret_val = igb_write_phy_reg_i2c(hw, offset, data); ++ ret_val = e1000_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -787,41 +645,44 @@ + } + + /** +- * igb_get_phy_id_82575 - Retrieve PHY addr and id ++ * e1000_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +-static s32 igb_get_phy_id_82575(struct e1000_hw *hw) ++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + +- /* Extra read required for some PHY's on i354 */ ++ DEBUGFUNC("e1000_get_phy_id_82575"); ++ ++ /* some i354 devices need an extra read for phy id */ + if (hw->mac.type == e1000_i354) +- igb_get_phy_id(hw); ++ e1000_get_phy_id(hw); + +- /* For SGMII PHYs, we try the list of possible addresses until ++ /* ++ * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
+ */ +- if (!(igb_sgmii_active_82575(hw))) { ++ if (!e1000_sgmii_active_82575(hw)) { + phy->addr = 1; +- ret_val = igb_get_phy_id(hw); ++ ret_val = e1000_get_phy_id(hw); + goto out; + } + +- if (igb_sgmii_uses_mdio_82575(hw)) { ++ if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: +- mdic = rd32(E1000_MDIC); ++ mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; +@@ -830,7 +691,7 @@ + case e1000_i354: + case e1000_i210: + case e1000_i211: +- mdic = rd32(E1000_MDICNFG); ++ mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; +@@ -839,31 +700,35 @@ + goto out; + break; + } +- ret_val = igb_get_phy_id(hw); ++ ret_val = e1000_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); +- wrfl(); +- msleep(300); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ++ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(300); + +- /* The address field in the I2CCMD register is 3 bits and 0 is invalid. ++ /* ++ * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { +- ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); +- if (ret_val == 0) { +- hw_dbg("Vendor ID 0x%08X read at address %u\n", +- phy_id, phy->addr); +- /* At the time of this writing, The M88 part is ++ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); ++ if (ret_val == E1000_SUCCESS) { ++ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", ++ phy_id, phy->addr); ++ /* ++ * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { +- hw_dbg("PHY address %u was unreadable\n", phy->addr); ++ DEBUGOUT1("PHY address %u was unreadable\n", ++ phy->addr); + } + } + +@@ -871,49 +736,60 @@ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; +- goto out; + } else { +- ret_val = igb_get_phy_id(hw); ++ ret_val = e1000_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ +- wr32(E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + out: + return ret_val; + } + + /** +- * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset ++ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) ++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) + { +- s32 ret_val; ++ s32 ret_val = E1000_SUCCESS; ++ struct e1000_phy_info *phy = &hw->phy; + +- /* This isn't a true "hard" reset, but is the only reset ++ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); ++ ++ /* ++ * This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + +- hw_dbg("Soft resetting SGMII attached PHY...\n"); ++ DEBUGOUT("Soft resetting SGMII attached PHY...\n"); ++ ++ if (!(hw->phy.ops.write_reg)) ++ goto out; + +- /* SFP documentation requires the following to configure the SPF module ++ /* ++ * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. 
+ */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = hw->phy.ops.commit(hw); ++ if (ret_val) ++ goto out; + ++ if (phy->id == M88E1512_E_PHY_ID) ++ ret_val = e1000_initialize_M88E1512_phy(hw); + out: + return ret_val; + } + + /** +- * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state ++ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * +@@ -925,12 +801,17 @@ + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) ++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; ++ s32 ret_val = E1000_SUCCESS; + u16 data; + ++ DEBUGFUNC("e1000_set_d0_lplu_state_82575"); ++ ++ if (!(hw->phy.ops.read_reg)) ++ goto out; ++ + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; +@@ -938,47 +819,52 @@ + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, +- data); ++ data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, +- &data); ++ &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, +- data); ++ data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, +- data); +- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used ++ data); ++ /* ++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, &data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, &data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ data); + if (ret_val) + goto out; + } +@@ -989,7 +875,7 @@ + } + + /** +- * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state ++ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * +@@ -1001,12 +887,14 @@ + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) ++static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- u16 data; ++ u32 data; + +- data = rd32(E1000_82580_PHY_POWER_MGMT); ++ DEBUGFUNC("e1000_set_d0_lplu_state_82580"); ++ ++ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; +@@ -1016,7 +904,8 @@ + } else { + data &= ~E1000_82580_PM_D0_LPLU; + +- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used ++ /* ++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. +@@ -1024,14 +913,15 @@ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) +- data &= ~E1000_82580_PM_SPD; } ++ data &= ~E1000_82580_PM_SPD; ++ } + +- wr32(E1000_82580_PHY_POWER_MGMT, data); +- return 0; ++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); ++ return E1000_SUCCESS; + } + + /** +- * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 ++ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * +@@ -1044,16 +934,19 @@ + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) ++s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- u16 data; ++ u32 data; + +- data = rd32(E1000_82580_PHY_POWER_MGMT); ++ DEBUGFUNC("e1000_set_d3_lplu_state_82580"); ++ ++ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; +- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used ++ /* ++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. +@@ -1070,12 +963,12 @@ + data &= ~E1000_82580_PM_SPD; + } + +- wr32(E1000_82580_PHY_POWER_MGMT, data); +- return 0; ++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); ++ return E1000_SUCCESS; + } + + /** +- * igb_acquire_nvm_82575 - Request for access to EEPROM ++ * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. +@@ -1083,148 +976,183 @@ + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ **/ +-static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) ++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) + { +- s32 ret_val; ++ s32 ret_val = E1000_SUCCESS; + +- ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); ++ DEBUGFUNC("e1000_acquire_nvm_82575"); ++ ++ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + +- ret_val = igb_acquire_nvm(hw); ++ /* ++ * Check if there is some access ++ * error this access may hook on ++ */ ++ if (hw->mac.type == e1000_i350) { ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); ++ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | ++ E1000_EECD_TIMEOUT)) { ++ /* Clear all access error flags */ ++ E1000_WRITE_REG(hw, E1000_EECD, eecd | ++ E1000_EECD_ERROR_CLR); ++ DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); ++ } ++ } ++ ++ if (hw->mac.type == e1000_82580) { ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); ++ if (eecd & E1000_EECD_BLOCKED) { ++ /* Clear access error flag */ ++ E1000_WRITE_REG(hw, E1000_EECD, eecd | ++ E1000_EECD_BLOCKED); ++ DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); ++ } ++ } + ++ ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) +- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); ++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + + out: + return ret_val; + } + + /** +- * igb_release_nvm_82575 - Release exclusive access to EEPROM ++ * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +-static void igb_release_nvm_82575(struct e1000_hw *hw) ++static void e1000_release_nvm_82575(struct e1000_hw *hw) + { +- igb_release_nvm(hw); +- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); ++ DEBUGFUNC("e1000_release_nvm_82575"); ++ ++ e1000_release_nvm_generic(hw); ++ ++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + } + + /** +- * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore ++ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) ++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; +- s32 ret_val = 0; +- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ ++ s32 ret_val = E1000_SUCCESS; ++ s32 i = 0, timeout = 200; ++ ++ DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + + while (i < timeout) { +- if (igb_get_hw_semaphore(hw)) { ++ if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + +- /* Firmware currently using resource (fwmask) ++ /* ++ * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ +- igb_put_hw_semaphore(hw); +- mdelay(5); ++ e1000_put_hw_semaphore_generic(hw); ++ msec_delay_irq(5); + i++; + } + + if (i == timeout) { +- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); ++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + +- igb_put_hw_semaphore(hw); ++ e1000_put_hw_semaphore_generic(hw); + + out: + return ret_val; + } + + /** +- * igb_release_swfw_sync_82575 - Release SW/FW semaphore ++ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +-static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) ++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + +- while (igb_get_hw_semaphore(hw) != 0) ++ DEBUGFUNC("e1000_release_swfw_sync_82575"); ++ ++ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + +- igb_put_hw_semaphore(hw); ++ e1000_put_hw_semaphore_generic(hw); + } + + /** +- * igb_get_cfg_done_82575 - Read config done bit ++ * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns +- * 0. If we were to return with error, EEPROM-less silicon ++ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. 
+ **/ +-static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) ++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) + { + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + +- if (hw->bus.func == 1) ++ DEBUGFUNC("e1000_get_cfg_done_82575"); ++ ++ if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; +- + while (timeout) { +- if (rd32(E1000_EEMNGCTL) & mask) ++ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + timeout--; + } + if (!timeout) +- hw_dbg("MNG configuration cycle has not completed.\n"); ++ DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ +- if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && ++ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) +- igb_phy_init_script_igp3(hw); ++ e1000_phy_init_script_igp3(hw); + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_link_up_info_82575 - Get link speed/duplex info ++ * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex +@@ -1233,87 +1161,156 @@ + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +-static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, ++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_get_link_up_info_82575"); ++ + if (hw->phy.media_type != e1000_media_type_copper) +- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, ++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else +- ret_val = igb_get_speed_and_duplex_copper(hw, speed, ++ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; + } + + /** +- * igb_check_for_link_82575 - Check for link ++ * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +-static s32 igb_check_for_link_82575(struct e1000_hw *hw) ++static s32 e1000_check_for_link_82575(struct e1000_hw *hw) + { + s32 ret_val; + u16 speed, duplex; + ++ DEBUGFUNC("e1000_check_for_link_82575"); ++ + if (hw->phy.media_type != e1000_media_type_copper) { +- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, +- &duplex); +- /* Use this flag to determine if link needs to be checked or +- * not. If we have link clear the flag so that we do not ++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, ++ &duplex); ++ /* ++ * Use this flag to determine if link needs to be checked or ++ * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + +- /* Configure Flow Control now that Auto-Neg has completed. ++ /* ++ * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ +- ret_val = igb_config_fc_after_link_up(hw); ++ ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) +- hw_dbg("Error configuring flow control\n"); ++ DEBUGOUT("Error configuring flow control\n"); + } else { +- ret_val = igb_check_for_copper_link(hw); ++ ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; + } + + /** +- * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown ++ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked ++ * @hw: pointer to the HW structure ++ * ++ * Poll the M88E1112 interfaces to see which interface achieved link. ++ */ ++static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data; ++ u8 port = 0; ++ ++ DEBUGFUNC("e1000_check_for_link_media_swap"); ++ ++ /* Check for copper. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); ++ if (ret_val) ++ return ret_val; ++ ++ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); ++ if (ret_val) ++ return ret_val; ++ ++ if (data & E1000_M88E1112_STATUS_LINK) ++ port = E1000_MEDIA_PORT_COPPER; ++ ++ /* Check for other. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); ++ if (ret_val) ++ return ret_val; ++ ++ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); ++ if (ret_val) ++ return ret_val; ++ ++ if (data & E1000_M88E1112_STATUS_LINK) ++ port = E1000_MEDIA_PORT_OTHER; ++ ++ /* Determine if a swap needs to happen. */ ++ if (port && (hw->dev_spec._82575.media_port != port)) { ++ hw->dev_spec._82575.media_port = port; ++ hw->dev_spec._82575.media_changed = true; ++ } ++ ++ if (port == E1000_MEDIA_PORT_COPPER) { ++ /* reset page to 0 */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); ++ if (ret_val) ++ return ret_val; ++ e1000_check_for_link_82575(hw); ++ } else { ++ e1000_check_for_link_82575(hw); ++ /* reset page to 0 */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +-void igb_power_up_serdes_link_82575(struct e1000_hw *hw) ++static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) + { + u32 reg; + ++ DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && +- !igb_sgmii_active_82575(hw)) ++ !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ +- reg = rd32(E1000_PCS_CFG0); ++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; +- wr32(E1000_PCS_CFG0, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ +- reg = rd32(E1000_CTRL_EXT); ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; +- wr32(E1000_CTRL_EXT, reg); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ +- wrfl(); +- usleep_range(1000, 2000); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(1); + } + + /** +- * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex ++ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex +@@ -1321,28 +1318,26 @@ + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers 
provided. + **/ +-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, +- u16 *duplex) ++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, ++ u16 *speed, u16 *duplex) + { + struct e1000_mac_info *mac = &hw->mac; +- u32 pcs, status; ++ u32 pcs; ++ u32 status; + +- /* Set up defaults for the return values of this function */ +- mac->serdes_has_link = false; +- *speed = 0; +- *duplex = 0; ++ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + +- /* Read the PCS Status register for link state. For non-copper mode, ++ /* ++ * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ +- pcs = rd32(E1000_PCS_LSTAT); ++ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + +- /* The link up bit determines when link is up on autoneg. The sync ok +- * gets set once both sides sync up and agree upon link. Stable link +- * can be determined by checking for both link up and link sync ok ++ /* ++ * The link up bit determines when link is up on autoneg. + */ +- if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { ++ if (pcs & E1000_PCS_LSTS_LINK_OK) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ +@@ -1359,192 +1354,202 @@ + else + *duplex = HALF_DUPLEX; + +- /* Check if it is an I354 2.5Gb backplane connection. */ ++ /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { +- status = rd32(E1000_STATUS); ++ status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; +- hw_dbg("2500 Mbs, "); +- hw_dbg("Full Duplex\n"); ++ DEBUGOUT("2500 Mbs, "); ++ DEBUGOUT("Full Duplex\n"); + } + } + ++ } else { ++ mac->serdes_has_link = false; ++ *speed = 0; ++ *duplex = 0; + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_shutdown_serdes_link_82575 - Remove link during power down ++ * e1000_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * +- * In the case of fiber serdes, shut down optics and PCS on driver unload ++ * In the case of serdes shut down sfp and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) ++void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) + { + u32 reg; + +- if (hw->phy.media_type != e1000_media_type_internal_serdes && +- igb_sgmii_active_82575(hw)) ++ DEBUGFUNC("e1000_shutdown_serdes_link_82575"); ++ ++ if ((hw->phy.media_type != e1000_media_type_internal_serdes) && ++ !e1000_sgmii_active_82575(hw)) + return; + +- if (!igb_enable_mng_pass_thru(hw)) { ++ if (!igb_e1000_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ +- reg = rd32(E1000_PCS_CFG0); ++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; +- wr32(E1000_PCS_CFG0, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* shutdown the laser */ +- reg = rd32(E1000_CTRL_EXT); ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; +- wr32(E1000_CTRL_EXT, reg); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ +- wrfl(); +- usleep_range(1000, 2000); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(1); + } ++ ++ return; + } + + /** +- * igb_reset_hw_82575 - Reset hardware ++ * e1000_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * +- * This resets the hardware into a known state. This is a +- * function pointer entry point called by the api module. ++ * This resets the hardware into a known state. + **/ +-static s32 igb_reset_hw_82575(struct e1000_hw *hw) ++static s32 e1000_reset_hw_82575(struct e1000_hw *hw) + { + u32 ctrl; + s32 ret_val; + +- /* Prevent the PCI-E bus from sticking if there is no TLP connection ++ DEBUGFUNC("e1000_reset_hw_82575"); ++ ++ /* ++ * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ +- ret_val = igb_disable_pcie_master(hw); ++ ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) +- hw_dbg("PCI-E Master disable polling has failed.\n"); ++ DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ +- ret_val = igb_set_pcie_completion_timeout(hw); ++ ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) +- hw_dbg("PCI-E Set completion timeout has failed.\n"); ++ DEBUGOUT("PCI-E Set completion timeout has failed.\n"); + +- hw_dbg("Masking off all interrupts\n"); +- wr32(E1000_IMC, 0xffffffff); ++ DEBUGOUT("Masking off all interrupts\n"); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + +- wr32(E1000_RCTL, 0); +- wr32(E1000_TCTL, E1000_TCTL_PSP); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_RCTL, 0); ++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); ++ E1000_WRITE_FLUSH(hw); + +- usleep_range(10000, 20000); ++ msec_delay(10); + +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + +- hw_dbg("Issuing a global reset to MAC\n"); +- wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); ++ DEBUGOUT("Issuing a global reset to MAC\n"); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + +- ret_val = igb_get_auto_rd_done(hw); ++ ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { +- /* When auto config read does not complete, do not ++ /* ++ * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
+ */ +- hw_dbg("Auto Read Done did not complete\n"); ++ DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ +- if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) +- igb_reset_init_script_82575(hw); ++ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) ++ e1000_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ +- wr32(E1000_IMC, 0xffffffff); +- rd32(E1000_ICR); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); ++ E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ +- ret_val = igb_check_alt_mac_addr(hw); ++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); + + return ret_val; + } + + /** +- * igb_init_hw_82575 - Initialize hardware ++ * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +-static s32 igb_init_hw_82575(struct e1000_hw *hw) ++s32 e1000_init_hw_82575(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + +- if ((hw->mac.type >= e1000_i210) && +- !(igb_get_flash_presence_i210(hw))) { +- ret_val = igb_pll_workaround_i210(hw); +- if (ret_val) +- return ret_val; +- } ++ DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ +- ret_val = igb_id_led_init(hw); ++ ret_val = mac->ops.id_led_init(hw); + if (ret_val) { +- hw_dbg("Error initializing identification LED\n"); ++ DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ +- hw_dbg("Initializing the IEEE VLAN\n"); +- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) +- igb_clear_vfta_i350(hw); +- else +- igb_clear_vfta(hw); ++ DEBUGOUT("Initializing the IEEE VLAN\n"); ++ mac->ops.clear_vfta(hw); + + /* Setup the receive address */ +- igb_init_rx_addrs(hw, rar_count); ++ e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ +- hw_dbg("Zeroing the MTA\n"); ++ DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) +- array_wr32(E1000_MTA, i, 0); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ +- hw_dbg("Zeroing the UTA\n"); ++ DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) +- array_wr32(E1000_UTA, i, 0); ++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ +- ret_val = igb_setup_link(hw); ++ ret_val = mac->ops.setup_link(hw); + +- /* Clear all of the statistics registers (clear on read). It is ++ /* Set the default MTU size */ ++ hw->dev_spec._82575.mtu = 1500; ++ ++ /* ++ * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ +- igb_clear_hw_cntrs_82575(hw); ++ e1000_clear_hw_cntrs_82575(hw); ++ + return ret_val; + } + + /** +- * igb_setup_copper_link_82575 - Configure copper link settings ++ * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. 
+ **/ +-static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) ++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) + { + u32 ctrl; +- s32 ret_val; ++ s32 ret_val; + u32 phpm_reg; + +- ctrl = rd32(E1000_CTRL); ++ DEBUGFUNC("e1000_setup_copper_link_82575"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { +@@ -1552,25 +1557,25 @@ + case e1000_i350: + case e1000_i210: + case e1000_i211: +- phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); ++ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; +- wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); ++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + +- ret_val = igb_setup_serdes_link_82575(hw); ++ ret_val = e1000_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + +- if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { ++ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ +- msleep(300); ++ msec_delay(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { +- hw_dbg("Error resetting the PHY.\n"); ++ DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } +@@ -1580,20 +1585,22 @@ + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: +- ret_val = igb_copper_link_setup_m88_gen2(hw); ++ ret_val = e1000_copper_link_setup_m88_gen2(hw); + break; + default: +- ret_val = igb_copper_link_setup_m88(hw); ++ ret_val = e1000_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: +- ret_val = igb_copper_link_setup_igp(hw); ++ ret_val = e1000_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: +- ret_val = igb_copper_link_setup_82580(hw); ++ ret_val = igb_e1000_copper_link_setup_82577(hw); + break; + default: + ret_val = -E1000_ERR_PHY; +@@ -1603,13 +1610,13 @@ + if (ret_val) + goto out; + +- ret_val = igb_setup_copper_link(hw); ++ ret_val = e1000_setup_copper_link_generic(hw); + out: + return ret_val; + } + + /** +- * igb_setup_serdes_link_82575 - Setup link for serdes ++ * e1000_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is +@@ -1617,45 +1624,40 @@ + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +-static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) ++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) + { + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 data; + ++ DEBUGFUNC("e1000_setup_serdes_link_82575"); ++ + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && +- !igb_sgmii_active_82575(hw)) ++ !e1000_sgmii_active_82575(hw)) + return ret_val; + +- +- /* On the 82575, SerDes loopback mode persists until it is ++ /* ++ * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ +- wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); ++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + +- /* power on the sfp cage if present and turn on I2C */ +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ /* power on the sfp cage if present */ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; +- ctrl_ext |= E1000_CTRL_I2C_ENA; +- wr32(E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +- ctrl_reg = rd32(E1000_CTRL); ++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + +- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { +- /* set both sw defined pins */ ++ /* set both sw defined pins on 82575/82576*/ ++ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + +- /* Set switch control to serdes energy detect */ +- reg = rd32(E1000_CONNSW); +- reg |= E1000_CONNSW_ENRGSRC; +- wr32(E1000_CONNSW, reg); +- } +- +- reg = rd32(E1000_PCS_LCTL); ++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; +@@ -1670,12 +1672,13 @@ + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; ++ /* fall through to default case */ + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { +- hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); ++ DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + +@@ -1683,27 +1686,29 @@ + pcs_autoneg = false; + } + +- /* non-SGMII modes only supports a speed of 1000/Full for the ++ /* ++ * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | +- E1000_CTRL_FD | E1000_CTRL_FRCDPX; ++ E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + +- wr32(E1000_CTRL, ctrl_reg); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + +- /* New SerDes mode allows for forcing speed or autonegotiating speed ++ /* ++ * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | +- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); ++ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ +@@ -1714,8 +1719,9 @@ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ +- anadv_reg = rd32(E1000_PCS_ANADV); ++ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); ++ + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: +@@ -1728,251 +1734,480 @@ + default: + break; + } +- wr32(E1000_PCS_ANADV, anadv_reg); + +- hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); ++ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); ++ ++ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ +- reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ ++ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + +- hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); ++ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + +- wr32(E1000_PCS_LCTL, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + +- if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) +- igb_force_mac_fc(hw); ++ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) ++ e1000_force_mac_fc_generic(hw); + + return ret_val; + } + + /** +- * igb_sgmii_active_82575 - Return sgmii state ++ * e1000_get_media_type_82575 - derives current media type. + * @hw: pointer to the HW structure + * +- * 82575 silicon has a serialized gigabit media independent interface (sgmii) +- * which can be enabled for use in the embedded applications. Simply +- * return the current state of the sgmii interface. ++ * The media type is chosen reflecting few settings. ++ * The following are taken into account: ++ * - link mode set in the current port Init Control Word #3 ++ * - current link mode settings in CSR register ++ * - MDIO vs. I2C PHY control interface chosen ++ * - SFP module media type + **/ +-static bool igb_sgmii_active_82575(struct e1000_hw *hw) ++static s32 e1000_get_media_type_82575(struct e1000_hw *hw) + { + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- return dev_spec->sgmii_active; +-} +- +-/** +- * igb_reset_init_script_82575 - Inits HW defaults after reset +- * @hw: pointer to the HW structure +- * +- * Inits recommended HW defaults after a reset when there is no EEPROM +- * detected. This is only for the 82575. 
+- **/ +-static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +-{ +- if (hw->mac.type == e1000_82575) { +- hw_dbg("Running reset init script for 82575\n"); +- /* SerDes configuration via SERDESCTRL */ +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); +- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); ++ s32 ret_val = E1000_SUCCESS; ++ u32 ctrl_ext = 0; ++ u32 link_mode = 0; + +- /* CCM configuration via CCMCTL register */ +- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); +- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); ++ /* Set internal phy as default */ ++ dev_spec->sgmii_active = false; ++ dev_spec->module_plugged = false; + +- /* PCIe lanes configuration */ +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); +- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); ++ /* Get CSR setting */ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + +- /* PCIe PLL Configuration */ +- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); +- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); +- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); +- } ++ /* extract link mode setting */ ++ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + +- return 0; +-} ++ switch (link_mode) { ++ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: ++ hw->phy.media_type = e1000_media_type_internal_serdes; ++ break; ++ case E1000_CTRL_EXT_LINK_MODE_GMII: ++ hw->phy.media_type = e1000_media_type_copper; ++ break; ++ case E1000_CTRL_EXT_LINK_MODE_SGMII: ++ /* Get phy control interface type set (MDIO vs. I2C)*/ ++ if (e1000_sgmii_uses_mdio_82575(hw)) { ++ hw->phy.media_type = e1000_media_type_copper; ++ dev_spec->sgmii_active = true; ++ break; ++ } ++ /* fall through for I2C based SGMII */ ++ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: ++ /* read media type from SFP EEPROM */ ++ ret_val = e1000_set_sfp_media_type_82575(hw); ++ if ((ret_val != E1000_SUCCESS) || ++ (hw->phy.media_type == e1000_media_type_unknown)) { ++ /* ++ * If media type was not identified then return media ++ * type defined by the CTRL_EXT settings. ++ */ ++ hw->phy.media_type = e1000_media_type_internal_serdes; + +-/** +- * igb_read_mac_addr_82575 - Read device MAC address +- * @hw: pointer to the HW structure +- **/ +-static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +-{ +- s32 ret_val = 0; ++ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { ++ hw->phy.media_type = e1000_media_type_copper; ++ dev_spec->sgmii_active = true; ++ } + +- /* If there's an alternate MAC address place it in RAR0 +- * so that it will override the Si installed default perm +- * address. +- */ +- ret_val = igb_check_alt_mac_addr(hw); +- if (ret_val) +- goto out; ++ break; ++ } + +- ret_val = igb_read_mac_addr(hw); ++ /* do not change link mode for 100BaseFX */ ++ if (dev_spec->eth_flags.e100_base_fx) ++ break; ++ ++ /* change current link mode setting */ ++ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; ++ ++ if (hw->phy.media_type == e1000_media_type_copper) ++ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; ++ else ++ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ++ ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ ++ break; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_set_sfp_media_type_82575 - derives SFP module media type. 
++ * @hw: pointer to the HW structure ++ * ++ * The media type is chosen based on SFP module. ++ * compatibility flags retrieved from SFP ID EEPROM. ++ **/ ++static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_ERR_CONFIG; ++ u32 ctrl_ext = 0; ++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; ++ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; ++ u8 tranceiver_type = 0; ++ s32 timeout = 3; ++ ++ /* Turn I2C interface ON and power on sfp cage */ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); ++ ++ E1000_WRITE_FLUSH(hw); ++ ++ /* Read SFP module data */ ++ while (timeout) { ++ ret_val = e1000_read_sfp_data_byte(hw, ++ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), ++ &tranceiver_type); ++ if (ret_val == E1000_SUCCESS) ++ break; ++ msec_delay(100); ++ timeout--; ++ } ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ ++ ret_val = e1000_read_sfp_data_byte(hw, ++ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), ++ (u8 *)eth_flags); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ ++ /* Check if there is some SFP module plugged and powered */ ++ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || ++ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { ++ dev_spec->module_plugged = true; ++ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { ++ hw->phy.media_type = e1000_media_type_internal_serdes; ++ } else if (eth_flags->e100_base_fx) { ++ dev_spec->sgmii_active = true; ++ hw->phy.media_type = e1000_media_type_internal_serdes; ++ } else if (eth_flags->e1000_base_t) { ++ dev_spec->sgmii_active = true; ++ hw->phy.media_type = e1000_media_type_copper; ++ } else { ++ hw->phy.media_type = e1000_media_type_unknown; ++ DEBUGOUT("PHY module has not been recognized\n"); ++ goto out; ++ } ++ } else { ++ hw->phy.media_type = e1000_media_type_unknown; ++ } ++ ret_val = E1000_SUCCESS; ++out: ++ /* Restore I2C interface setting */ ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ return ret_val; ++} ++ ++/** ++ * e1000_valid_led_default_82575 - Verify a valid default LED config ++ * @hw: pointer to the HW structure ++ * @data: pointer to the NVM (EEPROM) ++ * ++ * Read the EEPROM for the current default LED configuration. If the ++ * LED configuration is not valid, set to a valid LED configuration. ++ **/ ++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_valid_led_default_82575"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ goto out; ++ } ++ ++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { ++ switch (hw->phy.media_type) { ++ case e1000_media_type_internal_serdes: ++ *data = ID_LED_DEFAULT_82575_SERDES; ++ break; ++ case e1000_media_type_copper: ++ default: ++ *data = ID_LED_DEFAULT; ++ break; ++ } ++ } ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_sgmii_active_82575 - Return sgmii state ++ * @hw: pointer to the HW structure ++ * ++ * 82575 silicon has a serialized gigabit media independent interface (sgmii) ++ * which can be enabled for use in the embedded applications. Simply ++ * return the current state of the sgmii interface. 
++ **/ ++static bool e1000_sgmii_active_82575(struct e1000_hw *hw) ++{ ++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; ++ return dev_spec->sgmii_active; ++} ++ ++/** ++ * e1000_reset_init_script_82575 - Inits HW defaults after reset ++ * @hw: pointer to the HW structure ++ * ++ * Inits recommended HW defaults after a reset when there is no EEPROM ++ * detected. This is only for the 82575. ++ **/ ++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) ++{ ++ DEBUGFUNC("e1000_reset_init_script_82575"); ++ ++ if (hw->mac.type == e1000_82575) { ++ DEBUGOUT("Running reset init script for 82575\n"); ++ /* SerDes configuration via SERDESCTRL */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); ++ ++ /* CCM configuration via CCMCTL register */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); ++ ++ /* PCIe lanes configuration */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); ++ ++ /* PCIe PLL Configuration */ ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); ++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_mac_addr_82575 - Read device MAC address ++ * @hw: pointer to the HW structure ++ **/ ++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_read_mac_addr_82575"); ++ ++ /* ++ * If there's an alternate MAC address place it in RAR0 ++ * so that it will override the Si installed default perm ++ * address. ++ */ ++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = igb_e1000_read_mac_addr_generic(hw); + + out: + return ret_val; + } + + /** +- * igb_power_down_phy_copper_82575 - Remove link during PHY power down ++ * e1000_config_collision_dist_82575 - Configure collision distance ++ * @hw: pointer to the HW structure ++ * ++ * Configures the collision distance to the default value and is used ++ * during link setup. ++ **/ ++static void e1000_config_collision_dist_82575(struct e1000_hw *hw) ++{ ++ u32 tctl_ext; ++ ++ DEBUGFUNC("e1000_config_collision_dist_82575"); ++ ++ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); ++ ++ tctl_ext &= ~E1000_TCTL_EXT_COLD; ++ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; ++ ++ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); ++ E1000_WRITE_FLUSH(hw); ++} ++ ++/** ++ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. 
+ **/ +-void igb_power_down_phy_copper_82575(struct e1000_hw *hw) ++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) + { ++ struct e1000_phy_info *phy = &hw->phy; ++ ++ if (!(phy->ops.check_reset_block)) ++ return; ++ + /* If the management interface is not enabled, then power down */ +- if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) +- igb_power_down_phy_copper(hw); ++ if (!(igb_e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) ++ igb_e1000_power_down_phy_copper(hw); ++ ++ return; + } + + /** +- * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters ++ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +-static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) ++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) + { +- igb_clear_hw_cntrs_base(hw); ++ DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + +- rd32(E1000_PRC64); +- rd32(E1000_PRC127); +- rd32(E1000_PRC255); +- rd32(E1000_PRC511); +- rd32(E1000_PRC1023); +- rd32(E1000_PRC1522); +- rd32(E1000_PTC64); +- rd32(E1000_PTC127); +- rd32(E1000_PTC255); +- rd32(E1000_PTC511); +- rd32(E1000_PTC1023); +- rd32(E1000_PTC1522); +- +- rd32(E1000_ALGNERRC); +- rd32(E1000_RXERRC); +- rd32(E1000_TNCRS); +- rd32(E1000_CEXTERR); +- rd32(E1000_TSCTC); +- rd32(E1000_TSCTFC); +- +- rd32(E1000_MGTPRC); +- rd32(E1000_MGTPDC); +- rd32(E1000_MGTPTC); +- +- rd32(E1000_IAC); +- rd32(E1000_ICRXOC); +- +- rd32(E1000_ICRXPTC); +- rd32(E1000_ICRXATC); +- rd32(E1000_ICTXPTC); +- rd32(E1000_ICTXATC); +- rd32(E1000_ICTXQEC); +- rd32(E1000_ICTXQMTC); +- rd32(E1000_ICRXDMTC); +- +- rd32(E1000_CBTMPC); +- rd32(E1000_HTDPMC); +- rd32(E1000_CBRMPC); +- rd32(E1000_RPTHC); +- rd32(E1000_HGPTC); +- rd32(E1000_HTCBDPC); +- rd32(E1000_HGORCL); +- rd32(E1000_HGORCH); +- rd32(E1000_HGOTCL); +- rd32(E1000_HGOTCH); +- rd32(E1000_LENERRS); ++ e1000_clear_hw_cntrs_base_generic(hw); ++ ++ E1000_READ_REG(hw, E1000_PRC64); ++ E1000_READ_REG(hw, E1000_PRC127); ++ E1000_READ_REG(hw, E1000_PRC255); ++ E1000_READ_REG(hw, E1000_PRC511); ++ E1000_READ_REG(hw, E1000_PRC1023); ++ E1000_READ_REG(hw, E1000_PRC1522); ++ E1000_READ_REG(hw, E1000_PTC64); ++ E1000_READ_REG(hw, E1000_PTC127); ++ E1000_READ_REG(hw, E1000_PTC255); ++ E1000_READ_REG(hw, E1000_PTC511); ++ E1000_READ_REG(hw, E1000_PTC1023); ++ E1000_READ_REG(hw, E1000_PTC1522); ++ ++ E1000_READ_REG(hw, E1000_ALGNERRC); ++ E1000_READ_REG(hw, E1000_RXERRC); ++ E1000_READ_REG(hw, E1000_TNCRS); ++ E1000_READ_REG(hw, E1000_CEXTERR); ++ E1000_READ_REG(hw, E1000_TSCTC); ++ E1000_READ_REG(hw, E1000_TSCTFC); ++ ++ E1000_READ_REG(hw, E1000_MGTPRC); ++ E1000_READ_REG(hw, E1000_MGTPDC); ++ E1000_READ_REG(hw, E1000_MGTPTC); ++ ++ E1000_READ_REG(hw, E1000_IAC); ++ E1000_READ_REG(hw, E1000_ICRXOC); ++ ++ E1000_READ_REG(hw, E1000_ICRXPTC); ++ E1000_READ_REG(hw, E1000_ICRXATC); ++ E1000_READ_REG(hw, E1000_ICTXPTC); ++ E1000_READ_REG(hw, E1000_ICTXATC); ++ E1000_READ_REG(hw, E1000_ICTXQEC); ++ E1000_READ_REG(hw, E1000_ICTXQMTC); ++ E1000_READ_REG(hw, E1000_ICRXDMTC); ++ ++ E1000_READ_REG(hw, E1000_CBTMPC); ++ E1000_READ_REG(hw, E1000_HTDPMC); ++ E1000_READ_REG(hw, E1000_CBRMPC); ++ E1000_READ_REG(hw, E1000_RPTHC); ++ E1000_READ_REG(hw, E1000_HGPTC); ++ E1000_READ_REG(hw, E1000_HTCBDPC); ++ E1000_READ_REG(hw, E1000_HGORCL); ++ E1000_READ_REG(hw, E1000_HGORCH); ++ E1000_READ_REG(hw, E1000_HGOTCL); ++ E1000_READ_REG(hw, E1000_HGOTCH); ++ E1000_READ_REG(hw, 
E1000_LENERRS); + + /* This register should not be read in copper configurations */ +- if (hw->phy.media_type == e1000_media_type_internal_serdes || +- igb_sgmii_active_82575(hw)) +- rd32(E1000_SCVPC); ++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) || ++ e1000_sgmii_active_82575(hw)) ++ E1000_READ_REG(hw, E1000_SCVPC); + } + + /** +- * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable ++ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * +- * After rx enable if managability is enabled then there is likely some ++ * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +-void igb_rx_fifo_flush_82575(struct e1000_hw *hw) ++void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) + { + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + ++ DEBUGFUNC("e1000_rx_fifo_flush_82575"); ++ ++ /* disable IPv6 options as per hardware errata */ ++ rfctl = E1000_READ_REG(hw, E1000_RFCTL); ++ rfctl |= E1000_RFCTL_IPV6_EX_DIS; ++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); ++ + if (hw->mac.type != e1000_82575 || +- !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) ++ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + +- /* Disable all RX queues */ ++ /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { +- rxdctl[i] = rd32(E1000_RXDCTL(i)); +- wr32(E1000_RXDCTL(i), +- rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); ++ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(i), ++ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { +- usleep_range(1000, 2000); ++ msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) +- rx_enabled |= rd32(E1000_RXDCTL(i)); ++ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) +- hw_dbg("Queue disable timed out after 10ms\n"); ++ DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ +- rfctl = rd32(E1000_RFCTL); +- wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); ++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + +- rlpml = rd32(E1000_RLPML); +- wr32(E1000_RLPML, 0); ++ rlpml = E1000_READ_REG(hw, E1000_RLPML); ++ E1000_WRITE_REG(hw, E1000_RLPML, 0); + +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + +- wr32(E1000_RCTL, temp_rctl); +- wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); +- wrfl(); +- usleep_range(2000, 3000); ++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(2); + +- /* Enable RX queues that were previously enabled and restore our ++ /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) +- wr32(E1000_RXDCTL(i), rxdctl[i]); +- wr32(E1000_RCTL, rctl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ E1000_WRITE_FLUSH(hw); + +- wr32(E1000_RLPML, rlpml); +- wr32(E1000_RFCTL, rfctl); ++ E1000_WRITE_REG(hw, E1000_RLPML, rlpml); ++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ +- rd32(E1000_ROC); +- rd32(E1000_RNBC); +- rd32(E1000_MPC); ++ E1000_READ_REG(hw, E1000_ROC); ++ E1000_READ_REG(hw, E1000_RNBC); ++ E1000_READ_REG(hw, E1000_MPC); + } + + /** +- * igb_set_pcie_completion_timeout - set pci-e completion timeout ++ * e1000_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, +@@ -1981,17 +2216,18 @@ + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
+ **/ +-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) ++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) + { +- u32 gcr = rd32(E1000_GCR); +- s32 ret_val = 0; ++ u32 gcr = E1000_READ_REG(hw, E1000_GCR); ++ s32 ret_val = E1000_SUCCESS; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + +- /* if capabilities version is type 1 we can write the ++ /* ++ * if capababilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { +@@ -1999,36 +2235,37 @@ + goto out; + } + +- /* for version 2 capabilities we need to write the config space ++ /* ++ * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ +- ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, +- &pcie_devctl2); ++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, ++ &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + +- ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, +- &pcie_devctl2); ++ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, ++ &pcie_devctl2); + out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + +- wr32(E1000_GCR, gcr); ++ E1000_WRITE_REG(hw, E1000_GCR, gcr); + return ret_val; + } + + /** +- * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing ++ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) ++void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) + { + u32 reg_val, reg_offset; + +@@ -2044,7 +2281,7 @@ + return; + } + +- reg_val = rd32(reg_offset); ++ reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); +@@ -2056,66 +2293,67 @@ + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } +- wr32(reg_offset, reg_val); ++ E1000_WRITE_REG(hw, reg_offset, reg_val); + } + + /** +- * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback ++ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
+ **/ +-void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) ++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) + { + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: +- dtxswc = rd32(E1000_DTXSWC); ++ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; +- wr32(E1000_DTXSWC, dtxswc); ++ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; +- case e1000_i354: + case e1000_i350: +- dtxswc = rd32(E1000_TXSWC); ++ case e1000_i354: ++ dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; +- wr32(E1000_TXSWC, dtxswc); ++ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + ++ + } + + /** +- * igb_vmdq_set_replication_pf - enable or disable vmdq replication ++ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +-void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) ++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) + { +- u32 vt_ctl = rd32(E1000_VT_CTL); ++ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + +- wr32(E1000_VT_CTL, vt_ctl); ++ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); + } + + /** +- * igb_read_phy_reg_82580 - Read 82580 MDI control register ++ * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data +@@ -2123,15 +2361,17 @@ + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +-static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) ++static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_read_phy_reg_82580"); ++ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + +- ret_val = igb_read_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -2140,23 +2380,24 @@ + } + + /** +- * igb_write_phy_reg_82580 - Write 82580 MDI control register ++ * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
+ **/ +-static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) ++static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_write_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + +- ret_val = igb_write_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +@@ -2165,123 +2406,133 @@ + } + + /** +- * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits ++ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) ++static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + ++ DEBUGFUNC("e1000_reset_mdicnfg_82580"); ++ + if (hw->mac.type != e1000_82580) + goto out; +- if (!igb_sgmii_active_82575(hw)) ++ if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + goto out; + } + +- mdicnfg = rd32(E1000_MDICNFG); ++ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; +- wr32(E1000_MDICNFG, mdicnfg); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + out: + return ret_val; + } + + /** +- * igb_reset_hw_82580 - Reset hardware ++ * e1000_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +-static s32 igb_reset_hw_82580(struct e1000_hw *hw) ++static s32 e1000_reset_hw_82580(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + ++ DEBUGFUNC("e1000_reset_hw_82580"); ++ + hw->dev_spec._82575.global_device_reset = false; + +- /* due to hw errata, global device reset doesn't always +- * work on 82580 +- */ ++ /* 82580 does not reliably do global_device_reset due to hw errata */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + +- /* Prevent the PCI-E bus from sticking if there is no TLP connection ++ /* ++ * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
+ */ +- ret_val = igb_disable_pcie_master(hw); ++ ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) +- hw_dbg("PCI-E Master disable polling has failed.\n"); ++ DEBUGOUT("PCI-E Master disable polling has failed.\n"); + +- hw_dbg("Masking off all interrupts\n"); +- wr32(E1000_IMC, 0xffffffff); +- wr32(E1000_RCTL, 0); +- wr32(E1000_TCTL, E1000_TCTL_PSP); +- wrfl(); ++ DEBUGOUT("Masking off all interrupts\n"); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); ++ E1000_WRITE_REG(hw, E1000_RCTL, 0); ++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); ++ E1000_WRITE_FLUSH(hw); + +- usleep_range(10000, 11000); ++ msec_delay(10); + + /* Determine whether or not a global dev reset is requested */ +- if (global_device_reset && +- hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) ++ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, ++ swmbsw_mask)) + global_device_reset = false; + +- if (global_device_reset && +- !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) ++ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & ++ E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + +- wr32(E1000_CTRL, ctrl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +- /* Add delay to insure DEV_RST has time to complete */ +- if (global_device_reset) +- usleep_range(5000, 6000); ++ switch (hw->device_id) { ++ case E1000_DEV_ID_DH89XXCC_SGMII: ++ break; ++ default: ++ E1000_WRITE_FLUSH(hw); ++ break; ++ } ++ ++ /* Add delay to insure DEV_RST or RST has time to complete */ ++ msec_delay(5); + +- ret_val = igb_get_auto_rd_done(hw); ++ ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { +- /* When auto config read does not complete, do not ++ /* ++ * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ +- hw_dbg("Auto Read Done did not complete\n"); ++ DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ +- wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); ++ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ +- wr32(E1000_IMC, 0xffffffff); +- rd32(E1000_ICR); ++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); ++ E1000_READ_REG(hw, E1000_ICR); + +- ret_val = igb_reset_mdicnfg_82580(hw); ++ ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) +- hw_dbg("Could not reset MDICNFG based on EEPROM\n"); ++ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ +- ret_val = igb_check_alt_mac_addr(hw); ++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) +@@ -2291,7 +2542,7 @@ + } + + /** +- * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size ++ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. 
+@@ -2300,398 +2551,1222 @@ + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +-u16 igb_rxpbs_adjust_82580(u32 data) ++u16 e1000_rxpbs_adjust_82580(u32 data) + { + u16 ret_val = 0; + +- if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) ++ if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + +- return ret_val; ++ return ret_val; ++} ++ ++/** ++ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM ++ * checksum ++ * @hw: pointer to the HW structure ++ * @offset: offset in words of the checksum protected region ++ * ++ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM ++ * and then verifies that the sum of the EEPROM is equal to 0xBABA. ++ **/ ++s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u16 checksum = 0; ++ u16 i, nvm_data; ++ ++ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); ++ ++ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { ++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ goto out; ++ } ++ checksum += nvm_data; ++ } ++ ++ if (checksum != (u16) NVM_SUM) { ++ DEBUGOUT("NVM Checksum Invalid\n"); ++ ret_val = -E1000_ERR_NVM; ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_update_nvm_checksum_with_offset - Update EEPROM ++ * checksum ++ * @hw: pointer to the HW structure ++ * @offset: offset in words of the checksum protected region ++ * ++ * Updates the EEPROM checksum by reading/adding each word of the EEPROM ++ * up to the checksum. Then calculates the EEPROM checksum and writes the ++ * value to the EEPROM. ++ **/ ++s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) ++{ ++ s32 ret_val; ++ u16 checksum = 0; ++ u16 i, nvm_data; ++ ++ DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); ++ ++ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { ++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error while updating checksum.\n"); ++ goto out; ++ } ++ checksum += nvm_data; ++ } ++ checksum = (u16) NVM_SUM - checksum; ++ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, ++ &checksum); ++ if (ret_val) ++ DEBUGOUT("NVM Write Error while updating checksum.\n"); ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Calculates the EEPROM section checksum by reading/adding each word of ++ * the EEPROM and then verifies that the sum of the EEPROM is ++ * equal to 0xBABA. ++ **/ ++static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 eeprom_regions_count = 1; ++ u16 j, nvm_data; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_validate_nvm_checksum_82580"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ goto out; ++ } ++ ++ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { ++ /* if chekcsums compatibility bit is set validate checksums ++ * for all 4 ports. 
*/ ++ eeprom_regions_count = 4; ++ } ++ ++ for (j = 0; j < eeprom_regions_count; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_validate_nvm_checksum_with_offset(hw, ++ nvm_offset); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Updates the EEPROM section checksums for all 4 ports by reading/adding ++ * each word of the EEPROM up to the checksum. Then calculates the EEPROM ++ * checksum and writes the value to the EEPROM. ++ **/ ++static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 j, nvm_data; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_update_nvm_checksum_82580"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); ++ goto out; ++ } ++ ++ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { ++ /* set compatibility bit to validate checksums appropriately */ ++ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; ++ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, ++ &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); ++ goto out; ++ } ++ } ++ ++ for (j = 0; j < 4; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); ++ if (ret_val) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Calculates the EEPROM section checksum by reading/adding each word of ++ * the EEPROM and then verifies that the sum of the EEPROM is ++ * equal to 0xBABA. ++ **/ ++static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u16 j; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_validate_nvm_checksum_i350"); ++ ++ for (j = 0; j < 4; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_validate_nvm_checksum_with_offset(hw, ++ nvm_offset); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum ++ * @hw: pointer to the HW structure ++ * ++ * Updates the EEPROM section checksums for all 4 ports by reading/adding ++ * each word of the EEPROM up to the checksum. Then calculates the EEPROM ++ * checksum and writes the value to the EEPROM. 
++ **/ ++static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u16 j; ++ u16 nvm_offset; ++ ++ DEBUGFUNC("e1000_update_nvm_checksum_i350"); ++ ++ for (j = 0; j < 4; j++) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ++ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); ++ if (ret_val != E1000_SUCCESS) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * __e1000_access_emi_reg - Read/write EMI register ++ * @hw: pointer to the HW structure ++ * @addr: EMI address to program ++ * @data: pointer to value to read/write from/to the EMI address ++ * @read: boolean flag to indicate read or write ++ **/ ++static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, ++ u16 *data, bool read) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("__e1000_access_emi_reg"); ++ ++ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); ++ if (ret_val) ++ return ret_val; ++ ++ if (read) ++ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); ++ else ++ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_read_emi_reg - Read Extended Management Interface register ++ * @hw: pointer to the HW structure ++ * @addr: EMI address to program ++ * @data: value to be read from the EMI address ++ **/ ++s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) ++{ ++ DEBUGFUNC("e1000_read_emi_reg"); ++ ++ return __e1000_access_emi_reg(hw, addr, data, true); ++} ++ ++/** ++ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY ++ * @hw: pointer to the HW structure ++ * ++ * Initialize Marvell 1512 to work correctly with Avoton. ++ **/ ++s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_initialize_M88E1512_phy"); ++ ++ /* Check if this is correct PHY. */ ++ if (phy->id != M88E1512_E_PHY_ID) ++ goto out; ++ ++ /* Switch to PHY page 0xFF. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0xFB. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0x12. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); ++ if (ret_val) ++ goto out; ++ ++ /* Change mode to SGMII-to-Copper */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); ++ if (ret_val) ++ goto out; ++ ++ /* Return the PHY to page 0. 
*/ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.commit(hw); ++ if (ret_val) { ++ DEBUGOUT("Error committing the PHY changes\n"); ++ return ret_val; ++ } ++ ++ msec_delay(1000); ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY ++ * @hw: pointer to the HW structure ++ * ++ * Initialize Marvell 1543 to work correctly with Avoton. ++ **/ ++s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_initialize_M88E1543_phy"); ++ ++ /* Check if this is correct PHY. */ ++ if (phy->id != M88E1543_E_PHY_ID) ++ goto out; ++ ++ /* Switch to PHY page 0xFF. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0xFB. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 0x12. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); ++ if (ret_val) ++ goto out; ++ ++ /* Change mode to SGMII-to-Copper */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); ++ if (ret_val) ++ goto out; ++ ++ /* Switch to PHY page 1. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); ++ if (ret_val) ++ goto out; ++ ++ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); ++ if (ret_val) ++ goto out; ++ ++ /* Return the PHY to page 0. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.commit(hw); ++ if (ret_val) { ++ DEBUGOUT("Error committing the PHY changes\n"); ++ return ret_val; ++ } ++ ++ msec_delay(1000); ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_set_eee_i350 - Enable/disable EEE support ++ * @hw: pointer to the HW structure ++ * @adv1g: boolean flag enabling 1G EEE advertisement ++ * @adv100m: boolean flag enabling 100M EEE advertisement ++ * ++ * Enable/disable EEE based on setting in dev_spec structure. 
++ * ++ **/ ++s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) ++{ ++ u32 ipcnfg, eeer; ++ ++ DEBUGFUNC("e1000_set_eee_i350"); ++ ++ if ((hw->mac.type < e1000_i350) || ++ (hw->phy.media_type != e1000_media_type_copper)) ++ goto out; ++ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); ++ eeer = E1000_READ_REG(hw, E1000_EEER); ++ ++ /* enable or disable per user setting */ ++ if (!(hw->dev_spec._82575.eee_disable)) { ++ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); ++ ++ if (adv100M) ++ ipcnfg |= E1000_IPCNFG_EEE_100M_AN; ++ else ++ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; ++ ++ if (adv1G) ++ ipcnfg |= E1000_IPCNFG_EEE_1G_AN; ++ else ++ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; ++ ++ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | ++ E1000_EEER_LPI_FC); ++ ++ /* This bit should not be set in normal operation. */ ++ if (eee_su & E1000_EEE_SU_LPI_CLK_STP) ++ DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); ++ } else { ++ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); ++ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | ++ E1000_EEER_LPI_FC); ++ } ++ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); ++ E1000_WRITE_REG(hw, E1000_EEER, eeer); ++ E1000_READ_REG(hw, E1000_IPCNFG); ++ E1000_READ_REG(hw, E1000_EEER); ++out: ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_eee_i354 - Enable/disable EEE support ++ * @hw: pointer to the HW structure ++ * @adv1g: boolean flag enabling 1G EEE advertisement ++ * @adv100m: boolean flag enabling 100M EEE advertisement ++ * ++ * Enable/disable EEE legacy mode based on setting in dev_spec structure. ++ * ++ **/ ++s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ u16 phy_data; ++ ++ DEBUGFUNC("e1000_set_eee_i354"); ++ ++ if ((hw->phy.media_type != e1000_media_type_copper) || ++ ((phy->id != M88E1543_E_PHY_ID) && ++ (phy->id != M88E1512_E_PHY_ID))) ++ goto out; ++ ++ if (!hw->dev_spec._82575.eee_disable) { ++ /* Switch to PHY page 18. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); ++ if (ret_val) ++ goto out; ++ ++ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, ++ phy_data); ++ if (ret_val) ++ goto out; ++ ++ /* Return the PHY to page 0. */ ++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); ++ if (ret_val) ++ goto out; ++ ++ /* Turn on EEE advertisement. */ ++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ if (adv100M) ++ phy_data |= E1000_EEE_ADV_100_SUPPORTED; ++ else ++ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; ++ ++ if (adv1G) ++ phy_data |= E1000_EEE_ADV_1000_SUPPORTED; ++ else ++ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; ++ ++ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ phy_data); ++ } else { ++ /* Turn off EEE advertisement. 
*/ ++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | ++ E1000_EEE_ADV_1000_SUPPORTED); ++ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, ++ E1000_EEE_ADV_DEV_I354, ++ phy_data); ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_get_eee_status_i354 - Get EEE status ++ * @hw: pointer to the HW structure ++ * @status: EEE status ++ * ++ * Get EEE status by guessing based on whether Tx or Rx LPI indications have ++ * been received. ++ **/ ++s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val = E1000_SUCCESS; ++ u16 phy_data; ++ ++ DEBUGFUNC("e1000_get_eee_status_i354"); ++ ++ /* Check if EEE is supported on this device. */ ++ if ((hw->phy.media_type != e1000_media_type_copper) || ++ ((phy->id != M88E1543_E_PHY_ID) && ++ (phy->id != M88E1512_E_PHY_ID))) ++ goto out; ++ ++ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, ++ E1000_PCS_STATUS_DEV_I354, ++ &phy_data); ++ if (ret_val) ++ goto out; ++ ++ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | ++ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; ++ ++out: ++ return ret_val; ++} ++ ++/* Due to a hw errata, if the host tries to configure the VFTA register ++ * while performing queries from the BMC or DMA, then the VFTA in some ++ * cases won't be written. ++ */ ++ ++/** ++ * e1000_clear_vfta_i350 - Clear VLAN filter table ++ * @hw: pointer to the HW structure ++ * ++ * Clears the register array which contains the VLAN filter table by ++ * setting all the values to 0. ++ **/ ++void e1000_clear_vfta_i350(struct e1000_hw *hw) ++{ ++ u32 offset; ++ int i; ++ ++ DEBUGFUNC("e1000_clear_vfta_350"); ++ ++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { ++ for (i = 0; i < 10; i++) ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); ++ ++ E1000_WRITE_FLUSH(hw); ++ } ++} ++ ++/** ++ * e1000_write_vfta_i350 - Write value to VLAN filter table ++ * @hw: pointer to the HW structure ++ * @offset: register offset in VLAN filter table ++ * @value: register value written to VLAN filter table ++ * ++ * Writes value at the given offset in the register array which stores ++ * the VLAN filter table. 
++ **/ ++void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) ++{ ++ int i; ++ ++ DEBUGFUNC("e1000_write_vfta_350"); ++ ++ for (i = 0; i < 10; i++) ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); ++ ++ E1000_WRITE_FLUSH(hw); ++} ++ ++/** ++ * e1000_set_i2c_bb - Enable I2C bit-bang ++ * @hw: pointer to the HW structure ++ * ++ * Enable I2C bit-bang interface ++ * ++ **/ ++s32 e1000_set_i2c_bb(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ u32 ctrl_ext, i2cparams; ++ ++ DEBUGFUNC("e1000_set_i2c_bb"); ++ ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ctrl_ext |= E1000_CTRL_I2C_ENA; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_FLUSH(hw); ++ ++ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ i2cparams |= E1000_I2CBB_EN; ++ i2cparams |= E1000_I2C_DATA_OE_N; ++ i2cparams |= E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); ++ E1000_WRITE_FLUSH(hw); ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @dev_addr: device address ++ * @data: value read ++ * ++ * Performs byte read operation over I2C interface at ++ * a specified device address. ++ **/ ++s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) ++{ ++ s32 status = E1000_SUCCESS; ++ u32 max_retry = 10; ++ u32 retry = 1; ++ u16 swfw_mask = 0; ++ ++ bool nack = true; ++ ++ DEBUGFUNC("e1000_read_i2c_byte_generic"); ++ ++ swfw_mask = E1000_SWFW_PHY0_SM; ++ ++ do { ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) ++ != E1000_SUCCESS) { ++ status = E1000_ERR_SWFW_SYNC; ++ goto read_byte_out; ++ } ++ ++ e1000_i2c_start(hw); ++ ++ /* Device Address and write indication */ ++ status = e1000_clock_out_i2c_byte(hw, dev_addr); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_byte(hw, byte_offset); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ e1000_i2c_start(hw); ++ ++ /* Device Address and read indication */ ++ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_in_i2c_byte(hw, data); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_bit(hw, nack); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ e1000_i2c_stop(hw); ++ break; ++ ++fail: ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ msec_delay(100); ++ e1000_i2c_bus_clear(hw); ++ retry++; ++ if (retry < max_retry) ++ DEBUGOUT("I2C byte read error - Retrying.\n"); ++ else ++ DEBUGOUT("I2C byte read error.\n"); ++ ++ } while (retry < max_retry); ++ ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ ++read_byte_out: ++ ++ return status; ++} ++ ++/** ++ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: device address ++ * @data: value to write ++ * ++ * Performs byte write operation over I2C interface at ++ * a specified device address. 
++ **/ ++s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) ++{ ++ s32 status = E1000_SUCCESS; ++ u32 max_retry = 1; ++ u32 retry = 0; ++ u16 swfw_mask = 0; ++ ++ DEBUGFUNC("e1000_write_i2c_byte_generic"); ++ ++ swfw_mask = E1000_SWFW_PHY0_SM; ++ ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { ++ status = E1000_ERR_SWFW_SYNC; ++ goto write_byte_out; ++ } ++ ++ do { ++ e1000_i2c_start(hw); ++ ++ status = e1000_clock_out_i2c_byte(hw, dev_addr); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_byte(hw, byte_offset); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_clock_out_i2c_byte(hw, data); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ status = e1000_get_i2c_ack(hw); ++ if (status != E1000_SUCCESS) ++ goto fail; ++ ++ e1000_i2c_stop(hw); ++ break; ++ ++fail: ++ e1000_i2c_bus_clear(hw); ++ retry++; ++ if (retry < max_retry) ++ DEBUGOUT("I2C byte write error - Retrying.\n"); ++ else ++ DEBUGOUT("I2C byte write error.\n"); ++ } while (retry < max_retry); ++ ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ ++write_byte_out: ++ ++ return status; ++} ++ ++/** ++ * e1000_i2c_start - Sets I2C start condition ++ * @hw: pointer to hardware structure ++ * ++ * Sets I2C start condition (High -> Low on SDA while SCL is High) ++ **/ ++static void e1000_i2c_start(struct e1000_hw *hw) ++{ ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ ++ DEBUGFUNC("e1000_i2c_start"); ++ ++ /* Start condition must begin with data and clock high */ ++ e1000_set_i2c_data(hw, &i2cctl, 1); ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Setup time for start condition (4.7us) */ ++ usec_delay(E1000_I2C_T_SU_STA); ++ ++ e1000_set_i2c_data(hw, &i2cctl, 0); ++ ++ /* Hold time for start condition (4us) */ ++ usec_delay(E1000_I2C_T_HD_STA); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ usec_delay(E1000_I2C_T_LOW); ++ + } + + /** +- * igb_validate_nvm_checksum_with_offset - Validate EEPROM +- * checksum +- * @hw: pointer to the HW structure +- * @offset: offset in words of the checksum protected region ++ * e1000_i2c_stop - Sets I2C stop condition ++ * @hw: pointer to hardware structure + * +- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM +- * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
++ * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +-static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, +- u16 offset) ++static void e1000_i2c_stop(struct e1000_hw *hw) + { +- s32 ret_val = 0; +- u16 checksum = 0; +- u16 i, nvm_data; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { +- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; +- } +- checksum += nvm_data; +- } ++ DEBUGFUNC("e1000_i2c_stop"); + +- if (checksum != (u16) NVM_SUM) { +- hw_dbg("NVM Checksum Invalid\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; +- } ++ /* Stop condition must begin with data low and clock high */ ++ e1000_set_i2c_data(hw, &i2cctl, 0); ++ e1000_raise_i2c_clk(hw, &i2cctl); + +-out: +- return ret_val; ++ /* Setup time for stop condition (4us) */ ++ usec_delay(E1000_I2C_T_SU_STO); ++ ++ e1000_set_i2c_data(hw, &i2cctl, 1); ++ ++ /* bus free time between stop and start (4.7us)*/ ++ usec_delay(E1000_I2C_T_BUF); + } + + /** +- * igb_update_nvm_checksum_with_offset - Update EEPROM +- * checksum +- * @hw: pointer to the HW structure +- * @offset: offset in words of the checksum protected region ++ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C ++ * @hw: pointer to hardware structure ++ * @data: data byte to clock in + * +- * Updates the EEPROM checksum by reading/adding each word of the EEPROM +- * up to the checksum. Then calculates the EEPROM checksum and writes the +- * value to the EEPROM. ++ * Clocks in one byte data via I2C data/clock + **/ +-static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) ++static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) + { +- s32 ret_val; +- u16 checksum = 0; +- u16 i, nvm_data; ++ s32 i; ++ bool bit = 0; + +- for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { +- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error while updating checksum.\n"); +- goto out; +- } +- checksum += nvm_data; ++ DEBUGFUNC("e1000_clock_in_i2c_byte"); ++ ++ *data = 0; ++ for (i = 7; i >= 0; i--) { ++ e1000_clock_in_i2c_bit(hw, &bit); ++ *data |= bit << i; + } +- checksum = (u16) NVM_SUM - checksum; +- ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, +- &checksum); +- if (ret_val) +- hw_dbg("NVM Write Error while updating checksum.\n"); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C ++ * @hw: pointer to hardware structure ++ * @data: data byte clocked out + * +- * Calculates the EEPROM section checksum by reading/adding each word of +- * the EEPROM and then verifies that the sum of the EEPROM is +- * equal to 0xBABA. 
++ * Clocks out one byte data via I2C data/clock + **/ +-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) ++static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) + { +- s32 ret_val = 0; +- u16 eeprom_regions_count = 1; +- u16 j, nvm_data; +- u16 nvm_offset; ++ s32 status = E1000_SUCCESS; ++ s32 i; ++ u32 i2cctl; ++ bool bit = 0; + +- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; +- } ++ DEBUGFUNC("e1000_clock_out_i2c_byte"); + +- if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { +- /* if checksums compatibility bit is set validate checksums +- * for all 4 ports. +- */ +- eeprom_regions_count = 4; +- } ++ for (i = 7; i >= 0; i--) { ++ bit = (data >> i) & 0x1; ++ status = e1000_clock_out_i2c_bit(hw, bit); + +- for (j = 0; j < eeprom_regions_count; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_validate_nvm_checksum_with_offset(hw, +- nvm_offset); +- if (ret_val != 0) +- goto out; ++ if (status != E1000_SUCCESS) ++ break; + } + +-out: +- return ret_val; ++ /* Release SDA line (set high) */ ++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ ++ i2cctl |= E1000_I2C_DATA_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); ++ E1000_WRITE_FLUSH(hw); ++ ++ return status; + } + + /** +- * igb_update_nvm_checksum_82580 - Update EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_get_i2c_ack - Polls for I2C ACK ++ * @hw: pointer to hardware structure + * +- * Updates the EEPROM section checksums for all 4 ports by reading/adding +- * each word of the EEPROM up to the checksum. Then calculates the EEPROM +- * checksum and writes the value to the EEPROM. ++ * Clocks in/out one bit via I2C data/clock + **/ +-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) ++static s32 e1000_get_i2c_ack(struct e1000_hw *hw) + { +- s32 ret_val; +- u16 j, nvm_data; +- u16 nvm_offset; ++ s32 status = E1000_SUCCESS; ++ u32 i = 0; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ u32 timeout = 10; ++ bool ack = true; + +- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); +- goto out; +- } ++ DEBUGFUNC("e1000_get_i2c_ack"); + +- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { +- /* set compatibility bit to validate checksums appropriately */ +- nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; +- ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, +- &nvm_data); +- if (ret_val) { +- hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); +- goto out; +- } ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ /* Wait until SCL returns high */ ++ for (i = 0; i < timeout; i++) { ++ usec_delay(1); ++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ if (i2cctl & E1000_I2C_CLK_IN) ++ break; + } ++ if (!(i2cctl & E1000_I2C_CLK_IN)) ++ return E1000_ERR_I2C; + +- for (j = 0; j < 4; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); +- if (ret_val) +- goto out; ++ ack = e1000_get_i2c_data(&i2cctl); ++ if (ack) { ++ DEBUGOUT("I2C ack was not received.\n"); ++ status = E1000_ERR_I2C; + } + +-out: +- return ret_val; ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ usec_delay(E1000_I2C_T_LOW); ++ ++ return status; + } + + /** +- * igb_validate_nvm_checksum_i350 - 
Validate EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock ++ * @hw: pointer to hardware structure ++ * @data: read data value + * +- * Calculates the EEPROM section checksum by reading/adding each word of +- * the EEPROM and then verifies that the sum of the EEPROM is +- * equal to 0xBABA. ++ * Clocks in one bit via I2C data/clock + **/ +-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) ++static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) + { +- s32 ret_val = 0; +- u16 j; +- u16 nvm_offset; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- for (j = 0; j < 4; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_validate_nvm_checksum_with_offset(hw, +- nvm_offset); +- if (ret_val != 0) +- goto out; +- } ++ DEBUGFUNC("e1000_clock_in_i2c_bit"); + +-out: +- return ret_val; ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ *data = e1000_get_i2c_data(&i2cctl); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us */ ++ usec_delay(E1000_I2C_T_LOW); ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_update_nvm_checksum_i350 - Update EEPROM checksum +- * @hw: pointer to the HW structure ++ * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock ++ * @hw: pointer to hardware structure ++ * @data: data value to write + * +- * Updates the EEPROM section checksums for all 4 ports by reading/adding +- * each word of the EEPROM up to the checksum. Then calculates the EEPROM +- * checksum and writes the value to the EEPROM. ++ * Clocks out one bit via I2C data/clock + **/ +-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) ++static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) + { +- s32 ret_val = 0; +- u16 j; +- u16 nvm_offset; ++ s32 status; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- for (j = 0; j < 4; j++) { +- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); +- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); +- if (ret_val != 0) +- goto out; ++ DEBUGFUNC("e1000_clock_out_i2c_bit"); ++ ++ status = e1000_set_i2c_data(hw, &i2cctl, data); ++ if (status == E1000_SUCCESS) { ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Minimum low period of clock is 4.7 us. ++ * This also takes care of the data hold time. 
++ */ ++ usec_delay(E1000_I2C_T_LOW); ++ } else { ++ status = E1000_ERR_I2C; ++ DEBUGOUT1("I2C data was not set to %X\n", data); + } + +-out: +- return ret_val; ++ return status; + } +- + /** +- * __igb_access_emi_reg - Read/write EMI register +- * @hw: pointer to the HW structure +- * @addr: EMI address to program +- * @data: pointer to value to read/write from/to the EMI address +- * @read: boolean flag to indicate read or write ++ * e1000_raise_i2c_clk - Raises the I2C SCL clock ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Raises the I2C clock line '0'->'1' + **/ +-static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, +- u16 *data, bool read) ++static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) + { +- s32 ret_val = 0; +- +- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); +- if (ret_val) +- return ret_val; ++ DEBUGFUNC("e1000_raise_i2c_clk"); + +- if (read) +- ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); +- else +- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); ++ *i2cctl |= E1000_I2C_CLK_OUT; ++ *i2cctl &= ~E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); ++ E1000_WRITE_FLUSH(hw); + +- return ret_val; ++ /* SCL rise time (1000ns) */ ++ usec_delay(E1000_I2C_T_RISE); + } + + /** +- * igb_read_emi_reg - Read Extended Management Interface register +- * @hw: pointer to the HW structure +- * @addr: EMI address to program +- * @data: value to be read from the EMI address ++ * e1000_lower_i2c_clk - Lowers the I2C SCL clock ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Lowers the I2C clock line '1'->'0' + **/ +-s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) ++static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) + { +- return __igb_access_emi_reg(hw, addr, data, true); ++ ++ DEBUGFUNC("e1000_lower_i2c_clk"); ++ ++ *i2cctl &= ~E1000_I2C_CLK_OUT; ++ *i2cctl &= ~E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); ++ E1000_WRITE_FLUSH(hw); ++ ++ /* SCL fall time (300ns) */ ++ usec_delay(E1000_I2C_T_FALL); + } + + /** +- * igb_set_eee_i350 - Enable/disable EEE support +- * @hw: pointer to the HW structure +- * +- * Enable/disable EEE based on setting in dev_spec structure. ++ * e1000_set_i2c_data - Sets the I2C data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * @data: I2C data value (0 or 1) to set + * ++ * Sets the I2C data bit + **/ +-s32 igb_set_eee_i350(struct e1000_hw *hw) ++static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) + { +- u32 ipcnfg, eeer; ++ s32 status = E1000_SUCCESS; + +- if ((hw->mac.type < e1000_i350) || +- (hw->phy.media_type != e1000_media_type_copper)) +- goto out; +- ipcnfg = rd32(E1000_IPCNFG); +- eeer = rd32(E1000_EEER); ++ DEBUGFUNC("e1000_set_i2c_data"); + +- /* enable or disable per user setting */ +- if (!(hw->dev_spec._82575.eee_disable)) { +- u32 eee_su = rd32(E1000_EEE_SU); ++ if (data) ++ *i2cctl |= E1000_I2C_DATA_OUT; ++ else ++ *i2cctl &= ~E1000_I2C_DATA_OUT; + +- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); +- eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | +- E1000_EEER_LPI_FC); ++ *i2cctl &= ~E1000_I2C_DATA_OE_N; ++ *i2cctl |= E1000_I2C_CLK_OE_N; ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); ++ E1000_WRITE_FLUSH(hw); + +- /* This bit should not be set in normal operation. 
*/ +- if (eee_su & E1000_EEE_SU_LPI_CLK_STP) +- hw_dbg("LPI Clock Stop Bit should not be set!\n"); ++ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ ++ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + +- } else { +- ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | +- E1000_IPCNFG_EEE_100M_AN); +- eeer &= ~(E1000_EEER_TX_LPI_EN | +- E1000_EEER_RX_LPI_EN | +- E1000_EEER_LPI_FC); ++ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ if (data != e1000_get_i2c_data(i2cctl)) { ++ status = E1000_ERR_I2C; ++ DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } +- wr32(E1000_IPCNFG, ipcnfg); +- wr32(E1000_EEER, eeer); +- rd32(E1000_IPCNFG); +- rd32(E1000_EEER); +-out: + +- return 0; ++ return status; + } + + /** +- * igb_set_eee_i354 - Enable/disable EEE support +- * @hw: pointer to the HW structure +- * +- * Enable/disable EEE legacy mode based on setting in dev_spec structure. ++ * e1000_get_i2c_data - Reads the I2C SDA data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register + * ++ * Returns the I2C data bit value + **/ +-s32 igb_set_eee_i354(struct e1000_hw *hw) ++static bool e1000_get_i2c_data(u32 *i2cctl) + { +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; +- u16 phy_data; +- +- if ((hw->phy.media_type != e1000_media_type_copper) || +- (phy->id != M88E1543_E_PHY_ID)) +- goto out; +- +- if (!hw->dev_spec._82575.eee_disable) { +- /* Switch to PHY page 18. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); +- if (ret_val) +- goto out; +- +- ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, +- &phy_data); +- if (ret_val) +- goto out; +- +- phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; +- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, +- phy_data); +- if (ret_val) +- goto out; +- +- /* Return the PHY to page 0. */ +- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); +- if (ret_val) +- goto out; +- +- /* Turn on EEE advertisement. */ +- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- &phy_data); +- if (ret_val) +- goto out; ++ bool data; + +- phy_data |= E1000_EEE_ADV_100_SUPPORTED | +- E1000_EEE_ADV_1000_SUPPORTED; +- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- phy_data); +- } else { +- /* Turn off EEE advertisement. */ +- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- &phy_data); +- if (ret_val) +- goto out; ++ DEBUGFUNC("e1000_get_i2c_data"); + +- phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | +- E1000_EEE_ADV_1000_SUPPORTED); +- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, +- E1000_EEE_ADV_DEV_I354, +- phy_data); +- } ++ if (*i2cctl & E1000_I2C_DATA_IN) ++ data = 1; ++ else ++ data = 0; + +-out: +- return ret_val; ++ return data; + } + + /** +- * igb_get_eee_status_i354 - Get EEE status +- * @hw: pointer to the HW structure +- * @status: EEE status ++ * e1000_i2c_bus_clear - Clears the I2C bus ++ * @hw: pointer to hardware structure + * +- * Get EEE status by guessing based on whether Tx or Rx LPI indications have +- * been received. ++ * Clears the I2C bus by sending nine clock pulses. ++ * Used when data line is stuck low. 
+ **/ +-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) ++void e1000_i2c_bus_clear(struct e1000_hw *hw) + { +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; +- u16 phy_data; ++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); ++ u32 i; + +- /* Check if EEE is supported on this device. */ +- if ((hw->phy.media_type != e1000_media_type_copper) || +- (phy->id != M88E1543_E_PHY_ID)) +- goto out; ++ DEBUGFUNC("e1000_i2c_bus_clear"); + +- ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, +- E1000_PCS_STATUS_DEV_I354, +- &phy_data); +- if (ret_val) +- goto out; ++ e1000_i2c_start(hw); + +- *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | +- E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; ++ e1000_set_i2c_data(hw, &i2cctl, 1); + +-out: +- return ret_val; ++ for (i = 0; i < 9; i++) { ++ e1000_raise_i2c_clk(hw, &i2cctl); ++ ++ /* Min high period of clock is 4us */ ++ usec_delay(E1000_I2C_T_HIGH); ++ ++ e1000_lower_i2c_clk(hw, &i2cctl); ++ ++ /* Min low period of clock is 4.7us*/ ++ usec_delay(E1000_I2C_T_LOW); ++ } ++ ++ e1000_i2c_start(hw); ++ ++ /* Put the i2c bus back to default state */ ++ e1000_i2c_stop(hw); + } + + static const u8 e1000_emc_temp_data[4] = { +@@ -2707,14 +3782,13 @@ + E1000_EMC_DIODE3_THERM_LIMIT + }; + +-#ifdef CONFIG_IGB_HWMON + /** +- * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data ++ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +-static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) ++s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw) + { + u16 ets_offset; + u16 ets_cfg; +@@ -2725,17 +3799,19 @@ + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + ++ DEBUGFUNC("e1000_get_thermal_sensor_data_generic"); ++ + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + +- data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); ++ data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ +- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); ++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) +- return 0; ++ return E1000_SUCCESS; + +- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); ++ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; +@@ -2745,7 +3821,7 @@ + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { +- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); ++ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> +@@ -2757,17 +3833,17 @@ + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds ++ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +-static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) ++s32 
e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) + { + u16 ets_offset; + u16 ets_cfg; +@@ -2780,6 +3856,8 @@ + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + ++ DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic"); ++ + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + +@@ -2787,16 +3865,16 @@ + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = +- (rd32(E1000_THHIGHTC) & 0xFF); ++ (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = +- (rd32(E1000_THLOWTC) & 0xFF); ++ (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ +- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); ++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) +- return 0; ++ return E1000_SUCCESS; + +- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); ++ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; +@@ -2806,7 +3884,7 @@ + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { +- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); ++ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> +@@ -2825,41 +3903,5 @@ + low_thresh_delta; + } + } +- return 0; ++ return E1000_SUCCESS; + } +- +-#endif +-static struct e1000_mac_operations e1000_mac_ops_82575 = { +- .init_hw = igb_init_hw_82575, +- .check_for_link = igb_check_for_link_82575, +- .rar_set = igb_rar_set, +- .read_mac_addr = igb_read_mac_addr_82575, +- .get_speed_and_duplex = igb_get_link_up_info_82575, +-#ifdef CONFIG_IGB_HWMON +- .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, +- .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +-#endif +-}; +- +-static struct e1000_phy_operations e1000_phy_ops_82575 = { +- .acquire = igb_acquire_phy_82575, +- .get_cfg_done = igb_get_cfg_done_82575, +- .release = igb_release_phy_82575, +- .write_i2c_byte = igb_write_i2c_byte, +- .read_i2c_byte = igb_read_i2c_byte, +-}; +- +-static struct e1000_nvm_operations e1000_nvm_ops_82575 = { +- .acquire = igb_acquire_nvm_82575, +- .read = igb_read_nvm_eerd, +- .release = igb_release_nvm_82575, +- .write = igb_write_nvm_spi, +-}; +- +-const struct e1000_info e1000_82575_info = { +- .get_invariants = igb_get_invariants_82575, +- .mac_ops = &e1000_mac_ops_82575, +- .phy_ops = &e1000_phy_ops_82575, +- .nvm_ops = &e1000_nvm_ops_82575, +-}; +- +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h +--- a/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,67 +1,149 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_82575_H_ + #define _E1000_82575_H_ + +-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +-void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +-void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +-void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +-s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, +- u8 *data); +-s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, +- u8 data); +- +-#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ +- (ID_LED_DEF1_DEF2 << 8) | \ +- (ID_LED_DEF1_DEF2 << 4) | \ +- (ID_LED_OFF1_ON2)) +- +-#define E1000_RAR_ENTRIES_82575 16 +-#define E1000_RAR_ENTRIES_82576 24 +-#define E1000_RAR_ENTRIES_82580 24 +-#define E1000_RAR_ENTRIES_I350 32 +- +-#define E1000_SW_SYNCH_MB 0x00000100 +-#define E1000_STAT_DEV_RST_SET 0x00100000 +-#define E1000_CTRL_DEV_RST 0x20000000 +- +-/* SRRCTL bit definitions */ +-#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +-#define E1000_SRRCTL_DROP_EN 0x80000000 +-#define E1000_SRRCTL_TIMESTAMP 0x40000000 ++#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ ++ (ID_LED_DEF1_DEF2 << 8) | \ ++ (ID_LED_DEF1_DEF2 << 4) | \ ++ (ID_LED_OFF1_ON2)) ++/* ++ * Receive Address Register Count ++ * Number of high/low register pairs in the RAR. The RAR (Receive Address ++ * Registers) holds the directed and multicast addresses that we monitor. ++ * These entries are also used for MAC-based filtering. ++ */ ++/* ++ * For 82576, there are an additional set of RARs that begin at an offset ++ * separate from the first set of RARs. 
++ */ ++#define E1000_RAR_ENTRIES_82575 16 ++#define E1000_RAR_ENTRIES_82576 24 ++#define E1000_RAR_ENTRIES_82580 24 ++#define E1000_RAR_ENTRIES_I350 32 ++#define E1000_SW_SYNCH_MB 0x00000100 ++#define E1000_STAT_DEV_RST_SET 0x00100000 ++#define E1000_CTRL_DEV_RST 0x20000000 ++ ++struct e1000_adv_data_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ union { ++ u32 data; ++ struct { ++ u32 datalen:16; /* Data buffer length */ ++ u32 rsvd:4; ++ u32 dtyp:4; /* Descriptor type */ ++ u32 dcmd:8; /* Descriptor command */ ++ } config; ++ } lower; ++ union { ++ u32 data; ++ struct { ++ u32 status:4; /* Descriptor status */ ++ u32 idx:4; ++ u32 popts:6; /* Packet Options */ ++ u32 paylen:18; /* Payload length */ ++ } options; ++ } upper; ++}; + ++#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ ++#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ ++#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ ++#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ ++#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ ++#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ ++#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ ++#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ ++#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ ++#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ ++#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ ++#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ ++#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ ++/* Extended Device Control */ ++#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ ++ ++struct e1000_adv_context_desc { ++ union { ++ u32 ip_config; ++ struct { ++ u32 iplen:9; ++ u32 maclen:7; ++ u32 vlan_tag:16; ++ } fields; ++ } ip_setup; ++ u32 seq_num; ++ union { ++ u64 l4_config; ++ struct { ++ u32 mkrloc:9; ++ u32 tucmd:11; ++ u32 dtyp:4; ++ u32 adv:8; ++ u32 rsvd:4; ++ u32 idx:4; ++ u32 l4len:8; ++ u32 mss:16; ++ } fields; ++ } l4_setup; ++}; + +-#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +-#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +-#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +-#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +-#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 ++/* SRRCTL bit definitions */ ++#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ ++#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 ++#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ ++#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 ++#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 ++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 ++#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 ++#define E1000_SRRCTL_TIMESTAMP 0x40000000 ++#define E1000_SRRCTL_DROP_EN 0x80000000 ++ ++#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F ++#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 ++ ++#define E1000_TX_HEAD_WB_ENABLE 0x1 ++#define E1000_TX_SEQNUM_WB_ENABLE 0x2 ++ ++#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 ++#define E1000_MRQC_ENABLE_VMDQ 0x00000003 ++#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 ++#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 ++#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 ++#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 
++#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 ++ ++#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 ++#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ ++ E1000_VMRCTL_MIRROR_PORT_SHIFT) ++#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) ++#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) ++#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + + #define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ +@@ -75,42 +157,114 @@ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + ++#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE ++#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE ++ ++#define EIMS_ENABLE_MASK ( \ ++ E1000_EIMS_RX_QUEUE | \ ++ E1000_EIMS_TX_QUEUE | \ ++ E1000_EIMS_TCP_TIMER | \ ++ E1000_EIMS_OTHER) ++ + /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +-#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +-#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ ++#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ ++#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ ++#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ ++#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ ++#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ ++#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ ++#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ ++#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ ++#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ ++#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + + /* Receive Descriptor - Advanced */ + union e1000_adv_rx_desc { + struct { +- __le64 pkt_addr; /* Packet buffer address */ +- __le64 hdr_addr; /* Header buffer address */ ++ __le64 pkt_addr; /* Packet buffer address */ ++ __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { +- struct { +- __le16 pkt_info; /* RSS type, Packet type */ +- __le16 hdr_info; /* Split Head, buf len */ ++ union { ++ __le32 data; ++ struct { ++ __le16 pkt_info; /*RSS type, Pkt type*/ ++ /* Split Header, header buffer len */ ++ __le16 hdr_info; ++ } hs_rss; + } lo_dword; + union { +- __le32 rss; /* RSS Hash */ ++ __le32 rss; /* RSS Hash */ + struct { +- __le16 ip_id; /* IP id */ +- __le16 csum; /* Packet Checksum */ ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { +- __le32 status_error; /* ext status/error */ +- __le16 length; /* Packet length */ +- __le16 vlan; /* VLAN tag */ ++ __le32 status_error; /* ext status/error */ ++ __le16 length; /* Packet length */ ++ __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ + }; + +-#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +-#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +-#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +-#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ ++#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F ++#define E1000_RXDADV_RSSTYPE_SHIFT 12 ++#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 ++#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 ++#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 ++#define E1000_RXDADV_SPH 0x8000 ++#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ ++#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ ++#define E1000_RXDADV_ERR_HBO 0x00800000 ++ ++/* RSS Hash results */ ++#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 ++#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 ++#define 
E1000_RXDADV_RSSTYPE_IPV4 0x00000002 ++#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 ++#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 ++#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 ++#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 ++#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 ++#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 ++#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 ++ ++/* RSS Packet Types as indicated in the receive descriptor */ ++#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 ++#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 ++#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 ++#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ ++#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ ++#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ ++#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ ++#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ ++#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ ++#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ ++#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ ++ ++#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ ++#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ ++#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ ++#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ ++#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ ++#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ ++ ++/* LinkSec results */ ++/* Security Processing bit Indication */ ++#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 ++#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 ++#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 ++#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 ++#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 ++ ++#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 ++#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 ++#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 ++#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 ++#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + + /* Transmit Descriptor - Advanced */ + union e1000_adv_tx_desc { +@@ -127,16 +281,26 @@ + }; + + /* Adv Transmit Descriptor Config Masks */ +-#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ ++#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ ++#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ ++#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ ++#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ ++#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ ++#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ ++#define 
E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ ++#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ ++#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ ++#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ ++#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ ++#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ ++#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ ++#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ ++#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ ++#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ ++/* 1st & Last TSO-full iSCSI PDU*/ ++#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 ++#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ ++#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + + /* Context descriptors */ + struct e1000_adv_tx_context_desc { +@@ -146,127 +310,174 @@ + __le32 mss_l4len_idx; + }; + +-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ ++#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ ++#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ ++#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ ++#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ ++#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ ++#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ ++#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ ++#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ + /* IPSec Encrypt Enable for ESP */ +-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ ++#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 ++/* Req requires Markers and CRC */ ++#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 ++#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ ++#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + /* Adv ctxt IPSec SA IDX mask */ ++#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF + /* Adv ctxt IPSec ESP len mask */ ++#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + + /* Additional Transmit Descriptor Control definitions */ +-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ ++#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ ++#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ + /* Tx Queue Arbitration Priority 0=low, 1=high */ ++#define E1000_TXDCTL_PRIORITY 0x08000000 + + /* Additional Receive Descriptor Control definitions */ +-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ ++#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ ++#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
wbk flushing */ + + /* Direct Cache Access (DCA) definitions */ +-#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ ++#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ ++#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +- +-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +- +-/* Additional DCA related definitions, note change in position of CPUID */ +-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +-#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +-#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ ++#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ ++#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ ++ ++#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ ++#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ ++#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ ++#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ ++#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ ++ ++#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ ++#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ ++#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ ++#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ ++#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ ++ ++#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ ++#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ ++#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ ++#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ ++ ++/* Additional interrupt register bit definitions */ ++#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ ++#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ ++#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + + /* ETQF register bit definitions */ +-#define E1000_ETQF_FILTER_ENABLE (1 << 26) +-#define E1000_ETQF_1588 (1 << 30) ++#define E1000_ETQF_FILTER_ENABLE (1 << 26) ++#define E1000_ETQF_IMM_INT (1 << 29) ++#define E1000_ETQF_1588 (1 << 30) ++#define E1000_ETQF_QUEUE_ENABLE (1 << 31) ++/* ++ * ETQF filter list: one static filter per filter consumer. This is ++ * to avoid filter collisions later. Add new filters ++ * here!! 
++ * ++ * Current filters: ++ * EAPOL 802.1x (0x888e): Filter 0 ++ */ ++#define E1000_ETQF_FILTER_EAPOL 0 + +-/* FTQF register bit definitions */ +-#define E1000_FTQF_VF_BP 0x00008000 +-#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +-#define E1000_FTQF_MASK 0xF0000000 +-#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 +- +-#define E1000_NVM_APME_82575 0x0400 +-#define MAX_NUM_VFS 8 +- +-#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +-#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +-#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ ++#define E1000_FTQF_VF_BP 0x00008000 ++#define E1000_FTQF_1588_TIME_STAMP 0x08000000 ++#define E1000_FTQF_MASK 0xF0000000 ++#define E1000_FTQF_MASK_PROTO_BP 0x10000000 ++#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 ++#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 ++#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 ++ ++#define E1000_NVM_APME_82575 0x0400 ++#define MAX_NUM_VFS 7 ++ ++#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ ++#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ ++#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ ++#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 ++#define E1000_DTXSWC_LLE_SHIFT 16 ++#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + + /* Easy defines for setting default pool, would normally be left a zero */ +-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +-#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) ++#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 ++#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + + /* Other useful VMD_CTL register defines */ +-#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +-#define E1000_VT_CTL_VM_REPL_EN (1 << 30) ++#define E1000_VT_CTL_IGNORE_MAC (1 << 28) ++#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) ++#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + + /* Per VM Offload register setup */ +-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +-#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +-#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +-#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +-#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +-#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +-#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +-#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +-#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +-#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ +- +-#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +-#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +-#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ +- +-#define E1000_VLVF_ARRAY_SIZE 32 +-#define E1000_VLVF_VLANID_MASK 0x00000FFF +-#define E1000_VLVF_POOLSEL_SHIFT 12 +-#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +-#define E1000_VLVF_LVLAN 0x00100000 +-#define E1000_VLVF_VLANID_ENABLE 0x80000000 +- +-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +-#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never 
insert VLAN tag */ +- +-#define E1000_IOVCTL 0x05BBC +-#define E1000_IOVCTL_REUSE_VFQ 0x00000001 +- +-#define E1000_RPLOLR_STRVLAN 0x40000000 +-#define E1000_RPLOLR_STRCRC 0x80000000 +- +-#define E1000_DTXCTL_8023LL 0x0004 +-#define E1000_DTXCTL_VLAN_ADDED 0x0008 +-#define E1000_DTXCTL_OOS_ENABLE 0x0010 +-#define E1000_DTXCTL_MDP_EN 0x0020 +-#define E1000_DTXCTL_SPOOF_INT 0x0040 ++#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ ++#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ ++#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ ++#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ ++#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ ++#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ ++#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ ++#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ ++#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ ++#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ ++ ++#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ ++#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ ++#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ ++#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ ++#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ ++ ++#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ ++#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ ++ ++#define E1000_VLVF_ARRAY_SIZE 32 ++#define E1000_VLVF_VLANID_MASK 0x00000FFF ++#define E1000_VLVF_POOLSEL_SHIFT 12 ++#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) ++#define E1000_VLVF_LVLAN 0x00100000 ++#define E1000_VLVF_VLANID_ENABLE 0x80000000 ++ ++#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ ++#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ ++ ++#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++ ++#define E1000_IOVCTL 0x05BBC ++#define E1000_IOVCTL_REUSE_VFQ 0x00000001 ++ ++#define E1000_RPLOLR_STRVLAN 0x40000000 ++#define E1000_RPLOLR_STRCRC 0x80000000 ++ ++#define E1000_TCTL_EXT_COLD 0x000FFC00 ++#define E1000_TCTL_EXT_COLD_SHIFT 10 ++ ++#define E1000_DTXCTL_8023LL 0x0004 ++#define E1000_DTXCTL_VLAN_ADDED 0x0008 ++#define E1000_DTXCTL_OOS_ENABLE 0x0010 ++#define E1000_DTXCTL_MDP_EN 0x0020 ++#define E1000_DTXCTL_SPOOF_INT 0x0040 + + #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +-#define ALL_QUEUES 0xFFFF +- +-/* RX packet buffer size defines */ +-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +-void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +-void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +-u16 igb_rxpbs_adjust_82580(u32 data); +-s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +-s32 igb_set_eee_i350(struct e1000_hw *); +-s32 igb_set_eee_i354(struct e1000_hw *); +-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); ++#define ALL_QUEUES 0xFFFF + ++/* Rx packet buffer size defines */ ++#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F ++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); ++void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); ++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); ++s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); ++s32 e1000_init_hw_82575(struct 
e1000_hw *hw); ++ ++void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); ++u16 e1000_rxpbs_adjust_82580(u32 data); ++s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); ++s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); ++s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); ++s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); ++s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); ++s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw); + #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 + #define E1000_EMC_INTERNAL_DATA 0x00 + #define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +@@ -276,4 +487,26 @@ + #define E1000_EMC_DIODE2_THERM_LIMIT 0x1A + #define E1000_EMC_DIODE3_DATA 0x2A + #define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +-#endif ++ ++s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw); ++s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw); ++ ++/* I2C SDA and SCL timing parameters for standard mode */ ++#define E1000_I2C_T_HD_STA 4 ++#define E1000_I2C_T_LOW 5 ++#define E1000_I2C_T_HIGH 4 ++#define E1000_I2C_T_SU_STA 5 ++#define E1000_I2C_T_HD_DATA 5 ++#define E1000_I2C_T_SU_DATA 1 ++#define E1000_I2C_T_RISE 1 ++#define E1000_I2C_T_FALL 1 ++#define E1000_I2C_T_SU_STO 4 ++#define E1000_I2C_T_BUF 5 ++ ++s32 e1000_set_i2c_bb(struct e1000_hw *hw); ++s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++void e1000_i2c_bus_clear(struct e1000_hw *hw); ++#endif /* _E1000_82575_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.c b/drivers/net/ethernet/intel/igb/e1000_api.c +--- a/drivers/net/ethernet/intel/igb/e1000_api.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_api.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,1184 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++/** ++ * e1000_init_mac_params - Initialize MAC function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the MAC ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. 
++ **/ ++s32 e1000_init_mac_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->mac.ops.init_params) { ++ ret_val = hw->mac.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("MAC Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("mac.init_mac_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_init_nvm_params - Initialize NVM function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the NVM ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. ++ **/ ++s32 e1000_init_nvm_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->nvm.ops.init_params) { ++ ret_val = hw->nvm.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("NVM Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("nvm.init_nvm_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_init_phy_params - Initialize PHY function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the PHY ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. ++ **/ ++s32 e1000_init_phy_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->phy.ops.init_params) { ++ ret_val = hw->phy.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("PHY Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("phy.init_phy_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_init_mbx_params - Initialize mailbox function pointers ++ * @hw: pointer to the HW structure ++ * ++ * This function initializes the function pointers for the PHY ++ * set of functions. Called by drivers or by e1000_setup_init_funcs. ++ **/ ++s32 e1000_init_mbx_params(struct e1000_hw *hw) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (hw->mbx.ops.init_params) { ++ ret_val = hw->mbx.ops.init_params(hw); ++ if (ret_val) { ++ DEBUGOUT("Mailbox Initialization Error\n"); ++ goto out; ++ } ++ } else { ++ DEBUGOUT("mbx.init_mbx_params was NULL\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_set_mac_type - Sets MAC type ++ * @hw: pointer to the HW structure ++ * ++ * This function sets the mac type of the adapter based on the ++ * device ID stored in the hw structure. ++ * MUST BE FIRST FUNCTION CALLED (explicitly or through ++ * e1000_setup_init_funcs()). 
++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_set_mac_type(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("igb_e1000_set_mac_type"); ++ ++ switch (hw->device_id) { ++ case E1000_DEV_ID_82575EB_COPPER: ++ case E1000_DEV_ID_82575EB_FIBER_SERDES: ++ case E1000_DEV_ID_82575GB_QUAD_COPPER: ++ mac->type = e1000_82575; ++ break; ++ case E1000_DEV_ID_82576: ++ case E1000_DEV_ID_82576_FIBER: ++ case E1000_DEV_ID_82576_SERDES: ++ case E1000_DEV_ID_82576_QUAD_COPPER: ++ case E1000_DEV_ID_82576_QUAD_COPPER_ET2: ++ case E1000_DEV_ID_82576_NS: ++ case E1000_DEV_ID_82576_NS_SERDES: ++ case E1000_DEV_ID_82576_SERDES_QUAD: ++ mac->type = e1000_82576; ++ break; ++ case E1000_DEV_ID_82580_COPPER: ++ case E1000_DEV_ID_82580_FIBER: ++ case E1000_DEV_ID_82580_SERDES: ++ case E1000_DEV_ID_82580_SGMII: ++ case E1000_DEV_ID_82580_COPPER_DUAL: ++ case E1000_DEV_ID_82580_QUAD_FIBER: ++ case E1000_DEV_ID_DH89XXCC_SGMII: ++ case E1000_DEV_ID_DH89XXCC_SERDES: ++ case E1000_DEV_ID_DH89XXCC_BACKPLANE: ++ case E1000_DEV_ID_DH89XXCC_SFP: ++ mac->type = e1000_82580; ++ break; ++ case E1000_DEV_ID_I350_COPPER: ++ case E1000_DEV_ID_I350_FIBER: ++ case E1000_DEV_ID_I350_SERDES: ++ case E1000_DEV_ID_I350_SGMII: ++ case E1000_DEV_ID_I350_DA4: ++ mac->type = e1000_i350; ++ break; ++ case E1000_DEV_ID_I210_COPPER_FLASHLESS: ++ case E1000_DEV_ID_I210_SERDES_FLASHLESS: ++ case E1000_DEV_ID_I210_COPPER: ++ case E1000_DEV_ID_I210_COPPER_OEM1: ++ case E1000_DEV_ID_I210_COPPER_IT: ++ case E1000_DEV_ID_I210_FIBER: ++ case E1000_DEV_ID_I210_SERDES: ++ case E1000_DEV_ID_I210_SGMII: ++ mac->type = e1000_i210; ++ break; ++ case E1000_DEV_ID_I211_COPPER: ++ mac->type = e1000_i211; ++ break; ++ ++ case E1000_DEV_ID_I354_BACKPLANE_1GBPS: ++ case E1000_DEV_ID_I354_SGMII: ++ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: ++ mac->type = e1000_i354; ++ break; ++ default: ++ /* Should never have loaded on this device */ ++ ret_val = -E1000_ERR_MAC_INIT; ++ break; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_setup_init_funcs - Initializes function pointers ++ * @hw: pointer to the HW structure ++ * @init_device: true will initialize the rest of the function pointers ++ * getting the device ready for use. false will only set ++ * MAC type and the function pointers for the other init ++ * functions. Passing false will not generate any hardware ++ * reads or writes. ++ * ++ * This function must be called by a driver in order to use the rest ++ * of the 'shared' code files. Called by drivers only. ++ **/ ++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) ++{ ++ s32 ret_val; ++ ++ /* Can't do much good without knowing the MAC type. */ ++ ret_val = igb_e1000_set_mac_type(hw); ++ if (ret_val) { ++ DEBUGOUT("ERROR: MAC type could not be set properly.\n"); ++ goto out; ++ } ++ ++ if (!hw->hw_addr) { ++ DEBUGOUT("ERROR: Registers not mapped\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ goto out; ++ } ++ ++ /* ++ * Init function pointers to generic implementations. We do this first ++ * allowing a driver module to override it afterward. ++ */ ++ e1000_init_mac_ops_generic(hw); ++ e1000_init_phy_ops_generic(hw); ++ e1000_init_nvm_ops_generic(hw); ++ e1000_init_mbx_ops_generic(hw); ++ ++ /* ++ * Set up the init function pointers. These are functions within the ++ * adapter family file that sets up function pointers for the rest of ++ * the functions in that family. 
++ */ ++ switch (hw->mac.type) { ++ case e1000_82575: ++ case e1000_82576: ++ case e1000_82580: ++ case e1000_i350: ++ case e1000_i354: ++ e1000_init_function_pointers_82575(hw); ++ break; ++ case e1000_i210: ++ case e1000_i211: ++ e1000_init_function_pointers_i210(hw); ++ break; ++ default: ++ DEBUGOUT("Hardware not supported\n"); ++ ret_val = -E1000_ERR_CONFIG; ++ break; ++ } ++ ++ /* ++ * Initialize the rest of the function pointers. These require some ++ * register reads/writes in some cases. ++ */ ++ if (!(ret_val) && init_device) { ++ ret_val = e1000_init_mac_params(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = e1000_init_nvm_params(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = e1000_init_phy_params(hw); ++ if (ret_val) ++ goto out; ++ ++ ret_val = e1000_init_mbx_params(hw); ++ if (ret_val) ++ goto out; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_get_bus_info - Obtain bus information for adapter ++ * @hw: pointer to the HW structure ++ * ++ * This will obtain information about the HW bus for which the ++ * adapter is attached and stores it in the hw structure. This is a ++ * function pointer entry point called by drivers. ++ **/ ++ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_bus_info(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.get_bus_info) ++ return hw->mac.ops.get_bus_info(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_clear_vfta - Clear VLAN filter table ++ * @hw: pointer to the HW structure ++ * ++ * This clears the VLAN filter table on the adapter. This is a function ++ * pointer entry point called by drivers. ++ **/ ++void e1000_clear_vfta(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.clear_vfta) ++ hw->mac.ops.clear_vfta(hw); ++} ++ ++/** ++ * igb_e1000_write_vfta - Write value to VLAN filter table ++ * @hw: pointer to the HW structure ++ * @offset: the 32-bit offset in which to write the value to. ++ * @value: the 32-bit value to write at location offset. ++ * ++ * This writes a 32-bit value to a 32-bit offset in the VLAN filter ++ * table. This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) ++{ ++ if (hw->mac.ops.write_vfta) ++ hw->mac.ops.write_vfta(hw, offset, value); ++} ++ ++/** ++ * e1000_update_mc_addr_list - Update Multicast addresses ++ * @hw: pointer to the HW structure ++ * @mc_addr_list: array of multicast addresses to program ++ * @mc_addr_count: number of multicast addresses to program ++ * ++ * Updates the Multicast Table Array. ++ * The caller must have a packed mc_addr_list of multicast addresses. ++ **/ ++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count) ++{ ++ if (hw->mac.ops.update_mc_addr_list) ++ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, ++ mc_addr_count); ++} ++ ++/** ++ * igb_e1000_force_mac_fc - Force MAC flow control ++ * @hw: pointer to the HW structure ++ * ++ * Force the MAC's flow control settings. Currently no func pointer exists ++ * and all implementations are handled in the generic version of this ++ * function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_force_mac_fc(struct e1000_hw *hw) ++{ ++ return e1000_force_mac_fc_generic(hw); ++} ++ ++/** ++ * igb_e1000_check_for_link - Check/Store link connection ++ * @hw: pointer to the HW structure ++ * ++ * This checks the link condition of the adapter and stores the ++ * results in the hw->mac structure. 
This is a function pointer entry ++ * point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_for_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.check_for_link) ++ return hw->mac.ops.check_for_link(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_check_mng_mode - Check management mode ++ * @hw: pointer to the HW structure ++ * ++ * This checks if the adapter has manageability enabled. ++ * This is a function pointer entry point called by drivers. ++ **/ ++bool e1000_check_mng_mode(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.check_mng_mode) ++ return hw->mac.ops.check_mng_mode(hw); ++ ++ return false; ++} ++ ++/** ++ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface ++ * @length: size of the buffer ++ * ++ * Writes the DHCP information to the host interface. ++ **/ ++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) ++{ ++ return e1000_mng_write_dhcp_info_generic(hw, buffer, length); ++} ++ ++/** ++ * igb_e1000_reset_hw - Reset hardware ++ * @hw: pointer to the HW structure ++ * ++ * This resets the hardware into a known state. This is a function pointer ++ * entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_reset_hw(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.reset_hw) ++ return hw->mac.ops.reset_hw(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_init_hw - Initialize hardware ++ * @hw: pointer to the HW structure ++ * ++ * This inits the hardware readying it for operation. This is a function ++ * pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_init_hw(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.init_hw) ++ return hw->mac.ops.init_hw(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_setup_link - Configures link and flow control ++ * @hw: pointer to the HW structure ++ * ++ * This configures link and flow control settings for the adapter. This ++ * is a function pointer entry point called by drivers. While modules can ++ * also call this, they probably call their own version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_setup_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.setup_link) ++ return hw->mac.ops.setup_link(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_get_speed_and_duplex - Returns current speed and duplex ++ * @hw: pointer to the HW structure ++ * @speed: pointer to a 16-bit value to store the speed ++ * @duplex: pointer to a 16-bit value to store the duplex. ++ * ++ * This returns the speed and duplex of the adapter in the two 'out' ++ * variables passed in. This is a function pointer entry point called ++ * by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) ++{ ++ if (hw->mac.ops.get_link_up_info) ++ return hw->mac.ops.get_link_up_info(hw, speed, duplex); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * igb_e1000_setup_led - Configures SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This prepares the SW controllable LED for use and saves the current state ++ * of the LED so it can be later restored. This is a function pointer entry ++ * point called by drivers. 
++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_setup_led(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.setup_led) ++ return hw->mac.ops.setup_led(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_cleanup_led - Restores SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This restores the SW controllable LED to the value saved off by ++ * igb_e1000_setup_led. This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_cleanup_led(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.cleanup_led) ++ return hw->mac.ops.cleanup_led(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_blink_led - Blink SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This starts the adapter LED blinking. Request the LED to be setup first ++ * and cleaned up after. This is a function pointer entry point called by ++ * drivers. ++ **/ ++s32 e1000_blink_led(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.blink_led) ++ return hw->mac.ops.blink_led(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_id_led_init - store LED configurations in SW ++ * @hw: pointer to the HW structure ++ * ++ * Initializes the LED config in SW. This is a function pointer entry point ++ * called by drivers. ++ **/ ++s32 e1000_id_led_init(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.id_led_init) ++ return hw->mac.ops.id_led_init(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_led_on - Turn on SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * Turns the SW defined LED on. This is a function pointer entry point ++ * called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_led_on(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.led_on) ++ return hw->mac.ops.led_on(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_led_off - Turn off SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * Turns the SW defined LED off. This is a function pointer entry point ++ * called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_led_off(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.led_off) ++ return hw->mac.ops.led_off(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_reset_adaptive - Reset adaptive IFS ++ * @hw: pointer to the HW structure ++ * ++ * Resets the adaptive IFS. Currently no func pointer exists and all ++ * implementations are handled in the generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_reset_adaptive(struct e1000_hw *hw) ++{ ++ e1000_reset_adaptive_generic(hw); ++} ++ ++/** ++ * igb_e1000_update_adaptive - Update adaptive IFS ++ * @hw: pointer to the HW structure ++ * ++ * Updates adapter IFS. Currently no func pointer exists and all ++ * implementations are handled in the generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_update_adaptive(struct e1000_hw *hw) ++{ ++ e1000_update_adaptive_generic(hw); ++} ++ ++/** ++ * e1000_disable_pcie_master - Disable PCI-Express master access ++ * @hw: pointer to the HW structure ++ * ++ * Disables PCI-Express master access and verifies there are no pending ++ * requests. Currently no func pointer exists and all implementations are ++ * handled in the generic version of this function. 
++ **/ ++s32 e1000_disable_pcie_master(struct e1000_hw *hw) ++{ ++ return e1000_disable_pcie_master_generic(hw); ++} ++ ++/** ++ * igb_e1000_config_collision_dist - Configure collision distance ++ * @hw: pointer to the HW structure ++ * ++ * Configures the collision distance to the default value and is used ++ * during link setup. ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_config_collision_dist(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.config_collision_dist) ++ hw->mac.ops.config_collision_dist(hw); ++} ++ ++/** ++ * igb_e1000_rar_set - Sets a receive address register ++ * @hw: pointer to the HW structure ++ * @addr: address to set the RAR to ++ * @index: the RAR to set ++ * ++ * Sets a Receive Address Register (RAR) to the specified address. ++ **/ ++/* Changed name, duplicated with e1000 */ ++int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) ++{ ++ if (hw->mac.ops.rar_set) ++ return hw->mac.ops.rar_set(hw, addr, index); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state ++ * @hw: pointer to the HW structure ++ * ++ * Ensures that the MDI/MDIX SW state is valid. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.validate_mdi_setting) ++ return hw->mac.ops.validate_mdi_setting(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_hash_mc_addr - Determines address location in multicast table ++ * @hw: pointer to the HW structure ++ * @mc_addr: Multicast address to hash. ++ * ++ * This hashes an address to determine its location in the multicast ++ * table. Currently no func pointer exists and all implementations ++ * are handled in the generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) ++{ ++ return e1000_hash_mc_addr_generic(hw, mc_addr); ++} ++ ++/** ++ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX ++ * @hw: pointer to the HW structure ++ * ++ * Enables packet filtering on transmit packets if manageability is enabled ++ * and host interface is enabled. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) ++{ ++ return e1000_enable_tx_pkt_filtering_generic(hw); ++} ++ ++/** ++ * e1000_mng_host_if_write - Writes to the manageability host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface buffer ++ * @length: size of the buffer ++ * @offset: location in the buffer to write to ++ * @sum: sum of the data (not checksum) ++ * ++ * This function writes the buffer content at the offset given on the host if. ++ * It also does alignment considerations to do the writes in most efficient ++ * way. Also fills up the sum of the buffer in *buffer parameter. ++ **/ ++s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, ++ u16 offset, u8 *sum) ++{ ++ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); ++} ++ ++/** ++ * e1000_mng_write_cmd_header - Writes manageability command header ++ * @hw: pointer to the HW structure ++ * @hdr: pointer to the host interface command header ++ * ++ * Writes the command header after does the checksum calculation. 
++ **/ ++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr) ++{ ++ return e1000_mng_write_cmd_header_generic(hw, hdr); ++} ++ ++/** ++ * e1000_mng_enable_host_if - Checks host interface is enabled ++ * @hw: pointer to the HW structure ++ * ++ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND ++ * ++ * This function checks whether the HOST IF is enabled for command operation ++ * and also checks whether the previous command is completed. It busy waits ++ * in case of previous command is not completed. ++ **/ ++s32 e1000_mng_enable_host_if(struct e1000_hw *hw) ++{ ++ return e1000_mng_enable_host_if_generic(hw); ++} ++ ++/** ++ * e1000_check_reset_block - Verifies PHY can be reset ++ * @hw: pointer to the HW structure ++ * ++ * Checks if the PHY is in a state that can be reset or if manageability ++ * has it tied up. This is a function pointer entry point called by drivers. ++ **/ ++s32 e1000_check_reset_block(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.check_reset_block) ++ return hw->phy.ops.check_reset_block(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_read_phy_reg - Reads PHY register ++ * @hw: pointer to the HW structure ++ * @offset: the register to read ++ * @data: the buffer to store the 16-bit read. ++ * ++ * Reads the PHY register and returns the value in data. ++ * This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ if (hw->phy.ops.read_reg) ++ return hw->phy.ops.read_reg(hw, offset, data); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_write_phy_reg - Writes PHY register ++ * @hw: pointer to the HW structure ++ * @offset: the register to write ++ * @data: the value to write. ++ * ++ * Writes the PHY register at offset with the value in data. ++ * This is a function pointer entry point called by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ if (hw->phy.ops.write_reg) ++ return hw->phy.ops.write_reg(hw, offset, data); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_release_phy - Generic release PHY ++ * @hw: pointer to the HW structure ++ * ++ * Return if silicon family does not require a semaphore when accessing the ++ * PHY. ++ **/ ++void e1000_release_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.release) ++ hw->phy.ops.release(hw); ++} ++ ++/** ++ * e1000_acquire_phy - Generic acquire PHY ++ * @hw: pointer to the HW structure ++ * ++ * Return success if silicon family does not require a semaphore when ++ * accessing the PHY. ++ **/ ++s32 e1000_acquire_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.acquire) ++ return hw->phy.ops.acquire(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_kmrn_reg - Reads register using Kumeran interface ++ * @hw: pointer to the HW structure ++ * @offset: the register to read ++ * @data: the location to store the 16-bit value read. ++ * ++ * Reads a register out of the Kumeran interface. Currently no func pointer ++ * exists and all implementations are handled in the generic version of ++ * this function. 
++ **/ ++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return e1000_read_kmrn_reg_generic(hw, offset, data); ++} ++ ++/** ++ * e1000_write_kmrn_reg - Writes register using Kumeran interface ++ * @hw: pointer to the HW structure ++ * @offset: the register to write ++ * @data: the value to write. ++ * ++ * Writes a register to the Kumeran interface. Currently no func pointer ++ * exists and all implementations are handled in the generic version of ++ * this function. ++ **/ ++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return e1000_write_kmrn_reg_generic(hw, offset, data); ++} ++ ++/** ++ * e1000_get_cable_length - Retrieves cable length estimation ++ * @hw: pointer to the HW structure ++ * ++ * This function estimates the cable length and stores them in ++ * hw->phy.min_length and hw->phy.max_length. This is a function pointer ++ * entry point called by drivers. ++ **/ ++s32 e1000_get_cable_length(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.get_cable_length) ++ return hw->phy.ops.get_cable_length(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_get_phy_info - Retrieves PHY information from registers ++ * @hw: pointer to the HW structure ++ * ++ * This function gets some information from various PHY registers and ++ * populates hw->phy values with it. This is a function pointer entry ++ * point called by drivers. ++ **/ ++s32 e1000_get_phy_info(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.get_info) ++ return hw->phy.ops.get_info(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_phy_hw_reset - Hard PHY reset ++ * @hw: pointer to the HW structure ++ * ++ * Performs a hard PHY reset. This is a function pointer entry point called ++ * by drivers. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.reset) ++ return hw->phy.ops.reset(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_phy_commit - Soft PHY reset ++ * @hw: pointer to the HW structure ++ * ++ * Performs a soft PHY reset on those that apply. This is a function pointer ++ * entry point called by drivers. ++ **/ ++s32 e1000_phy_commit(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.commit) ++ return hw->phy.ops.commit(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_d0_lplu_state - Sets low power link up state for D0 ++ * @hw: pointer to the HW structure ++ * @active: boolean used to enable/disable lplu ++ * ++ * Success returns 0, Failure returns 1 ++ * ++ * The low power link up (lplu) state is set to the power management level D0 ++ * and SmartSpeed is disabled when active is true, else clear lplu for D0 ++ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU ++ * is used during Dx states where the power conservation is most important. ++ * During driver activity, SmartSpeed should be enabled so performance is ++ * maintained. This is a function pointer entry point called by drivers. 
++ **/ ++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) ++{ ++ if (hw->phy.ops.set_d0_lplu_state) ++ return hw->phy.ops.set_d0_lplu_state(hw, active); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_d3_lplu_state - Sets low power link up state for D3 ++ * @hw: pointer to the HW structure ++ * @active: boolean used to enable/disable lplu ++ * ++ * Success returns 0, Failure returns 1 ++ * ++ * The low power link up (lplu) state is set to the power management level D3 ++ * and SmartSpeed is disabled when active is true, else clear lplu for D3 ++ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU ++ * is used during Dx states where the power conservation is most important. ++ * During driver activity, SmartSpeed should be enabled so performance is ++ * maintained. This is a function pointer entry point called by drivers. ++ **/ ++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) ++{ ++ if (hw->phy.ops.set_d3_lplu_state) ++ return hw->phy.ops.set_d3_lplu_state(hw, active); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_read_mac_addr - Reads MAC address ++ * @hw: pointer to the HW structure ++ * ++ * Reads the MAC address out of the adapter and stores it in the HW structure. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_mac_addr(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.read_mac_addr) ++ return hw->mac.ops.read_mac_addr(hw); ++ ++ return igb_e1000_read_mac_addr_generic(hw); ++} ++ ++/** ++ * e1000_read_pba_string - Read device part number string ++ * @hw: pointer to the HW structure ++ * @pba_num: pointer to device part number ++ * @pba_num_size: size of part number buffer ++ * ++ * Reads the product board assembly (PBA) number from the EEPROM and stores ++ * the value in pba_num. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) ++{ ++ return igb_e1000_read_pba_string_generic(hw, pba_num, pba_num_size); ++} ++ ++/** ++ * e1000_read_pba_length - Read device part number string length ++ * @hw: pointer to the HW structure ++ * @pba_num_size: size of part number buffer ++ * ++ * Reads the product board assembly (PBA) number length from the EEPROM and ++ * stores the value in pba_num. ++ * Currently no func pointer exists and all implementations are handled in the ++ * generic version of this function. ++ **/ ++s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) ++{ ++ return e1000_read_pba_length_generic(hw, pba_num_size); ++} ++ ++/** ++ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum ++ * @hw: pointer to the HW structure ++ * ++ * Validates the NVM checksum is correct. This is a function pointer entry ++ * point called by drivers. ++ **/ ++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) ++{ ++ if (hw->nvm.ops.validate) ++ return hw->nvm.ops.validate(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum ++ * @hw: pointer to the HW structure ++ * ++ * Updates the NVM checksum. Currently no func pointer exists and all ++ * implementations are handled in the generic version of this function. 
++ **/ ++s32 e1000_update_nvm_checksum(struct e1000_hw *hw) ++{ ++ if (hw->nvm.ops.update) ++ return hw->nvm.ops.update(hw); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_reload_nvm - Reloads EEPROM ++ * @hw: pointer to the HW structure ++ * ++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the ++ * extended control register. ++ **/ ++void e1000_reload_nvm(struct e1000_hw *hw) ++{ ++ if (hw->nvm.ops.reload) ++ hw->nvm.ops.reload(hw); ++} ++ ++/** ++ * e1000_read_nvm - Reads NVM (EEPROM) ++ * @hw: pointer to the HW structure ++ * @offset: the word offset to read ++ * @words: number of 16-bit words to read ++ * @data: pointer to the properly sized buffer for the data. ++ * ++ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function ++ * pointer entry point called by drivers. ++ **/ ++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++{ ++ if (hw->nvm.ops.read) ++ return hw->nvm.ops.read(hw, offset, words, data); ++ ++ return -E1000_ERR_CONFIG; ++} ++ ++/** ++ * e1000_write_nvm - Writes to NVM (EEPROM) ++ * @hw: pointer to the HW structure ++ * @offset: the word offset to read ++ * @words: number of 16-bit words to write ++ * @data: pointer to the properly sized buffer for the data. ++ * ++ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function ++ * pointer entry point called by drivers. ++ **/ ++s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++{ ++ if (hw->nvm.ops.write) ++ return hw->nvm.ops.write(hw, offset, words, data); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register ++ * @hw: pointer to the HW structure ++ * @reg: 32bit register offset ++ * @offset: the register to write ++ * @data: the value to write. ++ * ++ * Writes the PHY register at offset with the value in data. ++ * This is a function pointer entry point called by drivers. ++ **/ ++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, ++ u8 data) ++{ ++ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); ++} ++ ++/** ++ * igb_e1000_power_up_phy - Restores link in case of PHY power down ++ * @hw: pointer to the HW structure ++ * ++ * The phy may be powered down to save power, to turn off link when the ++ * driver is unloaded, or wake on lan is not enabled (among others). ++ **/ ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_power_up_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.power_up) ++ hw->phy.ops.power_up(hw); ++ ++ igb_e1000_setup_link(hw); ++} ++ ++/** ++ * e1000_power_down_phy - Power down PHY ++ * @hw: pointer to the HW structure ++ * ++ * The phy may be powered down to save power, to turn off link when the ++ * driver is unloaded, or wake on lan is not enabled (among others). ++ **/ ++void e1000_power_down_phy(struct e1000_hw *hw) ++{ ++ if (hw->phy.ops.power_down) ++ hw->phy.ops.power_down(hw); ++} ++ ++/** ++ * e1000_power_up_fiber_serdes_link - Power up serdes link ++ * @hw: pointer to the HW structure ++ * ++ * Power on the optics and PCS. ++ **/ ++void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.power_up_serdes) ++ hw->mac.ops.power_up_serdes(hw); ++} ++ ++/** ++ * e1000_shutdown_fiber_serdes_link - Remove link during power down ++ * @hw: pointer to the HW structure ++ * ++ * Shutdown the optics and PCS on driver unload. 
++ **/ ++void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.shutdown_serdes) ++ hw->mac.ops.shutdown_serdes(hw); ++} ++ ++/** ++ * e1000_get_thermal_sensor_data - Gathers thermal sensor data ++ * @hw: pointer to hardware structure ++ * ++ * Updates the temperatures in mac.thermal_sensor_data ++ **/ ++s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.get_thermal_sensor_data) ++ return hw->mac.ops.get_thermal_sensor_data(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds ++ * @hw: pointer to hardware structure ++ * ++ * Sets the thermal sensor thresholds according to the NVM map ++ **/ ++s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw) ++{ ++ if (hw->mac.ops.init_thermal_sensor_thresh) ++ return hw->mac.ops.init_thermal_sensor_thresh(hw); ++ ++ return E1000_SUCCESS; ++} ++ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.h b/drivers/net/ethernet/intel/igb/e1000_api.h +--- a/drivers/net/ethernet/intel/igb/e1000_api.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_api.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,152 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _E1000_API_H_ ++#define _E1000_API_H_ ++ ++#include "e1000_hw.h" ++ ++extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); ++extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); ++extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); ++extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); ++extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); ++extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); ++ ++s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); ++s32 igb_e1000_set_mac_type(struct e1000_hw *hw); ++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); ++s32 e1000_init_mac_params(struct e1000_hw *hw); ++s32 e1000_init_nvm_params(struct e1000_hw *hw); ++s32 e1000_init_phy_params(struct e1000_hw *hw); ++s32 e1000_init_mbx_params(struct e1000_hw *hw); ++s32 igb_e1000_get_bus_info(struct e1000_hw *hw); ++void e1000_clear_vfta(struct e1000_hw *hw); ++void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); ++s32 igb_e1000_force_mac_fc(struct e1000_hw *hw); ++s32 igb_e1000_check_for_link(struct e1000_hw *hw); ++s32 igb_e1000_reset_hw(struct e1000_hw *hw); ++s32 igb_e1000_init_hw(struct e1000_hw *hw); ++s32 igb_e1000_setup_link(struct e1000_hw *hw); ++s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); ++s32 e1000_disable_pcie_master(struct e1000_hw *hw); ++void igb_e1000_config_collision_dist(struct e1000_hw *hw); ++int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); ++u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); ++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count); ++s32 igb_e1000_setup_led(struct e1000_hw *hw); ++s32 igb_e1000_cleanup_led(struct e1000_hw *hw); ++s32 e1000_check_reset_block(struct e1000_hw *hw); ++s32 e1000_blink_led(struct e1000_hw *hw); ++s32 igb_e1000_led_on(struct e1000_hw *hw); ++s32 igb_e1000_led_off(struct e1000_hw *hw); ++s32 e1000_id_led_init(struct e1000_hw *hw); ++void igb_e1000_reset_adaptive(struct e1000_hw *hw); ++void igb_e1000_update_adaptive(struct e1000_hw *hw); ++s32 e1000_get_cable_length(struct e1000_hw *hw); ++s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw); ++s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, ++ u8 data); ++s32 e1000_get_phy_info(struct e1000_hw *hw); ++void e1000_release_phy(struct e1000_hw *hw); ++s32 e1000_acquire_phy(struct e1000_hw *hw); ++s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw); ++s32 e1000_phy_commit(struct e1000_hw *hw); ++void igb_e1000_power_up_phy(struct e1000_hw *hw); ++void e1000_power_down_phy(struct e1000_hw *hw); ++s32 igb_e1000_read_mac_addr(struct e1000_hw *hw); ++s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); ++s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); ++void e1000_reload_nvm(struct e1000_hw *hw); ++s32 e1000_update_nvm_checksum(struct e1000_hw *hw); ++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); ++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); ++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); ++s32 
e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); ++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); ++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); ++bool e1000_check_mng_mode(struct e1000_hw *hw); ++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); ++s32 e1000_mng_enable_host_if(struct e1000_hw *hw); ++s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, ++ u16 offset, u8 *sum); ++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr); ++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); ++s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw); ++s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw); ++ ++/* ++ * TBI_ACCEPT macro definition: ++ * ++ * This macro requires: ++ * a = a pointer to struct e1000_hw ++ * status = the 8 bit status field of the Rx descriptor with EOP set ++ * errors = the 8 bit error field of the Rx descriptor with EOP set ++ * length = the sum of all the length fields of the Rx descriptors that ++ * make up the current frame ++ * last_byte = the last byte of the frame DMAed by the hardware ++ * min_frame_size = the minimum frame length we want to accept. ++ * max_frame_size = the maximum frame length we want to accept. ++ * ++ * This macro is a conditional that should be used in the interrupt ++ * handler's Rx processing routine when RxErrors have been detected. ++ * ++ * Typical use: ++ * ... ++ * if (TBI_ACCEPT) { ++ * accept_frame = true; ++ * e1000_tbi_adjust_stats(adapter, MacAddress); ++ * frame_length--; ++ * } else { ++ * accept_frame = false; ++ * } ++ * ... ++ */ ++ ++/* The carrier extension symbol, as received by the NIC. */ ++#define CARRIER_EXTENSION 0x0F ++ ++#define TBI_ACCEPT(a, status, errors, length, last_byte, \ ++ min_frame_size, max_frame_size) \ ++ (e1000_tbi_sbp_enabled_82543(a) && \ ++ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ ++ ((last_byte) == CARRIER_EXTENSION) && \ ++ (((status) & E1000_RXD_STAT_VP) ? \ ++ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ ++ ((length) <= ((max_frame_size) + 1))) : \ ++ (((length) > (min_frame_size)) && \ ++ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) ++ ++#ifndef E1000_MAX ++#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b)) ++#endif ++#ifndef E1000_DIVIDE_ROUND_UP ++#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ ++#endif ++#endif /* _E1000_API_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h +--- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,25 +1,26 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . 
+- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_DEFINES_H_ + #define _E1000_DEFINES_H_ +@@ -30,38 +31,55 @@ + + /* Definitions for power management and wakeup registers */ + /* Wake Up Control */ +-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ ++#define E1000_WUC_APME 0x00000001 /* APM Enable */ ++#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ ++#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ ++#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ ++#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + + /* Wake Up Filter Control */ +-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ ++#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ ++#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ ++#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ ++#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ ++#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ ++#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ ++#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ ++#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ ++ ++/* Wake Up Status */ ++#define E1000_WUS_LNKC E1000_WUFC_LNKC ++#define E1000_WUS_MAG E1000_WUFC_MAG ++#define E1000_WUS_EX E1000_WUFC_EX ++#define E1000_WUS_MC E1000_WUFC_MC ++#define E1000_WUS_BC E1000_WUFC_BC + + /* Extended Device Control */ +-#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +-#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +-#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +-#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +- ++#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ ++#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ ++#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ ++#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable 
Pin 3 data */ ++#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ ++#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ ++#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ ++#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ + /* Physical Func Reset Done Indication */ + #define E1000_CTRL_EXT_PFRSTD 0x00004000 + #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ ++#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ ++#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ ++#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ + #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 ++/* Offset of the link mode field in Ctrl Ext register */ ++#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 + #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 + #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +-#define E1000_CTRL_EXT_EIAME 0x01000000 ++#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 ++#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 ++#define E1000_CTRL_EXT_EIAME 0x01000000 + #define E1000_CTRL_EXT_IRCA 0x00000001 +-/* Interrupt delay cancellation */ +-/* Driver loaded bit for FW */ +-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +-/* Interrupt acknowledge Auto-mask */ +-/* Clear Interrupt timers after IMS clear */ +-/* packet buffer parity error detection enabled */ +-/* descriptor FIFO parity error detection enable */ ++#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ ++#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ + #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ + #define E1000_CTRL_EXT_PHYPDEN 0x00100000 + #define E1000_I2CCMD_REG_ADDR_SHIFT 16 +@@ -74,322 +92,446 @@ + #define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) + #define E1000_MAX_SGMII_PHY_REG_ADDR 255 + #define E1000_I2CCMD_PHY_TIMEOUT 200 +-#define E1000_IVAR_VALID 0x80 +-#define E1000_GPIE_NSICR 0x00000001 +-#define E1000_GPIE_MSIX_MODE 0x00000010 +-#define E1000_GPIE_EIAME 0x40000000 +-#define E1000_GPIE_PBA 0x80000000 ++#define E1000_IVAR_VALID 0x80 ++#define E1000_GPIE_NSICR 0x00000001 ++#define E1000_GPIE_MSIX_MODE 0x00000010 ++#define E1000_GPIE_EIAME 0x40000000 ++#define E1000_GPIE_PBA 0x80000000 + + /* Receive Descriptor bit definitions */ +-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +-#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ +- +-#define E1000_RXDEXT_STATERR_LB 0x00040000 +-#define E1000_RXDEXT_STATERR_CE 0x01000000 +-#define E1000_RXDEXT_STATERR_SE 0x02000000 +-#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +-#define E1000_RXDEXT_STATERR_CXE 0x10000000 +-#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +-#define E1000_RXDEXT_STATERR_IPE 0x40000000 +-#define E1000_RXDEXT_STATERR_RXE 0x80000000 ++#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ ++#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ ++#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ ++#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ ++#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ ++#define 
E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ ++#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ ++#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ ++#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ ++#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ ++#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ ++#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ ++#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ ++#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ ++#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ ++#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ ++#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ ++#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ ++#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ ++ ++#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ ++#define E1000_RXDEXT_STATERR_LB 0x00040000 ++#define E1000_RXDEXT_STATERR_CE 0x01000000 ++#define E1000_RXDEXT_STATERR_SE 0x02000000 ++#define E1000_RXDEXT_STATERR_SEQ 0x04000000 ++#define E1000_RXDEXT_STATERR_CXE 0x10000000 ++#define E1000_RXDEXT_STATERR_TCPE 0x20000000 ++#define E1000_RXDEXT_STATERR_IPE 0x40000000 ++#define E1000_RXDEXT_STATERR_RXE 0x80000000 ++ ++/* mask to determine if packets should be dropped due to frame errors */ ++#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ ++ E1000_RXD_ERR_CE | \ ++ E1000_RXD_ERR_SE | \ ++ E1000_RXD_ERR_SEQ | \ ++ E1000_RXD_ERR_CXE | \ ++ E1000_RXD_ERR_RXE) + + /* Same mask, but for extended and packet split descriptors */ + #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ +- E1000_RXDEXT_STATERR_CE | \ +- E1000_RXDEXT_STATERR_SE | \ +- E1000_RXDEXT_STATERR_SEQ | \ +- E1000_RXDEXT_STATERR_CXE | \ ++ E1000_RXDEXT_STATERR_CE | \ ++ E1000_RXDEXT_STATERR_SE | \ ++ E1000_RXDEXT_STATERR_SEQ | \ ++ E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +-#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +-#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +-#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 ++#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 ++#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 ++#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 ++#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 ++#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 ++#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + ++#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + + /* Management Control */ +-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +-#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +-/* Enable Neighbor Discovery Filtering */ +-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ ++#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ ++#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ ++#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ ++#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ ++#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + /* Enable MAC address filtering */ +-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 ++#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 ++/* Enable MNG packets to host memory */ ++#define E1000_MANC_EN_MNG2HOST 0x00200000 ++ ++#define E1000_MANC2H_PORT_623 0x00000020 
/* Port 0x26f */ ++#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ ++#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ ++#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + + /* Receive Control */ +-#define E1000_RCTL_EN 0x00000002 /* enable */ +-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +-#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ ++#define E1000_RCTL_RST 0x00000001 /* Software reset */ ++#define E1000_RCTL_EN 0x00000002 /* enable */ ++#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ ++#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ ++#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ ++#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ ++#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ ++#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ ++#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ ++#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ ++#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ ++#define E1000_RCTL_RDMTS_HEX 0x00010000 ++#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX ++#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ ++#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ ++#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ ++/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ ++#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ ++#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ ++#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ ++#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ ++/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ ++#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ ++#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ ++#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ ++#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ ++#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ ++#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ ++#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ ++#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ ++#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ ++#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + + /* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & +- * E1000_PSRCTL_BSIZE0_MASK) | +- * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & +- * 
E1000_PSRCTL_BSIZE1_MASK) | +- * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & +- * E1000_PSRCTL_BSIZE2_MASK) | +- * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; +- * E1000_PSRCTL_BSIZE3_MASK)) ++ * E1000_PSRCTL_BSIZE0_MASK) | ++ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & ++ * E1000_PSRCTL_BSIZE1_MASK) | ++ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & ++ * E1000_PSRCTL_BSIZE2_MASK) | ++ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; ++ * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +-#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +-#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +-#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +-#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 +- +-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ ++#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F ++#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 ++#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 ++#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 ++ ++#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ ++#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ ++#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ ++#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + + /* SWFW_SYNC Definitions */ +-#define E1000_SWFW_EEP_SM 0x1 +-#define E1000_SWFW_PHY0_SM 0x2 +-#define E1000_SWFW_PHY1_SM 0x4 +-#define E1000_SWFW_PHY2_SM 0x20 +-#define E1000_SWFW_PHY3_SM 0x40 ++#define E1000_SWFW_EEP_SM 0x01 ++#define E1000_SWFW_PHY0_SM 0x02 ++#define E1000_SWFW_PHY1_SM 0x04 ++#define E1000_SWFW_CSR_SM 0x08 ++#define E1000_SWFW_PHY2_SM 0x20 ++#define E1000_SWFW_PHY3_SM 0x40 ++#define E1000_SWFW_SW_MNG_SM 0x400 + +-/* FACTPS Definitions */ + /* Device Control */ +-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +-/* Defined polarity of Dock/Undock indication in SDP[0] */ +-/* Reset both PHY ports, through PHYRST_N pin */ +-/* enable link status from external LINK_0 and LINK_1 pins */ +-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +-#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +-#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +-#define E1000_CTRL_RST 0x04000000 /* Global reset */ +-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +-/* Initiate an interrupt to manageability engine */ +-#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ +- +-/* Bit definitions for the Management Data IO (MDIO) and Management Data +- * Clock (MDC) pins in the Device Control Register. +- */ ++#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ ++#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ ++#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ ++#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ ++#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ ++#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ ++#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ ++#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ ++#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ ++#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ ++#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ ++#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ ++#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ ++#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ ++#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ ++#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ ++#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ ++#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ ++#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ ++#define E1000_CTRL_RST 0x04000000 /* Global reset */ ++#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ ++#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ ++#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ ++#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ ++#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +-#define E1000_CONNSW_ENRGSRC 0x4 ++#define E1000_CONNSW_ENRGSRC 0x4 + #define E1000_CONNSW_PHYSD 0x400 + #define E1000_CONNSW_PHY_PDN 0x800 + #define E1000_CONNSW_SERDESD 0x200 + #define E1000_CONNSW_AUTOSENSE_CONF 0x2 + #define E1000_CONNSW_AUTOSENSE_EN 0x1 +-#define E1000_PCS_CFG_PCS_EN 8 +-#define E1000_PCS_LCTL_FLV_LINK_UP 1 +-#define E1000_PCS_LCTL_FSV_100 2 +-#define E1000_PCS_LCTL_FSV_1000 4 +-#define E1000_PCS_LCTL_FDV_FULL 8 +-#define E1000_PCS_LCTL_FSD 0x10 +-#define E1000_PCS_LCTL_FORCE_LINK 0x20 +-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +-#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +-#define E1000_PCS_LCTL_AN_RESTART 0x20000 +-#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +-#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 +- +-#define E1000_PCS_LSTS_LINK_OK 1 +-#define E1000_PCS_LSTS_SPEED_100 2 +-#define E1000_PCS_LSTS_SPEED_1000 4 +-#define E1000_PCS_LSTS_DUPLEX_FULL 8 +-#define E1000_PCS_LSTS_SYNK_OK 0x10 ++#define E1000_PCS_CFG_PCS_EN 8 ++#define E1000_PCS_LCTL_FLV_LINK_UP 1 ++#define E1000_PCS_LCTL_FSV_10 0 ++#define E1000_PCS_LCTL_FSV_100 2 ++#define E1000_PCS_LCTL_FSV_1000 4 ++#define E1000_PCS_LCTL_FDV_FULL 8 ++#define E1000_PCS_LCTL_FSD 0x10 ++#define E1000_PCS_LCTL_FORCE_LINK 0x20 ++#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 ++#define E1000_PCS_LCTL_AN_ENABLE 0x10000 ++#define E1000_PCS_LCTL_AN_RESTART 0x20000 ++#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 ++#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 ++ ++#define E1000_PCS_LSTS_LINK_OK 1 ++#define E1000_PCS_LSTS_SPEED_100 2 ++#define E1000_PCS_LSTS_SPEED_1000 4 ++#define E1000_PCS_LSTS_DUPLEX_FULL 8 ++#define E1000_PCS_LSTS_SYNK_OK 0x10 ++#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + + /* Device Status */ +-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +-#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +-#define E1000_STATUS_FUNC_SHIFT 2 +-#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +-/* Change in Dock/Undock state. Clear on write '0'. 
*/ +-/* Status of Master requests. */ +-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +-/* BMC external code execution disabled */ +- ++#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ ++#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ ++#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ ++#define E1000_STATUS_FUNC_SHIFT 2 ++#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ ++#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ ++#define E1000_STATUS_SPEED_MASK 0x000000C0 ++#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ ++#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ ++#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ ++#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ ++#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ ++#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ + #define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ + #define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +-/* Constants used to intrepret the masked PCI-X bus speed. */ + +-#define SPEED_10 10 +-#define SPEED_100 100 +-#define SPEED_1000 1000 +-#define SPEED_2500 2500 +-#define HALF_DUPLEX 1 +-#define FULL_DUPLEX 2 +- +- +-#define ADVERTISE_10_HALF 0x0001 +-#define ADVERTISE_10_FULL 0x0002 +-#define ADVERTISE_100_HALF 0x0004 +-#define ADVERTISE_100_FULL 0x0008 +-#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +-#define ADVERTISE_1000_FULL 0x0020 ++#define SPEED_10 10 ++#define SPEED_100 100 ++#define SPEED_1000 1000 ++#define SPEED_2500 2500 ++#define HALF_DUPLEX 1 ++#define FULL_DUPLEX 2 ++ ++#define ADVERTISE_10_HALF 0x0001 ++#define ADVERTISE_10_FULL 0x0002 ++#define ADVERTISE_100_HALF 0x0004 ++#define ADVERTISE_100_FULL 0x0008 ++#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ ++#define ADVERTISE_1000_FULL 0x0020 + + /* 1000/H is not supported, nor spec-compliant. 
*/ +-#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ +- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ +- ADVERTISE_1000_FULL) +-#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ +- ADVERTISE_100_HALF | ADVERTISE_100_FULL) +-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +-#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ +- ADVERTISE_1000_FULL) +-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) ++#define E1000_ALL_SPEED_DUPLEX ( \ ++ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ++ ADVERTISE_100_FULL | ADVERTISE_1000_FULL) ++#define E1000_ALL_NOT_GIG ( \ ++ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ++ ADVERTISE_100_FULL) ++#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) ++#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) ++#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +-#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX ++#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + + /* LED Control */ +-#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +-#define E1000_LEDCTL_LED0_BLINK 0x00000080 + #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F ++#define E1000_LEDCTL_LED0_MODE_SHIFT 0 + #define E1000_LEDCTL_LED0_IVRT 0x00000040 ++#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +-#define E1000_LEDCTL_MODE_LED_ON 0xE +-#define E1000_LEDCTL_MODE_LED_OFF 0xF ++#define E1000_LEDCTL_MODE_LED_ON 0xE ++#define E1000_LEDCTL_MODE_LED_OFF 0xF + + /* Transmit Descriptor bit definitions */ +-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +-/* Extended desc bits for Linksec and timesync */ ++#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ ++#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ ++#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ ++#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ ++#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ ++#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ ++#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ ++#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ ++#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ ++#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ ++#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ ++#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ ++#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ ++#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ ++#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ ++#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ ++#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ ++#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ ++#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ ++#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ ++#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + + /* Transmit Control */ +-#define 
E1000_TCTL_EN 0x00000002 /* enable tx */ +-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +- +-/* DMA Coalescing register fields */ +-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +-#define E1000_DMACR_DMACTHR_SHIFT 16 +-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +-#define E1000_DMACR_DMAC_LX_SHIFT 28 +-#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +-/* DMA Coalescing BMC-to-OS Watchdog Enable */ +-#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 +- +-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ +- +-#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ +- +-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ ++#define E1000_TCTL_EN 0x00000002 /* enable Tx */ ++#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ ++#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ ++#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ ++#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ ++#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ +- +-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +-#define E1000_FCRTC_RTH_COAL_SHIFT 4 +-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ +- +-/* Timestamp in Rx buffer */ +-#define E1000_RXPBS_CFG_TS_EN 0x80000000 +- +-#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +-#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ ++/* Transmit Arbitration Count */ ++#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + + /* SerDes Control */ +-#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 ++#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 ++#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + + /* Receive Checksum Control */ +-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +-#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ ++#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ ++#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ ++#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ ++#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ ++#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + + /* Header split receive */ +-#define E1000_RFCTL_LEF 0x00040000 ++#define E1000_RFCTL_NFSW_DIS 0x00000040 ++#define E1000_RFCTL_NFSR_DIS 0x00000080 ++#define E1000_RFCTL_ACK_DIS 0x00001000 ++#define E1000_RFCTL_EXTEN 0x00008000 ++#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 ++#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 ++#define E1000_RFCTL_LEF 0x00040000 + + /* Collision related configuration parameters */ +-#define E1000_COLLISION_THRESHOLD 15 +-#define E1000_CT_SHIFT 4 +-#define E1000_COLLISION_DISTANCE 63 +-#define E1000_COLD_SHIFT 12 ++#define E1000_COLLISION_THRESHOLD 15 ++#define E1000_CT_SHIFT 4 ++#define 
E1000_COLLISION_DISTANCE 63 ++#define E1000_COLD_SHIFT 12 ++ ++/* Default values for the transmit IPG register */ ++#define DEFAULT_82543_TIPG_IPGT_FIBER 9 ++#define DEFAULT_82543_TIPG_IPGT_COPPER 8 ++ ++#define E1000_TIPG_IPGT_MASK 0x000003FF ++ ++#define DEFAULT_82543_TIPG_IPGR1 8 ++#define E1000_TIPG_IPGR1_SHIFT 10 ++ ++#define DEFAULT_82543_TIPG_IPGR2 6 ++#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 ++#define E1000_TIPG_IPGR2_SHIFT 20 + + /* Ethertype field values */ +-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ ++#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ ++ ++#define ETHERNET_FCS_SIZE 4 ++#define MAX_JUMBO_FRAME_SIZE 0x3F00 ++/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ ++#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 ++#define E1000_TX_PTR_GAP 0x1F ++ ++/* Extended Configuration Control and Size */ ++#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 ++#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 ++#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 ++#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 ++#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 ++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 ++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 ++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 ++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 ++ ++#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 ++#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 ++#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 ++#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +-#define MAX_JUMBO_FRAME_SIZE 0x3F00 ++#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + + /* PBA constants */ +-#define E1000_PBA_34K 0x0022 +-#define E1000_PBA_64K 0x0040 /* 64KB */ ++#define E1000_PBA_8K 0x0008 /* 8KB */ ++#define E1000_PBA_10K 0x000A /* 10KB */ ++#define E1000_PBA_12K 0x000C /* 12KB */ ++#define E1000_PBA_14K 0x000E /* 14KB */ ++#define E1000_PBA_16K 0x0010 /* 16KB */ ++#define E1000_PBA_18K 0x0012 ++#define E1000_PBA_20K 0x0014 ++#define E1000_PBA_22K 0x0016 ++#define E1000_PBA_24K 0x0018 ++#define E1000_PBA_26K 0x001A ++#define E1000_PBA_30K 0x001E ++#define E1000_PBA_32K 0x0020 ++#define E1000_PBA_34K 0x0022 ++#define E1000_PBA_35K 0x0023 ++#define E1000_PBA_38K 0x0026 ++#define E1000_PBA_40K 0x0028 ++#define E1000_PBA_48K 0x0030 /* 48KB */ ++#define E1000_PBA_64K 0x0040 /* 64KB */ ++ ++#define E1000_PBA_RXA_MASK 0xFFFF ++ ++#define E1000_PBS_16K E1000_PBA_16K ++ ++/* Uncorrectable/correctable ECC Error counts and enable bits */ ++#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF ++#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 ++#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 ++#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 ++ ++#define IFS_MAX 80 ++#define IFS_MIN 40 ++#define IFS_RATIO 4 ++#define IFS_STEP 10 ++#define MIN_NUM_XMITS 1000 + + /* SW Semaphore Register */ +-#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +-#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ ++#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ ++#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ ++#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ ++ ++#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + + /* Interrupt Cause Read */ +-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +-#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +-#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +-#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ ++#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ ++#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ ++#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ ++#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ ++#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ ++#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ ++#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ ++#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ ++#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ ++#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ ++#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ ++#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ ++#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ ++#define E1000_ICR_TXD_LOW 0x00008000 ++#define E1000_ICR_MNG 0x00040000 /* Manageability event */ ++#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ ++#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ ++#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ + /* If this bit asserted, the driver should claim the interrupt */ +-#define E1000_ICR_INT_ASSERTED 0x80000000 +-/* LAN connected device generates an interrupt */ +-#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ ++#define E1000_ICR_INT_ASSERTED 0x80000000 ++#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ ++#define E1000_ICR_FER 0x00400000 /* Fatal Error */ ++ ++#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ ++#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + + /* Extended Interrupt Cause Read */ +-#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +-#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +-#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +-#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +-#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +-#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +-#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +-#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +-#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ ++#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ ++#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ ++#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ ++#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ ++#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ ++#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ ++#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ ++#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ ++#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ ++#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + /* TCP Timer */ ++#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ ++#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ ++#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ ++#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + + /* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: +@@ -404,194 +546,207 @@ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ +- E1000_IMS_LSC | \ +- E1000_IMS_DOUTSYNC) ++ E1000_IMS_LSC) + + /* Interrupt Mask Set */ +-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +-#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +-#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +-#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +-#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ ++#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ ++#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ ++#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ ++#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ ++#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ ++#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ ++#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ ++#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ ++#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW ++#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ ++#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ ++#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ ++#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ ++#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + ++#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ ++#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ + /* Extended Interrupt Mask Set */ +-#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ ++#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ ++#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ ++#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ ++#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ ++#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ ++#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ ++#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ ++#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ ++#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ ++#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + + /* Interrupt Cause Set */ +-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +-#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ ++#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ ++#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ ++#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ + + /* Extended Interrupt Cause Set */ +-/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +-#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ ++#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ ++#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ ++#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ ++#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ ++#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ ++#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ ++#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ ++#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ ++#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ ++#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + ++#define E1000_EITR_ITR_INT_MASK 0x0000FFFF ++/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ ++#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ ++#define E1000_EITR_INTERVAL 0x00007FFC + + /* Transmit Descriptor Control */ ++#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ ++#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ ++#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ ++#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ ++#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ ++#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ + /* Enable the counting of descriptors still to be processed. */ ++#define E1000_TXDCTL_COUNT_DESC 0x00400000 + + /* Flow Control Constants */ +-#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +-#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +-#define FLOW_CONTROL_TYPE 0x8808 +- +-/* Transmit Config Word */ +-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ ++#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 ++#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 ++#define FLOW_CONTROL_TYPE 0x8808 + + /* 802.1q VLAN Packet Size */ +-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +-#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ ++#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ ++#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +-/* Receive Address */ +-/* Number of high/low register pairs in the RAR. The RAR (Receive Address ++/* Receive Address ++ * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +-#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +-#define E1000_RAL_MAC_ADDR_LEN 4 +-#define E1000_RAH_MAC_ADDR_LEN 2 +-#define E1000_RAH_POOL_MASK 0x03FC0000 +-#define E1000_RAH_POOL_1 0x00040000 ++#define E1000_RAR_ENTRIES 15 ++#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ ++#define E1000_RAL_MAC_ADDR_LEN 4 ++#define E1000_RAH_MAC_ADDR_LEN 2 ++#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 ++#define E1000_RAH_POOL_1 0x00040000 + + /* Error Codes */ +-#define E1000_ERR_NVM 1 +-#define E1000_ERR_PHY 2 +-#define E1000_ERR_CONFIG 3 +-#define E1000_ERR_PARAM 4 +-#define E1000_ERR_MAC_INIT 5 +-#define E1000_ERR_RESET 9 +-#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +-#define E1000_BLK_PHY_RESET 12 +-#define E1000_ERR_SWFW_SYNC 13 +-#define E1000_NOT_IMPLEMENTED 14 +-#define E1000_ERR_MBX 15 +-#define E1000_ERR_INVALID_ARGUMENT 16 +-#define E1000_ERR_NO_SPACE 17 +-#define E1000_ERR_NVM_PBA_SECTION 18 +-#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +-#define E1000_ERR_I2C 20 ++#define E1000_SUCCESS 0 ++#define E1000_ERR_NVM 1 ++#define E1000_ERR_PHY 2 ++#define E1000_ERR_CONFIG 3 ++#define E1000_ERR_PARAM 4 ++#define E1000_ERR_MAC_INIT 5 ++#define E1000_ERR_PHY_TYPE 6 ++#define E1000_ERR_RESET 9 ++#define E1000_ERR_MASTER_REQUESTS_PENDING 10 ++#define E1000_ERR_HOST_INTERFACE_COMMAND 11 ++#define E1000_BLK_PHY_RESET 12 ++#define E1000_ERR_SWFW_SYNC 13 ++#define E1000_NOT_IMPLEMENTED 14 ++#define E1000_ERR_MBX 15 ++#define E1000_ERR_INVALID_ARGUMENT 16 ++#define E1000_ERR_NO_SPACE 17 ++#define E1000_ERR_NVM_PBA_SECTION 18 ++#define E1000_ERR_I2C 19 ++#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 + + /* Loop limit on how long we wait for auto-negotiation to complete */ +-#define COPPER_LINK_UP_LIMIT 10 +-#define PHY_AUTO_NEG_LIMIT 45 +-#define PHY_FORCE_LIMIT 20 ++#define FIBER_LINK_UP_LIMIT 50 ++#define COPPER_LINK_UP_LIMIT 10 ++#define PHY_AUTO_NEG_LIMIT 45 ++#define PHY_FORCE_LIMIT 20 + /* Number of 100 microseconds we wait for PCI Express master disable */ +-#define MASTER_DISABLE_TIMEOUT 800 ++#define MASTER_DISABLE_TIMEOUT 800 + /* Number of milliseconds we wait for PHY configuration done after MAC reset */ +-#define PHY_CFG_TIMEOUT 100 ++#define PHY_CFG_TIMEOUT 100 + /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ ++#define MDIO_OWNERSHIP_TIMEOUT 10 + /* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +-#define AUTO_READ_DONE_TIMEOUT 10 ++#define AUTO_READ_DONE_TIMEOUT 10 + + /* Flow Control */ +-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ ++#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ ++#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ ++#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +-#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +-#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ ++/* Transmit Configuration Word */ ++#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ ++#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ ++#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ ++#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ ++#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +-#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +-#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +-#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +-#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +-#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +-#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +-#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +-#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ +- +-#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +-#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +-#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +-#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 +- +-#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +-#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +-#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +-#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +-#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +-#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 +- +-#define E1000_TIMINCA_16NS_SHIFT 24 +- +-/* Time Sync Interrupt Cause/Mask Register Bits */ +- +-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ +- +-#define TSYNC_INTERRUPTS TSINTR_TXTS +-#define E1000_TSICR_TXTS TSINTR_TXTS +- +-/* TSAUXC Configuration Bits */ +-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ +-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. 
*/ +-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ +-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ +-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ +-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ +- +-/* SDP Configuration Bits */ +-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ +-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. 
*/ +- +-#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +-#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +-#define E1000_MDICNFG_PHY_MASK 0x03E00000 +-#define E1000_MDICNFG_PHY_SHIFT 21 ++/* Receive Configuration Word */ ++#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ ++#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ ++#define E1000_RXCW_C 0x20000000 /* Receive config */ ++#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ ++ ++#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ ++#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ ++ ++#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ ++#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ ++#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 ++#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 ++#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 ++#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 ++#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A ++#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ ++#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ ++ ++#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF ++#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 ++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 ++#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 ++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 ++#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 ++ ++#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 ++#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 ++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 ++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 ++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 ++#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 ++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 ++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 ++#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 ++#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 ++#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 ++ ++#define E1000_TIMINCA_16NS_SHIFT 24 ++#define E1000_TIMINCA_INCPERIOD_SHIFT 24 ++#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF ++ ++#define E1000_TSICR_TXTS 0x00000002 ++#define E1000_TSIM_TXTS 0x00000002 ++/* TUPLE Filtering Configuration */ ++#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ ++#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ ++#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ ++/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ ++#define E1000_TTQF_PROTOCOL_TCP 0x0 ++/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ ++#define E1000_TTQF_PROTOCOL_UDP 0x1 ++/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ ++#define E1000_TTQF_PROTOCOL_SCTP 0x2 ++#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ ++#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ ++#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ ++#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ ++#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ ++#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ ++#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ ++#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ ++ ++#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ ++#define E1000_MDICNFG_COM_MDIO 
0x40000000 /* MDI shared w/ lan 0 */ ++#define E1000_MDICNFG_PHY_MASK 0x03E00000 ++#define E1000_MDICNFG_PHY_SHIFT 21 + + #define E1000_MEDIA_PORT_COPPER 1 + #define E1000_MEDIA_PORT_OTHER 2 +@@ -604,95 +759,209 @@ + #define E1000_M88E1112_PAGE_ADDR 0x16 + #define E1000_M88E1112_STATUS 0x01 + ++#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ ++#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ ++#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ ++#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ ++#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ ++ ++/* I350 EEE defines */ ++#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ ++#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ ++#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ ++#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ ++#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ ++/* EEE status */ ++#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ ++#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ ++#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ ++#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ ++#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ ++#define E1000_M88E1543_EEE_CTRL_1 0x0 ++#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ ++#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ ++#define E1000_EEE_ADV_DEV_I354 7 ++#define E1000_EEE_ADV_ADDR_I354 60 ++#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ ++#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ ++#define E1000_PCS_STATUS_DEV_I354 3 ++#define E1000_PCS_STATUS_ADDR_I354 1 ++#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 ++#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 ++#define E1000_M88E1512_CFG_REG_1 0x0010 ++#define E1000_M88E1512_CFG_REG_2 0x0011 ++#define E1000_M88E1512_CFG_REG_3 0x0007 ++#define E1000_M88E1512_MODE 0x0014 ++#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ ++#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ ++#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ + /* PCI Express Control */ +-#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +-#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +-#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +-#define E1000_GCR_CAP_VER2 0x00040000 +- +-/* mPHY Address Control and Data Registers */ +-#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +-#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +-#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ +- +-/* mPHY PCS CLK Register */ +-#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +-/* mPHY Near End Digital Loopback Override Bit */ +-#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 +- +-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +-#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 ++#define E1000_GCR_RXD_NO_SNOOP 0x00000001 ++#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 ++#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 ++#define E1000_GCR_TXD_NO_SNOOP 0x00000008 ++#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 ++#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 ++#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 ++#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 ++#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 ++#define E1000_GCR_CAP_VER2 0x00040000 ++ ++#define 
PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ ++ E1000_GCR_RXDSCW_NO_SNOOP | \ ++ E1000_GCR_RXDSCR_NO_SNOOP | \ ++ E1000_GCR_TXD_NO_SNOOP | \ ++ E1000_GCR_TXDSCW_NO_SNOOP | \ ++ E1000_GCR_TXDSCR_NO_SNOOP) ++ ++#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ ++ ++/* mPHY address control and data registers */ ++#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ ++#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 ++#define E1000_MPHY_DATA 0x0E10 /* Data Register */ ++ ++/* AFE CSR Offset for PCS CLK */ ++#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 ++/* Override for near end digital loopback. */ ++#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + + /* PHY Control Register */ +-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +-#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +-#define MII_CR_SPEED_1000 0x0040 +-#define MII_CR_SPEED_100 0x2000 +-#define MII_CR_SPEED_10 0x0000 ++#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ ++#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ ++#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ ++#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ ++#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ ++#define MII_CR_POWER_DOWN 0x0800 /* Power down */ ++#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ ++#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ ++#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ ++#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ ++#define MII_CR_SPEED_1000 0x0040 ++#define MII_CR_SPEED_100 0x2000 ++#define MII_CR_SPEED_10 0x0000 + + /* PHY Status Register */ +-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ ++#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ ++#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ ++#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ ++#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ ++#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ ++#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ ++#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ ++#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ ++#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ ++#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ ++#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ ++#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ ++#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ ++#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ ++#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + + /* Autoneg Advertisement Register */ +-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ ++#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ ++#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ ++#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ ++#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ ++#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ ++#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ ++#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ ++#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ ++#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ ++#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + + /* Link Partner Ability Register (Base Page) */ +-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ ++#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ ++#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ ++#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ ++#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ ++#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ ++#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ ++#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ ++#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ ++#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ ++#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ ++#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + + /* Autoneg Expansion Register */ ++#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ ++#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ ++#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ ++#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ ++#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ + + /* 1000BASE-T Control Register */ +-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ +- /* 0=Configure PHY as Slave */ +-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ +- /* 0=Automatic Master/Slave config */ ++#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ ++#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ ++#define CR_1000T_FD_CAPS 0x0200 /* 
Advertise 1000T FD capability */ ++/* 1=Repeater/switch device port 0=DTE device */ ++#define CR_1000T_REPEATER_DTE 0x0400 ++/* 1=Configure PHY as Master 0=Configure PHY as Slave */ ++#define CR_1000T_MS_VALUE 0x0800 ++/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ ++#define CR_1000T_MS_ENABLE 0x1000 ++#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ ++#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ ++#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ ++#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ ++#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + + /* 1000BASE-T Status Register */ +-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ ++#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ ++#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ ++#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ ++#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ ++#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ ++#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ ++#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ ++#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + ++#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + + /* PHY 1000 MII Register/Bit Definitions */ + /* PHY Registers defined by IEEE */ +-#define PHY_CONTROL 0x00 /* Control Register */ +-#define PHY_STATUS 0x01 /* Status Register */ +-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ ++#define PHY_CONTROL 0x00 /* Control Register */ ++#define PHY_STATUS 0x01 /* Status Register */ ++#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ ++#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ ++#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ ++#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ ++#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ ++#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ ++#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ ++#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ ++#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ ++#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ ++ ++#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + + /* NVM Control */ +-#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +-#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +-#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +-#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +-#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +-#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +-#define E1000_EECD_PRES 0x00000100 /* NVM Present */ ++#define E1000_EECD_SK 0x00000001 /* NVM Clock */ ++#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ ++#define E1000_EECD_DI 0x00000004 /* NVM Data In */ ++#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ ++#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ ++#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ ++#define E1000_EECD_PRES 0x00000100 /* NVM Present */ ++#define E1000_EECD_SIZE 
0x00000200 /* NVM Size (0=64 word 1=256 word) */ ++#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ ++#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ ++#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ ++#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ + /* NVM Addressing bits based on type 0=small, 1=large */ +-#define E1000_EECD_ADDR_BITS 0x00000400 +-#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +-#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +-#define E1000_EECD_SIZE_EX_SHIFT 11 ++#define E1000_EECD_ADDR_BITS 0x00000400 ++#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ ++#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ ++#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ ++#define E1000_EECD_SIZE_EX_SHIFT 11 ++#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ ++#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ ++#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ ++#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ ++#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ + #define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ ++#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ + #define E1000_FLUDONE_ATTEMPTS 20000 + #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + #define E1000_I210_FIFO_SEL_RX 0x00 +@@ -700,53 +969,32 @@ + #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) + #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 + #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 ++ + #define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ + /* Secure FLASH mode requires removing MSb */ + #define E1000_I210_FW_PTR_MASK 0x7FFF + /* Firmware code revision field word offset*/ + #define E1000_I210_FW_VER_OFFSET 328 +-#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +-#define E1000_FLUDONE_ATTEMPTS 20000 +-#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +-#define E1000_I210_FIFO_SEL_RX 0x00 +-#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +-#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +-#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +-#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +- + +-/* Offset to data in NVM read/write registers */ +-#define E1000_NVM_RW_REG_DATA 16 +-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +-#define E1000_NVM_RW_REG_START 1 /* Start operation */ +-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ ++#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ ++#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ ++#define E1000_NVM_RW_REG_START 1 /* Start operation */ ++#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ ++#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ ++#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ ++#define E1000_FLASH_UPDATES 2000 + + /* NVM Word Offsets */ +-#define NVM_COMPAT 0x0003 +-#define 
NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +-#define NVM_VERSION 0x0005 +-#define NVM_INIT_CONTROL2_REG 0x000F +-#define NVM_INIT_CONTROL3_PORT_B 0x0014 +-#define NVM_INIT_CONTROL3_PORT_A 0x0024 +-#define NVM_ALT_MAC_ADDR_PTR 0x0037 +-#define NVM_CHECKSUM_REG 0x003F +-#define NVM_COMPATIBILITY_REG_3 0x0003 +-#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +-#define NVM_MAC_ADDR 0x0000 +-#define NVM_SUB_DEV_ID 0x000B +-#define NVM_SUB_VEN_ID 0x000C +-#define NVM_DEV_ID 0x000D +-#define NVM_VEN_ID 0x000E +-#define NVM_INIT_CTRL_2 0x000F +-#define NVM_INIT_CTRL_4 0x0013 +-#define NVM_LED_1_CFG 0x001C +-#define NVM_LED_0_2_CFG 0x001F +-#define NVM_ETRACK_WORD 0x0042 +-#define NVM_ETRACK_HIWORD 0x0043 +-#define NVM_COMB_VER_OFF 0x0083 +-#define NVM_COMB_VER_PTR 0x003d ++#define NVM_COMPAT 0x0003 ++#define NVM_ID_LED_SETTINGS 0x0004 ++#define NVM_VERSION 0x0005 ++#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 ++#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 ++#define NVM_FUTURE_INIT_WORD1 0x0019 ++#define NVM_ETRACK_WORD 0x0042 ++#define NVM_ETRACK_HIWORD 0x0043 ++#define NVM_COMB_VER_OFF 0x0083 ++#define NVM_COMB_VER_PTR 0x003d + + /* NVM version defines */ + #define NVM_MAJOR_MASK 0xF000 +@@ -763,6 +1011,31 @@ + #define NVM_HEX_CONV 16 + #define NVM_HEX_TENS 10 + ++/* FW version defines */ ++/* Offset of "Loader patch ptr" in Firmware Header */ ++#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 ++/* Patch generation hour & minutes */ ++#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 ++/* Patch generation month & day */ ++#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 ++/* Patch generation year */ ++#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 ++/* Patch major & minor numbers */ ++#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 ++ ++#define NVM_MAC_ADDR 0x0000 ++#define NVM_SUB_DEV_ID 0x000B ++#define NVM_SUB_VEN_ID 0x000C ++#define NVM_DEV_ID 0x000D ++#define NVM_VEN_ID 0x000E ++#define NVM_INIT_CTRL_2 0x000F ++#define NVM_INIT_CTRL_4 0x0013 ++#define NVM_LED_1_CFG 0x001C ++#define NVM_LED_0_2_CFG 0x001F ++ ++#define NVM_COMPAT_VALID_CSUM 0x0001 ++#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 ++ + #define NVM_ETS_CFG 0x003E + #define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 + #define NVM_ETS_LTHRES_DELTA_SHIFT 6 +@@ -775,236 +1048,292 @@ + #define NVM_ETS_DATA_INDEX_MASK 0x0300 + #define NVM_ETS_DATA_INDEX_SHIFT 8 + #define NVM_ETS_DATA_HTHRESH_MASK 0x00FF ++#define NVM_INIT_CONTROL2_REG 0x000F ++#define NVM_INIT_CONTROL3_PORT_B 0x0014 ++#define NVM_INIT_3GIO_3 0x001A ++#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 ++#define NVM_INIT_CONTROL3_PORT_A 0x0024 ++#define NVM_CFG 0x0012 ++#define NVM_ALT_MAC_ADDR_PTR 0x0037 ++#define NVM_CHECKSUM_REG 0x003F ++#define NVM_COMPATIBILITY_REG_3 0x0003 ++#define NVM_COMPATIBILITY_BIT_MASK 0x8000 ++ ++#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ ++#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ ++#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ ++#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +-#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +-#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +-#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +-#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ +- +-#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) ++#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? 
(0x40 + (0x40 * (a))) : 0) + + /* Mask bits for fields in Word 0x24 of the NVM */ +-#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +-#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ ++#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ ++#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ ++/* Offset of Link Mode bits for 82575/82576 */ ++#define NVM_WORD24_LNK_MODE_OFFSET 8 ++/* Offset of Link Mode bits for 82580 up */ ++#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + + /* Mask bits for fields in Word 0x0f of the NVM */ +-#define NVM_WORD0F_PAUSE_MASK 0x3000 +-#define NVM_WORD0F_ASM_DIR 0x2000 ++#define NVM_WORD0F_PAUSE_MASK 0x3000 ++#define NVM_WORD0F_PAUSE 0x1000 ++#define NVM_WORD0F_ASM_DIR 0x2000 + + /* Mask bits for fields in Word 0x1a of the NVM */ ++#define NVM_WORD1A_ASPM_MASK 0x000C + +-/* length of string needed to store part num */ +-#define E1000_PBANUM_LENGTH 11 ++/* Mask bits for fields in Word 0x03 of the EEPROM */ ++#define NVM_COMPAT_LOM 0x0800 ++ ++/* length of string needed to store PBA number */ ++#define E1000_PBANUM_LENGTH 11 + + /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +-#define NVM_SUM 0xBABA ++#define NVM_SUM 0xBABA + +-#define NVM_PBA_OFFSET_0 8 +-#define NVM_PBA_OFFSET_1 9 ++/* PBA (printed board assembly) number words */ ++#define NVM_PBA_OFFSET_0 8 ++#define NVM_PBA_OFFSET_1 9 ++#define NVM_PBA_PTR_GUARD 0xFAFA + #define NVM_RESERVED_WORD 0xFFFF +-#define NVM_PBA_PTR_GUARD 0xFAFA +-#define NVM_WORD_SIZE_BASE_SHIFT 6 +- +-/* NVM Commands - Microwire */ ++#define NVM_WORD_SIZE_BASE_SHIFT 6 + + /* NVM Commands - SPI */ +-#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +-#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +-#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +-#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +-#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +-#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ ++#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ ++#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ ++#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ ++#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ ++#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ ++#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + + /* SPI NVM Status Register */ +-#define NVM_STATUS_RDY_SPI 0x01 ++#define NVM_STATUS_RDY_SPI 0x01 + + /* Word definitions for ID LED Settings */ +-#define ID_LED_RESERVED_0000 0x0000 +-#define ID_LED_RESERVED_FFFF 0xFFFF +-#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ +- (ID_LED_OFF1_OFF2 << 8) | \ +- (ID_LED_DEF1_DEF2 << 4) | \ +- (ID_LED_DEF1_DEF2)) +-#define ID_LED_DEF1_DEF2 0x1 +-#define ID_LED_DEF1_ON2 0x2 +-#define ID_LED_DEF1_OFF2 0x3 +-#define ID_LED_ON1_DEF2 0x4 +-#define ID_LED_ON1_ON2 0x5 +-#define ID_LED_ON1_OFF2 0x6 +-#define ID_LED_OFF1_DEF2 0x7 +-#define ID_LED_OFF1_ON2 0x8 +-#define ID_LED_OFF1_OFF2 0x9 +- +-#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +-#define IGP_ACTIVITY_LED_ENABLE 0x0300 +-#define IGP_LED3_MODE 0x07000000 ++#define ID_LED_RESERVED_0000 0x0000 ++#define ID_LED_RESERVED_FFFF 0xFFFF ++#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ ++ (ID_LED_OFF1_OFF2 << 8) | \ ++ (ID_LED_DEF1_DEF2 << 4) | \ ++ (ID_LED_DEF1_DEF2)) ++#define ID_LED_DEF1_DEF2 0x1 ++#define ID_LED_DEF1_ON2 0x2 ++#define ID_LED_DEF1_OFF2 0x3 ++#define ID_LED_ON1_DEF2 0x4 ++#define 
ID_LED_ON1_ON2 0x5 ++#define ID_LED_ON1_OFF2 0x6 ++#define ID_LED_OFF1_DEF2 0x7 ++#define ID_LED_OFF1_ON2 0x8 ++#define ID_LED_OFF1_OFF2 0x9 ++ ++#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF ++#define IGP_ACTIVITY_LED_ENABLE 0x0300 ++#define IGP_LED3_MODE 0x07000000 + + /* PCI/PCI-X/PCI-EX Config space */ +-#define PCIE_DEVICE_CONTROL2 0x28 +-#define PCIE_DEVICE_CONTROL2_16ms 0x0005 ++#define PCIX_COMMAND_REGISTER 0xE6 ++#define PCIX_STATUS_REGISTER_LO 0xE8 ++#define PCIX_STATUS_REGISTER_HI 0xEA ++#define PCI_HEADER_TYPE_REGISTER 0x0E ++#define PCIE_LINK_STATUS 0x12 ++#define PCIE_DEVICE_CONTROL2 0x28 ++ ++#define PCIX_COMMAND_MMRBC_MASK 0x000C ++#define PCIX_COMMAND_MMRBC_SHIFT 0x2 ++#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 ++#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 ++#define PCIX_STATUS_HI_MMRBC_4K 0x3 ++#define PCIX_STATUS_HI_MMRBC_2K 0x2 ++#define PCIX_STATUS_LO_FUNC_MASK 0x7 ++#define PCI_HEADER_TYPE_MULTIFUNC 0x80 ++#define PCIE_LINK_WIDTH_MASK 0x3F0 ++#define PCIE_LINK_WIDTH_SHIFT 4 ++#define PCIE_LINK_SPEED_MASK 0x0F ++#define PCIE_LINK_SPEED_2500 0x01 ++#define PCIE_LINK_SPEED_5000 0x02 ++#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +-#define PHY_REVISION_MASK 0xFFFFFFF0 +-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +-#define MAX_PHY_MULTI_PAGE_REG 0xF ++#ifndef ETH_ADDR_LEN ++#define ETH_ADDR_LEN 6 ++#endif ++ ++#define PHY_REVISION_MASK 0xFFFFFFF0 ++#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ ++#define MAX_PHY_MULTI_PAGE_REG 0xF + +-/* Bit definitions for valid PHY IDs. */ +-/* I = Integrated ++/* Bit definitions for valid PHY IDs. ++ * I = Integrated + * E = External + */ +-#define M88E1111_I_PHY_ID 0x01410CC0 +-#define M88E1112_E_PHY_ID 0x01410C90 +-#define I347AT4_E_PHY_ID 0x01410DC0 +-#define IGP03E1000_E_PHY_ID 0x02A80390 +-#define I82580_I_PHY_ID 0x015403A0 +-#define I350_I_PHY_ID 0x015403B0 +-#define M88_VENDOR 0x0141 +-#define I210_I_PHY_ID 0x01410C00 +-#define M88E1543_E_PHY_ID 0x01410EA0 ++#define M88E1000_E_PHY_ID 0x01410C50 ++#define M88E1000_I_PHY_ID 0x01410C30 ++#define M88E1011_I_PHY_ID 0x01410C20 ++#define IGP01E1000_I_PHY_ID 0x02A80380 ++#define M88E1111_I_PHY_ID 0x01410CC0 ++#define M88E1543_E_PHY_ID 0x01410EA0 ++#define M88E1512_E_PHY_ID 0x01410DD0 ++#define M88E1112_E_PHY_ID 0x01410C90 ++#define I347AT4_E_PHY_ID 0x01410DC0 ++#define M88E1340M_E_PHY_ID 0x01410DF0 ++#define GG82563_E_PHY_ID 0x01410CA0 ++#define IGP03E1000_E_PHY_ID 0x02A80390 ++#define IFE_E_PHY_ID 0x02A80330 ++#define IFE_PLUS_E_PHY_ID 0x02A80320 ++#define IFE_C_E_PHY_ID 0x02A80310 ++#define I82580_I_PHY_ID 0x015403A0 ++#define I350_I_PHY_ID 0x015403B0 ++#define I210_I_PHY_ID 0x01410C00 ++#define IGP04E1000_E_PHY_ID 0x02A80391 ++#define M88_VENDOR 0x0141 + + /* M88E1000 Specific Registers */ +-#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +-#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ ++#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ ++#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ ++#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ ++#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +-#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +-#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ ++#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ ++#define M88E1000_PHY_GEN_CONTROL 0x1E /* 
meaning depends on reg 29 */ + + /* M88E1000 PHY Specific Control Register */ +-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +-/* 1=CLK125 low, 0=CLK125 toggling */ +-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ +- /* Manual MDI configuration */ +-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ ++#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ ++/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ ++#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 ++#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ + /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +-#define M88E1000_PSCR_AUTO_X_1000T 0x0040 ++#define M88E1000_PSCR_AUTO_X_1000T 0x0040 + /* Auto crossover enabled all speeds */ +-#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold +- * 0=Normal 10BASE-T Rx Threshold +- */ +-/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ ++#define M88E1000_PSCR_AUTO_X_MODE 0x0060 ++#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + + /* M88E1000 PHY Specific Status Register */ +-#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +-#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +-#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ ++#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ ++#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ ++#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ + /* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +-#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +-#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +-#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ +- +-#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 +- +-/* M88E1000 Extended PHY Specific Control Register */ +-/* 1 = Lost lock detect enabled. 
+- * Will assert lost lock and bring +- * link down if idle not seen +- * within 1ms in 1000BASE-T +- */ ++#define M88E1000_PSSR_CABLE_LENGTH 0x0380 ++#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ ++#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ ++#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ ++#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ ++ ++#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 ++ + /* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 ++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 ++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 + /* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +-#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ ++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 ++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 ++#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ ++ ++/* Intel I347AT4 Registers */ ++#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ ++#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ ++#define I347AT4_PAGE_SELECT 0x16 + +-/* Intel i347-AT4 Registers */ ++/* I347AT4 Extended PHY Specific Control Register */ + +-#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +-#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +-#define I347AT4_PAGE_SELECT 0x16 +- +-/* i347-AT4 Extended PHY Specific Control Register */ +- +-/* Number of times we will attempt to autonegotiate before downshifting if we +- * are the master ++/* Number of times we will attempt to autonegotiate before downshifting if we ++ * are the master + */ +-#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +-#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +-#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +-#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +-#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +-#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +-#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +-#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +-#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +-#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 ++#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 ++#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 ++#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 ++#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 ++#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 ++#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 ++#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 ++#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 ++#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 ++#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +-/* i347-AT4 PHY Cable Diagnostics Control */ +-#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ ++/* I347AT4 PHY Cable Diagnostics Control */ ++#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +-/* Marvell 1112 only registers */ +-#define M88E1112_VCT_DSP_DISTANCE 0x001A ++/* M88E1112 only registers */ ++#define M88E1112_VCT_DSP_DISTANCE 0x001A + + /* M88EC018 Rev 2 specific DownShift settings */ +-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 ++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 ++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 ++ ++/* Bits... 
++ * 15-5: page ++ * 4-0: register offset ++ */ ++#define GG82563_PAGE_SHIFT 5 ++#define GG82563_REG(page, reg) \ ++ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) ++#define GG82563_MIN_ALT_REG 30 ++ ++/* GG82563 Specific Registers */ ++#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ ++#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ ++#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ ++#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ ++ ++/* MAC Specific Control Register */ ++#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) ++ ++#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ ++ ++/* Page 193 - Port Control Registers */ ++/* Kumeran Mode Control */ ++#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) ++#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ ++ ++/* Page 194 - KMRN Registers */ ++#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + + /* MDI Control */ +-#define E1000_MDIC_DATA_MASK 0x0000FFFF +-#define E1000_MDIC_REG_MASK 0x001F0000 +-#define E1000_MDIC_REG_SHIFT 16 +-#define E1000_MDIC_PHY_MASK 0x03E00000 +-#define E1000_MDIC_PHY_SHIFT 21 +-#define E1000_MDIC_OP_WRITE 0x04000000 +-#define E1000_MDIC_OP_READ 0x08000000 +-#define E1000_MDIC_READY 0x10000000 +-#define E1000_MDIC_INT_EN 0x20000000 +-#define E1000_MDIC_ERROR 0x40000000 +-#define E1000_MDIC_DEST 0x80000000 +- +-/* Thermal Sensor */ +-#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +-#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ +- +-/* Energy Efficient Ethernet */ +-#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +-#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +-#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +-#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +-#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +-#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +-#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +-#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +-#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +-#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +-#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +-#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +-#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +-#define E1000_M88E1543_EEE_CTRL_1 0x0 +-#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +-#define E1000_EEE_ADV_DEV_I354 7 +-#define E1000_EEE_ADV_ADDR_I354 60 +-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +-#define E1000_PCS_STATUS_DEV_I354 3 +-#define E1000_PCS_STATUS_ADDR_I354 1 +-#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +-#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +-#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 ++#define E1000_MDIC_REG_MASK 0x001F0000 ++#define E1000_MDIC_REG_SHIFT 16 ++#define E1000_MDIC_PHY_MASK 0x03E00000 ++#define E1000_MDIC_PHY_SHIFT 21 ++#define E1000_MDIC_OP_WRITE 0x04000000 ++#define E1000_MDIC_OP_READ 0x08000000 ++#define E1000_MDIC_READY 0x10000000 ++#define E1000_MDIC_ERROR 0x40000000 ++#define E1000_MDIC_DEST 0x80000000 + + /* SerDes Control */ +-#define E1000_GEN_CTL_READY 0x80000000 
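Editor's note on the MDIC defines kept by this hunk: they describe the layout of a single MDI command word — data in the low 16 bits, the PHY register index in bits 20:16, the PHY address in bits 25:21, and the opcode/ready/error flags above that. The standalone C sketch below only illustrates that bit layout; mdic_read_cmd() is a made-up helper, not code from this driver, and the constants are copied from the defines in this hunk.

/* Minimal sketch: composing an MDIC read command from the field layout
 * described by the defines above.  mdic_read_cmd() is hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define E1000_MDIC_DATA_MASK	0x0000FFFF
#define E1000_MDIC_REG_SHIFT	16
#define E1000_MDIC_PHY_SHIFT	21
#define E1000_MDIC_OP_READ	0x08000000
#define E1000_MDIC_READY	0x10000000

static uint32_t mdic_read_cmd(uint32_t phy_addr, uint32_t reg_addr)
{
	/* register index goes to bits 20:16, PHY address to bits 25:21;
	 * hardware latches E1000_MDIC_READY (and the result in the low
	 * 16 bits) once the read completes */
	return (reg_addr << E1000_MDIC_REG_SHIFT) |
	       (phy_addr << E1000_MDIC_PHY_SHIFT) |
	       E1000_MDIC_OP_READ;
}

int main(void)
{
	uint32_t cmd = mdic_read_cmd(1, 2);	/* PHY addr 1, PHY reg 2 */

	printf("MDIC command word: 0x%08x\n", cmd);
	printf("data field mask:   0x%08x\n", (uint32_t)E1000_MDIC_DATA_MASK);
	return 0;
}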
+-#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +-#define E1000_GEN_POLL_TIMEOUT 640 +- +-#define E1000_VFTA_ENTRY_SHIFT 5 +-#define E1000_VFTA_ENTRY_MASK 0x7F +-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F +- +-/* DMA Coalescing register fields */ +-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ ++#define E1000_GEN_CTL_READY 0x80000000 ++#define E1000_GEN_CTL_ADDRESS_SHIFT 8 ++#define E1000_GEN_POLL_TIMEOUT 640 ++ ++/* LinkSec register fields */ ++#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 ++#define E1000_LSECTXCAP_SUM_SHIFT 16 ++#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 ++#define E1000_LSECRXCAP_SUM_SHIFT 16 ++ ++#define E1000_LSECTXCTRL_EN_MASK 0x00000003 ++#define E1000_LSECTXCTRL_DISABLE 0x0 ++#define E1000_LSECTXCTRL_AUTH 0x1 ++#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 ++#define E1000_LSECTXCTRL_AISCI 0x00000020 ++#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 ++#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 ++ ++#define E1000_LSECRXCTRL_EN_MASK 0x0000000C ++#define E1000_LSECRXCTRL_EN_SHIFT 2 ++#define E1000_LSECRXCTRL_DISABLE 0x0 ++#define E1000_LSECRXCTRL_CHECK 0x1 ++#define E1000_LSECRXCTRL_STRICT 0x2 ++#define E1000_LSECRXCTRL_DROP 0x3 ++#define E1000_LSECRXCTRL_PLSH 0x00000040 ++#define E1000_LSECRXCTRL_RP 0x00000080 ++#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + + /* Tx Rate-Scheduler Config fields */ + #define E1000_RTTBCNRC_RS_ENA 0x80000000 +@@ -1013,4 +1342,70 @@ + #define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +-#endif ++/* DMA Coalescing register fields */ ++/* DMA Coalescing Watchdog Timer */ ++#define E1000_DMACR_DMACWT_MASK 0x00003FFF ++/* DMA Coalescing Rx Threshold */ ++#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 ++#define E1000_DMACR_DMACTHR_SHIFT 16 ++/* Lx when no PCIe transactions */ ++#define E1000_DMACR_DMAC_LX_MASK 0x30000000 ++#define E1000_DMACR_DMAC_LX_SHIFT 28 ++#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ ++/* DMA Coalescing BMC-to-OS Watchdog Enable */ ++#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 ++ ++/* DMA Coalescing Transmit Threshold */ ++#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF ++ ++#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ ++ ++/* Rx Traffic Rate Threshold */ ++#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF ++/* Rx packet rate in current window */ ++#define E1000_DMCRTRH_LRPRCW 0x80000000 ++ ++/* DMA Coal Rx Traffic Current Count */ ++#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF ++ ++/* Flow ctrl Rx Threshold High val */ ++#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 ++#define E1000_FCRTC_RTH_COAL_SHIFT 4 ++/* Lx power decision based on DMA coal */ ++#define E1000_PCIEMISC_LX_DECISION 0x00000080 ++ ++#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ ++#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ ++#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ ++#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ ++#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ ++ ++/* Proxy Filter Control */ ++#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ ++#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ ++#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ ++#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ ++#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ ++#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ ++#define E1000_PROXYFC_IPV6 0x00000080 /* 
Directed IPv6 Enable */ ++#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ ++#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ ++/* Proxy Status */ ++#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ ++ ++/* Firmware Status */ ++#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ ++/* VF Control */ ++#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ ++ ++#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ ++/* Lan ID bit field offset in status register */ ++#define E1000_STATUS_LAN_ID_OFFSET 2 ++#define E1000_VFTA_ENTRIES 128 ++#ifndef E1000_UNUSEDARG ++#define E1000_UNUSEDARG ++#endif /* E1000_UNUSEDARG */ ++#ifndef ERROR_REPORT ++#define ERROR_REPORT(fmt) do { } while (0) ++#endif /* ERROR_REPORT */ ++#endif /* _E1000_DEFINES_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h +--- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,33 +1,31 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_HW_H_ + #define _E1000_HW_H_ + +-#include +-#include +-#include +-#include +- ++#include "e1000_osdep.h" + #include "e1000_regs.h" + #include "e1000_defines.h" + +@@ -50,15 +48,14 @@ + #define E1000_DEV_ID_82580_SGMII 0x1511 + #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 + #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +-#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +-#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +-#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + #define E1000_DEV_ID_I350_COPPER 0x1521 + #define E1000_DEV_ID_I350_FIBER 0x1522 + #define E1000_DEV_ID_I350_SERDES 0x1523 + #define E1000_DEV_ID_I350_SGMII 0x1524 ++#define E1000_DEV_ID_I350_DA4 0x1546 + #define E1000_DEV_ID_I210_COPPER 0x1533 ++#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 ++#define E1000_DEV_ID_I210_COPPER_IT 0x1535 + #define E1000_DEV_ID_I210_FIBER 0x1536 + #define E1000_DEV_ID_I210_SERDES 0x1537 + #define E1000_DEV_ID_I210_SGMII 0x1538 +@@ -68,19 +65,26 @@ + #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 + #define E1000_DEV_ID_I354_SGMII 0x1F41 + #define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 ++#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 ++#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A ++#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C ++#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +-#define E1000_REVISION_2 2 +-#define E1000_REVISION_4 4 +- +-#define E1000_FUNC_0 0 +-#define E1000_FUNC_1 1 +-#define E1000_FUNC_2 2 +-#define E1000_FUNC_3 3 +- +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 ++#define E1000_REVISION_0 0 ++#define E1000_REVISION_1 1 ++#define E1000_REVISION_2 2 ++#define E1000_REVISION_3 3 ++#define E1000_REVISION_4 4 ++ ++#define E1000_FUNC_0 0 ++#define E1000_FUNC_1 1 ++#define E1000_FUNC_2 2 ++#define E1000_FUNC_3 3 ++ ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 ++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + + enum e1000_mac_type { + e1000_undefined = 0, +@@ -127,6 +131,7 @@ + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, ++ e1000_phy_vf, + e1000_phy_i210, + }; + +@@ -181,6 +186,177 @@ + e1000_fc_default = 0xFF + }; + ++enum e1000_ms_type { ++ e1000_ms_hw_default = 0, ++ e1000_ms_force_master, ++ e1000_ms_force_slave, ++ e1000_ms_auto ++}; ++ ++enum e1000_smart_speed { ++ e1000_smart_speed_default = 0, ++ e1000_smart_speed_on, ++ e1000_smart_speed_off ++}; ++ ++enum e1000_serdes_link_state { ++ e1000_serdes_link_down = 0, ++ e1000_serdes_link_autoneg_progress, ++ e1000_serdes_link_autoneg_complete, ++ e1000_serdes_link_forced_up ++}; ++ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++#endif ++/* Receive Descriptor */ ++struct e1000_rx_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ __le16 length; /* Length of data DMAed into data buffer */ ++ __le16 csum; /* Packet checksum */ ++ u8 status; /* Descriptor status */ ++ u8 errors; /* Descriptor Errors */ ++ __le16 special; ++}; ++ ++/* Receive Descriptor - Extended */ ++union e1000_rx_desc_extended { ++ struct { ++ __le64 buffer_addr; ++ __le64 reserved; ++ } read; ++ struct { ++ struct { ++ __le32 mrq; /* Multiple 
Rx Queues */ ++ union { ++ __le32 rss; /* RSS Hash */ ++ struct { ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ ++ } csum_ip; ++ } hi_dword; ++ } lower; ++ struct { ++ __le32 status_error; /* ext status/error */ ++ __le16 length; ++ __le16 vlan; /* VLAN tag */ ++ } upper; ++ } wb; /* writeback */ ++}; ++ ++#define MAX_PS_BUFFERS 4 ++ ++/* Number of packet split data buffers (not including the header buffer) */ ++#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) ++ ++/* Receive Descriptor - Packet Split */ ++union e1000_rx_desc_packet_split { ++ struct { ++ /* one buffer for protocol header(s), three data buffers */ ++ __le64 buffer_addr[MAX_PS_BUFFERS]; ++ } read; ++ struct { ++ struct { ++ __le32 mrq; /* Multiple Rx Queues */ ++ union { ++ __le32 rss; /* RSS Hash */ ++ struct { ++ __le16 ip_id; /* IP id */ ++ __le16 csum; /* Packet Checksum */ ++ } csum_ip; ++ } hi_dword; ++ } lower; ++ struct { ++ __le32 status_error; /* ext status/error */ ++ __le16 length0; /* length of buffer 0 */ ++ __le16 vlan; /* VLAN tag */ ++ } middle; ++ struct { ++ __le16 header_status; ++ /* length of buffers 1-3 */ ++ __le16 length[PS_PAGE_BUFFERS]; ++ } upper; ++ __le64 reserved; ++ } wb; /* writeback */ ++}; ++ ++/* Transmit Descriptor */ ++struct e1000_tx_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ union { ++ __le32 data; ++ struct { ++ __le16 length; /* Data buffer length */ ++ u8 cso; /* Checksum offset */ ++ u8 cmd; /* Descriptor control */ ++ } flags; ++ } lower; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 css; /* Checksum start */ ++ __le16 special; ++ } fields; ++ } upper; ++}; ++ ++/* Offload Context Descriptor */ ++struct e1000_context_desc { ++ union { ++ __le32 ip_config; ++ struct { ++ u8 ipcss; /* IP checksum start */ ++ u8 ipcso; /* IP checksum offset */ ++ __le16 ipcse; /* IP checksum end */ ++ } ip_fields; ++ } lower_setup; ++ union { ++ __le32 tcp_config; ++ struct { ++ u8 tucss; /* TCP checksum start */ ++ u8 tucso; /* TCP checksum offset */ ++ __le16 tucse; /* TCP checksum end */ ++ } tcp_fields; ++ } upper_setup; ++ __le32 cmd_and_length; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 hdr_len; /* Header length */ ++ __le16 mss; /* Maximum segment size */ ++ } fields; ++ } tcp_seg_setup; ++}; ++ ++/* Offload data descriptor */ ++struct e1000_data_desc { ++ __le64 buffer_addr; /* Address of the descriptor's buffer address */ ++ union { ++ __le32 data; ++ struct { ++ __le16 length; /* Data buffer length */ ++ u8 typ_len_ext; ++ u8 cmd; ++ } flags; ++ } lower; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 popts; /* Packet Options */ ++ __le16 special; ++ } fields; ++ } upper; ++}; ++ + /* Statistics counters collected by the MAC */ + struct e1000_hw_stats { + u64 crcerrs; +@@ -289,7 +465,7 @@ + u8 checksum; + }; + +-#define E1000_HI_MAX_DATA_LENGTH 252 ++#define E1000_HI_MAX_DATA_LENGTH 252 + struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +@@ -304,7 +480,7 @@ + u16 command_length; + }; + +-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 ++#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 + struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +@@ -313,52 +489,95 @@ + #include "e1000_mac.h" + #include "e1000_phy.h" + #include "e1000_nvm.h" ++#include "e1000_manage.h" + #include 
"e1000_mbx.h" + ++/* Function pointers for the MAC. */ + struct e1000_mac_operations { +- s32 (*check_for_link)(struct e1000_hw *); +- s32 (*reset_hw)(struct e1000_hw *); +- s32 (*init_hw)(struct e1000_hw *); ++ s32 (*init_params)(struct e1000_hw *); ++ s32 (*id_led_init)(struct e1000_hw *); ++ s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); +- s32 (*setup_physical_interface)(struct e1000_hw *); +- void (*rar_set)(struct e1000_hw *, u8 *, u32); +- s32 (*read_mac_addr)(struct e1000_hw *); +- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); +- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); +- void (*release_swfw_sync)(struct e1000_hw *, u16); +-#ifdef CONFIG_IGB_HWMON ++ s32 (*check_for_link)(struct e1000_hw *); ++ s32 (*cleanup_led)(struct e1000_hw *); ++ void (*clear_hw_cntrs)(struct e1000_hw *); ++ void (*clear_vfta)(struct e1000_hw *); ++ s32 (*get_bus_info)(struct e1000_hw *); ++ void (*set_lan_id)(struct e1000_hw *); ++ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); ++ s32 (*led_on)(struct e1000_hw *); ++ s32 (*led_off)(struct e1000_hw *); ++ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); ++ s32 (*reset_hw)(struct e1000_hw *); ++ s32 (*init_hw)(struct e1000_hw *); ++ void (*shutdown_serdes)(struct e1000_hw *); ++ void (*power_up_serdes)(struct e1000_hw *); ++ s32 (*setup_link)(struct e1000_hw *); ++ s32 (*setup_physical_interface)(struct e1000_hw *); ++ s32 (*setup_led)(struct e1000_hw *); ++ void (*write_vfta)(struct e1000_hw *, u32, u32); ++ void (*config_collision_dist)(struct e1000_hw *); ++ int (*rar_set)(struct e1000_hw *, u8*, u32); ++ s32 (*read_mac_addr)(struct e1000_hw *); ++ s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +-#endif +- ++ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); ++ void (*release_swfw_sync)(struct e1000_hw *, u16); + }; + ++/* When to use various PHY register access functions: ++ * ++ * Func Caller ++ * Function Does Does When to use ++ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ * X_reg L,P,A n/a for simple PHY reg accesses ++ * X_reg_locked P,A L for multiple accesses of different regs ++ * on different pages ++ * X_reg_page A L,P for multiple accesses of different regs ++ * on the same page ++ * ++ * Where X=[read|write], L=locking, P=sets page, A=register access ++ * ++ */ + struct e1000_phy_operations { +- s32 (*acquire)(struct e1000_hw *); +- s32 (*check_polarity)(struct e1000_hw *); +- s32 (*check_reset_block)(struct e1000_hw *); +- s32 (*force_speed_duplex)(struct e1000_hw *); +- s32 (*get_cfg_done)(struct e1000_hw *hw); +- s32 (*get_cable_length)(struct e1000_hw *); +- s32 (*get_phy_info)(struct e1000_hw *); +- s32 (*read_reg)(struct e1000_hw *, u32, u16 *); ++ s32 (*init_params)(struct e1000_hw *); ++ s32 (*acquire)(struct e1000_hw *); ++ s32 (*check_polarity)(struct e1000_hw *); ++ s32 (*check_reset_block)(struct e1000_hw *); ++ s32 (*commit)(struct e1000_hw *); ++ s32 (*force_speed_duplex)(struct e1000_hw *); ++ s32 (*get_cfg_done)(struct e1000_hw *hw); ++ s32 (*get_cable_length)(struct e1000_hw *); ++ s32 (*get_info)(struct e1000_hw *); ++ s32 (*set_page)(struct e1000_hw *, u16); ++ s32 (*read_reg)(struct e1000_hw *, u32, u16 *); ++ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); ++ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); +- s32 (*reset)(struct e1000_hw *); +- s32 
(*set_d0_lplu_state)(struct e1000_hw *, bool); +- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); +- s32 (*write_reg)(struct e1000_hw *, u32, u16); ++ s32 (*reset)(struct e1000_hw *); ++ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); ++ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); ++ s32 (*write_reg)(struct e1000_hw *, u32, u16); ++ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); ++ s32 (*write_reg_page)(struct e1000_hw *, u32, u16); ++ void (*power_up)(struct e1000_hw *); ++ void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); + }; + ++/* Function pointers for the NVM. */ + struct e1000_nvm_operations { +- s32 (*acquire)(struct e1000_hw *); +- s32 (*read)(struct e1000_hw *, u16, u16, u16 *); ++ s32 (*init_params)(struct e1000_hw *); ++ s32 (*acquire)(struct e1000_hw *); ++ s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); +- s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +- s32 (*update)(struct e1000_hw *); +- s32 (*validate)(struct e1000_hw *); +- s32 (*valid_led_default)(struct e1000_hw *, u16 *); ++ void (*reload)(struct e1000_hw *); ++ s32 (*update)(struct e1000_hw *); ++ s32 (*valid_led_default)(struct e1000_hw *, u16 *); ++ s32 (*validate)(struct e1000_hw *); ++ s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + }; + + #define E1000_MAX_SENSORS 3 +@@ -374,49 +593,45 @@ + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; + }; + +-struct e1000_info { +- s32 (*get_invariants)(struct e1000_hw *); +- struct e1000_mac_operations *mac_ops; +- struct e1000_phy_operations *phy_ops; +- struct e1000_nvm_operations *nvm_ops; +-}; +- +-extern const struct e1000_info e1000_82575_info; +- + struct e1000_mac_info { + struct e1000_mac_operations ops; +- +- u8 addr[6]; +- u8 perm_addr[6]; ++ u8 addr[ETH_ADDR_LEN]; ++ u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + ++ u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; ++ u32 tx_packet_delta; + u32 txcw; + ++ u16 current_ifs_val; ++ u16 ifs_max_val; ++ u16 ifs_min_val; ++ u16 ifs_ratio; ++ u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +- #define MAX_MTA_REG 128 ++#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; ++ bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; +- bool disable_hw_init_bits; + bool get_link_status; +- bool ifs_params_forced; + bool in_ifs_mode; +- bool report_tx_early; ++ enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +@@ -424,7 +639,6 @@ + + struct e1000_phy_info { + struct e1000_phy_operations ops; +- + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; +@@ -477,20 +691,19 @@ + enum e1000_bus_speed speed; + enum e1000_bus_width width; + +- u32 snoop; +- + u16 func; + u16 pci_cmd_word; + }; + + struct e1000_fc_info { +- u32 high_water; /* Flow control high-water mark */ +- u32 low_water; /* Flow control low-water mark */ +- u16 pause_time; /* Flow control pause timer */ +- bool send_xon; /* Flow control send XON */ +- bool strict_ieee; /* Strict IEEE mode */ +- enum e1000_fc_mode current_mode; /* Type of flow control */ +- enum e1000_fc_mode requested_mode; 
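Editor's note on the e1000_mac/phy/nvm_operations structures in this hunk: they are plain tables of function pointers that each part (82575, i210, VF, ...) fills in during init; the rest of the code only ever calls through the table, bracketing NVM accesses with acquire/release. The sketch below shows that pattern in isolation; the struct, the fake_* functions and their behaviour are invented for illustration and are not taken from the driver.

/* Sketch of the ops-table pattern used by the operations structs above. */
#include <stdint.h>
#include <stdio.h>

struct nvm_ops {
	int  (*acquire)(void *hw);
	int  (*read)(void *hw, uint16_t offset, uint16_t words, uint16_t *data);
	void (*release)(void *hw);
};

static int fake_acquire(void *hw) { (void)hw; return 0; }
static void fake_release(void *hw) { (void)hw; }
static int fake_read(void *hw, uint16_t offset, uint16_t words, uint16_t *data)
{
	(void)hw;
	for (uint16_t i = 0; i < words; i++)
		data[i] = offset + i;	/* stand-in for an EEPROM read */
	return 0;
}

int main(void)
{
	/* init code selects an implementation once... */
	struct nvm_ops ops = { fake_acquire, fake_read, fake_release };
	uint16_t word = 0;

	/* ...and callers bracket each access with acquire/release,
	 * mirroring e1000_read_nvm_srrd_i210() later in this patch */
	if (ops.acquire(NULL) == 0) {
		ops.read(NULL, 0x10, 1, &word);
		ops.release(NULL);
	}
	printf("word 0x10 = 0x%04x\n", word);
	return 0;
}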
++ u32 high_water; /* Flow control high-water mark */ ++ u32 low_water; /* Flow control low-water mark */ ++ u16 pause_time; /* Flow control pause timer */ ++ u16 refresh_time; /* Flow control refresh timer */ ++ bool send_xon; /* Flow control send XON */ ++ bool strict_ieee; /* Strict IEEE mode */ ++ enum e1000_fc_mode current_mode; /* FC mode in effect */ ++ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ + }; + + struct e1000_mbx_operations { +@@ -525,12 +738,17 @@ + bool sgmii_active; + bool global_device_reset; + bool eee_disable; +- bool clear_semaphore_once; +- struct e1000_sfp_flags eth_flags; + bool module_plugged; ++ bool clear_semaphore_once; ++ u32 mtu; ++ struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; +- bool mas_capable; ++}; ++ ++struct e1000_dev_spec_vf { ++ u32 vf_number; ++ u32 v2p_mailbox; + }; + + struct e1000_hw { +@@ -549,7 +767,8 @@ + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { +- struct e1000_dev_spec_82575 _82575; ++ struct e1000_dev_spec_82575 _82575; ++ struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; +@@ -560,14 +779,13 @@ + u8 revision_id; + }; + +-struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +-#define hw_dbg(format, arg...) \ +- netdev_dbg(igb_get_hw_dev(hw), format, ##arg) ++#include "e1000_82575.h" ++#include "e1000_i210.h" + + /* These functions must be implemented by drivers */ +-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); ++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); ++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); ++void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); ++void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +-void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +-void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +-#endif /* _E1000_HW_H_ */ ++#endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c +--- a/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,107 +1,40 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* + +-/* e1000_i210 +- * e1000_i211 +- */ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. 
+ +-#include +-#include ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-#include "e1000_hw.h" +-#include "e1000_i210.h" ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. + +-static s32 igb_update_flash_i210(struct e1000_hw *hw); ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". + +-/** +- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore +- * @hw: pointer to the HW structure +- * +- * Acquire the HW semaphore to access the PHY or NVM +- */ +-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +-{ +- u32 swsm; +- s32 timeout = hw->nvm.word_size + 1; +- s32 i = 0; ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +- /* Get the SW semaphore */ +- while (i < timeout) { +- swsm = rd32(E1000_SWSM); +- if (!(swsm & E1000_SWSM_SMBI)) +- break; ++*******************************************************************************/ + +- udelay(50); +- i++; +- } ++#include "e1000_api.h" + +- if (i == timeout) { +- /* In rare circumstances, the SW semaphore may already be held +- * unintentionally. Clear the semaphore once before giving up. +- */ +- if (hw->dev_spec._82575.clear_semaphore_once) { +- hw->dev_spec._82575.clear_semaphore_once = false; +- igb_put_hw_semaphore(hw); +- for (i = 0; i < timeout; i++) { +- swsm = rd32(E1000_SWSM); +- if (!(swsm & E1000_SWSM_SMBI)) +- break; + +- udelay(50); +- } +- } +- +- /* If we do not have the semaphore here, we have to give up. */ +- if (i == timeout) { +- hw_dbg("Driver can't access device - SMBI bit is set.\n"); +- return -E1000_ERR_NVM; +- } +- } +- +- /* Get the FW semaphore. */ +- for (i = 0; i < timeout; i++) { +- swsm = rd32(E1000_SWSM); +- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); +- +- /* Semaphore acquired if bit latched */ +- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) +- break; +- +- udelay(50); +- } +- +- if (i == timeout) { +- /* Release semaphores */ +- igb_put_hw_semaphore(hw); +- hw_dbg("Driver can't access the NVM\n"); +- return -E1000_ERR_NVM; +- } +- +- return 0; +-} ++static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); ++static void e1000_release_nvm_i210(struct e1000_hw *hw); ++static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); ++static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data); ++static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); ++static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + + /** +- * igb_acquire_nvm_i210 - Request for access to EEPROM ++ * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. +@@ -109,93 +42,178 @@ + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ **/ +-static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) ++static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) + { +- return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_acquire_nvm_i210"); ++ ++ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); ++ ++ return ret_val; + } + + /** +- * igb_release_nvm_i210 - Release exclusive access to EEPROM ++ * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +-static void igb_release_nvm_i210(struct e1000_hw *hw) ++static void e1000_release_nvm_i210(struct e1000_hw *hw) + { +- igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); ++ DEBUGFUNC("e1000_release_nvm_i210"); ++ ++ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + } + + /** +- * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore ++ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) ++s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + ++ DEBUGFUNC("e1000_acquire_swfw_sync_i210"); ++ + while (i < timeout) { +- if (igb_get_hw_semaphore_i210(hw)) { ++ if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + +- /* Firmware currently using resource (fwmask) */ +- igb_put_hw_semaphore(hw); +- mdelay(5); ++ /* ++ * Firmware currently using resource (fwmask) ++ * or other software thread using resource (swmask) ++ */ ++ e1000_put_hw_semaphore_generic(hw); ++ msec_delay_irq(5); + i++; + } + + if (i == timeout) { +- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); ++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); ++ ++ e1000_put_hw_semaphore_generic(hw); + +- igb_put_hw_semaphore(hw); + out: + return ret_val; + } + + /** +- * igb_release_swfw_sync_i210 - Release SW/FW semaphore ++ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
+ **/ +-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) ++void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) + { + u32 swfw_sync; + +- while (igb_get_hw_semaphore_i210(hw)) ++ DEBUGFUNC("e1000_release_swfw_sync_i210"); ++ ++ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + +- swfw_sync = rd32(E1000_SW_FW_SYNC); ++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; +- wr32(E1000_SW_FW_SYNC, swfw_sync); ++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + +- igb_put_hw_semaphore(hw); ++ e1000_put_hw_semaphore_generic(hw); + } + + /** +- * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register ++ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore ++ * @hw: pointer to the HW structure ++ * ++ * Acquire the HW semaphore to access the PHY or NVM ++ **/ ++static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) ++{ ++ u32 swsm; ++ s32 timeout = hw->nvm.word_size + 1; ++ s32 i = 0; ++ ++ DEBUGFUNC("e1000_get_hw_semaphore_i210"); ++ ++ /* Get the SW semaphore */ ++ while (i < timeout) { ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ if (!(swsm & E1000_SWSM_SMBI)) ++ break; ++ ++ usec_delay(50); ++ i++; ++ } ++ ++ if (i == timeout) { ++ /* In rare circumstances, the SW semaphore may already be held ++ * unintentionally. Clear the semaphore once before giving up. ++ */ ++ if (hw->dev_spec._82575.clear_semaphore_once) { ++ hw->dev_spec._82575.clear_semaphore_once = false; ++ e1000_put_hw_semaphore_generic(hw); ++ for (i = 0; i < timeout; i++) { ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ if (!(swsm & E1000_SWSM_SMBI)) ++ break; ++ ++ usec_delay(50); ++ } ++ } ++ ++ /* If we do not have the semaphore here, we have to give up. */ ++ if (i == timeout) { ++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); ++ return -E1000_ERR_NVM; ++ } ++ } ++ ++ /* Get the FW semaphore. */ ++ for (i = 0; i < timeout; i++) { ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); ++ ++ /* Semaphore acquired if bit latched */ ++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) ++ break; ++ ++ usec_delay(50); ++ } ++ ++ if (i == timeout) { ++ /* Release semaphores */ ++ e1000_put_hw_semaphore_generic(hw); ++ DEBUGOUT("Driver can't access the NVM\n"); ++ return -E1000_ERR_NVM; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read +@@ -204,28 +222,74 @@ + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +-static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, +- u16 *data) ++s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data) + { +- s32 status = 0; ++ s32 status = E1000_SUCCESS; + u16 i, count; + ++ DEBUGFUNC("e1000_read_nvm_srrd_i210"); ++ + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient +- * to read in bursts than synchronizing access for each word. +- */ ++ * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 
+ E1000_EERD_EEWR_MAX_COUNT : (words - i); +- if (!(hw->nvm.ops.acquire(hw))) { +- status = igb_read_nvm_eerd(hw, offset, count, ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { ++ status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + +- if (status) ++ if (status != E1000_SUCCESS) ++ break; ++ } ++ ++ return status; ++} ++ ++/** ++ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR ++ * @hw: pointer to the HW structure ++ * @offset: offset within the Shadow RAM to be written to ++ * @words: number of words to write ++ * @data: 16 bit word(s) to be written to the Shadow RAM ++ * ++ * Writes data to Shadow RAM at offset using EEWR register. ++ * ++ * If e1000_update_nvm_checksum is not called after this function , the ++ * data will not be committed to FLASH and also Shadow RAM will most likely ++ * contain an invalid checksum. ++ * ++ * If error code is returned, data and Shadow RAM may be inconsistent - buffer ++ * partially written. ++ **/ ++s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data) ++{ ++ s32 status = E1000_SUCCESS; ++ u16 i, count; ++ ++ DEBUGFUNC("e1000_write_nvm_srwr_i210"); ++ ++ /* We cannot hold synchronization semaphores for too long, ++ * because of forceful takeover procedure. However it is more efficient ++ * to write in bursts than synchronizing access for each word. */ ++ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { ++ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? ++ E1000_EERD_EEWR_MAX_COUNT : (words - i); ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { ++ status = e1000_write_nvm_srwr(hw, offset, count, ++ data + i); ++ hw->nvm.ops.release(hw); ++ } else { ++ status = E1000_ERR_SWFW_SYNC; ++ } ++ ++ if (status != E1000_SUCCESS) + break; + } + +@@ -233,7 +297,7 @@ + } + + /** +- * igb_write_nvm_srwr - Write to Shadow Ram using EEWR ++ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write +@@ -241,23 +305,26 @@ + * + * Writes data to Shadow Ram at offset using EEWR register. + * +- * If igb_update_nvm_checksum is not called after this function , the ++ * If e1000_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, ++static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + +- /* A check for invalid values: offset too large, too many words, ++ DEBUGFUNC("e1000_write_nvm_srwr"); ++ ++ /* ++ * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } +@@ -267,19 +334,19 @@ + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + +- wr32(E1000_SRWR, eewr); ++ E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & +- rd32(E1000_SRWR)) { +- ret_val = 0; ++ E1000_READ_REG(hw, E1000_SRWR)) { ++ ret_val = E1000_SUCCESS; + break; + } +- udelay(5); +- } ++ usec_delay(5); ++ } + +- if (ret_val) { +- hw_dbg("Shadow RAM write EEWR timed out\n"); ++ if (ret_val != E1000_SUCCESS) { ++ DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } +@@ -288,52 +355,7 @@ + return ret_val; + } + +-/** +- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR +- * @hw: pointer to the HW structure +- * @offset: offset within the Shadow RAM to be written to +- * @words: number of words to write +- * @data: 16 bit word(s) to be written to the Shadow RAM +- * +- * Writes data to Shadow RAM at offset using EEWR register. +- * +- * If e1000_update_nvm_checksum is not called after this function , the +- * data will not be committed to FLASH and also Shadow RAM will most likely +- * contain an invalid checksum. +- * +- * If error code is returned, data and Shadow RAM may be inconsistent - buffer +- * partially written. +- **/ +-static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, +- u16 *data) +-{ +- s32 status = 0; +- u16 i, count; +- +- /* We cannot hold synchronization semaphores for too long, +- * because of forceful takeover procedure. However it is more efficient +- * to write in bursts than synchronizing access for each word. +- */ +- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { +- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? +- E1000_EERD_EEWR_MAX_COUNT : (words - i); +- if (!(hw->nvm.ops.acquire(hw))) { +- status = igb_write_nvm_srwr(hw, offset, count, +- data + i); +- hw->nvm.ops.release(hw); +- } else { +- status = E1000_ERR_SWFW_SYNC; +- } +- +- if (status) +- break; +- } +- +- return status; +-} +- +-/** +- * igb_read_invm_word_i210 - Reads OTP ++/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read +@@ -341,15 +363,17 @@ + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. 
+ **/ +-static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) ++static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) + { + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + ++ DEBUGFUNC("e1000_read_invm_word_i210"); ++ + for (i = 0; i < E1000_INVM_SIZE; i++) { +- invm_dword = rd32(E1000_INVM_DATA_REG(i)); ++ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) +@@ -362,75 +386,76 @@ + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); +- hw_dbg("Read INVM Word 0x%02x = %x\n", ++ DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); +- status = 0; ++ status = E1000_SUCCESS; + break; + } + } + } +- if (status) +- hw_dbg("Requested word 0x%02x not found in OTP\n", address); ++ if (status != E1000_SUCCESS) ++ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; + } + +-/** +- * igb_read_invm_i210 - Read invm wrapper function for I210/I211 ++/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure +- * @words: number of words to read ++ * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. + **/ +-static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, +- u16 words __always_unused, u16 *data) ++static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, ++ u16 E1000_UNUSEDARG words, u16 *data) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); +- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); ++ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); +- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, ++ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); +- if (ret_val) +- hw_dbg("MAC Addr not found in iNVM\n"); ++ if (ret_val != E1000_SUCCESS) ++ DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != 
E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: +- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); +- if (ret_val) { ++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); ++ if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: +@@ -446,7 +471,7 @@ + *data = hw->vendor_id; + break; + default: +- hw_dbg("NVM word 0x%02x is not mapped.\n", offset); ++ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } +@@ -454,14 +479,15 @@ + } + + /** +- * igb_read_invm_version - Reads iNVM version and image type ++ * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. + **/ +-s32 igb_read_invm_version(struct e1000_hw *hw, +- struct e1000_fw_version *invm_ver) { ++s32 e1000_read_invm_version(struct e1000_hw *hw, ++ struct e1000_fw_version *invm_ver) ++{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; +@@ -472,9 +498,11 @@ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + ++ DEBUGFUNC("e1000_read_invm_version"); ++ + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { +- invm_dword = rd32(E1000_INVM_DATA_REG(i)); ++ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + +@@ -486,17 +514,18 @@ + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; +- status = 0; ++ status = E1000_SUCCESS; + break; + } +- /* Check if we have odd version location ++ /* ++ * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && +@@ -504,21 +533,22 @@ + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; +- status = 0; ++ status = E1000_SUCCESS; + break; + } +- /* Check if we have even version location ++ /* ++ * Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + } + +- if (!status) { ++ if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; +@@ -531,7 +561,7 @@ + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + /* Check if we have image type in first location used */ +@@ -540,7 +570,7 @@ + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; +- status = 0; ++ status = E1000_SUCCESS; + break; + } + } +@@ -548,27 +578,30 @@ + } + + /** +- * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum ++ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW 
structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +-static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) ++s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) + { +- s32 status = 0; ++ s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + +- if (!(hw->nvm.ops.acquire(hw))) { ++ DEBUGFUNC("e1000_validate_nvm_checksum_i210"); ++ ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + +- /* Replace the read function with semaphore grabbing with ++ /* ++ * Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; +- hw->nvm.ops.read = igb_read_nvm_eerd; ++ hw->nvm.ops.read = e1000_read_nvm_eerd; + +- status = igb_validate_nvm_checksum(hw); ++ status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; +@@ -581,147 +614,208 @@ + return status; + } + ++ + /** +- * igb_update_nvm_checksum_i210 - Update EEPROM checksum ++ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +-static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) ++s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + +- /* Read the first word from the EEPROM. If this times out or fails, do ++ DEBUGFUNC("e1000_update_nvm_checksum_i210"); ++ ++ /* ++ * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ +- ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); +- if (ret_val) { +- hw_dbg("EEPROM read failed\n"); ++ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); ++ if (ret_val != E1000_SUCCESS) { ++ DEBUGOUT("EEPROM read failed\n"); + goto out; + } + +- if (!(hw->nvm.ops.acquire(hw))) { +- /* Do not use hw->nvm.ops.write, hw->nvm.ops.read ++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { ++ /* ++ * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { +- ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); ++ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); +- hw_dbg("NVM Read Error while updating checksum.\n"); ++ DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; +- ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, ++ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); +- if (ret_val) { ++ if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); +- hw_dbg("NVM Write Error while updating checksum.\n"); ++ DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + +- ret_val = igb_update_flash_i210(hw); ++ ret_val = e1000_update_flash_i210(hw); + } else { +- ret_val = -E1000_ERR_SWFW_SYNC; ++ ret_val = E1000_ERR_SWFW_SYNC; ++ } ++out: ++ return ret_val; ++} ++ ++/** ++ * e1000_get_flash_presence_i210 - Check if flash device is detected. ++ * @hw: pointer to the HW structure ++ * ++ **/ ++bool e1000_get_flash_presence_i210(struct e1000_hw *hw) ++{ ++ u32 eec = 0; ++ bool ret_val = false; ++ ++ DEBUGFUNC("e1000_get_flash_presence_i210"); ++ ++ eec = E1000_READ_REG(hw, E1000_EECD); ++ ++ if (eec & E1000_EECD_FLASH_DETECTED_I210) ++ ret_val = true; ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_update_flash_i210 - Commit EEPROM to the flash ++ * @hw: pointer to the HW structure ++ * ++ **/ ++s32 e1000_update_flash_i210(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u32 flup; ++ ++ DEBUGFUNC("e1000_update_flash_i210"); ++ ++ ret_val = e1000_pool_flash_update_done_i210(hw); ++ if (ret_val == -E1000_ERR_NVM) { ++ DEBUGOUT("Flash update time out\n"); ++ goto out; + } ++ ++ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; ++ E1000_WRITE_REG(hw, E1000_EECD, flup); ++ ++ ret_val = e1000_pool_flash_update_done_i210(hw); ++ if (ret_val == E1000_SUCCESS) ++ DEBUGOUT("Flash update complete\n"); ++ else ++ DEBUGOUT("Flash update time out\n"); ++ + out: + return ret_val; + } + + /** +- * igb_pool_flash_update_done_i210 - Pool FLUDONE status. ++ * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) ++s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) + { + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + ++ DEBUGFUNC("e1000_pool_flash_update_done_i210"); ++ + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { +- reg = rd32(E1000_EECD); ++ reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + break; + } +- udelay(5); ++ usec_delay(5); + } + + return ret_val; + } + + /** +- * igb_get_flash_presence_i210 - Check if flash device is detected. ++ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * ++ * Initialize the i210/i211 NVM parameters and function pointers. 
+ **/ +-bool igb_get_flash_presence_i210(struct e1000_hw *hw) ++static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) + { +- u32 eec = 0; +- bool ret_val = false; ++ s32 ret_val; ++ struct e1000_nvm_info *nvm = &hw->nvm; + +- eec = rd32(E1000_EECD); +- if (eec & E1000_EECD_FLASH_DETECTED_I210) +- ret_val = true; ++ DEBUGFUNC("e1000_init_nvm_params_i210"); + ++ ret_val = e1000_init_nvm_params_82575(hw); ++ nvm->ops.acquire = e1000_acquire_nvm_i210; ++ nvm->ops.release = e1000_release_nvm_i210; ++ nvm->ops.valid_led_default = e1000_valid_led_default_i210; ++ if (e1000_get_flash_presence_i210(hw)) { ++ hw->nvm.type = e1000_nvm_flash_hw; ++ nvm->ops.read = e1000_read_nvm_srrd_i210; ++ nvm->ops.write = e1000_write_nvm_srwr_i210; ++ nvm->ops.validate = e1000_validate_nvm_checksum_i210; ++ nvm->ops.update = e1000_update_nvm_checksum_i210; ++ } else { ++ hw->nvm.type = e1000_nvm_invm; ++ nvm->ops.read = e1000_read_invm_i210; ++ nvm->ops.write = e1000_null_write_nvm; ++ nvm->ops.validate = e1000_null_ops_generic; ++ nvm->ops.update = e1000_null_ops_generic; ++ } + return ret_val; + } + + /** +- * igb_update_flash_i210 - Commit EEPROM to the flash ++ * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * ++ * Called to initialize all function pointers and parameters. + **/ +-static s32 igb_update_flash_i210(struct e1000_hw *hw) ++void e1000_init_function_pointers_i210(struct e1000_hw *hw) + { +- s32 ret_val = 0; +- u32 flup; +- +- ret_val = igb_pool_flash_update_done_i210(hw); +- if (ret_val == -E1000_ERR_NVM) { +- hw_dbg("Flash update time out\n"); +- goto out; +- } ++ e1000_init_function_pointers_82575(hw); ++ hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + +- flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; +- wr32(E1000_EECD, flup); +- +- ret_val = igb_pool_flash_update_done_i210(hw); +- if (ret_val) +- hw_dbg("Flash update complete\n"); +- else +- hw_dbg("Flash update time out\n"); +- +-out: +- return ret_val; ++ return; + } + + /** +- * igb_valid_led_default_i210 - Verify a valid default LED config ++ * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) ++static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_valid_led_default_i210"); ++ + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + goto out; + } + +@@ -741,17 +835,19 @@ + } + + /** +- * __igb_access_xmdio_reg - Read/write XMDIO register ++ * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +-static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, +- u8 dev_addr, u16 *data, bool read) ++static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, ++ u8 dev_addr, u16 *data, bool read) + { +- s32 ret_val = 0; ++ s32 ret_val; ++ ++ DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) +@@ -782,67 +878,41 @@ + } + + /** +- * igb_read_xmdio_reg - Read XMDIO register ++ * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) ++s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) + { +- return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); ++ DEBUGFUNC("e1000_read_xmdio_reg"); ++ ++ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); + } + + /** +- * igb_write_xmdio_reg - Write XMDIO register ++ * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +-{ +- return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +-} +- +-/** +- * igb_init_nvm_params_i210 - Init NVM func ptrs. +- * @hw: pointer to the HW structure +- **/ +-s32 igb_init_nvm_params_i210(struct e1000_hw *hw) ++s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) + { +- s32 ret_val = 0; +- struct e1000_nvm_info *nvm = &hw->nvm; ++ DEBUGFUNC("e1000_read_xmdio_reg"); + +- nvm->ops.acquire = igb_acquire_nvm_i210; +- nvm->ops.release = igb_release_nvm_i210; +- nvm->ops.valid_led_default = igb_valid_led_default_i210; +- +- /* NVM Function Pointers */ +- if (igb_get_flash_presence_i210(hw)) { +- hw->nvm.type = e1000_nvm_flash_hw; +- nvm->ops.read = igb_read_nvm_srrd_i210; +- nvm->ops.write = igb_write_nvm_srwr_i210; +- nvm->ops.validate = igb_validate_nvm_checksum_i210; +- nvm->ops.update = igb_update_nvm_checksum_i210; +- } else { +- hw->nvm.type = e1000_nvm_invm; +- nvm->ops.read = igb_read_invm_i210; +- nvm->ops.write = NULL; +- nvm->ops.validate = NULL; +- nvm->ops.update = NULL; +- } +- return ret_val; ++ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); + } + + /** +- * igb_pll_workaround_i210 ++ * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +-s32 igb_pll_workaround_i210(struct e1000_hw *hw) ++static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) + { + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; +@@ -850,53 +920,104 @@ + int i; + + /* Get and set needed register values */ +- wuc = rd32(E1000_WUC); +- mdicnfg = rd32(E1000_MDICNFG); ++ wuc = E1000_READ_REG(hw, E1000_WUC); ++ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; +- wr32(E1000_MDICNFG, reg_val); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ +- ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, +- &nvm_word); +- if (ret_val) ++ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, ++ &nvm_word); ++ if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ +- igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | ++ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { +- ret_val = 0; ++ ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ +- ctrl = rd32(E1000_CTRL); +- wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + +- ctrl_ext = rd32(E1000_CTRL_EXT); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); +- wr32(E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +- wr32(E1000_WUC, 0); ++ E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); +- wr32(E1000_EEARBC_I210, reg_val); ++ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + +- igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); ++ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; +- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); +- usleep_range(1000, 2000); ++ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); ++ msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; +- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); ++ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); +- wr32(E1000_EEARBC_I210, reg_val); ++ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ +- wr32(E1000_WUC, wuc); ++ E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ +- wr32(E1000_MDICNFG, mdicnfg); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); ++ return ret_val; ++} ++ ++/** ++ * e1000_get_cfg_done_i210 - Read config done bit ++ * @hw: pointer to the HW structure ++ * ++ * Read the management control register for the config done bit for ++ * completion status. NOTE: silicon which is EEPROM-less will fail trying ++ * to read the config done bit, so an error is *ONLY* logged and returns ++ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon ++ * would not be able to be reset or change link. 
++ **/ ++static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) ++{ ++ s32 timeout = PHY_CFG_TIMEOUT; ++ u32 mask = E1000_NVM_CFG_DONE_PORT_0; ++ ++ DEBUGFUNC("e1000_get_cfg_done_i210"); ++ ++ while (timeout) { ++ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) ++ break; ++ msec_delay(1); ++ timeout--; ++ } ++ if (!timeout) ++ DEBUGOUT("MNG configuration cycle has not completed.\n"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_init_hw_i210 - Init hw for I210/I211 ++ * @hw: pointer to the HW structure ++ * ++ * Called to initialize hw for i210 hw family. ++ **/ ++s32 e1000_init_hw_i210(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_init_hw_i210"); ++ if ((hw->mac.type >= e1000_i210) && ++ !(e1000_get_flash_presence_i210(hw))) { ++ ret_val = e1000_pll_workaround_i210(hw); ++ if (ret_val != E1000_SUCCESS) ++ return ret_val; ++ } ++ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; ++ ret_val = e1000_init_hw_82575(hw); + return ret_val; + } +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h +--- a/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,39 +1,47 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_I210_H_ + #define _E1000_I210_H_ + +-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +-s32 igb_read_invm_version(struct e1000_hw *hw, +- struct e1000_fw_version *invm_ver); +-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +-s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +-bool igb_get_flash_presence_i210(struct e1000_hw *hw); +-s32 igb_pll_workaround_i210(struct e1000_hw *hw); ++bool e1000_get_flash_presence_i210(struct e1000_hw *hw); ++s32 e1000_update_flash_i210(struct e1000_hw *hw); ++s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); ++s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); ++s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, ++ u16 words, u16 *data); ++s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, ++ u16 words, u16 *data); ++s32 e1000_read_invm_version(struct e1000_hw *hw, ++ struct e1000_fw_version *invm_ver); ++s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); ++void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); ++s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, ++ u16 *data); ++s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, ++ u16 data); ++s32 e1000_init_hw_i210(struct e1000_hw *hw); + + #define E1000_STM_OPCODE 0xDB00 + #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 +@@ -56,15 +64,15 @@ + + #define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 + #define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +-#define E1000_INVM_ULT_BYTES_SIZE 8 +-#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +-#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +-#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +-#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 +- +-#define E1000_INVM_MAJOR_MASK 0x3F0 +-#define E1000_INVM_MINOR_MASK 0xF +-#define E1000_INVM_MAJOR_SHIFT 4 ++#define E1000_INVM_ULT_BYTES_SIZE 8 ++#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 ++#define E1000_INVM_VER_FIELD_ONE 0x1FF8 ++#define E1000_INVM_VER_FIELD_TWO 0x7FE000 ++#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 ++ ++#define E1000_INVM_MAJOR_MASK 0x3F0 ++#define E1000_INVM_MINOR_MASK 0xF ++#define E1000_INVM_MAJOR_SHIFT 4 + + #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ +@@ -73,7 +81,7 @@ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +-/* NVM offset defaults for i211 device */ ++/* NVM offset defaults for I211 devices */ + #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 + #define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 + #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c +--- a/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,68 +1,179 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. 
+- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); ++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); ++static void e1000_config_collision_dist_generic(struct e1000_hw *hw); ++static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); ++ ++/** ++ * e1000_init_mac_ops_generic - Initialize MAC function pointers ++ * @hw: pointer to the HW structure + * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++ * Setups up the function pointers to no-op functions ++ **/ ++void e1000_init_mac_ops_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ DEBUGFUNC("e1000_init_mac_ops_generic"); ++ ++ /* General Setup */ ++ mac->ops.init_params = e1000_null_ops_generic; ++ mac->ops.init_hw = e1000_null_ops_generic; ++ mac->ops.reset_hw = e1000_null_ops_generic; ++ mac->ops.setup_physical_interface = e1000_null_ops_generic; ++ mac->ops.get_bus_info = e1000_null_ops_generic; ++ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; ++ mac->ops.read_mac_addr = igb_e1000_read_mac_addr_generic; ++ mac->ops.config_collision_dist = e1000_config_collision_dist_generic; ++ mac->ops.clear_hw_cntrs = e1000_null_mac_generic; ++ /* LED */ ++ mac->ops.cleanup_led = e1000_null_ops_generic; ++ mac->ops.setup_led = e1000_null_ops_generic; ++ mac->ops.blink_led = e1000_null_ops_generic; ++ mac->ops.led_on = e1000_null_ops_generic; ++ mac->ops.led_off = e1000_null_ops_generic; ++ /* LINK */ ++ mac->ops.setup_link = e1000_null_ops_generic; ++ mac->ops.get_link_up_info = e1000_null_link_info; ++ mac->ops.check_for_link = e1000_null_ops_generic; ++ /* Management */ ++ mac->ops.check_mng_mode = e1000_null_mng_mode; ++ /* VLAN, MC, etc. 
*/ ++ mac->ops.update_mc_addr_list = e1000_null_update_mc; ++ mac->ops.clear_vfta = e1000_null_mac_generic; ++ mac->ops.write_vfta = e1000_null_write_vfta; ++ mac->ops.rar_set = e1000_rar_set_generic; ++ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; ++} ++ ++/** ++ * e1000_null_ops_generic - No-op function, returns 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_ops_generic"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_mac_generic - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_mac_generic"); ++ return; ++} + +-#include +-#include +-#include +-#include +-#include ++/** ++ * e1000_null_link_info - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) ++{ ++ DEBUGFUNC("e1000_null_link_info"); ++ return E1000_SUCCESS; ++} + +-#include "e1000_mac.h" ++/** ++ * e1000_null_mng_mode - No-op function, return false ++ * @hw: pointer to the HW structure ++ **/ ++bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_mng_mode"); ++ return false; ++} + +-#include "igb.h" ++/** ++ * e1000_null_update_mc - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) ++{ ++ DEBUGFUNC("e1000_null_update_mc"); ++ return; ++} + +-static s32 igb_set_default_fc(struct e1000_hw *hw); +-static s32 igb_set_fc_watermarks(struct e1000_hw *hw); ++/** ++ * e1000_null_write_vfta - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) ++{ ++ DEBUGFUNC("e1000_null_write_vfta"); ++ return; ++} + + /** +- * igb_get_bus_info_pcie - Get PCIe bus information ++ * e1000_null_rar_set - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) ++{ ++ DEBUGFUNC("e1000_null_rar_set"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
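e1000_init_mac_ops_generic() above deliberately points every MAC operation at a safe default (mostly e1000_null_ops_generic and friends), so a family-specific init only has to override the entries it actually implements. A sketch of that override step for a hypothetical family (illustrative only, not part of the patch):

static s32 init_mac_params_example(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	e1000_init_mac_ops_generic(hw);	/* install the no-op defaults */
	mac->ops.get_bus_info = igb_e1000_get_bus_info_pcie_generic;
	mac->ops.check_for_link = e1000_check_for_copper_link_generic;
	/* anything not overridden keeps the default installed above */
	return E1000_SUCCESS;
}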
+ **/ +-s32 igb_get_bus_info_pcie(struct e1000_hw *hw) ++s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) + { ++ struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; +- u32 reg; + u16 pcie_link_status; + ++ DEBUGFUNC("igb_e1000_get_bus_info_pcie_generic"); ++ + bus->type = e1000_bus_type_pci_express; + +- ret_val = igb_read_pcie_cap_reg(hw, +- PCI_EXP_LNKSTA, +- &pcie_link_status); ++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, ++ &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { +- switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { +- case PCI_EXP_LNKSTA_CLS_2_5GB: ++ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { ++ case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; +- case PCI_EXP_LNKSTA_CLS_5_0GB: ++ case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: +@@ -71,75 +182,70 @@ + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & +- PCI_EXP_LNKSTA_NLW) >> +- PCI_EXP_LNKSTA_NLW_SHIFT); ++ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + +- reg = rd32(E1000_STATUS); +- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; ++ mac->ops.set_lan_id(hw); + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_clear_vfta - Clear VLAN filter table ++ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices ++ * + * @hw: pointer to the HW structure + * +- * Clears the register array which contains the VLAN filter table by +- * setting all the values to 0. ++ * Determines the LAN function id by reading memory-mapped registers ++ * and swaps the port value if requested. + **/ +-void igb_clear_vfta(struct e1000_hw *hw) ++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) + { +- u32 offset; ++ struct e1000_bus_info *bus = &hw->bus; ++ u32 reg; + +- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { +- array_wr32(E1000_VFTA, offset, 0); +- wrfl(); +- } ++ /* The status register reports the correct function number ++ * for the device regardless of function swap state. ++ */ ++ reg = E1000_READ_REG(hw, E1000_STATUS); ++ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + } + + /** +- * igb_write_vfta - Write value to VLAN filter table ++ * igb_e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure +- * @offset: register offset in VLAN filter table +- * @value: register value written to VLAN filter table + * +- * Writes value at the given offset in the register array which stores +- * the VLAN filter table. ++ * Sets the LAN function id to zero for a single port device. + **/ +-static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw) + { +- array_wr32(E1000_VFTA, offset, value); +- wrfl(); +-} ++ struct e1000_bus_info *bus = &hw->bus; + +-/* Due to a hw errata, if the host tries to configure the VFTA register +- * while performing queries from the BMC or DMA, then the VFTA in some +- * cases won't be written. +- */ ++ bus->func = 0; ++} + + /** +- * igb_clear_vfta_i350 - Clear VLAN filter table ++ * igb_e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. 
+ **/ +-void igb_clear_vfta_i350(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_clear_vfta_generic(struct e1000_hw *hw) + { + u32 offset; +- int i; + +- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { +- for (i = 0; i < 10; i++) +- array_wr32(E1000_VFTA, offset, 0); ++ DEBUGFUNC("igb_e1000_clear_vfta_generic"); + +- wrfl(); ++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); ++ E1000_WRITE_FLUSH(hw); + } + } + + /** +- * igb_write_vfta_i350 - Write value to VLAN filter table ++ * igb_e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table +@@ -147,113 +253,85 @@ + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +-static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) + { +- int i; +- +- for (i = 0; i < 10; i++) +- array_wr32(E1000_VFTA, offset, value); ++ DEBUGFUNC("igb_e1000_write_vfta_generic"); + +- wrfl(); ++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_init_rx_addrs - Initialize receive address's ++ * e1000_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * +- * Setups the receive address registers by setting the base receive address ++ * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. 
+ **/ +-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) ++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) + { + u32 i; +- u8 mac_addr[ETH_ALEN] = {0}; ++ u8 mac_addr[ETH_ADDR_LEN] = {0}; ++ ++ DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ +- hw_dbg("Programming MAC Address into RAR[0]\n"); ++ DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ +- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); ++ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); + } + + /** +- * igb_vfta_set - enable or disable vlan in VLAN filter table +- * @hw: pointer to the HW structure +- * @vid: VLAN id to add or remove +- * @add: if true add filter, if false remove +- * +- * Sets or clears a bit in the VLAN filter table array based on VLAN id +- * and if we are adding or removing the filter +- **/ +-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +-{ +- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; +- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); +- u32 vfta; +- struct igb_adapter *adapter = hw->back; +- s32 ret_val = 0; +- +- vfta = adapter->shadow_vfta[index]; +- +- /* bit was set/cleared before we started */ +- if ((!!(vfta & mask)) == add) { +- ret_val = -E1000_ERR_CONFIG; +- } else { +- if (add) +- vfta |= mask; +- else +- vfta &= ~mask; +- } +- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) +- igb_write_vfta_i350(hw, index, vfta); +- else +- igb_write_vfta(hw, index, vfta); +- adapter->shadow_vfta[index] = vfta; +- +- return ret_val; +-} +- +-/** +- * igb_check_alt_mac_addr - Check for alternate MAC addr ++ * igb_e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent +- * address and must override the actual permanent MAC address. If an +- * alternate MAC address is found it is saved in the hw struct and +- * programmed into RAR0 and the function returns success, otherwise the +- * function returns an error. ++ * address and must override the actual permanent MAC address. If an ++ * alternate MAC address is found it is programmed into RAR0, replacing ++ * the permanent address that was installed into RAR0 by the Si on reset. ++ * This function will return SUCCESS unless it encounters an error while ++ * reading the EEPROM. + **/ +-s32 igb_check_alt_mac_addr(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) + { + u32 i; +- s32 ret_val = 0; ++ s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; +- u8 alt_mac_addr[ETH_ALEN]; ++ u8 alt_mac_addr[ETH_ADDR_LEN]; ++ ++ DEBUGFUNC("igb_e1000_check_alt_mac_addr_generic"); ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); ++ if (ret_val) ++ return ret_val; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. 
+ */ + if (hw->mac.type >= e1000_82580) +- goto out; ++ return E1000_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, +- &nvm_alt_mac_addr_offset); ++ &nvm_alt_mac_addr_offset); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ +- goto out; ++ return E1000_SUCCESS; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; +@@ -262,12 +340,12 @@ + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; +- for (i = 0; i < ETH_ALEN; i += 2) { ++ for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); +@@ -275,9 +353,9 @@ + } + + /* if multicast bit is set, the alternate address will not be used */ +- if (is_multicast_ether_addr(alt_mac_addr)) { +- hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); +- goto out; ++ if (alt_mac_addr[0] & 0x01) { ++ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); ++ return E1000_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the +@@ -286,12 +364,11 @@ + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_rar_set - Set receive address register ++ * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register +@@ -299,16 +376,17 @@ + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) ++static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) + { + u32 rar_low, rar_high; + ++ DEBUGFUNC("e1000_rar_set_generic"); ++ + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ +- rar_low = ((u32) addr[0] | +- ((u32) addr[1] << 8) | +- ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); ++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + +@@ -320,60 +398,29 @@ + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ +- wr32(E1000_RAL(index), rar_low); +- wrfl(); +- wr32(E1000_RAH(index), rar_high); +- wrfl(); +-} ++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); ++ E1000_WRITE_FLUSH(hw); ++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); ++ E1000_WRITE_FLUSH(hw); + +-/** +- * igb_mta_set - Set multicast filter table address +- * @hw: pointer to the HW structure +- * @hash_value: determines the MTA register and bit to set +- * +- * The multicast table address is a register array of 32-bit registers. +- * The hash_value is used to determine what register the bit is in, the +- * current value is read, the new bit is OR'd in and the new value is +- * written back into the register. +- **/ +-void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +-{ +- u32 hash_bit, hash_reg, mta; +- +- /* The MTA is a register array of 32-bit registers. 
It is +- * treated like an array of (32*mta_reg_count) bits. We want to +- * set bit BitArray[hash_value]. So we figure out what register +- * the bit is in, read it, OR in the new bit, then write +- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a +- * mask to bits 31:5 of the hash value which gives us the +- * register we're modifying. The hash bit within that register +- * is determined by the lower 5 bits of the hash value. +- */ +- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); +- hash_bit = hash_value & 0x1F; +- +- mta = array_rd32(E1000_MTA, hash_reg); +- +- mta |= (1 << hash_bit); +- +- array_wr32(E1000_MTA, hash_reg, mta); +- wrfl(); ++ return E1000_SUCCESS; + } + + /** +- * igb_hash_mc_addr - Generate a multicast hash value ++ * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine +- * the multicast filter table array address and new table value. See +- * igb_mta_set() ++ * the multicast filter table array address and new table value. + **/ +-static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) ++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) + { + u32 hash_value, hash_mask; + u8 bit_shift = 0; + ++ DEBUGFUNC("e1000_hash_mc_addr_generic"); ++ + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + +@@ -401,7 +448,7 @@ + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 +- * LSB MSB ++ * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 +@@ -430,7 +477,7 @@ + } + + /** +- * igb_update_mc_addr_list - Update Multicast addresses ++ * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program +@@ -438,150 +485,412 @@ + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
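The hash produced by e1000_hash_mc_addr_generic() above is split into a register index and a bit position when it is folded into the MTA shadow. A worked sketch of that mapping, using the case-0 value from the comment above (illustrative only, not part of the patch; assumes the usual 128-entry MTA):

static void mta_shadow_set_bit(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	u32 hash_bit = hash_value & 0x1F;

	/* e.g. hash_value 0x563 (filter type 0, 01:AA:00:12:34:56) sets
	 * bit 3 of shadow register 0x2B */
	hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
}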
+ **/ +-void igb_update_mc_addr_list(struct e1000_hw *hw, +- u8 *mc_addr_list, u32 mc_addr_count) ++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, ++ u8 *mc_addr_list, u32 mc_addr_count) + { + u32 hash_value, hash_bit, hash_reg; + int i; + ++ DEBUGFUNC("e1000_update_mc_addr_list_generic"); ++ + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { +- hash_value = igb_hash_mc_addr(hw, mc_addr_list); ++ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); +- mc_addr_list += (ETH_ALEN); ++ mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) +- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); +- wrfl(); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_clear_hw_cntrs_base - Clear base hardware counters ++ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value ++ * @hw: pointer to the HW structure ++ * ++ * In certain situations, a system BIOS may report that the PCIx maximum ++ * memory read byte count (MMRBC) value is higher than than the actual ++ * value. We check the PCIx command register with the current PCIx status ++ * register. ++ **/ ++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) ++{ ++ u16 cmd_mmrbc; ++ u16 pcix_cmd; ++ u16 pcix_stat_hi_word; ++ u16 stat_mmrbc; ++ ++ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); ++ ++ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ ++ if (hw->bus.type != e1000_bus_type_pcix) ++ return; ++ ++ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); ++ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); ++ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> ++ PCIX_COMMAND_MMRBC_SHIFT; ++ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> ++ PCIX_STATUS_HI_MMRBC_SHIFT; ++ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) ++ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; ++ if (cmd_mmrbc > stat_mmrbc) { ++ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; ++ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; ++ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); ++ } ++} ++ ++/** ++ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ **/ +-void igb_clear_hw_cntrs_base(struct e1000_hw *hw) ++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) + { +- rd32(E1000_CRCERRS); +- rd32(E1000_SYMERRS); +- rd32(E1000_MPC); +- rd32(E1000_SCC); +- rd32(E1000_ECOL); +- rd32(E1000_MCC); +- rd32(E1000_LATECOL); +- rd32(E1000_COLC); +- rd32(E1000_DC); +- rd32(E1000_SEC); +- rd32(E1000_RLEC); +- rd32(E1000_XONRXC); +- rd32(E1000_XONTXC); +- rd32(E1000_XOFFRXC); +- rd32(E1000_XOFFTXC); +- rd32(E1000_FCRUC); +- rd32(E1000_GPRC); +- rd32(E1000_BPRC); +- rd32(E1000_MPRC); +- rd32(E1000_GPTC); +- rd32(E1000_GORCL); +- rd32(E1000_GORCH); +- rd32(E1000_GOTCL); +- rd32(E1000_GOTCH); +- rd32(E1000_RNBC); +- rd32(E1000_RUC); +- rd32(E1000_RFC); +- rd32(E1000_ROC); +- rd32(E1000_RJC); +- rd32(E1000_TORL); +- rd32(E1000_TORH); +- rd32(E1000_TOTL); +- rd32(E1000_TOTH); +- rd32(E1000_TPR); +- rd32(E1000_TPT); +- rd32(E1000_MPTC); +- rd32(E1000_BPTC); ++ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); ++ ++ E1000_READ_REG(hw, E1000_CRCERRS); ++ E1000_READ_REG(hw, E1000_SYMERRS); ++ E1000_READ_REG(hw, E1000_MPC); ++ E1000_READ_REG(hw, E1000_SCC); ++ E1000_READ_REG(hw, E1000_ECOL); ++ E1000_READ_REG(hw, E1000_MCC); ++ E1000_READ_REG(hw, E1000_LATECOL); ++ E1000_READ_REG(hw, E1000_COLC); ++ E1000_READ_REG(hw, E1000_DC); ++ E1000_READ_REG(hw, E1000_SEC); ++ E1000_READ_REG(hw, E1000_RLEC); ++ E1000_READ_REG(hw, E1000_XONRXC); ++ E1000_READ_REG(hw, E1000_XONTXC); ++ E1000_READ_REG(hw, E1000_XOFFRXC); ++ E1000_READ_REG(hw, E1000_XOFFTXC); ++ E1000_READ_REG(hw, E1000_FCRUC); ++ E1000_READ_REG(hw, E1000_GPRC); ++ E1000_READ_REG(hw, E1000_BPRC); ++ E1000_READ_REG(hw, E1000_MPRC); ++ E1000_READ_REG(hw, E1000_GPTC); ++ E1000_READ_REG(hw, E1000_GORCL); ++ E1000_READ_REG(hw, E1000_GORCH); ++ E1000_READ_REG(hw, E1000_GOTCL); ++ E1000_READ_REG(hw, E1000_GOTCH); ++ E1000_READ_REG(hw, E1000_RNBC); ++ E1000_READ_REG(hw, E1000_RUC); ++ E1000_READ_REG(hw, E1000_RFC); ++ E1000_READ_REG(hw, E1000_ROC); ++ E1000_READ_REG(hw, E1000_RJC); ++ E1000_READ_REG(hw, E1000_TORL); ++ E1000_READ_REG(hw, E1000_TORH); ++ E1000_READ_REG(hw, E1000_TOTL); ++ E1000_READ_REG(hw, E1000_TOTH); ++ E1000_READ_REG(hw, E1000_TPR); ++ E1000_READ_REG(hw, E1000_TPT); ++ E1000_READ_REG(hw, E1000_MPTC); ++ E1000_READ_REG(hw, E1000_BPTC); + } + + /** +- * igb_check_for_copper_link - Check for link (Copper) ++ * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +-s32 igb_check_for_copper_link(struct e1000_hw *hw) ++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + ++ DEBUGFUNC("e1000_check_for_copper_link"); ++ + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ +- if (!mac->get_link_status) { +- ret_val = 0; +- goto out; +- } ++ if (!mac->get_link_status) ++ return E1000_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) +- goto out; /* No link detected */ ++ return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ +- igb_check_downshift(hw); ++ e1000_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ +- if (!mac->autoneg) { +- ret_val = -E1000_ERR_CONFIG; +- goto out; +- } ++ if (!mac->autoneg) ++ return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ +- igb_config_collision_dist(hw); ++ mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ +- ret_val = igb_config_fc_after_link_up(hw); ++ ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) +- hw_dbg("Error configuring flow control\n"); ++ DEBUGOUT("Error configuring flow control\n"); + +-out: + return ret_val; + } + + /** +- * igb_setup_link - Setup flow control and link settings ++ * e1000_check_for_fiber_link_generic - Check for link (Fiber) ++ * @hw: pointer to the HW structure ++ * ++ * Checks for link up on the hardware. If link is not up and we have ++ * a signal, then we need to force link up. ++ **/ ++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 rxcw; ++ u32 ctrl; ++ u32 status; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_check_for_fiber_link_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ ++ /* If we don't have link (auto-negotiation failed or link partner ++ * cannot auto-negotiate), the cable is plugged in (we have signal), ++ * and our link partner is not trying to auto-negotiate with us (we ++ * are receiving idles or data), we need to force link up. We also ++ * need to give auto-negotiation time to complete, in case the cable ++ * was just plugged in. The autoneg_failed flag does this. ++ */ ++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ ++ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && ++ !(rxcw & E1000_RXCW_C)) { ++ if (!mac->autoneg_failed) { ++ mac->autoneg_failed = true; ++ return E1000_SUCCESS; ++ } ++ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); ++ ++ /* Disable auto-negotiation in the TXCW register */ ++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); ++ ++ /* Force link-up and also force full-duplex. */ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ ++ /* Configure Flow Control after forcing link up. 
*/ ++ ret_val = e1000_config_fc_after_link_up_generic(hw); ++ if (ret_val) { ++ DEBUGOUT("Error configuring flow control\n"); ++ return ret_val; ++ } ++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { ++ /* If we are forcing link and we are receiving /C/ ordered ++ * sets, re-enable auto-negotiation in the TXCW register ++ * and disable forced link in the Device Control register ++ * in an attempt to auto-negotiate with our link partner. ++ */ ++ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); ++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); ++ ++ mac->serdes_has_link = true; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_check_for_serdes_link_generic - Check for link (Serdes) ++ * @hw: pointer to the HW structure ++ * ++ * Checks for link up on the hardware. If link is not up and we have ++ * a signal, then we need to force link up. ++ **/ ++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 rxcw; ++ u32 ctrl; ++ u32 status; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_check_for_serdes_link_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ ++ /* If we don't have link (auto-negotiation failed or link partner ++ * cannot auto-negotiate), and our link partner is not trying to ++ * auto-negotiate with us (we are receiving idles or data), ++ * we need to force link up. We also need to give auto-negotiation ++ * time to complete. ++ */ ++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ ++ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { ++ if (!mac->autoneg_failed) { ++ mac->autoneg_failed = true; ++ return E1000_SUCCESS; ++ } ++ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); ++ ++ /* Disable auto-negotiation in the TXCW register */ ++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); ++ ++ /* Force link-up and also force full-duplex. */ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ ++ /* Configure Flow Control after forcing link up. */ ++ ret_val = e1000_config_fc_after_link_up_generic(hw); ++ if (ret_val) { ++ DEBUGOUT("Error configuring flow control\n"); ++ return ret_val; ++ } ++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { ++ /* If we are forcing link and we are receiving /C/ ordered ++ * sets, re-enable auto-negotiation in the TXCW register ++ * and disable forced link in the Device Control register ++ * in an attempt to auto-negotiate with our link partner. ++ */ ++ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); ++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); ++ ++ mac->serdes_has_link = true; ++ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { ++ /* If we force link for non-auto-negotiation switch, check ++ * link status based on MAC synchronization for internal ++ * serdes media type. ++ */ ++ /* SYNCH bit and IV bit are sticky. 
*/ ++ usec_delay(10); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ if (rxcw & E1000_RXCW_SYNCH) { ++ if (!(rxcw & E1000_RXCW_IV)) { ++ mac->serdes_has_link = true; ++ DEBUGOUT("SERDES: Link up - forced.\n"); ++ } ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - force failed.\n"); ++ } ++ } ++ ++ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if (status & E1000_STATUS_LU) { ++ /* SYNCH bit and IV bit are sticky, so reread rxcw. */ ++ usec_delay(10); ++ rxcw = E1000_READ_REG(hw, E1000_RXCW); ++ if (rxcw & E1000_RXCW_SYNCH) { ++ if (!(rxcw & E1000_RXCW_IV)) { ++ mac->serdes_has_link = true; ++ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); ++ } ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - no sync.\n"); ++ } ++ } else { ++ mac->serdes_has_link = false; ++ DEBUGOUT("SERDES: Link down - autoneg failed\n"); ++ } ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_default_fc_generic - Set flow control default values ++ * @hw: pointer to the HW structure ++ * ++ * Read the EEPROM for the default values for flow control and store the ++ * values. ++ **/ ++static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 nvm_data; ++ u16 nvm_offset = 0; ++ ++ DEBUGFUNC("e1000_set_default_fc_generic"); ++ ++ /* Read and store word 0x0F of the EEPROM. This word contains bits ++ * that determine the hardware's default PAUSE (flow control) mode, ++ * a bit that determines whether the HW defaults to enabling or ++ * disabling auto-negotiation, and the direction of the ++ * SW defined pins. If there is no SW over-ride of the flow ++ * control setting, then the variable hw->fc will ++ * be initialized based on a value in the EEPROM. ++ */ ++ if (hw->mac.type == e1000_i350) { ++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); ++ ret_val = hw->nvm.ops.read(hw, ++ NVM_INIT_CONTROL2_REG + ++ nvm_offset, ++ 1, &nvm_data); ++ } else { ++ ret_val = hw->nvm.ops.read(hw, ++ NVM_INIT_CONTROL2_REG, ++ 1, &nvm_data); ++ } ++ ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) ++ hw->fc.requested_mode = e1000_fc_none; ++ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == ++ NVM_WORD0F_ASM_DIR) ++ hw->fc.requested_mode = e1000_fc_tx_pause; ++ else ++ hw->fc.requested_mode = e1000_fc_full; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow +@@ -590,91 +899,260 @@ + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +-s32 igb_setup_link(struct e1000_hw *hw) ++s32 e1000_setup_link_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ +- if (igb_check_reset_block(hw)) +- goto out; ++ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) ++ return E1000_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. 
+ */ + if (hw->fc.requested_mode == e1000_fc_default) { +- ret_val = igb_set_default_fc(hw); ++ ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) +- goto out; ++ return ret_val; + } + +- /* We want to save off the original Flow Control configuration just +- * in case we get disconnected and then reconnected into a different +- * hub or switch with different Flow Control capabilities. ++ /* Save off the requested flow control mode for use later. Depending ++ * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + +- hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); ++ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", ++ hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) +- goto out; ++ return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ +- hw_dbg("Initializing the Flow Control address, type and timer regs\n"); +- wr32(E1000_FCT, FLOW_CONTROL_TYPE); +- wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); +- wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); ++ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); ++ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); ++ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); ++ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); ++ ++ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + +- wr32(E1000_FCTTV, hw->fc.pause_time); ++ return e1000_set_fc_watermarks_generic(hw); ++} + +- ret_val = igb_set_fc_watermarks(hw); ++/** ++ * e1000_commit_fc_settings_generic - Configure flow control ++ * @hw: pointer to the HW structure ++ * ++ * Write the flow control settings to the Transmit Config Word Register (TXCW) ++ * base on the flow control settings in e1000_mac_info. ++ **/ ++static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 txcw; + +-out: ++ DEBUGFUNC("e1000_commit_fc_settings_generic"); ++ ++ /* Check for a software override of the flow control settings, and ++ * setup the device accordingly. If auto-negotiation is enabled, then ++ * software will have to set the "PAUSE" bits to the correct value in ++ * the Transmit Config Word Register (TXCW) and re-start auto- ++ * negotiation. However, if auto-negotiation is disabled, then ++ * software will have to manually configure the two flow control enable ++ * bits in the CTRL register. ++ * ++ * The possible values of the "fc" parameter are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but we ++ * do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++ */ ++ switch (hw->fc.current_mode) { ++ case e1000_fc_none: ++ /* Flow control completely disabled by a software over-ride. */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); ++ break; ++ case e1000_fc_rx_pause: ++ /* Rx Flow control is enabled and Tx Flow control is disabled ++ * by a software over-ride. 
Since there really isn't a way to ++ * advertise that we are capable of Rx Pause ONLY, we will ++ * advertise that we support both symmetric and asymmetric Rx ++ * PAUSE. Later, we will disable the adapter's ability to send ++ * PAUSE frames. ++ */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); ++ break; ++ case e1000_fc_tx_pause: ++ /* Tx Flow control is enabled, and Rx Flow control is disabled, ++ * by a software over-ride. ++ */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); ++ break; ++ case e1000_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by a software ++ * over-ride. ++ */ ++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); ++ break; ++ default: ++ DEBUGOUT("Flow control param set incorrectly\n"); ++ return -E1000_ERR_CONFIG; ++ break; ++ } ++ ++ E1000_WRITE_REG(hw, E1000_TXCW, txcw); ++ mac->txcw = txcw; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_poll_fiber_serdes_link_generic - Poll for link up ++ * @hw: pointer to the HW structure ++ * ++ * Polls for link up by reading the status register, if link fails to come ++ * up with auto-negotiation, then the link is forced if a signal is detected. ++ **/ ++static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ u32 i, status; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); ++ ++ /* If we have a signal (the cable is plugged in, or assumed true for ++ * serdes media) then poll for a "Link-Up" indication in the Device ++ * Status Register. Time-out if a link isn't seen in 500 milliseconds ++ * seconds (Auto-negotiation should complete in less than 500 ++ * milliseconds even if the other end is doing it in SW). ++ */ ++ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { ++ msec_delay(10); ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if (status & E1000_STATUS_LU) ++ break; ++ } ++ if (i == FIBER_LINK_UP_LIMIT) { ++ DEBUGOUT("Never got a valid link from auto-neg!!!\n"); ++ mac->autoneg_failed = true; ++ /* AutoNeg failed to achieve a link, so we'll call ++ * mac->check_for_link. This routine will force the ++ * link up if we detect a signal. This will allow us to ++ * communicate with non-autonegotiating link partners. ++ */ ++ ret_val = mac->ops.check_for_link(hw); ++ if (ret_val) { ++ DEBUGOUT("Error while checking for link\n"); ++ return ret_val; ++ } ++ mac->autoneg_failed = false; ++ } else { ++ mac->autoneg_failed = false; ++ DEBUGOUT("Valid Link Found\n"); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes ++ * @hw: pointer to the HW structure ++ * ++ * Configures collision distance and flow control for fiber and serdes ++ * links. Upon successful setup, poll for link. ++ **/ ++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) ++{ ++ u32 ctrl; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ++ /* Take the link out of reset */ ++ ctrl &= ~E1000_CTRL_LRST; ++ ++ hw->mac.ops.config_collision_dist(hw); ++ ++ ret_val = e1000_commit_fc_settings_generic(hw); ++ if (ret_val) ++ return ret_val; ++ ++ /* Since auto-negotiation is enabled, take the link out of reset (the ++ * link will be in reset, because we previously reset the chip). This ++ * will restart auto-negotiation. 
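All of the flow-control plumbing above keys off hw->fc.requested_mode: e1000_setup_link_generic() only falls back to the EEPROM default (word 0x0F) when the requested mode is e1000_fc_default, and otherwise commits whatever the caller asked for. A minimal caller sketch (illustrative only; bring_up_link_rx_pause() is a hypothetical helper, not part of the patch):

static s32 bring_up_link_rx_pause(struct e1000_hw *hw)
{
	/* ask for Rx-only PAUSE instead of the EEPROM default */
	hw->fc.requested_mode = e1000_fc_rx_pause;
	return e1000_setup_link_generic(hw);
}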
If auto-negotiation is successful ++ * then the link-up status bit will be set and the flow control enable ++ * bits (RFCE and TFCE) will be set according to their negotiated value. ++ */ ++ DEBUGOUT("Auto-negotiation enabled\n"); ++ ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ E1000_WRITE_FLUSH(hw); ++ msec_delay(1); ++ ++ /* For these adapters, the SW definable pin 1 is set when the optics ++ * detect a signal. If we have a signal, then poll for a "Link-Up" ++ * indication. ++ */ ++ if (hw->phy.media_type == e1000_media_type_internal_serdes || ++ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { ++ ret_val = e1000_poll_fiber_serdes_link_generic(hw); ++ } else { ++ DEBUGOUT("No signal detected\n"); ++ } + + return ret_val; + } + + /** +- * igb_config_collision_dist - Configure collision distance ++ * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used +- * during link setup. Currently no func pointer exists and all +- * implementations are handled in the generic version of this function. ++ * during link setup. + **/ +-void igb_config_collision_dist(struct e1000_hw *hw) ++static void e1000_config_collision_dist_generic(struct e1000_hw *hw) + { + u32 tctl; + +- tctl = rd32(E1000_TCTL); ++ DEBUGFUNC("e1000_config_collision_dist_generic"); ++ ++ tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + +- wr32(E1000_TCTL, tctl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_TCTL, tctl); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_set_fc_watermarks - Set flow control high/low watermarks ++ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame +- * tansmission as well. ++ * transmission as well. + **/ +-static s32 igb_set_fc_watermarks(struct e1000_hw *hw) ++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; + u32 fcrtl = 0, fcrth = 0; + ++ DEBUGFUNC("e1000_set_fc_watermarks_generic"); ++ + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the +@@ -692,61 +1170,14 @@ + + fcrth = hw->fc.high_water; + } +- wr32(E1000_FCRTL, fcrtl); +- wr32(E1000_FCRTH, fcrth); ++ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); ++ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_set_default_fc - Set flow control default values +- * @hw: pointer to the HW structure +- * +- * Read the EEPROM for the default values for flow control and store the +- * values. +- **/ +-static s32 igb_set_default_fc(struct e1000_hw *hw) +-{ +- s32 ret_val = 0; +- u16 lan_offset; +- u16 nvm_data; +- +- /* Read and store word 0x0F of the EEPROM. This word contains bits +- * that determine the hardware's default PAUSE (flow control) mode, +- * a bit that determines whether the HW defaults to enabling or +- * disabling auto-negotiation, and the direction of the +- * SW defined pins. If there is no SW over-ride of the flow +- * control setting, then the variable hw->fc will +- * be initialized based on a value in the EEPROM. 
+- */ +- if (hw->mac.type == e1000_i350) { +- lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); +- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG +- + lan_offset, 1, &nvm_data); +- } else { +- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, +- 1, &nvm_data); +- } +- +- if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; +- } +- +- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) +- hw->fc.requested_mode = e1000_fc_none; +- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == +- NVM_WORD0F_ASM_DIR) +- hw->fc.requested_mode = e1000_fc_tx_pause; +- else +- hw->fc.requested_mode = e1000_fc_full; +- +-out: +- return ret_val; +-} +- +-/** +- * igb_force_mac_fc - Force the MAC's flow control settings ++ * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the +@@ -755,12 +1186,13 @@ + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +-s32 igb_force_mac_fc(struct e1000_hw *hw) ++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) + { + u32 ctrl; +- s32 ret_val = 0; + +- ctrl = rd32(E1000_CTRL); ++ DEBUGFUNC("e1000_force_mac_fc_generic"); ++ ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY +@@ -776,10 +1208,10 @@ + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). +- * 3: Both Rx and TX flow control (symmetric) is enabled. ++ * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ +- hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); ++ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: +@@ -797,19 +1229,17 @@ + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: +- hw_dbg("Flow control param set incorrectly\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Flow control param set incorrectly\n"); ++ return -E1000_ERR_CONFIG; + } + +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_config_fc_after_link_up - Configures flow control after link ++ * e1000_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the +@@ -818,29 +1248,32 @@ + * and did not fail, then we configure flow control based on our link + * partner. + **/ +-s32 igb_config_fc_after_link_up(struct e1000_hw *hw) ++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + ++ DEBUGFUNC("e1000_config_fc_after_link_up_generic"); ++ + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. 
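For orientation, the switch in e1000_force_mac_fc_generic() above reduces to a small table: mode 0 leaves both pause-enable bits clear, mode 1 sets only the receive enable (RFCE), mode 2 only the transmit enable (TFCE), and mode 3 sets both, exactly as the numbered comment describes. A minimal standalone sketch of that mapping, kept outside the patch and using assumed stand-in bit positions rather than the driver's E1000_CTRL_RFCE / E1000_CTRL_TFCE definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in bit values; the driver's names are E1000_CTRL_RFCE and
 * E1000_CTRL_TFCE, and the positions used here are assumptions. */
#define CTRL_RFCE (1u << 27)    /* received PAUSE frames are honored      */
#define CTRL_TFCE (1u << 28)    /* transmission of PAUSE frames is enabled */

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

/* Mirror of the CTRL-register switch in e1000_force_mac_fc_generic(). */
static uint32_t apply_fc_mode(uint32_t ctrl, enum fc_mode mode)
{
    ctrl &= ~(CTRL_RFCE | CTRL_TFCE);                 /* mode 0: neither  */
    switch (mode) {
    case fc_rx_pause: ctrl |= CTRL_RFCE;              break;  /* mode 1 */
    case fc_tx_pause: ctrl |= CTRL_TFCE;              break;  /* mode 2 */
    case fc_full:     ctrl |= CTRL_RFCE | CTRL_TFCE;  break;  /* mode 3 */
    default:                                          break;
    }
    return ctrl;
}

int main(void)
{
    for (int m = fc_none; m <= fc_full; m++)
        printf("fc mode %d -> CTRL 0x%08x\n", m,
               (unsigned)apply_fc_mode(0, (enum fc_mode)m));
    return 0;
}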
+ */ + if (mac->autoneg_failed) { +- if (hw->phy.media_type == e1000_media_type_internal_serdes) +- ret_val = igb_force_mac_fc(hw); ++ if (hw->phy.media_type == e1000_media_type_fiber || ++ hw->phy.media_type == e1000_media_type_internal_serdes) ++ ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) +- ret_val = igb_force_mac_fc(hw); ++ ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { +- hw_dbg("Error forcing flow control settings\n"); +- goto out; ++ DEBUGOUT("Error forcing flow control settings\n"); ++ return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is +@@ -853,18 +1286,16 @@ + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ +- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, +- &mii_status_reg); ++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) +- goto out; +- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, +- &mii_status_reg); ++ return ret_val; ++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) +- goto out; ++ return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { +- hw_dbg("Copper PHY and Auto Neg has not completed.\n"); +- goto out; ++ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); ++ return ret_val; + } + + /* The AutoNeg process has completed, so we now need to +@@ -874,13 +1305,13 @@ + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, +- &mii_nway_adv_reg); ++ &mii_nway_adv_reg); + if (ret_val) +- goto out; ++ return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, +- &mii_nway_lp_ability_reg); ++ &mii_nway_lp_ability_reg); + if (ret_val) +- goto out; ++ return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base +@@ -917,18 +1348,18 @@ + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { +- /* Now we need to check if the user selected RX ONLY ++ /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise +- * FULL flow control because we could not advertise RX ++ * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to +- * turn OFF the TRANSMISSION of PAUSE frames. ++ * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; +- hw_dbg("Flow Control = FULL.\n"); ++ DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = RX PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. +@@ -943,7 +1374,7 @@ + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; +- hw_dbg("Flow Control = TX PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * +@@ -957,46 +1388,23 @@ + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = RX PAUSE frames only.\n"); +- } +- /* Per the IEEE spec, at this point flow control should be +- * disabled. 
However, we want to consider that we could +- * be connected to a legacy switch that doesn't advertise +- * desired flow control, but can be forced on the link +- * partner. So if we advertised no flow control, that is +- * what we will resolve to. If we advertised some kind of +- * receive capability (Rx Pause Only or Full Flow Control) +- * and the link partner advertised none, we will configure +- * ourselves to enable Rx Flow Control only. We can do +- * this safely for two reasons: If the link partner really +- * didn't want flow control enabled, and we enable Rx, no +- * harm done since we won't be receiving any PAUSE frames +- * anyway. If the intent on the link partner was to have +- * flow control enabled, then by us enabling RX only, we +- * can at least receive pause frames and process them. +- * This is a good idea because in most cases, since we are +- * predominantly a server NIC, more times than not we will +- * be asked to delay transmission of packets than asking +- * our link partner to pause transmission of frames. +- */ +- else if ((hw->fc.requested_mode == e1000_fc_none) || +- (hw->fc.requested_mode == e1000_fc_tx_pause) || +- (hw->fc.strict_ieee)) { +- hw->fc.current_mode = e1000_fc_none; +- hw_dbg("Flow Control = NONE.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { +- hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = RX PAUSE frames only.\n"); ++ /* Per the IEEE spec, at this point flow control ++ * should be disabled. ++ */ ++ hw->fc.current_mode = e1000_fc_none; ++ DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ +- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); ++ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { +- hw_dbg("Error getting link speed and duplex\n"); +- goto out; ++ DEBUGOUT("Error getting link speed and duplex\n"); ++ return ret_val; + } + + if (duplex == HALF_DUPLEX) +@@ -1005,26 +1413,27 @@ + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ +- ret_val = igb_force_mac_fc(hw); ++ ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { +- hw_dbg("Error forcing flow control settings\n"); +- goto out; ++ DEBUGOUT("Error forcing flow control settings\n"); ++ return ret_val; + } + } ++ + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ +- if ((hw->phy.media_type == e1000_media_type_internal_serdes) +- && mac->autoneg) { ++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) && ++ mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ +- pcs_status_reg = rd32(E1000_PCS_LSTAT); ++ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { +- hw_dbg("PCS Auto Neg has not completed.\n"); ++ DEBUGOUT("PCS Auto Neg has not completed.\n"); + return ret_val; + } + +@@ -1034,8 +1443,8 @@ + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. 
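The branch ladder above (and the PCS-register variant that follows for SerDes) is the IEEE 802.3 pause resolution: symmetric PAUSE on both sides gives full flow control, or Rx-only when Rx-only was requested locally, while the asymmetric cases hinge on which side set ASM_DIR. Below is a compact standalone sketch of that decision table using plain booleans in place of the MII/PCS advertisement bits; it is illustrative only and not part of the patch:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

/*
 * Resolve flow control from the local and link-partner PAUSE / ASM_DIR
 * bits, mirroring the branches of e1000_config_fc_after_link_up_generic().
 * 'requested' is what the local driver asked for before negotiation.
 */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause,  bool lp_asm,
                               enum fc_mode requested)
{
    if (loc_pause && lp_pause)                        /* both advertise PAUSE */
        return requested == fc_full ? fc_full : fc_rx_pause;

    if (!loc_pause && loc_asm && lp_pause && lp_asm)  /* we are Tx-only       */
        return fc_tx_pause;

    if (loc_pause && loc_asm && !lp_pause && lp_asm)  /* partner is Tx-only   */
        return fc_rx_pause;

    return fc_none;                                   /* per the IEEE spec    */
}

int main(void)
{
    /* Example: we asked for full, partner advertises symmetric PAUSE. */
    printf("resolved mode = %d\n",
           resolve_fc(true, true, true, true, fc_full));
    return 0;
}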
+ */ +- pcs_adv_reg = rd32(E1000_PCS_ANADV); +- pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); ++ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); ++ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base +@@ -1080,10 +1489,10 @@ + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; +- hw_dbg("Flow Control = FULL.\n"); ++ DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = Rx PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. +@@ -1098,7 +1507,7 @@ + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; +- hw_dbg("Flow Control = Tx PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * +@@ -1112,35 +1521,34 @@ + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; +- hw_dbg("Flow Control = Rx PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; +- hw_dbg("Flow Control = NONE.\n"); ++ DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ +- pcs_ctrl_reg = rd32(E1000_PCS_LCTL); ++ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; +- wr32(E1000_PCS_LCTL, pcs_ctrl_reg); ++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); + +- ret_val = igb_force_mac_fc(hw); ++ ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { +- hw_dbg("Error forcing flow control settings\n"); ++ DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex ++ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex +@@ -1148,172 +1556,185 @@ + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ **/ +-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, +- u16 *duplex) ++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, ++ u16 *duplex) + { + u32 status; + +- status = rd32(E1000_STATUS); ++ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); ++ ++ status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; +- hw_dbg("1000 Mbs, "); ++ DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; +- hw_dbg("100 Mbs, "); ++ DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; +- hw_dbg("10 Mbs, "); ++ DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; +- hw_dbg("Full Duplex\n"); ++ DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; +- hw_dbg("Half Duplex\n"); ++ DEBUGOUT("Half Duplex\n"); + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_hw_semaphore - Acquire hardware semaphore ++ * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex ++ * @hw: pointer to the HW structure ++ * @speed: stores the current speed ++ * @duplex: stores the current duplex ++ * ++ * Sets the speed and duplex to gigabit full duplex (the only possible option) ++ * for fiber/serdes links. ++ **/ ++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 *speed, u16 *duplex) ++{ ++ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); ++ ++ *speed = SPEED_1000; ++ *duplex = FULL_DUPLEX; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +-s32 igb_get_hw_semaphore(struct e1000_hw *hw) ++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) + { + u32 swsm; +- s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + ++ DEBUGFUNC("e1000_get_hw_semaphore_generic"); ++ + /* Get the SW semaphore */ + while (i < timeout) { +- swsm = rd32(E1000_SWSM); ++ swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + +- udelay(50); ++ usec_delay(50); + i++; + } + + if (i == timeout) { +- hw_dbg("Driver can't access device - SMBI bit is set.\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); ++ return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { +- swsm = rd32(E1000_SWSM); +- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); ++ swsm = E1000_READ_REG(hw, E1000_SWSM); ++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ +- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) ++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + +- udelay(50); ++ usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ +- igb_put_hw_semaphore(hw); +- hw_dbg("Driver can't access the NVM\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ e1000_put_hw_semaphore_generic(hw); ++ DEBUGOUT("Driver can't access the NVM\n"); ++ return -E1000_ERR_NVM; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_put_hw_semaphore - Release hardware semaphore ++ * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +-void igb_put_hw_semaphore(struct e1000_hw *hw) ++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) + { + u32 swsm; + +- swsm = rd32(E1000_SWSM); ++ DEBUGFUNC("e1000_put_hw_semaphore_generic"); ++ ++ swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + +- wr32(E1000_SWSM, swsm); ++ E1000_WRITE_REG(hw, E1000_SWSM, swsm); + } + + /** +- * igb_get_auto_rd_done - Check for auto read completion ++ * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +-s32 igb_get_auto_rd_done(struct e1000_hw *hw) ++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) + { + s32 i = 0; +- s32 ret_val = 0; + ++ DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { +- if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) ++ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { +- hw_dbg("Auto read by HW from NVM has not completed.\n"); +- ret_val = -E1000_ERR_RESET; +- goto out; ++ DEBUGOUT("Auto read by HW from NVM has not completed.\n"); ++ return -E1000_ERR_RESET; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_valid_led_default - Verify a valid default LED config ++ * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
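e1000_get_hw_semaphore_generic() and e1000_put_hw_semaphore_generic() above implement a two-stage handshake on the SWSM register: wait for the software bit (SMBI) to be free, then set the software/firmware bit (SWESMBI) and confirm it latched, and clear both bits on release. The standalone model below sketches that sequencing against a fake register; the bit values are assumptions and the hardware-assisted side of SMBI is only simulated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in SWSM bits; the driver uses E1000_SWSM_SMBI / E1000_SWSM_SWESMBI
 * (actual positions assumed here for the sake of the model). */
#define SWSM_SMBI    0x01u
#define SWSM_SWESMBI 0x02u

static uint32_t swsm;                       /* fake SWSM backing store */

static void sem_release(void)
{
    swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);    /* drop both bits, as the patch does */
}

/* Same two-stage shape as e1000_get_hw_semaphore_generic(). */
static bool sem_acquire(int timeout)
{
    int i;

    /* Stage 1: wait for the software semaphore (SMBI) to be free. */
    for (i = 0; i < timeout && (swsm & SWSM_SMBI); i++)
        ;
    if (i == timeout)
        return false;
    swsm |= SWSM_SMBI;                      /* set by hand here; the device
                                             * side of this is HW-assisted */

    /* Stage 2: request the SW/FW semaphore and confirm the bit latched. */
    for (i = 0; i < timeout; i++) {
        swsm |= SWSM_SWESMBI;
        if (swsm & SWSM_SWESMBI)
            return true;
    }
    sem_release();
    return false;
}

int main(void)
{
    if (sem_acquire(100)) {
        puts("semaphore held: NVM/PHY access is safe here");
        sem_release();
    }
    return 0;
}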
+ **/ +-static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) ++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) + { + s32 ret_val; + ++ DEBUGFUNC("e1000_valid_led_default_generic"); ++ + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + +- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { +- switch (hw->phy.media_type) { +- case e1000_media_type_internal_serdes: +- *data = ID_LED_DEFAULT_82575_SERDES; +- break; +- case e1000_media_type_copper: +- default: +- *data = ID_LED_DEFAULT; +- break; +- } +- } +-out: +- return ret_val; ++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) ++ *data = ID_LED_DEFAULT; ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_id_led_init - ++ * e1000_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +-s32 igb_id_led_init(struct e1000_hw *hw) ++s32 e1000_id_led_init_generic(struct e1000_hw *hw) + { + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; +@@ -1323,17 +1744,13 @@ + u16 data, i, temp; + const u16 led_mask = 0x0F; + +- /* i210 and i211 devices have different LED mechanism */ +- if ((hw->mac.type == e1000_i210) || +- (hw->mac.type == e1000_i211)) +- ret_val = igb_valid_led_default_i210(hw, &data); +- else +- ret_val = igb_valid_led_default(hw, &data); ++ DEBUGFUNC("e1000_id_led_init_generic"); + ++ ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) +- goto out; ++ return ret_val; + +- mac->ledctl_default = rd32(E1000_LEDCTL); ++ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + +@@ -1375,34 +1792,69 @@ + } + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_cleanup_led - Set LED config to default operation ++ * e1000_setup_led_generic - Configures SW controllable LED ++ * @hw: pointer to the HW structure ++ * ++ * This prepares the SW controllable LED for use and saves the current state ++ * of the LED so it can be later restored. ++ **/ ++s32 e1000_setup_led_generic(struct e1000_hw *hw) ++{ ++ u32 ledctl; ++ ++ DEBUGFUNC("e1000_setup_led_generic"); ++ ++ if (hw->mac.ops.setup_led != e1000_setup_led_generic) ++ return -E1000_ERR_CONFIG; ++ ++ if (hw->phy.media_type == e1000_media_type_fiber) { ++ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ++ hw->mac.ledctl_default = ledctl; ++ /* Turn off LED0 */ ++ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | ++ E1000_LEDCTL_LED0_MODE_MASK); ++ ledctl |= (E1000_LEDCTL_MODE_LED_OFF << ++ E1000_LEDCTL_LED0_MODE_SHIFT); ++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); ++ } else if (hw->phy.media_type == e1000_media_type_copper) { ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. 
+ **/ +-s32 igb_cleanup_led(struct e1000_hw *hw) ++s32 e1000_cleanup_led_generic(struct e1000_hw *hw) + { +- wr32(E1000_LEDCTL, hw->mac.ledctl_default); +- return 0; ++ DEBUGFUNC("e1000_cleanup_led_generic"); ++ ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); ++ return E1000_SUCCESS; + } + + /** +- * igb_blink_led - Blink LED ++ * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * +- * Blink the led's which are set to be on. ++ * Blink the LEDs which are set to be on. + **/ +-s32 igb_blink_led(struct e1000_hw *hw) ++s32 e1000_blink_led_generic(struct e1000_hw *hw) + { + u32 ledctl_blink = 0; + u32 i; + ++ DEBUGFUNC("e1000_blink_led_generic"); ++ + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | +@@ -1432,100 +1884,239 @@ + } + } + +- wr32(E1000_LEDCTL, ledctl_blink); ++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_led_on_generic - Turn LED on ++ * @hw: pointer to the HW structure ++ * ++ * Turn LED on. ++ **/ ++s32 e1000_led_on_generic(struct e1000_hw *hw) ++{ ++ u32 ctrl; ++ ++ DEBUGFUNC("e1000_led_on_generic"); ++ ++ switch (hw->phy.media_type) { ++ case e1000_media_type_fiber: ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl &= ~E1000_CTRL_SWDPIN0; ++ ctrl |= E1000_CTRL_SWDPIO0; ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ break; ++ case e1000_media_type_copper: ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); ++ break; ++ default: ++ break; ++ } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_led_off - Turn LED off ++ * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +-s32 igb_led_off(struct e1000_hw *hw) ++s32 e1000_led_off_generic(struct e1000_hw *hw) + { ++ u32 ctrl; ++ ++ DEBUGFUNC("e1000_led_off_generic"); ++ + switch (hw->phy.media_type) { ++ case e1000_media_type_fiber: ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ ctrl |= E1000_CTRL_SWDPIN0; ++ ctrl |= E1000_CTRL_SWDPIO0; ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ break; + case e1000_media_type_copper: +- wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); ++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + +- return 0; ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities ++ * @hw: pointer to the HW structure ++ * @no_snoop: bitmap of snoop events ++ * ++ * Set the PCI-express register to snoop for events enabled in 'no_snoop'. ++ **/ ++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) ++{ ++ u32 gcr; ++ ++ DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); ++ ++ if (hw->bus.type != e1000_bus_type_pci_express) ++ return; ++ ++ if (no_snoop) { ++ gcr = E1000_READ_REG(hw, E1000_GCR); ++ gcr &= ~(PCIE_NO_SNOOP_ALL); ++ gcr |= no_snoop; ++ E1000_WRITE_REG(hw, E1000_GCR, gcr); ++ } + } + + /** +- * igb_disable_pcie_master - Disables PCI-express master access ++ * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * +- * Returns 0 (0) if successful, else returns -10 ++ * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
+ **/ +-s32 igb_disable_pcie_master(struct e1000_hw *hw) ++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) + { + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) +- goto out; ++ return E1000_SUCCESS; + +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { +- if (!(rd32(E1000_STATUS) & +- E1000_STATUS_GIO_MASTER_ENABLE)) ++ if (!(E1000_READ_REG(hw, E1000_STATUS) & ++ E1000_STATUS_GIO_MASTER_ENABLE) || ++ E1000_REMOVED(hw->hw_addr)) + break; +- udelay(100); ++ usec_delay(100); + timeout--; + } + + if (!timeout) { +- hw_dbg("Master requests are pending.\n"); +- ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; +- goto out; ++ DEBUGOUT("Master requests are pending.\n"); ++ return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_validate_mdi_setting - Verify MDI/MDIx settings ++ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * +- * Verify that when not using auto-negotitation that MDI/MDIx is correctly +- * set, which is forced to MDI mode only. ++ * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +-s32 igb_validate_mdi_setting(struct e1000_hw *hw) ++void e1000_reset_adaptive_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ struct e1000_mac_info *mac = &hw->mac; + +- /* All MDI settings are supported on 82580 and newer. */ +- if (hw->mac.type >= e1000_82580) +- goto out; ++ DEBUGFUNC("e1000_reset_adaptive_generic"); ++ ++ if (!mac->adaptive_ifs) { ++ DEBUGOUT("Not in Adaptive IFS mode!\n"); ++ return; ++ } ++ ++ mac->current_ifs_val = 0; ++ mac->ifs_min_val = IFS_MIN; ++ mac->ifs_max_val = IFS_MAX; ++ mac->ifs_step_size = IFS_STEP; ++ mac->ifs_ratio = IFS_RATIO; ++ ++ mac->in_ifs_mode = false; ++ E1000_WRITE_REG(hw, E1000_AIT, 0); ++} ++ ++/** ++ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing ++ * @hw: pointer to the HW structure ++ * ++ * Update the Adaptive Interframe Spacing Throttle value based on the ++ * time between transmitted packets and time between collisions. ++ **/ ++void e1000_update_adaptive_generic(struct e1000_hw *hw) ++{ ++ struct e1000_mac_info *mac = &hw->mac; ++ ++ DEBUGFUNC("e1000_update_adaptive_generic"); ++ ++ if (!mac->adaptive_ifs) { ++ DEBUGOUT("Not in Adaptive IFS mode!\n"); ++ return; ++ } ++ ++ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { ++ if (mac->tx_packet_delta > MIN_NUM_XMITS) { ++ mac->in_ifs_mode = true; ++ if (mac->current_ifs_val < mac->ifs_max_val) { ++ if (!mac->current_ifs_val) ++ mac->current_ifs_val = mac->ifs_min_val; ++ else ++ mac->current_ifs_val += ++ mac->ifs_step_size; ++ E1000_WRITE_REG(hw, E1000_AIT, ++ mac->current_ifs_val); ++ } ++ } ++ } else { ++ if (mac->in_ifs_mode && ++ (mac->tx_packet_delta <= MIN_NUM_XMITS)) { ++ mac->current_ifs_val = 0; ++ mac->in_ifs_mode = false; ++ E1000_WRITE_REG(hw, E1000_AIT, 0); ++ } ++ } ++} ++ ++/** ++ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings ++ * @hw: pointer to the HW structure ++ * ++ * Verify that when not using auto-negotiation that MDI/MDIx is correctly ++ * set, which is forced to MDI mode only. 
++ **/ ++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) ++{ ++ DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { +- hw_dbg("Invalid MDI setting detected\n"); ++ DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ return -E1000_ERR_CONFIG; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register ++ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings ++ * @hw: pointer to the HW structure ++ * ++ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced ++ * operation. ++ **/ ++s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to +@@ -1535,72 +2126,28 @@ + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, +- u32 offset, u8 data) ++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, ++ u32 offset, u8 data) + { + u32 i, regvalue = 0; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); +- wr32(reg, regvalue); ++ E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { +- udelay(5); +- regvalue = rd32(reg); ++ usec_delay(5); ++ regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { +- hw_dbg("Reg %08x did not indicate ready\n", reg); +- ret_val = -E1000_ERR_PHY; +- goto out; +- } +- +-out: +- return ret_val; +-} +- +-/** +- * igb_enable_mng_pass_thru - Enable processing of ARP's +- * @hw: pointer to the HW structure +- * +- * Verifies the hardware needs to leave interface enabled so that frames can +- * be directed to and from the management interface. 
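As its comment notes, the 8-bit control register helper above frames each write as address << 8 | data and then polls bit 31 for completion. The small standalone sketch below shows just the framing; the polling needs real hardware and is only indicated, and the snippet is illustrative rather than part of the patch:

#include <stdint.h>
#include <stdio.h>

#define GEN_CTL_ADDRESS_SHIFT 8             /* address field starts at bit 8 */
#define GEN_CTL_READY (1u << 31)            /* completion bit to poll        */

/* Build the value e1000_write_8bit_ctrl_reg_generic() writes to e.g. SCTL. */
static uint32_t pack_8bit_ctrl(uint32_t offset, uint8_t data)
{
    return (uint32_t)data | (offset << GEN_CTL_ADDRESS_SHIFT);
}

int main(void)
{
    uint32_t v = pack_8bit_ctrl(0x1A, 0x5C);

    printf("write 0x%08x, then poll until (reg & 0x%08x) is set\n",
           (unsigned)v, (unsigned)GEN_CTL_READY);
    return 0;
}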
+- **/ +-bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +-{ +- u32 manc; +- u32 fwsm, factps; +- bool ret_val = false; +- +- if (!hw->mac.asf_firmware_present) +- goto out; +- +- manc = rd32(E1000_MANC); +- +- if (!(manc & E1000_MANC_RCV_TCO_EN)) +- goto out; +- +- if (hw->mac.arc_subsystem_valid) { +- fwsm = rd32(E1000_FWSM); +- factps = rd32(E1000_FACTPS); +- +- if (!(factps & E1000_FACTPS_MNGCG) && +- ((fwsm & E1000_FWSM_MODE_MASK) == +- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { +- ret_val = true; +- goto out; +- } +- } else { +- if ((manc & E1000_MANC_SMBUS_EN) && +- !(manc & E1000_MANC_ASF_EN)) { +- ret_val = true; +- goto out; +- } ++ DEBUGOUT1("Reg %08x did not indicate ready\n", reg); ++ return -E1000_ERR_PHY; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h +--- a/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,87 +1,81 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* + +-#ifndef _E1000_MAC_H_ +-#define _E1000_MAC_H_ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-#include "e1000_hw.h" ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
+ +-#include "e1000_phy.h" +-#include "e1000_nvm.h" +-#include "e1000_defines.h" +-#include "e1000_i210.h" +- +-/* Functions that should not be called directly from drivers but can be used +- * by other files in this 'shared code' +- */ +-s32 igb_blink_led(struct e1000_hw *hw); +-s32 igb_check_for_copper_link(struct e1000_hw *hw); +-s32 igb_cleanup_led(struct e1000_hw *hw); +-s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +-s32 igb_disable_pcie_master(struct e1000_hw *hw); +-s32 igb_force_mac_fc(struct e1000_hw *hw); +-s32 igb_get_auto_rd_done(struct e1000_hw *hw); +-s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +-s32 igb_get_hw_semaphore(struct e1000_hw *hw); +-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, +- u16 *duplex); +-s32 igb_id_led_init(struct e1000_hw *hw); +-s32 igb_led_off(struct e1000_hw *hw); +-void igb_update_mc_addr_list(struct e1000_hw *hw, +- u8 *mc_addr_list, u32 mc_addr_count); +-s32 igb_setup_link(struct e1000_hw *hw); +-s32 igb_validate_mdi_setting(struct e1000_hw *hw); +-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, +- u32 offset, u8 data); +- +-void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +-void igb_clear_vfta(struct e1000_hw *hw); +-void igb_clear_vfta_i350(struct e1000_hw *hw); +-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +-void igb_config_collision_dist(struct e1000_hw *hw); +-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +-void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +-void igb_put_hw_semaphore(struct e1000_hw *hw); +-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +-s32 igb_check_alt_mac_addr(struct e1000_hw *hw); +- +-bool igb_enable_mng_pass_thru(struct e1000_hw *hw); +- +-enum e1000_mng_mode { +- e1000_mng_mode_none = 0, +- e1000_mng_mode_asf, +- e1000_mng_mode_pt, +- e1000_mng_mode_ipmi, +- e1000_mng_mode_host_if_only +-}; ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". + +-#define E1000_FACTPS_MNGCG 0x20000000 ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +-#define E1000_FWSM_MODE_MASK 0xE +-#define E1000_FWSM_MODE_SHIFT 1 ++*******************************************************************************/ + +-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 ++#ifndef _E1000_MAC_H_ ++#define _E1000_MAC_H_ + +-void e1000_init_function_pointers_82575(struct e1000_hw *hw); ++void e1000_init_mac_ops_generic(struct e1000_hw *hw); ++#ifndef E1000_REMOVED ++#define E1000_REMOVED(a) (0) ++#endif /* E1000_REMOVED */ ++void e1000_null_mac_generic(struct e1000_hw *hw); ++s32 e1000_null_ops_generic(struct e1000_hw *hw); ++s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); ++bool e1000_null_mng_mode(struct e1000_hw *hw); ++void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); ++void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); ++int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); ++s32 e1000_blink_led_generic(struct e1000_hw *hw); ++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); ++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); ++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); ++s32 e1000_cleanup_led_generic(struct e1000_hw *hw); ++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); ++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); ++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); ++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); ++s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); ++void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw); ++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); ++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, ++ u16 *duplex); ++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, ++ u16 *speed, u16 *duplex); ++s32 e1000_id_led_init_generic(struct e1000_hw *hw); ++s32 e1000_led_on_generic(struct e1000_hw *hw); ++s32 e1000_led_off_generic(struct e1000_hw *hw); ++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, ++ u8 *mc_addr_list, u32 mc_addr_count); ++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); ++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); ++s32 e1000_setup_led_generic(struct e1000_hw *hw); ++s32 e1000_setup_link_generic(struct e1000_hw *hw); ++s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); ++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, ++ u32 offset, u8 data); ++ ++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); ++ ++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); ++void igb_e1000_clear_vfta_generic(struct e1000_hw *hw); ++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); ++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); ++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); ++s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); ++void e1000_reset_adaptive_generic(struct e1000_hw *hw); ++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); ++void e1000_update_adaptive_generic(struct e1000_hw *hw); ++void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + + #endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.c b/drivers/net/ethernet/intel/igb/e1000_manage.c +--- a/drivers/net/ethernet/intel/igb/e1000_manage.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_manage.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,552 @@ 
++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++/** ++ * e1000_calculate_checksum - Calculate checksum for buffer ++ * @buffer: pointer to EEPROM ++ * @length: size of EEPROM to calculate a checksum for ++ * ++ * Calculates the checksum for some buffer on a specified length. The ++ * checksum calculated is returned. ++ **/ ++u8 e1000_calculate_checksum(u8 *buffer, u32 length) ++{ ++ u32 i; ++ u8 sum = 0; ++ ++ DEBUGFUNC("e1000_calculate_checksum"); ++ ++ if (!buffer) ++ return 0; ++ ++ for (i = 0; i < length; i++) ++ sum += buffer[i]; ++ ++ return (u8) (0 - sum); ++} ++ ++/** ++ * e1000_mng_enable_host_if_generic - Checks host interface is enabled ++ * @hw: pointer to the HW structure ++ * ++ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND ++ * ++ * This function checks whether the HOST IF is enabled for command operation ++ * and also checks whether the previous command is completed. It busy waits ++ * in case of previous command is not completed. ++ **/ ++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) ++{ ++ u32 hicr; ++ u8 i; ++ ++ DEBUGFUNC("e1000_mng_enable_host_if_generic"); ++ ++ if (!hw->mac.arc_subsystem_valid) { ++ DEBUGOUT("ARC subsystem not valid.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Check that the host interface is enabled. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_EN)) { ++ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ /* check the previous command is completed */ ++ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_C)) ++ break; ++ msec_delay_irq(1); ++ } ++ ++ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { ++ DEBUGOUT("Previous command timeout failed .\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_check_mng_mode_generic - Generic check management mode ++ * @hw: pointer to the HW structure ++ * ++ * Reads the firmware semaphore register and returns true (>0) if ++ * manageability is enabled, else false (0). 
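e1000_calculate_checksum() above returns the two's complement of the byte sum, so a buffer that carries that value in its checksum byte sums to zero modulo 256, which is the property the DHCP-cookie validation relies on further down. A short standalone sketch of the round trip (the example bytes are arbitrary and the snippet is not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as e1000_calculate_checksum(): zero minus the byte sum. */
static uint8_t calc_checksum(const uint8_t *buf, uint32_t len)
{
    uint8_t sum = 0;

    for (uint32_t i = 0; i < len; i++)
        sum += buf[i];
    return (uint8_t)(0 - sum);
}

int main(void)
{
    uint8_t cookie[16] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    uint8_t total = 0;

    /* Store the checksum in the last byte, then verify the whole buffer. */
    cookie[15] = calc_checksum(cookie, 15);

    for (unsigned i = 0; i < sizeof(cookie); i++)
        total += cookie[i];

    printf("checksum 0x%02x, sum over buffer = 0x%02x (expect 0x00)\n",
           cookie[15], total);
    return 0;
}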
++ **/ ++bool e1000_check_mng_mode_generic(struct e1000_hw *hw) ++{ ++ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); ++ ++ DEBUGFUNC("e1000_check_mng_mode_generic"); ++ ++ ++ return (fwsm & E1000_FWSM_MODE_MASK) == ++ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); ++} ++ ++/** ++ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx ++ * @hw: pointer to the HW structure ++ * ++ * Enables packet filtering on transmit packets if manageability is enabled ++ * and host interface is enabled. ++ **/ ++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) ++{ ++ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; ++ u32 *buffer = (u32 *)&hw->mng_cookie; ++ u32 offset; ++ s32 ret_val, hdr_csum, csum; ++ u8 i, len; ++ ++ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); ++ ++ hw->mac.tx_pkt_filtering = true; ++ ++ /* No manageability, no filtering */ ++ if (!hw->mac.ops.check_mng_mode(hw)) { ++ hw->mac.tx_pkt_filtering = false; ++ return hw->mac.tx_pkt_filtering; ++ } ++ ++ /* If we can't read from the host interface for whatever ++ * reason, disable filtering. ++ */ ++ ret_val = e1000_mng_enable_host_if_generic(hw); ++ if (ret_val != E1000_SUCCESS) { ++ hw->mac.tx_pkt_filtering = false; ++ return hw->mac.tx_pkt_filtering; ++ } ++ ++ /* Read in the header. Length and offset are in dwords. */ ++ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; ++ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; ++ for (i = 0; i < len; i++) ++ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, ++ offset + i); ++ hdr_csum = hdr->checksum; ++ hdr->checksum = 0; ++ csum = e1000_calculate_checksum((u8 *)hdr, ++ E1000_MNG_DHCP_COOKIE_LENGTH); ++ /* If either the checksums or signature don't match, then ++ * the cookie area isn't considered valid, in which case we ++ * take the safe route of assuming Tx filtering is enabled. ++ */ ++ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { ++ hw->mac.tx_pkt_filtering = true; ++ return hw->mac.tx_pkt_filtering; ++ } ++ ++ /* Cookie area is valid, make the final check for filtering. */ ++ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) ++ hw->mac.tx_pkt_filtering = false; ++ ++ return hw->mac.tx_pkt_filtering; ++} ++ ++/** ++ * e1000_mng_write_cmd_header_generic - Writes manageability command header ++ * @hw: pointer to the HW structure ++ * @hdr: pointer to the host interface command header ++ * ++ * Writes the command header after does the checksum calculation. ++ **/ ++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr) ++{ ++ u16 i, length = sizeof(struct e1000_host_mng_command_header); ++ ++ DEBUGFUNC("e1000_mng_write_cmd_header_generic"); ++ ++ /* Write the whole command header structure with new checksum. */ ++ ++ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); ++ ++ length >>= 2; ++ /* Write the relevant command block into the ram area. */ ++ for (i = 0; i < length; i++) { ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, ++ *((u32 *) hdr + i)); ++ E1000_WRITE_FLUSH(hw); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_mng_host_if_write_generic - Write to the manageability host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface buffer ++ * @length: size of the buffer ++ * @offset: location in the buffer to write to ++ * @sum: sum of the data (not checksum) ++ * ++ * This function writes the buffer content at the offset given on the host if. 
++ * It also does alignment considerations to do the writes in most efficient ++ * way. Also fills up the sum of the buffer in *buffer parameter. ++ **/ ++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, ++ u16 length, u16 offset, u8 *sum) ++{ ++ u8 *tmp; ++ u8 *bufptr = buffer; ++ u32 data = 0; ++ u16 remaining, i, j, prev_bytes; ++ ++ DEBUGFUNC("e1000_mng_host_if_write_generic"); ++ ++ /* sum = only sum of the data and it is not checksum */ ++ ++ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) ++ return -E1000_ERR_PARAM; ++ ++ tmp = (u8 *)&data; ++ prev_bytes = offset & 0x3; ++ offset >>= 2; ++ ++ if (prev_bytes) { ++ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); ++ for (j = prev_bytes; j < sizeof(u32); j++) { ++ *(tmp + j) = *bufptr++; ++ *sum += *(tmp + j); ++ } ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); ++ length -= j - prev_bytes; ++ offset++; ++ } ++ ++ remaining = length & 0x3; ++ length -= remaining; ++ ++ /* Calculate length in DWORDs */ ++ length >>= 2; ++ ++ /* The device driver writes the relevant command block into the ++ * ram area. ++ */ ++ for (i = 0; i < length; i++) { ++ for (j = 0; j < sizeof(u32); j++) { ++ *(tmp + j) = *bufptr++; ++ *sum += *(tmp + j); ++ } ++ ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, ++ data); ++ } ++ if (remaining) { ++ for (j = 0; j < sizeof(u32); j++) { ++ if (j < remaining) ++ *(tmp + j) = *bufptr++; ++ else ++ *(tmp + j) = 0; ++ ++ *sum += *(tmp + j); ++ } ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, ++ data); ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface ++ * @hw: pointer to the HW structure ++ * @buffer: pointer to the host interface ++ * @length: size of the buffer ++ * ++ * Writes the DHCP information to the host interface. ++ **/ ++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, ++ u16 length) ++{ ++ struct e1000_host_mng_command_header hdr; ++ s32 ret_val; ++ u32 hicr; ++ ++ DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); ++ ++ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; ++ hdr.command_length = length; ++ hdr.reserved1 = 0; ++ hdr.reserved2 = 0; ++ hdr.checksum = 0; ++ ++ /* Enable the host interface */ ++ ret_val = e1000_mng_enable_host_if_generic(hw); ++ if (ret_val) ++ return ret_val; ++ ++ /* Populate the host interface with the contents of "buffer". */ ++ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, ++ sizeof(hdr), &(hdr.checksum)); ++ if (ret_val) ++ return ret_val; ++ ++ /* Write the manageability command header */ ++ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); ++ if (ret_val) ++ return ret_val; ++ ++ /* Tell the ARC a new command is pending. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_enable_mng_pass_thru - Check if management passthrough is needed ++ * @hw: pointer to the HW structure ++ * ++ * Verifies the hardware needs to leave interface enabled so that frames can ++ * be directed to and from the management interface. 
++ **/ ++/* Changed name, duplicated with e1000 */ ++bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw) ++{ ++ u32 manc; ++ u32 fwsm, factps; ++ ++ DEBUGFUNC("igb_e1000_enable_mng_pass_thru"); ++ ++ if (!hw->mac.asf_firmware_present) ++ return false; ++ ++ manc = E1000_READ_REG(hw, E1000_MANC); ++ ++ if (!(manc & E1000_MANC_RCV_TCO_EN)) ++ return false; ++ ++ if (hw->mac.has_fwsm) { ++ fwsm = E1000_READ_REG(hw, E1000_FWSM); ++ factps = E1000_READ_REG(hw, E1000_FACTPS); ++ ++ if (!(factps & E1000_FACTPS_MNGCG) && ++ ((fwsm & E1000_FWSM_MODE_MASK) == ++ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) ++ return true; ++ } else if ((manc & E1000_MANC_SMBUS_EN) && ++ !(manc & E1000_MANC_ASF_EN)) { ++ return true; ++ } ++ ++ return false; ++} ++ ++/** ++ * e1000_host_interface_command - Writes buffer to host interface ++ * @hw: pointer to the HW structure ++ * @buffer: contains a command to write ++ * @length: the byte length of the buffer, must be multiple of 4 bytes ++ * ++ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS ++ * else returns E1000_ERR_HOST_INTERFACE_COMMAND. ++ **/ ++s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) ++{ ++ u32 hicr, i; ++ ++ DEBUGFUNC("e1000_host_interface_command"); ++ ++ if (!(hw->mac.arc_subsystem_valid)) { ++ DEBUGOUT("Hardware doesn't support host interface command.\n"); ++ return E1000_SUCCESS; ++ } ++ ++ if (!hw->mac.asf_firmware_present) { ++ DEBUGOUT("Firmware is not present.\n"); ++ return E1000_SUCCESS; ++ } ++ ++ if (length == 0 || length & 0x3 || ++ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { ++ DEBUGOUT("Buffer length failure.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Check that the host interface is enabled. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_EN)) { ++ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Calculate length in DWORDs */ ++ length >>= 2; ++ ++ /* The device driver writes the relevant command block ++ * into the ram area. ++ */ ++ for (i = 0; i < length; i++) ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, ++ *((u32 *)buffer + i)); ++ ++ /* Setting this bit tells the ARC that a new command is pending. */ ++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); ++ ++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_C)) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check command successful completion. */ ++ if (i == E1000_HI_COMMAND_TIMEOUT || ++ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { ++ DEBUGOUT("Command has failed with no status valid.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ for (i = 0; i < length; i++) ++ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, ++ E1000_HOST_IF, ++ i); ++ ++ return E1000_SUCCESS; ++} ++/** ++ * e1000_load_firmware - Writes proxy FW code buffer to host interface ++ * and execute. ++ * @hw: pointer to the HW structure ++ * @buffer: contains a firmware to write ++ * @length: the byte length of the buffer, must be multiple of 4 bytes ++ * ++ * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled ++ * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. 
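e1000_host_interface_command() above follows a fixed sequence: copy the command into the HOST_IF window as DWORDs, set HICR.C to hand it to the ARC firmware, poll until the firmware clears C, then require the status-valid bit (SV). The condensed standalone sketch below models that sequencing with stand-in register helpers; the real routine also validates the length and the ARC/firmware state, and with no firmware behind the fake register the model simply times out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HICR_C  0x02u         /* command pending: driver sets, firmware clears */
#define HICR_SV 0x04u         /* status validity                               */

/* Stand-in register helpers; a real driver maps these onto MMIO. */
static uint32_t hicr_reg;
static uint32_t hostif[448];                 /* HOST_IF window, in DWORDs */

static uint32_t rd_hicr(void)        { return hicr_reg; }
static void     wr_hicr(uint32_t v)  { hicr_reg = v; }

/* Condensed version of the e1000_host_interface_command() sequencing. */
static bool host_if_command(const uint32_t *cmd, unsigned dwords,
                            unsigned timeout)
{
    unsigned i;

    for (i = 0; i < dwords; i++)             /* 1. copy the command in     */
        hostif[i] = cmd[i];

    wr_hicr(rd_hicr() | HICR_C);             /* 2. hand it to the ARC      */

    for (i = 0; i < timeout; i++) {          /* 3. wait for FW to clear C  */
        if (!(rd_hicr() & HICR_C))
            break;
        /* the driver sleeps 1 ms per iteration here */
    }

    return i < timeout && (rd_hicr() & HICR_SV);  /* 4. status must be valid */
}

int main(void)
{
    uint32_t cmd[2] = { 0x1234, 0x5678 };

    printf("command %s\n",
           host_if_command(cmd, 2, 10) ? "completed" : "timed out");
    return 0;
}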
++ **/ ++s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) ++{ ++ u32 hicr, hibba, fwsm, icr, i; ++ ++ DEBUGFUNC("e1000_load_firmware"); ++ ++ if (hw->mac.type < e1000_i210) { ++ DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ ++ /* Check that the host interface is enabled. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_EN)) { ++ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { ++ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ ++ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { ++ DEBUGOUT("Buffer length failure.\n"); ++ return -E1000_ERR_INVALID_ARGUMENT; ++ } ++ ++ /* Clear notification from ROM-FW by reading ICR register */ ++ icr = E1000_READ_REG(hw, E1000_ICR_V2); ++ ++ /* Reset ROM-FW */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ hicr |= E1000_HICR_FW_RESET_ENABLE; ++ E1000_WRITE_REG(hw, E1000_HICR, hicr); ++ hicr |= E1000_HICR_FW_RESET; ++ E1000_WRITE_REG(hw, E1000_HICR, hicr); ++ E1000_WRITE_FLUSH(hw); ++ ++ /* Wait till MAC notifies about its readiness after ROM-FW reset */ ++ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { ++ icr = E1000_READ_REG(hw, E1000_ICR_V2); ++ if (icr & E1000_ICR_MNG) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check for timeout */ ++ if (i == E1000_HI_COMMAND_TIMEOUT) { ++ DEBUGOUT("FW reset failed.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Wait till MAC is ready to accept new FW code */ ++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { ++ fwsm = E1000_READ_REG(hw, E1000_FWSM); ++ if ((fwsm & E1000_FWSM_FW_VALID) && ++ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == ++ E1000_FWSM_HI_EN_ONLY_MODE)) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check for timeout */ ++ if (i == E1000_HI_COMMAND_TIMEOUT) { ++ DEBUGOUT("FW reset failed.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Calculate length in DWORDs */ ++ length >>= 2; ++ ++ /* The device driver writes the relevant FW code block ++ * into the ram area in DWORDs via 1kB ram addressing window. ++ */ ++ for (i = 0; i < length; i++) { ++ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { ++ /* Point to correct 1kB ram window */ ++ hibba = E1000_HI_FW_BASE_ADDRESS + ++ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * ++ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); ++ ++ E1000_WRITE_REG(hw, E1000_HIBBA, hibba); ++ } ++ ++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, ++ i % E1000_HI_FW_BLOCK_DWORD_LENGTH, ++ *((u32 *)buffer + i)); ++ } ++ ++ /* Setting this bit tells the ARC that a new FW is ready to execute. */ ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); ++ ++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { ++ hicr = E1000_READ_REG(hw, E1000_HICR); ++ if (!(hicr & E1000_HICR_C)) ++ break; ++ msec_delay(1); ++ } ++ ++ /* Check for successful FW start. 
*/ ++ if (i == E1000_HI_COMMAND_TIMEOUT) { ++ DEBUGOUT("New FW did not start within timeout period.\n"); ++ return -E1000_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ return E1000_SUCCESS; ++} ++ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.h b/drivers/net/ethernet/intel/igb/e1000_manage.h +--- a/drivers/net/ethernet/intel/igb/e1000_manage.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_manage.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,86 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _E1000_MANAGE_H_ ++#define _E1000_MANAGE_H_ ++ ++bool e1000_check_mng_mode_generic(struct e1000_hw *hw); ++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); ++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); ++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, ++ u16 length, u16 offset, u8 *sum); ++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, ++ struct e1000_host_mng_command_header *hdr); ++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, ++ u8 *buffer, u16 length); ++bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw); ++u8 e1000_calculate_checksum(u8 *buffer, u32 length); ++s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); ++s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); ++ ++enum e1000_mng_mode { ++ e1000_mng_mode_none = 0, ++ e1000_mng_mode_asf, ++ e1000_mng_mode_pt, ++ e1000_mng_mode_ipmi, ++ e1000_mng_mode_host_if_only ++}; ++ ++#define E1000_FACTPS_MNGCG 0x20000000 ++ ++#define E1000_FWSM_MODE_MASK 0xE ++#define E1000_FWSM_MODE_SHIFT 1 ++#define E1000_FWSM_FW_VALID 0x00008000 ++#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 ++ ++#define E1000_MNG_IAMT_MODE 0x3 ++#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 ++#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 ++#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 ++#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 ++#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 ++#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 ++ ++#define E1000_VFTA_ENTRY_SHIFT 5 ++#define E1000_VFTA_ENTRY_MASK 0x7F ++#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F ++ ++#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ ++#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ ++#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ ++#define E1000_HI_FW_BASE_ADDRESS 0x10000 ++#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ ++#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ ++#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ ++#define 
E1000_HICR_EN 0x01 /* Enable bit - RO */ ++/* Driver sets this bit when done to put command in RAM */ ++#define E1000_HICR_C 0x02 ++#define E1000_HICR_SV 0x04 /* Status Validity */ ++#define E1000_HICR_FW_RESET_ENABLE 0x40 ++#define E1000_HICR_FW_RESET 0x80 ++ ++/* Intel(R) Active Management Technology signature */ ++#define E1000_IAMT_SIGNATURE 0x544D4149 ++ ++#endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c +--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,42 +1,71 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #include "e1000_mbx.h" + + /** +- * igb_read_mbx - Reads a message from the mailbox ++ * e1000_null_mbx_check_for_flag - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG mbx_id) ++{ ++ DEBUGFUNC("e1000_null_mbx_check_flag"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_mbx_transact - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG *msg, ++ u16 E1000_UNUSEDARG size, ++ u16 E1000_UNUSEDARG mbx_id) ++{ ++ DEBUGFUNC("e1000_null_mbx_rw_msg"); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * +- * returns SUCCESS if it successfully read message from buffer ++ * returns SUCCESS if it successfuly read message from buffer + **/ +-s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_read_mbx"); ++ + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; +@@ -48,7 +77,7 @@ + } + + /** +- * igb_write_mbx - Write a message to the mailbox ++ * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -56,10 +85,12 @@ + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +-s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) ++s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; +@@ -71,17 +102,19 @@ + } + + /** +- * igb_check_for_msg - checks to see if someone sent us mail ++ * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +-s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) ++s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_msg"); ++ + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + +@@ -89,17 +122,19 @@ + } + + /** +- * igb_check_for_ack - checks to see if someone sent us ACK ++ * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +-s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) ++s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_ack"); ++ + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + +@@ -107,17 +142,19 @@ + } + + /** +- * igb_check_for_rst - checks to see if other side has reset ++ * 
e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +-s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) ++s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_rst"); ++ + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + +@@ -125,17 +162,19 @@ + } + + /** +- * igb_poll_for_msg - Wait for message notification ++ * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +-static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) ++static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + ++ DEBUGFUNC("e1000_poll_for_msg"); ++ + if (!countdown || !mbx->ops.check_for_msg) + goto out; + +@@ -143,28 +182,30 @@ + countdown--; + if (!countdown) + break; +- udelay(mbx->usec_delay); ++ usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + out: +- return countdown ? 0 : -E1000_ERR_MBX; ++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; + } + + /** +- * igb_poll_for_ack - Wait for message acknowledgement ++ * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +-static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) ++static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + ++ DEBUGFUNC("e1000_poll_for_ack"); ++ + if (!countdown || !mbx->ops.check_for_ack) + goto out; + +@@ -172,18 +213,18 @@ + countdown--; + if (!countdown) + break; +- udelay(mbx->usec_delay); ++ usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + out: +- return countdown ? 0 : -E1000_ERR_MBX; ++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; + } + + /** +- * igb_read_posted_mbx - Wait for message notification and receive message ++ * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -192,17 +233,19 @@ + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
+ **/ +-static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 mbx_id) ++s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_read_posted_mbx"); ++ + if (!mbx->ops.read) + goto out; + +- ret_val = igb_poll_for_msg(hw, mbx_id); ++ ret_val = e1000_poll_for_msg(hw, mbx_id); + ++ /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + out: +@@ -210,7 +253,7 @@ + } + + /** +- * igb_write_posted_mbx - Write a message to the mailbox, wait for ack ++ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -219,12 +262,13 @@ + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +-static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 mbx_id) ++s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_write_posted_mbx"); ++ + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; +@@ -234,37 +278,58 @@ + + /* if msg sent wait until we receive an ack */ + if (!ret_val) +- ret_val = igb_poll_for_ack(hw, mbx_id); ++ ret_val = e1000_poll_for_ack(hw, mbx_id); + out: + return ret_val; + } + +-static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) ++/** ++ * e1000_init_mbx_ops_generic - Initialize mbx function pointers ++ * @hw: pointer to the HW structure ++ * ++ * Sets the function pointers to no-op functions ++ **/ ++void e1000_init_mbx_ops_generic(struct e1000_hw *hw) + { +- u32 mbvficr = rd32(E1000_MBVFICR); ++ struct e1000_mbx_info *mbx = &hw->mbx; ++ mbx->ops.init_params = e1000_null_ops_generic; ++ mbx->ops.read = e1000_null_mbx_transact; ++ mbx->ops.write = e1000_null_mbx_transact; ++ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; ++ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; ++ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; ++ mbx->ops.read_posted = e1000_read_posted_mbx; ++ mbx->ops.write_posted = e1000_write_posted_mbx; ++} ++ ++static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) ++{ ++ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { +- ret_val = 0; +- wr32(E1000_MBVFICR, mask); ++ ret_val = E1000_SUCCESS; ++ E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; + } + + /** +- * igb_check_for_msg_pf - checks to see if the VF has sent mail ++ * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) + { + s32 ret_val = -E1000_ERR_MBX; + +- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { +- ret_val = 0; ++ DEBUGFUNC("e1000_check_for_msg_pf"); ++ ++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { ++ ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + +@@ -272,18 +337,20 @@ + } + + /** +- * igb_check_for_ack_pf - 
checks to see if the VF has ACKed ++ * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) + { + s32 ret_val = -E1000_ERR_MBX; + +- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { +- ret_val = 0; ++ DEBUGFUNC("e1000_check_for_ack_pf"); ++ ++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { ++ ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + +@@ -291,20 +358,22 @@ + } + + /** +- * igb_check_for_rst_pf - checks to see if the VF has reset ++ * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) + { +- u32 vflre = rd32(E1000_VFLRE); ++ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + ++ DEBUGFUNC("e1000_check_for_rst_pf"); ++ + if (vflre & (1 << vf_number)) { +- ret_val = 0; +- wr32(E1000_VFLRE, (1 << vf_number)); ++ ret_val = E1000_SUCCESS; ++ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + +@@ -312,30 +381,40 @@ + } + + /** +- * igb_obtain_mbx_lock_pf - obtain mailbox lock ++ * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +-static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) ++static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) + { + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; ++ int count = 10; + +- /* Take ownership of the buffer */ +- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); ++ DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + +- /* reserve mailbox for vf use */ +- p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); +- if (p2v_mailbox & E1000_P2VMAILBOX_PFU) +- ret_val = 0; ++ do { ++ /* Take ownership of the buffer */ ++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), ++ E1000_P2VMAILBOX_PFU); ++ ++ /* reserve mailbox for pf use */ ++ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); ++ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { ++ ret_val = E1000_SUCCESS; ++ break; ++ } ++ usec_delay(1000); ++ } while (count-- > 0); + + return ret_val; ++ + } + + /** +- * igb_write_mbx_pf - Places a message in the mailbox ++ * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -343,27 +422,29 @@ + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +-static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 vf_number) ++static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, ++ u16 vf_number) + { + s32 ret_val; + u16 i; + ++ DEBUGFUNC("e1000_write_mbx_pf"); ++ + /* lock the mailbox to prevent pf/vf race condition */ +- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); ++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ +- igb_check_for_msg_pf(hw, vf_number); +- 
igb_check_for_ack_pf(hw, vf_number); ++ e1000_check_for_msg_pf(hw, vf_number); ++ e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) +- array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); ++ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ +- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); ++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; +@@ -374,7 +455,7 @@ + } + + /** +- * igb_read_mbx_pf - Read a message from the mailbox ++ * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer +@@ -384,23 +465,25 @@ + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +-static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, +- u16 vf_number) ++static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, ++ u16 vf_number) + { + s32 ret_val; + u16 i; + ++ DEBUGFUNC("e1000_read_mbx_pf"); ++ + /* lock the mailbox to prevent pf/vf race condition */ +- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); ++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) +- msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); ++ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ +- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); ++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; +@@ -415,29 +498,34 @@ + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +-s32 igb_init_mbx_params_pf(struct e1000_hw *hw) ++s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) + { + struct e1000_mbx_info *mbx = &hw->mbx; + +- mbx->timeout = 0; +- mbx->usec_delay = 0; +- +- mbx->size = E1000_VFMAILBOX_SIZE; +- +- mbx->ops.read = igb_read_mbx_pf; +- mbx->ops.write = igb_write_mbx_pf; +- mbx->ops.read_posted = igb_read_posted_mbx; +- mbx->ops.write_posted = igb_write_posted_mbx; +- mbx->ops.check_for_msg = igb_check_for_msg_pf; +- mbx->ops.check_for_ack = igb_check_for_ack_pf; +- mbx->ops.check_for_rst = igb_check_for_rst_pf; ++ switch (hw->mac.type) { ++ case e1000_82576: ++ case e1000_i350: ++ case e1000_i354: ++ mbx->timeout = 0; ++ mbx->usec_delay = 0; + +- mbx->stats.msgs_tx = 0; +- mbx->stats.msgs_rx = 0; +- mbx->stats.reqs = 0; +- mbx->stats.acks = 0; +- mbx->stats.rsts = 0; ++ mbx->size = E1000_VFMAILBOX_SIZE; + +- return 0; ++ mbx->ops.read = e1000_read_mbx_pf; ++ mbx->ops.write = e1000_write_mbx_pf; ++ mbx->ops.read_posted = e1000_read_posted_mbx; ++ mbx->ops.write_posted = e1000_write_posted_mbx; ++ mbx->ops.check_for_msg = e1000_check_for_msg_pf; ++ mbx->ops.check_for_ack = e1000_check_for_ack_pf; ++ mbx->ops.check_for_rst = e1000_check_for_rst_pf; ++ ++ mbx->stats.msgs_tx = 0; ++ mbx->stats.msgs_rx = 0; ++ mbx->stats.reqs = 0; ++ mbx->stats.acks = 0; ++ mbx->stats.rsts = 0; ++ default: ++ return E1000_SUCCESS; ++ } + } + +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h +--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-13 
09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,30 +1,31 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_MBX_H_ + #define _E1000_MBX_H_ + +-#include "e1000_hw.h" ++#include "e1000_api.h" + + #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ + #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +@@ -32,10 +33,10 @@ + #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ + #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +-#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +-#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ ++#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ ++#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ ++#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ ++#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +@@ -43,31 +44,41 @@ + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +-/* Messages below or'd with this are the ACK */ ++/* Msgs below or'd with this are the ACK */ + #define E1000_VT_MSGTYPE_ACK 0x80000000 +-/* Messages below or'd with this are the NACK */ ++/* Msgs below or'd with this are the NACK */ + #define E1000_VT_MSGTYPE_NACK 0x40000000 + /* Indicates that VF is still clear to send requests */ + #define E1000_VT_MSGTYPE_CTS 0x20000000 + #define E1000_VT_MSGINFO_SHIFT 16 +-/* bits 23:16 are used for exra info for certain messages */ ++/* bits 23:16 are used for extra info for certain messages */ + #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +-#define E1000_VF_RESET 0x01 /* VF requests reset */ +-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +-#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +-#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +-#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ ++#define E1000_VF_RESET 0x01 /* VF requests reset */ ++#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ ++#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ ++#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) ++#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) ++#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ ++#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) ++#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ ++#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ ++#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) + #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ ++#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ ++ ++#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ ++#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +-s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); +-s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); +-s32 igb_check_for_msg(struct e1000_hw *, u16); +-s32 igb_check_for_ack(struct e1000_hw *, u16); +-s32 igb_check_for_rst(struct e1000_hw *, u16); +-s32 igb_init_mbx_params_pf(struct e1000_hw *); ++s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); ++s32 e1000_check_for_msg(struct e1000_hw *, u16); ++s32 e1000_check_for_ack(struct e1000_hw *, u16); ++s32 e1000_check_for_rst(struct e1000_hw *, u16); ++void e1000_init_mbx_ops_generic(struct e1000_hw *hw); ++s32 e1000_init_mbx_params_pf(struct e1000_hw *); + + #endif /* _E1000_MBX_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c +--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,63 +1,131 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. 
+- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++static void e1000_reload_nvm_generic(struct e1000_hw *hw); ++ ++/** ++ * e1000_init_nvm_ops_generic - Initialize NVM function pointers ++ * @hw: pointer to the HW structure + * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++ * Setups up the function pointers to no-op functions ++ **/ ++void e1000_init_nvm_ops_generic(struct e1000_hw *hw) ++{ ++ struct e1000_nvm_info *nvm = &hw->nvm; ++ DEBUGFUNC("e1000_init_nvm_ops_generic"); ++ ++ /* Initialize function pointers */ ++ nvm->ops.init_params = e1000_null_ops_generic; ++ nvm->ops.acquire = e1000_null_ops_generic; ++ nvm->ops.read = e1000_null_read_nvm; ++ nvm->ops.release = e1000_null_nvm_generic; ++ nvm->ops.reload = e1000_reload_nvm_generic; ++ nvm->ops.update = e1000_null_ops_generic; ++ nvm->ops.valid_led_default = e1000_null_led_default; ++ nvm->ops.validate = e1000_null_ops_generic; ++ nvm->ops.write = e1000_null_write_nvm; ++} + +-#include +-#include ++/** ++ * e1000_null_nvm_read - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, ++ u16 E1000_UNUSEDARG *c) ++{ ++ DEBUGFUNC("e1000_null_read_nvm"); ++ return E1000_SUCCESS; ++} + +-#include "e1000_mac.h" +-#include "e1000_nvm.h" ++/** ++ * e1000_null_nvm_generic - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_nvm_generic"); ++ return; ++} + + /** +- * igb_raise_eec_clk - Raise EEPROM clock ++ * e1000_null_led_default - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG *data) ++{ ++ DEBUGFUNC("e1000_null_led_default"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_write_nvm - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 
e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, ++ u16 E1000_UNUSEDARG *c) ++{ ++ DEBUGFUNC("e1000_null_write_nvm"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +-static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) ++static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) + { + *eecd = *eecd | E1000_EECD_SK; +- wr32(E1000_EECD, *eecd); +- wrfl(); +- udelay(hw->nvm.delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, *eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(hw->nvm.delay_usec); + } + + /** +- * igb_lower_eec_clk - Lower EEPROM clock ++ * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +-static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) ++static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) + { + *eecd = *eecd & ~E1000_EECD_SK; +- wr32(E1000_EECD, *eecd); +- wrfl(); +- udelay(hw->nvm.delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, *eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(hw->nvm.delay_usec); + } + + /** +- * igb_shift_out_eec_bits - Shift data bits our to the EEPROM ++ * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out +@@ -66,12 +134,14 @@ + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +-static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) ++static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + ++ DEBUGFUNC("e1000_shift_out_eec_bits"); ++ + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; +@@ -82,23 +152,23 @@ + if (data & mask) + eecd |= E1000_EECD_DI; + +- wr32(E1000_EECD, eecd); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); + +- udelay(nvm->delay_usec); ++ usec_delay(nvm->delay_usec); + +- igb_raise_eec_clk(hw, &eecd); +- igb_lower_eec_clk(hw, &eecd); ++ e1000_raise_eec_clk(hw, &eecd); ++ e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; +- wr32(E1000_EECD, eecd); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); + } + + /** +- * igb_shift_in_eec_bits - Shift data bits in from the EEPROM ++ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * +@@ -108,121 +178,124 @@ + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
+ **/ +-static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) ++static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) + { + u32 eecd; + u32 i; + u16 data; + +- eecd = rd32(E1000_EECD); ++ DEBUGFUNC("e1000_shift_in_eec_bits"); ++ ++ eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; +- igb_raise_eec_clk(hw, &eecd); ++ e1000_raise_eec_clk(hw, &eecd); + +- eecd = rd32(E1000_EECD); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + +- igb_lower_eec_clk(hw, &eecd); ++ e1000_lower_eec_clk(hw, &eecd); + } + + return data; + } + + /** +- * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion ++ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +-static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) ++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) + { + u32 attempts = 100000; + u32 i, reg = 0; +- s32 ret_val = -E1000_ERR_NVM; ++ ++ DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) +- reg = rd32(E1000_EERD); ++ reg = E1000_READ_REG(hw, E1000_EERD); + else +- reg = rd32(E1000_EEWR); ++ reg = E1000_READ_REG(hw, E1000_EEWR); + +- if (reg & E1000_NVM_RW_REG_DONE) { +- ret_val = 0; +- break; +- } ++ if (reg & E1000_NVM_RW_REG_DONE) ++ return E1000_SUCCESS; + +- udelay(5); ++ usec_delay(5); + } + +- return ret_val; ++ return -E1000_ERR_NVM; + } + + /** +- * igb_acquire_nvm - Generic request for access to EEPROM ++ * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +-s32 igb_acquire_nvm(struct e1000_hw *hw) ++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) + { +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; +- s32 ret_val = 0; + ++ DEBUGFUNC("e1000_acquire_nvm_generic"); + +- wr32(E1000_EECD, eecd | E1000_EECD_REQ); +- eecd = rd32(E1000_EECD); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; +- udelay(5); +- eecd = rd32(E1000_EECD); ++ usec_delay(5); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; +- wr32(E1000_EECD, eecd); +- hw_dbg("Could not acquire NVM grant\n"); +- ret_val = -E1000_ERR_NVM; ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ DEBUGOUT("Could not acquire NVM grant\n"); ++ return -E1000_ERR_NVM; + } + +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_standby_nvm - Return EEPROM to standby state ++ * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
+ **/ +-static void igb_standby_nvm(struct e1000_hw *hw) ++static void e1000_standby_nvm(struct e1000_hw *hw) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); ++ ++ DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; +- wr32(E1000_EECD, eecd); +- wrfl(); +- udelay(nvm->delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; +- wr32(E1000_EECD, eecd); +- wrfl(); +- udelay(nvm->delay_usec); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(nvm->delay_usec); + } + } + +@@ -236,53 +309,57 @@ + { + u32 eecd; + +- eecd = rd32(E1000_EECD); ++ DEBUGFUNC("e1000_stop_nvm"); ++ ++ eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; +- igb_lower_eec_clk(hw, &eecd); ++ e1000_lower_eec_clk(hw, &eecd); + } + } + + /** +- * igb_release_nvm - Release exclusive access to EEPROM ++ * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +-void igb_release_nvm(struct e1000_hw *hw) ++void e1000_release_nvm_generic(struct e1000_hw *hw) + { + u32 eecd; + ++ DEBUGFUNC("e1000_release_nvm_generic"); ++ + e1000_stop_nvm(hw); + +- eecd = rd32(E1000_EECD); ++ eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; +- wr32(E1000_EECD, eecd); ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); + } + + /** +- * igb_ready_nvm_eeprom - Prepares EEPROM for read/write ++ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +-static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) ++static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) + { + struct e1000_nvm_info *nvm = &hw->nvm; +- u32 eecd = rd32(E1000_EECD); +- s32 ret_val = 0; +- u16 timeout = 0; ++ u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u8 spi_stat_reg; + ++ DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_spi) { ++ u16 timeout = NVM_MAX_RETRY_SPI; ++ + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); +- wr32(E1000_EECD, eecd); +- wrfl(); +- udelay(1); +- timeout = NVM_MAX_RETRY_SPI; ++ E1000_WRITE_REG(hw, E1000_EECD, eecd); ++ E1000_WRITE_FLUSH(hw); ++ usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed +@@ -290,30 +367,28 @@ + * not cleared within 'timeout', then error out. 
+ */ + while (timeout) { +- igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, +- hw->nvm.opcode_bits); +- spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); ++ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, ++ hw->nvm.opcode_bits); ++ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + +- udelay(5); +- igb_standby_nvm(hw); ++ usec_delay(5); ++ e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { +- hw_dbg("SPI NVM Status error\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("SPI NVM Status error\n"); ++ return -E1000_ERR_NVM; + } + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_nvm_spi - Read EEPROM's using SPI ++ * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read +@@ -321,7 +396,7 @@ + * + * Reads a 16 bit word from the EEPROM. + **/ +-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; +@@ -329,51 +404,51 @@ + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + ++ DEBUGFUNC("e1000_read_nvm_spi"); ++ + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); ++ return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- ret_val = igb_ready_nvm_eeprom(hw); ++ ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ +- igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); +- igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); ++ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); ++ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { +- word_in = igb_shift_in_eec_bits(hw, 16); ++ word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + release: + nvm->ops.release(hw); + +-out: + return ret_val; + } + + /** +- * igb_read_nvm_eerd - Reads EEPROM using EERD register ++ * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read +@@ -381,41 +456,44 @@ + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words, +- * and not enough words. ++ * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); ++ return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + +- E1000_NVM_RW_REG_START; ++ E1000_NVM_RW_REG_START; + +- wr32(E1000_EERD, eerd); +- ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); ++ E1000_WRITE_REG(hw, E1000_EERD, eerd); ++ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + +- data[i] = (rd32(E1000_EERD) >> +- E1000_NVM_RW_REG_DATA); ++ data[i] = (E1000_READ_REG(hw, E1000_EERD) >> ++ E1000_NVM_RW_REG_DATA); + } + +-out: ++ if (ret_val) ++ DEBUGOUT1("NVM read error: %d\n", ret_val); ++ + return ret_val; + } + + /** +- * igb_write_nvm_spi - Write to EEPROM using SPI ++ * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write +@@ -424,21 +502,23 @@ + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the +- * EEPROM will most likley contain an invalid checksum. ++ * EEPROM will most likely contain an invalid checksum. + **/ +-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) ++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) + { + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + ++ DEBUGFUNC("e1000_write_nvm_spi"); ++ + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { +- hw_dbg("nvm parameter(s) out of bounds\n"); +- return ret_val; ++ DEBUGOUT("nvm parameter(s) out of bounds\n"); ++ return -E1000_ERR_NVM; + } + + while (widx < words) { +@@ -448,19 +528,19 @@ + if (ret_val) + return ret_val; + +- ret_val = igb_ready_nvm_eeprom(hw); ++ ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ +- igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, ++ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode +@@ -469,24 +549,23 @@ + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ +- igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); +- igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), ++ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); ++ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; +- + word_out = (word_out >> 8) | (word_out << 8); +- igb_shift_out_eec_bits(hw, word_out, 16); ++ e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { +- igb_standby_nvm(hw); ++ e1000_standby_nvm(hw); + break; + } + } +- usleep_range(1000, 2000); ++ msec_delay(10); + nvm->ops.release(hw); + } + +@@ -494,132 +573,199 @@ + } + + /** +- * igb_read_part_string - Read device part number ++ * 
igb_e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure +- * @part_num: pointer to device part number +- * @part_num_size: size of part number buffer ++ * @pba_num: pointer to device part number ++ * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores +- * the value in part_num. ++ * the value in pba_num. + **/ +-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, ++ u32 pba_num_size) + { + s32 ret_val; + u16 nvm_data; +- u16 pointer; ++ u16 pba_ptr; + u16 offset; + u16 length; + +- if (part_num == NULL) { +- hw_dbg("PBA string buffer was null\n"); +- ret_val = E1000_ERR_INVALID_ARGUMENT; +- goto out; ++ DEBUGFUNC("igb_e1000_read_pba_string_generic"); ++ ++ if ((hw->mac.type >= e1000_i210) && ++ !e1000_get_flash_presence_i210(hw)) { ++ DEBUGOUT("Flashless no PBA string\n"); ++ return -E1000_ERR_NVM_PBA_SECTION; ++ } ++ ++ if (pba_num == NULL) { ++ DEBUGOUT("PBA string buffer was null\n"); ++ return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + +- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); ++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which +- * means pointer is actually our second data word for the PBA number ++ * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { +- hw_dbg("NVM PBA number is not stored as string\n"); ++ DEBUGOUT("NVM PBA number is not stored as string\n"); + +- /* we will need 11 characters to store the PBA */ +- if (part_num_size < 11) { +- hw_dbg("PBA string buffer too small\n"); ++ /* make sure callers buffer is big enough to store the PBA */ ++ if (pba_num_size < E1000_PBANUM_LENGTH) { ++ DEBUGOUT("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + +- /* extract hex string from data and pointer */ +- part_num[0] = (nvm_data >> 12) & 0xF; +- part_num[1] = (nvm_data >> 8) & 0xF; +- part_num[2] = (nvm_data >> 4) & 0xF; +- part_num[3] = nvm_data & 0xF; +- part_num[4] = (pointer >> 12) & 0xF; +- part_num[5] = (pointer >> 8) & 0xF; +- part_num[6] = '-'; +- part_num[7] = 0; +- part_num[8] = (pointer >> 4) & 0xF; +- part_num[9] = pointer & 0xF; ++ /* extract hex string from data and pba_ptr */ ++ pba_num[0] = (nvm_data >> 12) & 0xF; ++ pba_num[1] = (nvm_data >> 8) & 0xF; ++ pba_num[2] = (nvm_data >> 4) & 0xF; ++ pba_num[3] = nvm_data & 0xF; ++ pba_num[4] = (pba_ptr >> 12) & 0xF; ++ pba_num[5] = (pba_ptr >> 8) & 0xF; ++ pba_num[6] = '-'; ++ pba_num[7] = 0; ++ pba_num[8] = (pba_ptr >> 4) & 0xF; ++ pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ +- part_num[10] = '\0'; ++ pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { +- if (part_num[offset] < 0xA) +- part_num[offset] += '0'; +- else if (part_num[offset] < 0x10) +- part_num[offset] += 'A' - 0xA; ++ if (pba_num[offset] < 0xA) ++ pba_num[offset] += '0'; ++ else 
if (pba_num[offset] < 0x10) ++ pba_num[offset] += 'A' - 0xA; + } + +- goto out; ++ return E1000_SUCCESS; + } + +- ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); ++ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + + if (length == 0xFFFF || length == 0) { +- hw_dbg("NVM PBA number section invalid length\n"); +- ret_val = E1000_ERR_NVM_PBA_SECTION; +- goto out; +- } +- /* check if part_num buffer is big enough */ +- if (part_num_size < (((u32)length * 2) - 1)) { +- hw_dbg("PBA string buffer too small\n"); +- ret_val = E1000_ERR_NO_SPACE; +- goto out; ++ DEBUGOUT("NVM PBA number section invalid length\n"); ++ return -E1000_ERR_NVM_PBA_SECTION; ++ } ++ /* check if pba_num buffer is big enough */ ++ if (pba_num_size < (((u32)length * 2) - 1)) { ++ DEBUGOUT("PBA string buffer too small\n"); ++ return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ +- pointer++; ++ pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { +- ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); ++ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } +- part_num[offset * 2] = (u8)(nvm_data >> 8); +- part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); ++ pba_num[offset * 2] = (u8)(nvm_data >> 8); ++ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } +- part_num[offset * 2] = '\0'; ++ pba_num[offset * 2] = '\0'; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_mac_addr - Read device MAC address ++ * e1000_read_pba_length_generic - Read device part number length ++ * @hw: pointer to the HW structure ++ * @pba_num_size: size of part number buffer ++ * ++ * Reads the product board assembly (PBA) number length from the EEPROM and ++ * stores the value in pba_num_size. ++ **/ ++s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) ++{ ++ s32 ret_val; ++ u16 nvm_data; ++ u16 pba_ptr; ++ u16 length; ++ ++ DEBUGFUNC("e1000_read_pba_length_generic"); ++ ++ if (pba_num_size == NULL) { ++ DEBUGOUT("PBA buffer size was null\n"); ++ return -E1000_ERR_INVALID_ARGUMENT; ++ } ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ /* if data is not ptr guard the PBA must be in legacy format */ ++ if (nvm_data != NVM_PBA_PTR_GUARD) { ++ *pba_num_size = E1000_PBANUM_LENGTH; ++ return E1000_SUCCESS; ++ } ++ ++ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); ++ if (ret_val) { ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; ++ } ++ ++ if (length == 0xFFFF || length == 0) { ++ DEBUGOUT("NVM PBA number section invalid length\n"); ++ return -E1000_ERR_NVM_PBA_SECTION; ++ } ++ ++ /* Convert from length in u16 values to u8 chars, add 1 for NULL, ++ * and subtract 2 because length field is included in length. ++ */ ++ *pba_num_size = ((u32)length * 2) - 1; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * igb_e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. 
+ * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +-s32 igb_read_mac_addr(struct e1000_hw *hw) ++ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw) + { + u32 rar_high; + u32 rar_low; + u16 i; + +- rar_high = rd32(E1000_RAH(0)); +- rar_low = rd32(E1000_RAL(0)); ++ rar_high = E1000_READ_REG(hw, E1000_RAH(0)); ++ rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); +@@ -627,83 +773,104 @@ + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + +- for (i = 0; i < ETH_ALEN; i++) ++ for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_validate_nvm_checksum - Validate EEPROM checksum ++ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +-s32 igb_validate_nvm_checksum(struct e1000_hw *hw) ++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + ++ DEBUGFUNC("e1000_validate_nvm_checksum_generic"); ++ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error\n"); +- goto out; ++ DEBUGOUT("NVM Read Error\n"); ++ return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { +- hw_dbg("NVM Checksum Invalid\n"); +- ret_val = -E1000_ERR_NVM; +- goto out; ++ DEBUGOUT("NVM Checksum Invalid\n"); ++ return -E1000_ERR_NVM; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_update_nvm_checksum - Update EEPROM checksum ++ * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +-s32 igb_update_nvm_checksum(struct e1000_hw *hw) ++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) + { +- s32 ret_val; ++ s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + ++ DEBUGFUNC("e1000_update_nvm_checksum"); ++ + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { +- hw_dbg("NVM Read Error while updating checksum.\n"); +- goto out; ++ DEBUGOUT("NVM Read Error while updating checksum.\n"); ++ return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) +- hw_dbg("NVM Write Error while updating checksum.\n"); ++ DEBUGOUT("NVM Write Error while updating checksum.\n"); + +-out: + return ret_val; + } + + /** +- * igb_get_fw_version - Get firmware version information ++ * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure +- * @fw_vers: pointer to output structure + * +- * unsupported MAC types will return all 0 version structure ++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the ++ * extended control register. 
+ **/ +-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) ++static void e1000_reload_nvm_generic(struct e1000_hw *hw) ++{ ++ u32 ctrl_ext; ++ ++ DEBUGFUNC("e1000_reload_nvm_generic"); ++ ++ usec_delay(10); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ctrl_ext |= E1000_CTRL_EXT_EE_RST; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ++ E1000_WRITE_FLUSH(hw); ++} ++ ++/** ++ * e1000_get_fw_version - Get firmware version information ++ * @hw: pointer to the HW structure ++ * @fw_vers: pointer to output version structure ++ * ++ * unsupported/not present features return 0 in version structure ++ **/ ++void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) + { + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; +@@ -711,17 +878,18 @@ + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + +- /* basic eeprom version numbers and bits used vary by part and by tool +- * used to create the nvm images. Check which data format we have. +- */ +- hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); ++ /* basic eeprom version numbers, bits used vary by part and by tool ++ * used to create the nvm images */ ++ /* Check which data format we have */ + switch (hw->mac.type) { + case e1000_i211: +- igb_read_invm_version(hw, fw_vers); ++ e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: ++ case e1000_i354: ++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ +@@ -736,12 +904,13 @@ + } + break; + case e1000_i210: +- if (!(igb_get_flash_presence_i210(hw))) { +- igb_read_invm_version(hw, fw_vers); ++ if (!(e1000_get_flash_presence_i210(hw))) { ++ e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: ++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && +@@ -769,6 +938,7 @@ + } + break; + default: ++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); +@@ -797,5 +967,11 @@ + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; ++ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { ++ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); ++ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); ++ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | ++ eeprom_verl; + } + } ++ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h +--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,41 +1,30 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. 
+- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_NVM_H_ + #define _E1000_NVM_H_ + +-s32 igb_acquire_nvm(struct e1000_hw *hw); +-void igb_release_nvm(struct e1000_hw *hw); +-s32 igb_read_mac_addr(struct e1000_hw *hw); +-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); +-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, +- u32 part_num_size); +-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +-s32 igb_validate_nvm_checksum(struct e1000_hw *hw); +-s32 igb_update_nvm_checksum(struct e1000_hw *hw); +- + struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; +@@ -51,6 +40,31 @@ + u16 or_build; + u16 or_patch; + }; +-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers); ++ ++void e1000_init_nvm_ops_generic(struct e1000_hw *hw); ++s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); ++void e1000_null_nvm_generic(struct e1000_hw *hw); ++s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); ++s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); ++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); ++ ++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); ++s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw); ++s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, ++ u32 pba_num_size); ++s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); ++s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); ++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data); ++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); ++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); ++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, ++ u16 *data); ++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); ++void e1000_release_nvm_generic(struct e1000_hw *hw); ++void e1000_get_fw_version(struct e1000_hw *hw, ++ struct e1000_fw_version *fw_vers); ++ ++#define E1000_STM_OPCODE 0xDB00 + + #endif +diff -Nu 
a/drivers/net/ethernet/intel/igb/e1000_osdep.h b/drivers/net/ethernet/intel/igb/e1000_osdep.h +--- a/drivers/net/ethernet/intel/igb/e1000_osdep.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_osdep.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,141 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* glue for the OS independent part of e1000 ++ * includes register access macros ++ */ ++ ++#ifndef _E1000_OSDEP_H_ ++#define _E1000_OSDEP_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "kcompat.h" ++ ++#define usec_delay(x) udelay(x) ++#define usec_delay_irq(x) udelay(x) ++#ifndef msec_delay ++#define msec_delay(x) do { \ ++ /* Don't mdelay in interrupt context! */ \ ++ if (in_interrupt()) \ ++ BUG(); \ ++ else \ ++ msleep(x); \ ++} while (0) ++ ++/* Some workarounds require millisecond delays and are run during interrupt ++ * context. Most notably, when establishing link, the phy may need tweaking ++ * but cannot process phy register reads/writes faster than millisecond ++ * intervals...and we establish link due to a "link status change" interrupt. ++ */ ++#define msec_delay_irq(x) mdelay(x) ++ ++#define E1000_READ_REG(x, y) e1000_read_reg(x, y) ++#endif ++ ++#define PCI_COMMAND_REGISTER PCI_COMMAND ++#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE ++#define ETH_ADDR_LEN ETH_ALEN ++ ++#ifdef __BIG_ENDIAN ++#define E1000_BIG_ENDIAN __BIG_ENDIAN ++#endif ++ ++#ifdef DEBUG ++#define DEBUGOUT(S) pr_debug(S) ++#define DEBUGOUT1(S, A...) pr_debug(S, ## A) ++#else ++#define DEBUGOUT(S) ++#define DEBUGOUT1(S, A...) 
++#endif ++ ++#ifdef DEBUG_FUNC ++#define DEBUGFUNC(F) DEBUGOUT(F "\n") ++#else ++#define DEBUGFUNC(F) ++#endif ++#define DEBUGOUT2 DEBUGOUT1 ++#define DEBUGOUT3 DEBUGOUT2 ++#define DEBUGOUT7 DEBUGOUT3 ++ ++#define E1000_REGISTER(a, reg) reg ++ ++/* forward declaration */ ++struct e1000_hw; ++ ++/* write operations, indexed using DWORDS */ ++#define E1000_WRITE_REG(hw, reg, val) \ ++do { \ ++ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ ++ if (!E1000_REMOVED(hw_addr)) \ ++ writel((val), &hw_addr[(reg)]); \ ++} while (0) ++ ++u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); ++ ++#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \ ++ E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val)) ++ ++#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \ ++ e1000_read_reg((hw), (reg) + ((idx) << 2))) ++ ++#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY ++#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY ++ ++#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ ++ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \ ++ ((offset) << 1)))) ++ ++#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ ++ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) ++ ++#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ ++ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) ++ ++#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ ++ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) ++ ++#define E1000_WRITE_REG_IO(a, reg, offset) do { \ ++ outl(reg, ((a)->io_base)); \ ++ outl(offset, ((a)->io_base + 4)); \ ++ } while (0) ++ ++#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) ++ ++#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ ++ writel((value), ((a)->flash_address + reg))) ++ ++#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ ++ writew((value), ((a)->flash_address + reg))) ++ ++#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) ++ ++#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg)) ++ ++#define E1000_REMOVED(h) unlikely(!(h)) ++ ++#endif /* _E1000_OSDEP_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c +--- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,147 +1,271 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ +- +-#include +-#include +- +-#include "e1000_mac.h" +-#include "e1000_phy.h" +- +-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); +-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, +- u16 *phy_ctrl); +-static s32 igb_wait_autoneg(struct e1000_hw *hw); +-static s32 igb_set_master_slave_mode(struct e1000_hw *hw); ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + ++*******************************************************************************/ ++ ++#include "e1000_api.h" ++ ++static s32 e1000_wait_autoneg(struct e1000_hw *hw); + /* Cable length tables */ + static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ +- (sizeof(e1000_m88_cable_length_table) / \ +- sizeof(e1000_m88_cable_length_table[0])) ++ (sizeof(e1000_m88_cable_length_table) / \ ++ sizeof(e1000_m88_cable_length_table[0])) + + static const u16 e1000_igp_2_cable_length_table[] = { +- 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, +- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, +- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, +- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, +- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, +- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, +- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, +- 104, 109, 114, 118, 121, 124}; ++ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, ++ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, ++ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, ++ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, ++ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, ++ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, ++ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, ++ 124}; + #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ +- (sizeof(e1000_igp_2_cable_length_table) / \ +- sizeof(e1000_igp_2_cable_length_table[0])) ++ (sizeof(e1000_igp_2_cable_length_table) / \ ++ sizeof(e1000_igp_2_cable_length_table[0])) ++ ++/** ++ * e1000_init_phy_ops_generic - Initialize PHY function pointers ++ * @hw: pointer to the HW structure ++ * ++ * Setups up the function pointers to no-op functions ++ **/ ++void e1000_init_phy_ops_generic(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ DEBUGFUNC("e1000_init_phy_ops_generic"); ++ ++ /* Initialize function pointers */ ++ phy->ops.init_params = e1000_null_ops_generic; ++ phy->ops.acquire = e1000_null_ops_generic; ++ phy->ops.check_polarity = 
e1000_null_ops_generic; ++ phy->ops.check_reset_block = e1000_null_ops_generic; ++ phy->ops.commit = e1000_null_ops_generic; ++ phy->ops.force_speed_duplex = e1000_null_ops_generic; ++ phy->ops.get_cfg_done = e1000_null_ops_generic; ++ phy->ops.get_cable_length = e1000_null_ops_generic; ++ phy->ops.get_info = e1000_null_ops_generic; ++ phy->ops.set_page = e1000_null_set_page; ++ phy->ops.read_reg = e1000_null_read_reg; ++ phy->ops.read_reg_locked = e1000_null_read_reg; ++ phy->ops.read_reg_page = e1000_null_read_reg; ++ phy->ops.release = e1000_null_phy_generic; ++ phy->ops.reset = e1000_null_ops_generic; ++ phy->ops.set_d0_lplu_state = e1000_null_lplu_state; ++ phy->ops.set_d3_lplu_state = e1000_null_lplu_state; ++ phy->ops.write_reg = e1000_null_write_reg; ++ phy->ops.write_reg_locked = e1000_null_write_reg; ++ phy->ops.write_reg_page = e1000_null_write_reg; ++ phy->ops.power_up = e1000_null_phy_generic; ++ phy->ops.power_down = e1000_null_phy_generic; ++ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; ++ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; ++} ++ ++/** ++ * e1000_null_set_page - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, ++ u16 E1000_UNUSEDARG data) ++{ ++ DEBUGFUNC("e1000_null_set_page"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_read_reg - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) ++{ ++ DEBUGFUNC("e1000_null_read_reg"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_phy_generic - No-op function, return void ++ * @hw: pointer to the HW structure ++ **/ ++void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_null_phy_generic"); ++ return; ++} ++ ++/** ++ * e1000_null_lplu_state - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, ++ bool E1000_UNUSEDARG active) ++{ ++ DEBUGFUNC("e1000_null_lplu_state"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_null_write_reg - No-op function, return 0 ++ * @hw: pointer to the HW structure ++ **/ ++s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, ++ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) ++{ ++ DEBUGFUNC("e1000_null_write_reg"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_i2c_byte_null - No-op function, return 0 ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: device address ++ * @data: data value read ++ * ++ **/ ++s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG byte_offset, ++ u8 E1000_UNUSEDARG dev_addr, ++ u8 E1000_UNUSEDARG *data) ++{ ++ DEBUGFUNC("e1000_read_i2c_byte_null"); ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_i2c_byte_null - No-op function, return 0 ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: device address ++ * @data: data value to write ++ * ++ **/ ++s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, ++ u8 E1000_UNUSEDARG byte_offset, ++ u8 E1000_UNUSEDARG dev_addr, ++ u8 E1000_UNUSEDARG data) ++{ ++ DEBUGFUNC("e1000_write_i2c_byte_null"); ++ return E1000_SUCCESS; ++} + + /** +- * igb_check_reset_block - Check if PHY reset is blocked ++ * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + 
* + * Read the PHY management control register and check whether a PHY reset +- * is blocked. If a reset is not blocked return 0, otherwise ++ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +-s32 igb_check_reset_block(struct e1000_hw *hw) ++s32 e1000_check_reset_block_generic(struct e1000_hw *hw) + { + u32 manc; + +- manc = rd32(E1000_MANC); ++ DEBUGFUNC("e1000_check_reset_block"); ++ ++ manc = E1000_READ_REG(hw, E1000_MANC); + +- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; ++ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? ++ E1000_BLK_PHY_RESET : E1000_SUCCESS; + } + + /** +- * igb_get_phy_id - Retrieve the PHY ID and revision ++ * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +-s32 igb_get_phy_id(struct e1000_hw *hw) ++s32 e1000_get_phy_id(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 phy_id; + ++ DEBUGFUNC("e1000_get_phy_id"); ++ ++ if (!phy->ops.read_reg) ++ return E1000_SUCCESS; ++ + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) +- goto out; ++ return ret_val; + + phy->id = (u32)(phy_id << 16); +- udelay(20); ++ usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) +- goto out; ++ return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_phy_reset_dsp - Reset PHY DSP ++ * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +-static s32 igb_phy_reset_dsp(struct e1000_hw *hw) ++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + +- if (!(hw->phy.ops.write_reg)) +- goto out; ++ DEBUGFUNC("e1000_phy_reset_dsp_generic"); ++ ++ if (!hw->phy.ops.write_reg) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) +- goto out; +- +- ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); ++ return ret_val; + +-out: +- return ret_val; ++ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + } + + /** +- * igb_read_phy_reg_mdic - Read MDI control register ++ * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * +- * Reads the MDI control regsiter in the PHY at offset and stores the ++ * Reads the MDI control register in the PHY at offset and stores the + * information read to data. 
+ **/ +-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { +- hw_dbg("PHY Address %d is out of range\n", offset); +- ret_val = -E1000_ERR_PARAM; +- goto out; ++ DEBUGOUT1("PHY Address %d is out of range\n", offset); ++ return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI +@@ -152,52 +276,55 @@ + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + +- wr32(E1000_MDIC, mdic); ++ E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { +- udelay(50); +- mdic = rd32(E1000_MDIC); ++ usec_delay_irq(50); ++ mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { +- hw_dbg("MDI Read did not complete\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Read did not complete\n"); ++ return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { +- hw_dbg("MDI Error\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Error\n"); ++ return -E1000_ERR_PHY; ++ } ++ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { ++ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", ++ offset, ++ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); ++ return -E1000_ERR_PHY; + } + *data = (u16) mdic; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_phy_reg_mdic - Write MDI control register ++ * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
+ **/ +-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; +- s32 ret_val = 0; ++ ++ DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { +- hw_dbg("PHY Address %d is out of range\n", offset); +- ret_val = -E1000_ERR_PARAM; +- goto out; ++ DEBUGOUT1("PHY Address %d is out of range\n", offset); ++ return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI +@@ -209,35 +336,38 @@ + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + +- wr32(E1000_MDIC, mdic); ++ E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { +- udelay(50); +- mdic = rd32(E1000_MDIC); ++ usec_delay_irq(50); ++ mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { +- hw_dbg("MDI Write did not complete\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Write did not complete\n"); ++ return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { +- hw_dbg("MDI Error\n"); +- ret_val = -E1000_ERR_PHY; +- goto out; ++ DEBUGOUT("MDI Error\n"); ++ return -E1000_ERR_PHY; ++ } ++ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { ++ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", ++ offset, ++ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); ++ return -E1000_ERR_PHY; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_phy_reg_i2c - Read PHY register using i2c ++ * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data +@@ -245,11 +375,13 @@ + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + ++ DEBUGFUNC("e1000_read_phy_reg_i2c"); ++ + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. +@@ -258,47 +390,49 @@ + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + +- wr32(E1000_I2CCMD, i2ccmd); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { +- udelay(50); +- i2ccmd = rd32(E1000_I2CCMD); ++ usec_delay(50); ++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { +- hw_dbg("I2CCMD Read did not complete\n"); ++ DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { +- hw_dbg("I2CCMD Error bit set\n"); ++ DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. 
*/ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_phy_reg_i2c - Write PHY register using i2c ++ * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) + { + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + ++ DEBUGFUNC("e1000_write_phy_reg_i2c"); ++ + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { +- hw_dbg("PHY I2C Address %d is out of range.\n", ++ DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } +@@ -315,29 +449,29 @@ + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + +- wr32(E1000_I2CCMD, i2ccmd); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { +- udelay(50); +- i2ccmd = rd32(E1000_I2CCMD); ++ usec_delay(50); ++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { +- hw_dbg("I2CCMD Write did not complete\n"); ++ DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { +- hw_dbg("I2CCMD Error bit set\n"); ++ DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_sfp_data_byte - Reads SFP module data. ++ * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer +@@ -349,14 +483,16 @@ + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +-s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) ++s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) + { + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + ++ DEBUGFUNC("e1000_read_sfp_data_byte"); ++ + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { +- hw_dbg("I2CCMD command address exceeds upper limit\n"); ++ DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + +@@ -367,30 +503,103 @@ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + +- wr32(E1000_I2CCMD, i2ccmd); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { +- udelay(50); +- data_local = rd32(E1000_I2CCMD); ++ usec_delay(50); ++ data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { +- hw_dbg("I2CCMD Read did not complete\n"); ++ DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { +- hw_dbg("I2CCMD Error bit set\n"); ++ DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_read_phy_reg_igp - Read igp PHY register ++ * e1000_write_sfp_data_byte - Writes SFP module data. 
++ * @hw: pointer to the HW structure ++ * @offset: byte location offset to write to ++ * @data: data to write ++ * ++ * Writes one byte to SFP module data stored ++ * in SFP resided EEPROM memory or SFP diagnostic area. ++ * Function should be called with ++ * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access ++ * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters ++ * access ++ **/ ++s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) ++{ ++ u32 i = 0; ++ u32 i2ccmd = 0; ++ u32 data_local = 0; ++ ++ DEBUGFUNC("e1000_write_sfp_data_byte"); ++ ++ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { ++ DEBUGOUT("I2CCMD command address exceeds upper limit\n"); ++ return -E1000_ERR_PHY; ++ } ++ /* The programming interface is 16 bits wide ++ * so we need to read the whole word first ++ * then update appropriate byte lane and write ++ * the updated word back. ++ */ ++ /* Set up Op-code, EEPROM Address,in the I2CCMD ++ * register. The MAC will take care of interfacing ++ * with an EEPROM to write the data given. ++ */ ++ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | ++ E1000_I2CCMD_OPCODE_READ); ++ /* Set a command to read single word */ ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); ++ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { ++ usec_delay(50); ++ /* Poll the ready bit to see if lastly ++ * launched I2C operation completed ++ */ ++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); ++ if (i2ccmd & E1000_I2CCMD_READY) { ++ /* Check if this is READ or WRITE phase */ ++ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == ++ E1000_I2CCMD_OPCODE_READ) { ++ /* Write the selected byte ++ * lane and update whole word ++ */ ++ data_local = i2ccmd & 0xFF00; ++ data_local |= data; ++ i2ccmd = ((offset << ++ E1000_I2CCMD_REG_ADDR_SHIFT) | ++ E1000_I2CCMD_OPCODE_WRITE | data_local); ++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); ++ } else { ++ break; ++ } ++ } ++ } ++ if (!(i2ccmd & E1000_I2CCMD_READY)) { ++ DEBUGOUT("I2CCMD Write did not complete\n"); ++ return -E1000_ERR_PHY; ++ } ++ if (i2ccmd & E1000_I2CCMD_ERROR) { ++ DEBUGOUT("I2CCMD Error bit set\n"); ++ return -E1000_ERR_PHY; ++ } ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data +@@ -399,38 +608,29 @@ + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
+ **/ +-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) + { +- s32 ret_val = 0; ++ s32 ret_val; + +- if (!(hw->phy.ops.acquire)) +- goto out; ++ DEBUGFUNC("e1000_read_phy_reg_m88"); ++ ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) +- goto out; +- +- if (offset > MAX_PHY_MULTI_PAGE_REG) { +- ret_val = igb_write_phy_reg_mdic(hw, +- IGP01E1000_PHY_PAGE_SELECT, +- (u16)offset); +- if (ret_val) { +- hw->phy.ops.release(hw); +- goto out; +- } +- } ++ return ret_val; + +- ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, +- data); ++ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, ++ data); + + hw->phy.ops.release(hw); + +-out: + return ret_val; + } + + /** +- * igb_write_phy_reg_igp - Write igp PHY register ++ * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset +@@ -438,80 +638,415 @@ + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) + { +- s32 ret_val = 0; ++ s32 ret_val; ++ ++ DEBUGFUNC("e1000_write_phy_reg_m88"); + +- if (!(hw->phy.ops.acquire)) +- goto out; ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- if (offset > MAX_PHY_MULTI_PAGE_REG) { +- ret_val = igb_write_phy_reg_mdic(hw, +- IGP01E1000_PHY_PAGE_SELECT, +- (u16)offset); +- if (ret_val) { +- hw->phy.ops.release(hw); +- goto out; +- } ++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, ++ data); ++ ++ hw->phy.ops.release(hw); ++ ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_set_page_igp - Set page as on IGP-like PHY(s) ++ * @hw: pointer to the HW structure ++ * @page: page to set (shifted left when necessary) ++ * ++ * Sets PHY page required for PHY register access. Assumes semaphore is ++ * already acquired. Note, this function sets phy.addr to 1 so the caller ++ * must set it appropriately (if necessary) after this function returns. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page) ++{ ++ DEBUGFUNC("igb_e1000_set_page_igp"); ++ ++ DEBUGOUT1("Setting page 0x%x\n", page); ++ ++ hw->phy.addr = 1; ++ ++ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); ++} ++ ++/** ++ * __e1000_read_phy_reg_igp - Read igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary, then reads the PHY register at offset ++ * and stores the retrieved information in data. Release any acquired ++ * semaphores before exiting. 
++ **/ ++static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, ++ bool locked) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("__e1000_read_phy_reg_igp"); ++ ++ if (!locked) { ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; + } + +- ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, +- data); ++ if (offset > MAX_PHY_MULTI_PAGE_REG) ++ ret_val = e1000_write_phy_reg_mdic(hw, ++ IGP01E1000_PHY_PAGE_SELECT, ++ (u16)offset); ++ if (!ret_val) ++ ret_val = e1000_read_phy_reg_mdic(hw, ++ MAX_PHY_REG_ADDRESS & offset, ++ data); ++ if (!locked) ++ hw->phy.ops.release(hw); + +- hw->phy.ops.release(hw); ++ return ret_val; ++} ++ ++/** ++ * e1000_read_phy_reg_igp - Read igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Acquires semaphore then reads the PHY register at offset and stores the ++ * retrieved information in data. ++ * Release the acquired semaphore before exiting. ++ **/ ++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_phy_reg_igp(hw, offset, data, false); ++} ++ ++/** ++ * e1000_read_phy_reg_igp_locked - Read igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Reads the PHY register at offset and stores the retrieved information ++ * in data. Assumes semaphore already acquired. ++ **/ ++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_phy_reg_igp(hw, offset, data, true); ++} ++ ++/** ++ * e1000_write_phy_reg_igp - Write igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary, then writes the data to PHY register ++ * at the offset. Release any acquired semaphores before exiting. ++ **/ ++static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, ++ bool locked) ++{ ++ s32 ret_val = E1000_SUCCESS; ++ ++ DEBUGFUNC("e1000_write_phy_reg_igp"); ++ ++ if (!locked) { ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ if (offset > MAX_PHY_MULTI_PAGE_REG) ++ ret_val = e1000_write_phy_reg_mdic(hw, ++ IGP01E1000_PHY_PAGE_SELECT, ++ (u16)offset); ++ if (!ret_val) ++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & ++ offset, ++ data); ++ if (!locked) ++ hw->phy.ops.release(hw); + +-out: + return ret_val; + } + + /** +- * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link ++ * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset + * +- * Sets up Carrier-sense on Transmit and downshift values. ++ * Acquires semaphore then writes the data to PHY register ++ * at the offset. Release any acquired semaphores before exiting. 
+ **/ +-s32 igb_copper_link_setup_82580(struct e1000_hw *hw) ++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_phy_reg_igp(hw, offset, data, false); ++} ++ ++/** ++ * e1000_write_phy_reg_igp_locked - Write igp PHY register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * ++ * Writes the data to PHY register at the offset. ++ * Assumes semaphore already acquired. ++ **/ ++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_phy_reg_igp(hw, offset, data, true); ++} ++ ++/** ++ * __e1000_read_kmrn_reg - Read kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary. Then reads the PHY register at offset ++ * using the kumeran interface. The information retrieved is stored in data. ++ * Release any acquired semaphores before exiting. ++ **/ ++static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, ++ bool locked) ++{ ++ u32 kmrnctrlsta; ++ ++ DEBUGFUNC("__e1000_read_kmrn_reg"); ++ ++ if (!locked) { ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & ++ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); ++ E1000_WRITE_FLUSH(hw); ++ ++ usec_delay(2); ++ ++ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); ++ *data = (u16)kmrnctrlsta; ++ ++ if (!locked) ++ hw->phy.ops.release(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_read_kmrn_reg_generic - Read kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Acquires semaphore then reads the PHY register at offset using the ++ * kumeran interface. The information retrieved is stored in data. ++ * Release the acquired semaphore before exiting. ++ **/ ++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_kmrn_reg(hw, offset, data, false); ++} ++ ++/** ++ * e1000_read_kmrn_reg_locked - Read kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to be read ++ * @data: pointer to the read data ++ * ++ * Reads the PHY register at offset using the kumeran interface. The ++ * information retrieved is stored in data. ++ * Assumes semaphore already acquired. ++ **/ ++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) ++{ ++ return __e1000_read_kmrn_reg(hw, offset, data, true); ++} ++ ++/** ++ * __e1000_write_kmrn_reg - Write kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * @locked: semaphore has already been acquired or not ++ * ++ * Acquires semaphore, if necessary. Then write the data to PHY register ++ * at the offset using the kumeran interface. Release any acquired semaphores ++ * before exiting. 
++ **/ ++static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, ++ bool locked) ++{ ++ u32 kmrnctrlsta; ++ ++ DEBUGFUNC("e1000_write_kmrn_reg_generic"); ++ ++ if (!locked) { ++ s32 ret_val = E1000_SUCCESS; ++ ++ if (!hw->phy.ops.acquire) ++ return E1000_SUCCESS; ++ ++ ret_val = hw->phy.ops.acquire(hw); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & ++ E1000_KMRNCTRLSTA_OFFSET) | data; ++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); ++ E1000_WRITE_FLUSH(hw); ++ ++ usec_delay(2); ++ ++ if (!locked) ++ hw->phy.ops.release(hw); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_kmrn_reg_generic - Write kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * ++ * Acquires semaphore then writes the data to the PHY register at the offset ++ * using the kumeran interface. Release the acquired semaphore before exiting. ++ **/ ++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_kmrn_reg(hw, offset, data, false); ++} ++ ++/** ++ * e1000_write_kmrn_reg_locked - Write kumeran register ++ * @hw: pointer to the HW structure ++ * @offset: register offset to write to ++ * @data: data to write at register offset ++ * ++ * Write the data to PHY register at the offset using the kumeran interface. ++ * Assumes semaphore already acquired. ++ **/ ++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) ++{ ++ return __e1000_write_kmrn_reg(hw, offset, data, true); ++} ++ ++/** ++ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode ++ * @hw: pointer to the HW structure ++ * ++ * Sets up Master/slave mode ++ **/ ++static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) + { +- struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + +- if (phy->reset_disable) { +- ret_val = 0; +- goto out; ++ /* Resolve Master/Slave mode */ ++ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); ++ if (ret_val) ++ return ret_val; ++ ++ /* load defaults for future use */ ++ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? ++ ((phy_data & CR_1000T_MS_VALUE) ? ++ e1000_ms_force_master : ++ e1000_ms_force_slave) : e1000_ms_auto; ++ ++ switch (hw->phy.ms_type) { ++ case e1000_ms_force_master: ++ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); ++ break; ++ case e1000_ms_force_slave: ++ phy_data |= CR_1000T_MS_ENABLE; ++ phy_data &= ~(CR_1000T_MS_VALUE); ++ break; ++ case e1000_ms_auto: ++ phy_data &= ~CR_1000T_MS_ENABLE; ++ /* fall-through */ ++ default: ++ break; + } + +- if (phy->type == e1000_phy_82580) { ++ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); ++} ++ ++/** ++ * igb_e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link ++ * @hw: pointer to the HW structure ++ * ++ * Sets up Carrier-sense on Transmit and downshift values. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw) ++{ ++ s32 ret_val; ++ u16 phy_data; ++ ++ DEBUGFUNC("igb_e1000_copper_link_setup_82577"); ++ ++ if (hw->phy.reset_disable) ++ return E1000_SUCCESS; ++ ++ if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { +- hw_dbg("Error resetting the PHY.\n"); +- goto out; ++ DEBUGOUT("Error resetting the PHY.\n"); ++ return ret_val; + } + } + +- /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ +- ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); ++ /* Enable CRS on Tx. This must be set for half-duplex operation. */ ++ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; ++ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ +- phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; ++ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + +- ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); ++ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Set MDI/MDIX mode */ +- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); ++ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) +- goto out; +- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; ++ return ret_val; ++ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode +@@ -521,41 +1056,42 @@ + case 1: + break; + case 2: +- phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; ++ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: +- phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; ++ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } +- ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); ++ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); ++ if (ret_val) ++ return ret_val; + +-out: +- return ret_val; ++ return e1000_set_master_slave_mode(hw); + } + + /** +- * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link ++ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +-s32 igb_copper_link_setup_m88(struct e1000_hw *hw) ++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + +- if (phy->reset_disable) { +- ret_val = 0; +- goto out; +- } ++ DEBUGFUNC("e1000_copper_link_setup_m88"); ++ ++ if (phy->reset_disable) ++ return E1000_SUCCESS; + +- /* Enable CRS on TX. This must be set for half-duplex operation. */ ++ /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + +@@ -591,12 +1127,12 @@ + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; +- if (phy->disable_polarity_correction == 1) ++ if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register +@@ -605,7 +1141,7 @@ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + +@@ -617,42 +1153,43 @@ + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | +- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); ++ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) +- goto out; ++ return ret_val; + } + + /* Commit the changes. */ +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = phy->ops.commit(hw); + if (ret_val) { +- hw_dbg("Error committing the PHY changes\n"); +- goto out; ++ DEBUGOUT("Error committing the PHY changes\n"); ++ return ret_val; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link ++ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) ++s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + ++ DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); ++ + if (phy->reset_disable) +- return 0; ++ return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); +@@ -694,7 +1231,7 @@ + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; +- if (phy->disable_polarity_correction == 1) ++ if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ +@@ -705,9 +1242,9 @@ + if (ret_val) + return ret_val; + +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = phy->ops.commit(hw); + if (ret_val) { +- hw_dbg("Error committing the PHY changes\n"); ++ DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } +@@ -721,70 +1258,60 @@ + return ret_val; + + /* Commit the changes. 
*/ +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = phy->ops.commit(hw); + if (ret_val) { +- hw_dbg("Error committing the PHY changes\n"); ++ DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } +- ret_val = igb_set_master_slave_mode(hw); ++ ++ ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + +- return 0; ++ return E1000_SUCCESS; + } + + /** +- * igb_copper_link_setup_igp - Setup igp PHY's for copper link ++ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +-s32 igb_copper_link_setup_igp(struct e1000_hw *hw) ++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + +- if (phy->reset_disable) { +- ret_val = 0; +- goto out; +- } ++ DEBUGFUNC("e1000_copper_link_setup_igp"); ++ ++ if (phy->reset_disable) ++ return E1000_SUCCESS; + +- ret_val = phy->ops.reset(hw); ++ ret_val = hw->phy.ops.reset(hw); + if (ret_val) { +- hw_dbg("Error resetting the PHY.\n"); +- goto out; ++ DEBUGOUT("Error resetting the PHY.\n"); ++ return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ +- msleep(100); ++ msec_delay(100); + +- /* The NVM settings will configure LPLU in D3 for +- * non-IGP1 PHYs. +- */ +- if (phy->type == e1000_phy_igp) { +- /* disable lplu d3 during driver init */ +- if (phy->ops.set_d3_lplu_state) +- ret_val = phy->ops.set_d3_lplu_state(hw, false); ++ /* disable lplu d0 during driver init */ ++ if (hw->phy.ops.set_d0_lplu_state) { ++ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { +- hw_dbg("Error Disabling LPLU D3\n"); +- goto out; ++ DEBUGOUT("Error Disabling LPLU D0\n"); ++ return ret_val; + } + } +- +- /* disable lplu d0 during driver init */ +- ret_val = phy->ops.set_d0_lplu_state(hw, false); +- if (ret_val) { +- hw_dbg("Error Disabling LPLU D0\n"); +- goto out; +- } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + +@@ -802,7 +1329,7 @@ + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) +- goto out; ++ return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { +@@ -816,124 +1343,34 @@ + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) +- goto out; +- } +- +- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); +- if (ret_val) +- goto out; +- +- /* load defaults for future use */ +- phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? +- ((data & CR_1000T_MS_VALUE) ? 
+- e1000_ms_force_master : +- e1000_ms_force_slave) : +- e1000_ms_auto; +- +- switch (phy->ms_type) { +- case e1000_ms_force_master: +- data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); +- break; +- case e1000_ms_force_slave: +- data |= CR_1000T_MS_ENABLE; +- data &= ~(CR_1000T_MS_VALUE); +- break; +- case e1000_ms_auto: +- data &= ~CR_1000T_MS_ENABLE; +- default: +- break; ++ return ret_val; + } +- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); +- if (ret_val) +- goto out; +- } +- +-out: +- return ret_val; +-} +- +-/** +- * igb_copper_link_autoneg - Setup/Enable autoneg for copper link +- * @hw: pointer to the HW structure +- * +- * Performs initial bounds checking on autoneg advertisement parameter, then +- * configure to advertise the full capability. Setup the PHY to autoneg +- * and restart the negotiation process between the link partner. If +- * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. +- **/ +-static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +-{ +- struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; +- u16 phy_ctrl; +- +- /* Perform some bounds checking on the autoneg advertisement +- * parameter. +- */ +- phy->autoneg_advertised &= phy->autoneg_mask; +- +- /* If autoneg_advertised is zero, we assume it was not defaulted +- * by the calling code so we set to advertise full capability. +- */ +- if (phy->autoneg_advertised == 0) +- phy->autoneg_advertised = phy->autoneg_mask; +- +- hw_dbg("Reconfiguring auto-neg advertisement params\n"); +- ret_val = igb_phy_setup_autoneg(hw); +- if (ret_val) { +- hw_dbg("Error Setting up Auto-Negotiation\n"); +- goto out; +- } +- hw_dbg("Restarting Auto-Neg\n"); +- +- /* Restart auto-negotiation by setting the Auto Neg Enable bit and +- * the Auto Neg Restart bit in the PHY control register. +- */ +- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); +- if (ret_val) +- goto out; + +- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); +- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); +- if (ret_val) +- goto out; +- +- /* Does the user want to wait for Auto-Neg to complete here, or +- * check at a later time (for example, callback routine). +- */ +- if (phy->autoneg_wait_to_complete) { +- ret_val = igb_wait_autoneg(hw); +- if (ret_val) { +- hw_dbg("Error while waiting for autoneg to complete\n"); +- goto out; +- } ++ ret_val = e1000_set_master_slave_mode(hw); + } + +- hw->mac.get_link_status = true; +- +-out: + return ret_val; + } + + /** +- * igb_phy_setup_autoneg - Configure PHY for auto-negotiation ++ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control +@@ -941,26 +1378,28 @@ + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) ++static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + ++ DEBUGFUNC("e1000_phy_setup_autoneg"); ++ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) +- goto out; ++ return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). 
*/ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) +- goto out; ++ return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up +@@ -980,39 +1419,39 @@ + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + +- hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); ++ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { +- hw_dbg("Advertise 10mb Half duplex\n"); ++ DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { +- hw_dbg("Advertise 10mb Full duplex\n"); ++ DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { +- hw_dbg("Advertise 100mb Half duplex\n"); ++ DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { +- hw_dbg("Advertise 100mb Full duplex\n"); ++ DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) +- hw_dbg("Advertise 1000mb Half duplex request denied!\n"); ++ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { +- hw_dbg("Advertise 1000mb Full duplex\n"); ++ DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + +@@ -1029,68 +1468,126 @@ + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). +- * 3: Both Rx and TX flow control (symmetric) are enabled. ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: +- /* Flow control (RX & TX) is completely disabled by a ++ /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: +- /* RX Flow control is enabled, and TX Flow control is ++ /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are +- * capable of RX Pause ONLY, we will advertise that we +- * support both symmetric and asymmetric RX PAUSE. Later ++ * capable of Rx Pause ONLY, we will advertise that we ++ * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: +- /* TX Flow control is enabled, and RX Flow control is ++ /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. 
+ */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: +- /* Flow control (both RX and TX) is enabled by a software ++ /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: +- hw_dbg("Flow control param set incorrectly\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Flow control param set incorrectly\n"); ++ return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) +- goto out; ++ return ret_val; + +- hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); ++ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + +- if (phy->autoneg_mask & ADVERTISE_1000_FULL) { +- ret_val = phy->ops.write_reg(hw, +- PHY_1000T_CTRL, ++ if (phy->autoneg_mask & ADVERTISE_1000_FULL) ++ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); +- if (ret_val) +- goto out; ++ ++ return ret_val; ++} ++ ++/** ++ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link ++ * @hw: pointer to the HW structure ++ * ++ * Performs initial bounds checking on autoneg advertisement parameter, then ++ * configure to advertise the full capability. Setup the PHY to autoneg ++ * and restart the negotiation process between the link partner. If ++ * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. ++ **/ ++static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 phy_ctrl; ++ ++ DEBUGFUNC("e1000_copper_link_autoneg"); ++ ++ /* Perform some bounds checking on the autoneg advertisement ++ * parameter. ++ */ ++ phy->autoneg_advertised &= phy->autoneg_mask; ++ ++ /* If autoneg_advertised is zero, we assume it was not defaulted ++ * by the calling code so we set to advertise full capability. ++ */ ++ if (!phy->autoneg_advertised) ++ phy->autoneg_advertised = phy->autoneg_mask; ++ ++ DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); ++ ret_val = e1000_phy_setup_autoneg(hw); ++ if (ret_val) { ++ DEBUGOUT("Error Setting up Auto-Negotiation\n"); ++ return ret_val; ++ } ++ DEBUGOUT("Restarting Auto-Neg\n"); ++ ++ /* Restart auto-negotiation by setting the Auto Neg Enable bit and ++ * the Auto Neg Restart bit in the PHY control register. ++ */ ++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); ++ if (ret_val) ++ return ret_val; ++ ++ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); ++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); ++ if (ret_val) ++ return ret_val; ++ ++ /* Does the user want to wait for Auto-Neg to complete here, or ++ * check at a later time (for example, callback routine). ++ */ ++ if (phy->autoneg_wait_to_complete) { ++ ret_val = e1000_wait_autoneg(hw); ++ if (ret_val) { ++ DEBUGOUT("Error while waiting for autoneg to complete\n"); ++ return ret_val; ++ } + } + +-out: ++ hw->mac.get_link_status = true; ++ + return ret_val; + } + + /** +- * igb_setup_copper_link - Configure copper link settings ++ * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced +@@ -1098,129 +1595,134 @@ + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). 
+ **/ +-s32 igb_setup_copper_link(struct e1000_hw *hw) ++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) + { + s32 ret_val; + bool link; + ++ DEBUGFUNC("e1000_setup_copper_link_generic"); ++ + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ +- ret_val = igb_copper_link_autoneg(hw); ++ ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) +- goto out; ++ return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ +- hw_dbg("Forcing Speed and Duplex\n"); ++ DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { +- hw_dbg("Error Forcing Speed and Duplex\n"); +- goto out; ++ DEBUGOUT("Error Forcing Speed and Duplex\n"); ++ return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ +- ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); ++ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, ++ &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (link) { +- hw_dbg("Valid link established!!!\n"); +- igb_config_collision_dist(hw); +- ret_val = igb_config_fc_after_link_up(hw); ++ DEBUGOUT("Valid link established!!!\n"); ++ hw->mac.ops.config_collision_dist(hw); ++ ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { +- hw_dbg("Unable to establish link!!!\n"); ++ DEBUGOUT("Unable to establish link!!!\n"); + } + +-out: + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY ++ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) ++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); ++ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- igb_phy_force_speed_duplex_setup(hw, &phy_data); ++ e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- hw_dbg("IGP PSCR: %X\n", phy_data); ++ DEBUGOUT1("IGP PSCR: %X\n", phy_data); + +- udelay(1); ++ usec_delay(1); + + if (phy->autoneg_wait_to_complete) { +- hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); ++ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) +- hw_dbg("Link taking longer than expected.\n"); ++ DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); +- if (ret_val) +- goto out; ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + } + +-out: + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY ++ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. +- * After reset, TX_CLK and CRS on TX must be set. Return successful upon ++ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) ++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); ++ + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 +@@ -1229,45 +1731,49 @@ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- hw_dbg("M88E1000 PSCR: %X\n", phy_data); ++ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- igb_phy_force_speed_duplex_setup(hw, &phy_data); ++ e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Reset the phy to commit changes. 
*/ +- ret_val = igb_phy_sw_reset(hw); ++ ret_val = hw->phy.ops.commit(hw); + if (ret_val) +- goto out; ++ return ret_val; + + if (phy->autoneg_wait_to_complete) { +- hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); ++ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: ++ case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; +@@ -1276,9 +1782,10 @@ + reset_dsp = false; + break; + } +- if (!reset_dsp) +- hw_dbg("Link taking longer than expected.\n"); +- else { ++ ++ if (!reset_dsp) { ++ DEBUGOUT("Link taking longer than expected.\n"); ++ } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ +@@ -1286,29 +1793,35 @@ + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) +- goto out; +- ret_val = igb_phy_reset_dsp(hw); ++ return ret_val; ++ ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) +- goto out; ++ return ret_val; + } + } + + /* Try once more */ +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, +- 100000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + } + +- if (hw->phy.type != e1000_phy_m88 || +- hw->phy.id == I347AT4_E_PHY_ID || +- hw->phy.id == M88E1112_E_PHY_ID || +- hw->phy.id == I210_I_PHY_ID) +- goto out; ++ if (hw->phy.type != e1000_phy_m88) ++ return E1000_SUCCESS; + ++ if (hw->phy.id == I347AT4_E_PHY_ID || ++ hw->phy.id == M88E1340M_E_PHY_ID || ++ hw->phy.id == M88E1112_E_PHY_ID) ++ return E1000_SUCCESS; ++ if (hw->phy.id == I210_I_PHY_ID) ++ return E1000_SUCCESS; ++ if ((hw->phy.id == M88E1543_E_PHY_ID) || ++ (hw->phy.id == M88E1512_E_PHY_ID)) ++ return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from +@@ -1317,24 +1830,88 @@ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +-out: + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex ++ * igb_e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex ++ * @hw: pointer to the HW structure ++ * ++ * Forces the speed and duplex settings of the PHY. ++ * This is a function pointer entry point only called by ++ * PHY setup routines. 
++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data; ++ bool link; ++ ++ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_ife"); ++ ++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ ++ e1000_phy_force_speed_duplex_setup(hw, &data); ++ ++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); ++ if (ret_val) ++ return ret_val; ++ ++ /* Disable MDI-X support for 10/100 */ ++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ ++ data &= ~IFE_PMC_AUTO_MDIX; ++ data &= ~IFE_PMC_FORCE_MDIX; ++ ++ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); ++ if (ret_val) ++ return ret_val; ++ ++ DEBUGOUT1("IFE PMC: %X\n", data); ++ ++ usec_delay(1); ++ ++ if (phy->autoneg_wait_to_complete) { ++ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); ++ if (ret_val) ++ return ret_val; ++ ++ if (!link) ++ DEBUGOUT("Link taking longer than expected.\n"); ++ ++ /* Try once more */ ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); ++ if (ret_val) ++ return ret_val; ++ } ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * +@@ -1345,17 +1922,18 @@ + * caller must write to the PHY_CONTROL register for these settings to + * take affect. + **/ +-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, +- u16 *phy_ctrl) ++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) + { + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + ++ DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); ++ + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + +@@ -1369,33 +1947,32 @@ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; +- hw_dbg("Half Duplex\n"); ++ DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; +- hw_dbg("Full Duplex\n"); ++ DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? 
*/ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; +- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); +- hw_dbg("Forcing 100mb\n"); ++ *phy_ctrl &= ~MII_CR_SPEED_1000; ++ DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); +- *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); +- hw_dbg("Forcing 10mb\n"); ++ DEBUGOUT("Forcing 10mb\n"); + } + +- igb_config_collision_dist(hw); ++ hw->mac.ops.config_collision_dist(hw); + +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + + /** +- * igb_set_d3_lplu_state - Sets low power link up state for D3 ++ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * +@@ -1408,25 +1985,27 @@ + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) ++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val; + u16 data; + +- if (!(hw->phy.ops.read_reg)) +- goto out; ++ DEBUGFUNC("e1000_set_d3_lplu_state_generic"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) +- goto out; ++ return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) +- goto out; ++ return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable +@@ -1437,176 +2016,219 @@ + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) +- goto out; ++ return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) +- goto out; ++ return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, +- IGP01E1000_PHY_PORT_CONFIG, +- &data); ++ IGP01E1000_PHY_PORT_CONFIG, ++ &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) +- goto out; ++ return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, +- data); ++ data); + if (ret_val) +- goto out; ++ return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) +- goto out; ++ return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +-out: + return ret_val; + } + + /** +- * igb_check_downshift - Checks whether a downshift in speed occurred ++ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
+ **/ +-s32 igb_check_downshift(struct e1000_hw *hw) ++s32 e1000_check_downshift_generic(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + ++ DEBUGFUNC("e1000_check_downshift_generic"); ++ + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: +- offset = M88E1000_PHY_SPEC_STATUS; +- mask = M88E1000_PSSR_DOWNSHIFT; ++ offset = M88E1000_PHY_SPEC_STATUS; ++ mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: +- case e1000_phy_igp: + case e1000_phy_igp_3: +- offset = IGP01E1000_PHY_LINK_HEALTH; +- mask = IGP01E1000_PLHR_SS_DOWNGRADE; ++ offset = IGP01E1000_PHY_LINK_HEALTH; ++ mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; +- ret_val = 0; +- goto out; ++ return E1000_SUCCESS; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) +- phy->speed_downgraded = (phy_data & mask) ? true : false; ++ phy->speed_downgraded = !!(phy_data & mask); + +-out: + return ret_val; + } + + /** +- * igb_check_polarity_m88 - Checks the polarity. ++ * igb_e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +-s32 igb_check_polarity_m88(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + ++ DEBUGFUNC("igb_e1000_check_polarity_m88"); ++ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) +- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) +- ? e1000_rev_polarity_reversed +- : e1000_rev_polarity_normal; ++ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); ++ ++ return ret_val; ++} ++ ++/** ++ * igb_e1000_check_polarity_igp - Checks the polarity. ++ * @hw: pointer to the HW structure ++ * ++ * Success returns 0, Failure returns -E1000_ERR_PHY (-2) ++ * ++ * Polarity is determined based on the PHY port status register, and the ++ * current speed (since there is no polarity at 100Mbps). ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data, offset, mask; ++ ++ DEBUGFUNC("igb_e1000_check_polarity_igp"); ++ ++ /* Polarity is determined based on the speed of ++ * our connection. ++ */ ++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); ++ if (ret_val) ++ return ret_val; ++ ++ if ((data & IGP01E1000_PSSR_SPEED_MASK) == ++ IGP01E1000_PSSR_SPEED_1000MBPS) { ++ offset = IGP01E1000_PHY_PCS_INIT_REG; ++ mask = IGP01E1000_PHY_POLARITY_MASK; ++ } else { ++ /* This really only applies to 10Mbps since ++ * there is no polarity for 100Mbps (always 0). ++ */ ++ offset = IGP01E1000_PHY_PORT_STATUS; ++ mask = IGP01E1000_PSSR_POLARITY_REVERSED; ++ } ++ ++ ret_val = phy->ops.read_reg(hw, offset, &data); ++ ++ if (!ret_val) ++ phy->cable_polarity = ((data & mask) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); + + return ret_val; + } + + /** +- * igb_check_polarity_igp - Checks the polarity. 
++ * igb_e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * +- * Success returns 0, Failure returns -E1000_ERR_PHY (-2) +- * +- * Polarity is determined based on the PHY port status register, and the +- * current speed (since there is no polarity at 100Mbps). ++ * Polarity is determined on the polarity reversal feature being enabled. + **/ +-static s32 igb_check_polarity_igp(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; +- u16 data, offset, mask; ++ u16 phy_data, offset, mask; + +- /* Polarity is determined based on the speed of +- * our connection. +- */ +- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); +- if (ret_val) +- goto out; ++ DEBUGFUNC("igb_e1000_check_polarity_ife"); + +- if ((data & IGP01E1000_PSSR_SPEED_MASK) == +- IGP01E1000_PSSR_SPEED_1000MBPS) { +- offset = IGP01E1000_PHY_PCS_INIT_REG; +- mask = IGP01E1000_PHY_POLARITY_MASK; ++ /* Polarity is determined based on the reversal feature being enabled. ++ */ ++ if (phy->polarity_correction) { ++ offset = IFE_PHY_EXTENDED_STATUS_CONTROL; ++ mask = IFE_PESC_POLARITY_REVERSED; + } else { +- /* This really only applies to 10Mbps since +- * there is no polarity for 100Mbps (always 0). +- */ +- offset = IGP01E1000_PHY_PORT_STATUS; +- mask = IGP01E1000_PSSR_POLARITY_REVERSED; ++ offset = IFE_PHY_SPECIAL_CONTROL; ++ mask = IFE_PSC_FORCE_POLARITY; + } + +- ret_val = phy->ops.read_reg(hw, offset, &data); ++ ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) +- phy->cable_polarity = (data & mask) +- ? e1000_rev_polarity_reversed +- : e1000_rev_polarity_normal; ++ phy->cable_polarity = ((phy_data & mask) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); + +-out: + return ret_val; + } + + /** +- * igb_wait_autoneg - Wait for auto-neg completion ++ * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +-static s32 igb_wait_autoneg(struct e1000_hw *hw) ++static s32 e1000_wait_autoneg(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + ++ DEBUGFUNC("e1000_wait_autoneg"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; ++ + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); +@@ -1617,7 +2239,7 @@ + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; +- msleep(100); ++ msec_delay(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation +@@ -1627,7 +2249,7 @@ + } + + /** +- * igb_phy_has_link - Polls PHY for link ++ * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts +@@ -1635,27 +2257,32 @@ + * + * Polls the PHY status register for link, 'iterations' number of times. 
+ **/ +-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, +- u32 usec_interval, bool *success) ++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ++ u32 usec_interval, bool *success) + { +- s32 ret_val = 0; ++ s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + ++ DEBUGFUNC("e1000_phy_has_link_generic"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; ++ + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); +- if (ret_val && usec_interval > 0) { ++ if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) +- mdelay(usec_interval/1000); ++ msec_delay(usec_interval/1000); + else +- udelay(usec_interval); ++ usec_delay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) +@@ -1663,18 +2290,18 @@ + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) +- mdelay(usec_interval/1000); ++ msec_delay(usec_interval/1000); + else +- udelay(usec_interval); ++ usec_delay(usec_interval); + } + +- *success = (i < iterations) ? true : false; ++ *success = (i < iterations); + + return ret_val; + } + + /** +- * igb_get_cable_length_m88 - Determine cable length for m88 PHY ++ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length +@@ -1688,37 +2315,40 @@ + * 3 110 - 140 meters + * 4 > 140 meters + **/ +-s32 igb_get_cable_length_m88(struct e1000_hw *hw) ++s32 e1000_get_cable_length_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + ++ DEBUGFUNC("e1000_get_cable_length_m88"); ++ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> +- M88E1000_PSSR_CABLE_LENGTH_SHIFT; +- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { +- ret_val = -E1000_ERR_PHY; +- goto out; +- } ++ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> ++ M88E1000_PSSR_CABLE_LENGTH_SHIFT); ++ ++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) ++ return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + +-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) ++s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; +- u16 phy_data, phy_data2, index, default_page, is_cm; ++ u16 phy_data, phy_data2, is_cm; ++ u16 index, default_page; ++ ++ DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: +@@ -1743,27 +2373,29 @@ + phy->cable_length = phy_data / (is_cm ? 
100 : 1); + break; + case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) +- goto out; ++ return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) +- goto out; ++ return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + +@@ -1772,34 +2404,34 @@ + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + +- /* Reset the page selec to its original value */ ++ /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) +- goto out; ++ return ret_val; + break; ++ + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; +- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { +- ret_val = -E1000_ERR_PHY; +- goto out; +- } ++ ++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) ++ return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; +@@ -1811,20 +2443,18 @@ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) +- goto out; ++ return ret_val; + + break; + default: +- ret_val = -E1000_ERR_PHY; +- goto out; ++ return -E1000_ERR_PHY; + } + +-out: + return ret_val; + } + + /** +- * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY ++ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the +@@ -1834,10 +2464,10 @@ + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) ++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; +@@ -1848,26 +2478,26 @@ + IGP02E1000_PHY_AGC_D + }; + ++ DEBUGFUNC("e1000_get_cable_length_igp_2"); ++ + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. 
+ */ +- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & +- IGP02E1000_AGC_LENGTH_MASK; ++ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & ++ IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || +- (cur_agc_index == 0)) { +- ret_val = -E1000_ERR_PHY; +- goto out; +- } ++ (cur_agc_index == 0)) ++ return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > +@@ -1885,18 +2515,17 @@ + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ +- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? +- (agc_value - IGP02E1000_AGC_RANGE) : 0; ++ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? ++ (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_phy_info_m88 - Retrieve PHY information ++ * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) +@@ -1905,54 +2534,54 @@ + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +-s32 igb_get_phy_info_m88(struct e1000_hw *hw) ++s32 e1000_get_phy_info_m88(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("e1000_get_phy_info_m88"); ++ + if (phy->media_type != e1000_media_type_copper) { +- hw_dbg("Phy info is only valid for copper media\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid for copper media\n"); ++ return -E1000_ERR_CONFIG; + } + +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { +- hw_dbg("Phy info is only valid if link is up\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) +- ? true : false; ++ phy->polarity_correction = !!(phy_data & ++ M88E1000_PSCR_POLARITY_REVERSAL); + +- ret_val = igb_check_polarity_m88(hw); ++ ret_val = igb_e1000_check_polarity_m88(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; ++ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { +- ret_val = phy->ops.get_cable_length(hw); ++ ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? 
e1000_1000t_rx_status_ok +@@ -1968,12 +2597,11 @@ + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +-out: + return ret_val; + } + + /** +- * igb_get_phy_info_igp - Retrieve igp PHY information ++ * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then +@@ -1981,44 +2609,45 @@ + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +-s32 igb_get_phy_info_igp(struct e1000_hw *hw) ++s32 e1000_get_phy_info_igp(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ DEBUGFUNC("e1000_get_phy_info_igp"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { +- hw_dbg("Phy info is only valid if link is up\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + +- ret_val = igb_check_polarity_igp(hw); ++ ret_val = igb_e1000_check_polarity_igp(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; ++ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) +- goto out; ++ return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok +@@ -2033,42 +2662,97 @@ + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +-out: + return ret_val; + } + + /** +- * igb_phy_sw_reset - PHY software reset ++ * igb_e1000_get_phy_info_ife - Retrieves various IFE PHY states ++ * @hw: pointer to the HW structure ++ * ++ * Populates "phy" structure with various feature states. ++ **/ ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ u16 data; ++ bool link; ++ ++ DEBUGFUNC("igb_e1000_get_phy_info_ife"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); ++ if (ret_val) ++ return ret_val; ++ ++ if (!link) { ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; ++ } ++ ++ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); ++ ++ if (phy->polarity_correction) { ++ ret_val = igb_e1000_check_polarity_ife(hw); ++ if (ret_val) ++ return ret_val; ++ } else { ++ /* Polarity is forced */ ++ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); ++ } ++ ++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); ++ if (ret_val) ++ return ret_val; ++ ++ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); ++ ++ /* The following parameters are undefined for 10/100 operation. 
*/ ++ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; ++ phy->local_rx = e1000_1000t_rx_status_undefined; ++ phy->remote_rx = e1000_1000t_rx_status_undefined; ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +-s32 igb_phy_sw_reset(struct e1000_hw *hw) ++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val; + u16 phy_ctrl; + +- if (!(hw->phy.ops.read_reg)) +- goto out; ++ DEBUGFUNC("e1000_phy_sw_reset_generic"); ++ ++ if (!hw->phy.ops.read_reg) ++ return E1000_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) +- goto out; ++ return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) +- goto out; ++ return ret_val; + +- udelay(1); ++ usec_delay(1); + +-out: + return ret_val; + } + + /** +- * igb_phy_hw_reset - PHY hardware reset ++ * e1000_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire +@@ -2076,50 +2760,65 @@ + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +-s32 igb_phy_hw_reset(struct e1000_hw *hw) ++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; +- s32 ret_val; ++ s32 ret_val; + u32 ctrl; + +- ret_val = igb_check_reset_block(hw); +- if (ret_val) { +- ret_val = 0; +- goto out; ++ DEBUGFUNC("e1000_phy_hw_reset_generic"); ++ ++ if (phy->ops.check_reset_block) { ++ ret_val = phy->ops.check_reset_block(hw); ++ if (ret_val) ++ return E1000_SUCCESS; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- ctrl = rd32(E1000_CTRL); +- wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); +- wrfl(); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); ++ E1000_WRITE_FLUSH(hw); + +- udelay(phy->reset_delay_us); ++ usec_delay(phy->reset_delay_us); + +- wr32(E1000_CTRL, ctrl); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ E1000_WRITE_FLUSH(hw); + +- udelay(150); ++ usec_delay(150); + + phy->ops.release(hw); + +- ret_val = phy->ops.get_cfg_done(hw); ++ return phy->ops.get_cfg_done(hw); ++} + +-out: +- return ret_val; ++/** ++ * e1000_get_cfg_done_generic - Generic configuration done ++ * @hw: pointer to the HW structure ++ * ++ * Generic function to wait 10 milli-seconds for configuration to complete ++ * and return success. ++ **/ ++s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) ++{ ++ DEBUGFUNC("e1000_get_cfg_done_generic"); ++ ++ msec_delay_irq(10); ++ ++ return E1000_SUCCESS; + } + + /** +- * igb_phy_init_script_igp3 - Inits the IGP3 PHY ++ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +-s32 igb_phy_init_script_igp3(struct e1000_hw *hw) ++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) + { +- hw_dbg("Running IGP 3 PHY init script\n"); ++ DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ +@@ -2130,7 +2829,7 @@ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); +- /* Add 4% to TX amplitude in Giga mode */ ++ /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); +@@ -2191,17 +2890,106 @@ + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + +- return 0; ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_get_phy_type_from_id - Get PHY type from id ++ * @phy_id: phy_id read from the phy ++ * ++ * Returns the phy type from the id. ++ **/ ++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) ++{ ++ enum e1000_phy_type phy_type = e1000_phy_unknown; ++ ++ switch (phy_id) { ++ case M88E1000_I_PHY_ID: ++ case M88E1000_E_PHY_ID: ++ case M88E1111_I_PHY_ID: ++ case M88E1011_I_PHY_ID: ++ case M88E1543_E_PHY_ID: ++ case M88E1512_E_PHY_ID: ++ case I347AT4_E_PHY_ID: ++ case M88E1112_E_PHY_ID: ++ case M88E1340M_E_PHY_ID: ++ phy_type = e1000_phy_m88; ++ break; ++ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ ++ phy_type = e1000_phy_igp_2; ++ break; ++ case GG82563_E_PHY_ID: ++ phy_type = e1000_phy_gg82563; ++ break; ++ case IGP03E1000_E_PHY_ID: ++ phy_type = e1000_phy_igp_3; ++ break; ++ case IFE_E_PHY_ID: ++ case IFE_PLUS_E_PHY_ID: ++ case IFE_C_E_PHY_ID: ++ phy_type = e1000_phy_ife; ++ break; ++ case I82580_I_PHY_ID: ++ phy_type = e1000_phy_82580; ++ break; ++ case I210_I_PHY_ID: ++ phy_type = e1000_phy_i210; ++ break; ++ default: ++ phy_type = e1000_phy_unknown; ++ break; ++ } ++ return phy_type; ++} ++ ++/** ++ * e1000_determine_phy_address - Determines PHY address. ++ * @hw: pointer to the HW structure ++ * ++ * This uses a trial and error method to loop through possible PHY ++ * addresses. It tests each by reading the PHY ID registers and ++ * checking for a match. ++ **/ ++s32 e1000_determine_phy_address(struct e1000_hw *hw) ++{ ++ u32 phy_addr = 0; ++ u32 i; ++ enum e1000_phy_type phy_type = e1000_phy_unknown; ++ ++ hw->phy.id = phy_type; ++ ++ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { ++ hw->phy.addr = phy_addr; ++ i = 0; ++ ++ do { ++ e1000_get_phy_id(hw); ++ phy_type = e1000_get_phy_type_from_id(hw->phy.id); ++ ++ /* If phy_type is valid, break - we found our ++ * PHY address ++ */ ++ if (phy_type != e1000_phy_unknown) ++ return E1000_SUCCESS; ++ ++ msec_delay(1); ++ i++; ++ } while (i < 10); ++ } ++ ++ return -E1000_ERR_PHY_TYPE; + } + + /** +- * igb_power_up_phy_copper - Restore copper link in case of PHY power down ++ * igb_e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a +- * driver unload, restore the link to previous settings. ++ * driver unload, or wake on lan is not enabled, restore the link to previous ++ * settings. 
+ **/ +-void igb_power_up_phy_copper(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_power_up_phy_copper(struct e1000_hw *hw) + { + u16 mii_reg = 0; + +@@ -2212,13 +3000,15 @@ + } + + /** +- * igb_power_down_phy_copper - Power down copper PHY ++ * igb_e1000_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * +- * Power down PHY to save power when interface is down and wake on lan +- * is not enabled. ++ * In the case of a PHY power down to save power, or to turn off link during a ++ * driver unload, or wake on lan is not enabled, restore the link to previous ++ * settings. + **/ +-void igb_power_down_phy_copper(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++void igb_e1000_power_down_phy_copper(struct e1000_hw *hw) + { + u16 mii_reg = 0; + +@@ -2226,98 +3016,85 @@ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +- usleep_range(1000, 2000); ++ msec_delay(1); + } + + /** +- * igb_check_polarity_82580 - Checks the polarity. ++ * igb_e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +-static s32 igb_check_polarity_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + ++ DEBUGFUNC("igb_e1000_check_polarity_82577"); + +- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); ++ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) +- phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) +- ? e1000_rev_polarity_reversed +- : e1000_rev_polarity_normal; ++ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) ++ ? e1000_rev_polarity_reversed ++ : e1000_rev_polarity_normal); + + return ret_val; + } + + /** +- * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY ++ * igb_e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * +- * Calls the PHY setup function to force speed and duplex. Clears the +- * auto-crossover to force MDI manually. Waits for link and returns +- * successful if link up is successful, else -E1000_ERR_PHY (-2). ++ * Calls the PHY setup function to force speed and duplex. + **/ +-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + ++ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_82577"); ++ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- igb_phy_force_speed_duplex_setup(hw, &phy_data); ++ e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) +- goto out; +- +- /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI +- * forced whenever speed and duplex are forced. 
+- */ +- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); +- if (ret_val) +- goto out; +- +- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; +- +- ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); +- if (ret_val) +- goto out; +- +- hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); ++ return ret_val; + +- udelay(1); ++ usec_delay(1); + + if (phy->autoneg_wait_to_complete) { +- hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); ++ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) +- hw_dbg("Link taking longer than expected.\n"); ++ DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ +- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); +- if (ret_val) +- goto out; ++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, ++ 100000, &link); + } + +-out: + return ret_val; + } + + /** +- * igb_get_phy_info_82580 - Retrieve I82580 PHY information ++ * igb_e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then +@@ -2325,44 +3102,46 @@ + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +-s32 igb_get_phy_info_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + +- ret_val = igb_phy_has_link(hw, 1, 0, &link); ++ DEBUGFUNC("igb_e1000_get_phy_info_82577"); ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) +- goto out; ++ return ret_val; + + if (!link) { +- hw_dbg("Phy info is only valid if link is up\n"); +- ret_val = -E1000_ERR_CONFIG; +- goto out; ++ DEBUGOUT("Phy info is only valid if link is up\n"); ++ return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + +- ret_val = igb_check_polarity_82580(hw); ++ ret_val = igb_e1000_check_polarity_82577(hw); + if (ret_val) +- goto out; ++ return ret_val; + +- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); ++ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) +- goto out; ++ return ret_val; + +- phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; ++ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + +- if ((data & I82580_PHY_STATUS2_SPEED_MASK) == +- I82580_PHY_STATUS2_SPEED_1000MBPS) { ++ if ((data & I82577_PHY_STATUS2_SPEED_MASK) == ++ I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) +- goto out; ++ return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) +- goto out; ++ return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok +@@ -2377,63 +3156,65 @@ + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_get_cable_length_82580 - Determine cable length for 82580 PHY ++ * igb_e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
+ **/ +-s32 igb_get_cable_length_82580(struct e1000_hw *hw) ++/* Changed name, duplicated with e1000 */ ++s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw) + { + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + +- ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); ++ DEBUGFUNC("igb_e1000_get_cable_length_82577"); ++ ++ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) +- goto out; ++ return ret_val; + +- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> +- I82580_DSTATUS_CABLE_LENGTH_SHIFT; ++ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> ++ I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) +- ret_val = -E1000_ERR_PHY; ++ return -E1000_ERR_PHY; + + phy->cable_length = length; + +-out: +- return ret_val; ++ return E1000_SUCCESS; + } + + /** +- * igb_write_phy_reg_gs40g - Write GS40G PHY register ++ * e1000_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure +- * @offset: lower half is register offset to write to +- * upper half is page to use. ++ * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) ++s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) + { + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + ++ DEBUGFUNC("e1000_write_phy_reg_gs40g"); ++ + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + +- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); ++ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; +- ret_val = igb_write_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + release: + hw->phy.ops.release(hw); +@@ -2441,7 +3222,7 @@ + } + + /** +- * igb_read_phy_reg_gs40g - Read GS40G PHY register ++ * e1000_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. +@@ -2450,20 +3231,22 @@ + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) ++s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) + { + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + ++ DEBUGFUNC("e1000_read_phy_reg_gs40g"); ++ + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + +- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); ++ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; +- ret_val = igb_read_phy_reg_mdic(hw, offset, data); ++ ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + release: + hw->phy.ops.release(hw); +@@ -2471,41 +3254,156 @@ + } + + /** +- * igb_set_master_slave_mode - Setup PHY for Master/slave mode ++ * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure ++ * @address: address to be read ++ * @data: pointer to the read data + * +- * Sets up Master/slave mode ++ * Reads the mPHY control register in the PHY at offset and stores the ++ * information read to data. + **/ +-static s32 igb_set_master_slave_mode(struct e1000_hw *hw) ++s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) + { +- s32 ret_val; +- u16 phy_data; ++ u32 mphy_ctrl = 0; ++ bool locked = false; ++ bool ready; + +- /* Resolve Master/Slave mode */ +- ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); +- if (ret_val) +- return ret_val; ++ DEBUGFUNC("e1000_read_phy_reg_mphy"); + +- /* load defaults for future use */ +- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? +- ((phy_data & CR_1000T_MS_VALUE) ? +- e1000_ms_force_master : +- e1000_ms_force_slave) : e1000_ms_auto; ++ /* Check if mPHY is ready to read/write operations */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; + +- switch (hw->phy.ms_type) { +- case e1000_ms_force_master: +- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); +- break; +- case e1000_ms_force_slave: +- phy_data |= CR_1000T_MS_ENABLE; +- phy_data &= ~(CR_1000T_MS_VALUE); +- break; +- case e1000_ms_auto: +- phy_data &= ~CR_1000T_MS_ENABLE; +- /* fall-through */ +- default: ++ /* Check if mPHY access is disabled and enable it if so */ ++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); ++ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { ++ locked = true; ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ mphy_ctrl |= E1000_MPHY_ENA_ACCESS; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ } ++ ++ /* Set the address that we want to read */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ ++ /* We mask address, because we want to use only current lane */ ++ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & ++ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | ++ (address & E1000_MPHY_ADDRESS_MASK); ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ ++ /* Read data from the address */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ *data = E1000_READ_REG(hw, E1000_MPHY_DATA); ++ ++ /* Disable access to mPHY if it was originally disabled */ ++ if (locked) ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, ++ E1000_MPHY_DIS_ACCESS); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_write_phy_reg_mphy - Write mPHY control register ++ * @hw: pointer to the HW structure ++ * @address: address to write to ++ * @data: data to write to register at offset 
++ * @line_override: used when we want to use different line than default one ++ * ++ * Writes data to mPHY control register. ++ **/ ++s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, ++ bool line_override) ++{ ++ u32 mphy_ctrl = 0; ++ bool locked = false; ++ bool ready; ++ ++ DEBUGFUNC("e1000_write_phy_reg_mphy"); ++ ++ /* Check if mPHY is ready to read/write operations */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ ++ /* Check if mPHY access is disabled and enable it if so */ ++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); ++ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { ++ locked = true; ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ mphy_ctrl |= E1000_MPHY_ENA_ACCESS; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ } ++ ++ /* Set the address that we want to read */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ ++ /* We mask address, because we want to use only current lane */ ++ if (line_override) ++ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; ++ else ++ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; ++ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | ++ (address & E1000_MPHY_ADDRESS_MASK); ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); ++ ++ /* Read data from the address */ ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); ++ ++ /* Disable access to mPHY if it was originally disabled */ ++ if (locked) ++ ready = e1000_is_mphy_ready(hw); ++ if (!ready) ++ return -E1000_ERR_PHY; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, ++ E1000_MPHY_DIS_ACCESS); ++ ++ return E1000_SUCCESS; ++} ++ ++/** ++ * e1000_is_mphy_ready - Check if mPHY control register is not busy ++ * @hw: pointer to the HW structure ++ * ++ * Returns mPHY control register status. ++ **/ ++bool e1000_is_mphy_ready(struct e1000_hw *hw) ++{ ++ u16 retry_count = 0; ++ u32 mphy_ctrl = 0; ++ bool ready = false; ++ ++ while (retry_count < 2) { ++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); ++ if (mphy_ctrl & E1000_MPHY_BUSY) { ++ usec_delay(20); ++ retry_count++; ++ continue; ++ } ++ ready = true; + break; + } + +- return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); ++ if (!ready) ++ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); ++ ++ return ready; + } +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h +--- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,146 +1,115 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". 
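The three mPHY helpers above share one access sequence: poll e1000_is_mphy_ready(), enable access in E1000_MPHY_ADDR_CTRL if it was disabled, program the lane-masked address, move the data through E1000_MPHY_DATA, and restore E1000_MPHY_DIS_ACCESS when the interface was originally locked. A hedged caller-side sketch; the address and bit below are placeholders, not documented offsets:

    /* Illustrative read-modify-write through the mPHY helpers above. */
    u32 addr = 0x0;                 /* hypothetical mPHY register address */
    u32 val;
    s32 ret = e1000_read_phy_reg_mphy(hw, addr, &val);
    if (ret)
            return ret;
    val |= 0x1;                     /* illustrative bit */
    /* false = keep the default lane instead of overriding it */
    ret = e1000_write_phy_reg_mphy(hw, addr, val, false);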
+- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* + +-#ifndef _E1000_PHY_H_ +-#define _E1000_PHY_H_ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. + +-enum e1000_ms_type { +- e1000_ms_hw_default = 0, +- e1000_ms_force_master, +- e1000_ms_force_slave, +- e1000_ms_auto +-}; ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-enum e1000_smart_speed { +- e1000_smart_speed_default = 0, +- e1000_smart_speed_on, +- e1000_smart_speed_off +-}; ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. + +-s32 igb_check_downshift(struct e1000_hw *hw); +-s32 igb_check_reset_block(struct e1000_hw *hw); +-s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +-s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +-s32 igb_get_cable_length_m88(struct e1000_hw *hw); +-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +-s32 igb_get_phy_id(struct e1000_hw *hw); +-s32 igb_get_phy_info_igp(struct e1000_hw *hw); +-s32 igb_get_phy_info_m88(struct e1000_hw *hw); +-s32 igb_phy_sw_reset(struct e1000_hw *hw); +-s32 igb_phy_hw_reset(struct e1000_hw *hw); +-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +-s32 igb_setup_copper_link(struct e1000_hw *hw); +-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, +- u32 usec_interval, bool *success); +-void igb_power_up_phy_copper(struct e1000_hw *hw); +-void igb_power_down_phy_copper(struct e1000_hw *hw); +-s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +-s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +-s32 igb_get_phy_info_82580(struct e1000_hw *hw); +-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +-s32 igb_get_cable_length_82580(struct e1000_hw *hw); +-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +-s32 igb_check_polarity_m88(struct e1000_hw *hw); ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". 
+ +-/* IGP01E1000 Specific Registers */ +-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +-#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +- +-#define I82580_ADDR_REG 16 +-#define I82580_CFG_REG 22 +-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) +-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +-#define I82580_CTRL_REG 23 +-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) +- +-/* 82580 specific PHY registers */ +-#define I82580_PHY_CTRL_2 18 +-#define I82580_PHY_LBK_CTRL 19 +-#define I82580_PHY_STATUS_2 26 +-#define I82580_PHY_DIAG_STATUS 31 +- +-/* I82580 PHY Status 2 */ +-#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +-#define I82580_PHY_STATUS2_MDIX 0x0800 +-#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +-#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +-#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 +- +-/* I82580 PHY Control 2 */ +-#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +-#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +-#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 +- +-/* I82580 PHY Diagnostics Status */ +-#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +-#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +-/* 82580 PHY Power Management */ +-#define E1000_82580_PHY_POWER_MGMT 0xE14 +-#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +-#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +-#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +-#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ ++*******************************************************************************/ ++ ++#ifndef _E1000_PHY_H_ ++#define _E1000_PHY_H_ ++ ++void e1000_init_phy_ops_generic(struct e1000_hw *hw); ++s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); ++void e1000_null_phy_generic(struct e1000_hw *hw); ++s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); ++s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); ++s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++s32 e1000_check_downshift_generic(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw); ++s32 e1000_check_reset_block_generic(struct e1000_hw *hw); ++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); ++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); ++s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); ++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); ++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); ++s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); ++s32 e1000_get_cable_length_m88(struct e1000_hw *hw); ++s32 
e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); ++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); ++s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); ++s32 e1000_get_phy_id(struct e1000_hw *hw); ++s32 e1000_get_phy_info_igp(struct e1000_hw *hw); ++s32 e1000_get_phy_info_m88(struct e1000_hw *hw); ++s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw); ++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); ++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); ++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); ++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); ++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page); ++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); ++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); ++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ++ u32 usec_interval, bool *success); ++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); ++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); ++s32 e1000_determine_phy_address(struct e1000_hw *hw); ++s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); ++s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); ++void igb_e1000_power_up_phy_copper(struct e1000_hw *hw); ++void igb_e1000_power_down_phy_copper(struct e1000_hw *hw); ++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); ++s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); ++s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw); ++s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw); ++s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw); ++s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); ++s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw); ++s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); ++s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); ++s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); ++s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, ++ bool line_override); ++bool e1000_is_mphy_ready(struct e1000_hw *hw); + +-/* Enable flexible speed on link-up */ +-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +-#define 
IGP01E1000_PSSR_MDIX 0x0800 +-#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +-#define IGP02E1000_PHY_CHANNEL_NUM 4 +-#define IGP02E1000_PHY_AGC_A 0x11B1 +-#define IGP02E1000_PHY_AGC_B 0x12B1 +-#define IGP02E1000_PHY_AGC_C 0x14B1 +-#define IGP02E1000_PHY_AGC_D 0x18B1 +-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +-#define IGP02E1000_AGC_LENGTH_MASK 0x7F +-#define IGP02E1000_AGC_RANGE 15 ++#define E1000_MAX_PHY_ADDR 8 + +-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF ++/* IGP01E1000 Specific Registers */ ++#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ ++#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ ++#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ ++#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ ++#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ ++#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ ++#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ ++#define IGP_PAGE_SHIFT 5 ++#define PHY_REG_MASK 0x1F + + /* GS40G - I210 PHY defines */ + #define GS40G_PAGE_SELECT 0x16 +@@ -151,7 +120,110 @@ + #define GS40G_MAC_LB 0x4140 + #define GS40G_MAC_SPEED_1G 0X0006 + #define GS40G_COPPER_SPEC 0x0010 +-#define GS40G_LINE_LB 0x4000 ++ ++#define HV_INTC_FC_PAGE_START 768 ++#define I82578_ADDR_REG 29 ++#define I82577_ADDR_REG 16 ++#define I82577_CFG_REG 22 ++#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) ++#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ ++#define I82577_CTRL_REG 23 ++ ++/* 82577 specific PHY registers */ ++#define I82577_PHY_CTRL_2 18 ++#define I82577_PHY_LBK_CTRL 19 ++#define I82577_PHY_STATUS_2 26 ++#define I82577_PHY_DIAG_STATUS 31 ++ ++/* I82577 PHY Status 2 */ ++#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 ++#define I82577_PHY_STATUS2_MDIX 0x0800 ++#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 ++#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 ++ ++/* I82577 PHY Control 2 */ ++#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 ++#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 ++#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 ++ ++/* I82577 PHY Diagnostics Status */ ++#define I82577_DSTATUS_CABLE_LENGTH 0x03FC ++#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 ++ ++/* 82580 PHY Power Management */ ++#define E1000_82580_PHY_POWER_MGMT 0xE14 ++#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ ++#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ ++#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ ++#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ ++ ++#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ ++#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ ++#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ ++#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ ++#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ ++ ++#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 ++#define IGP01E1000_PHY_POLARITY_MASK 0x0078 ++ ++#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 ++#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ ++ ++#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 ++ ++#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ ++#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ ++#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ ++ ++#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 ++ ++#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 ++#define IGP01E1000_PSSR_MDIX 0x0800 ++#define IGP01E1000_PSSR_SPEED_MASK 0xC000 ++#define 
IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 ++ ++#define IGP02E1000_PHY_CHANNEL_NUM 4 ++#define IGP02E1000_PHY_AGC_A 0x11B1 ++#define IGP02E1000_PHY_AGC_B 0x12B1 ++#define IGP02E1000_PHY_AGC_C 0x14B1 ++#define IGP02E1000_PHY_AGC_D 0x18B1 ++ ++#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ ++#define IGP02E1000_AGC_LENGTH_MASK 0x7F ++#define IGP02E1000_AGC_RANGE 15 ++ ++#define E1000_CABLE_LENGTH_UNDEFINED 0xFF ++ ++#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 ++#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 ++#define E1000_KMRNCTRLSTA_REN 0x00200000 ++#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ ++#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ ++#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ ++#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ ++#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ ++ ++#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 ++#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ ++#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ ++#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ ++ ++/* IFE PHY Extended Status Control */ ++#define IFE_PESC_POLARITY_REVERSED 0x0100 ++ ++/* IFE PHY Special Control */ ++#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 ++#define IFE_PSC_FORCE_POLARITY 0x0020 ++ ++/* IFE PHY Special Control and LED Control */ ++#define IFE_PSCL_PROBE_MODE 0x0020 ++#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ ++#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ ++ ++/* IFE PHY MDIX Control */ ++#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ ++#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ ++#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + + /* SFP modules ID memory locations */ + #define E1000_SFF_IDENTIFIER_OFFSET 0x00 +@@ -160,7 +232,7 @@ + + #define E1000_SFF_ETH_FLAGS_OFFSET 0x06 + /* Flags for SFP modules compatible with ETH up to 1Gb */ +-struct e1000_sfp_flags { ++struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; +@@ -171,4 +243,10 @@ + u8 e10_base_px:1; + }; + ++/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ ++#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 ++#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 ++#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 ++#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 ++ + #endif +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h +--- a/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,154 +1,196 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . 
+- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #ifndef _E1000_REGS_H_ + #define _E1000_REGS_H_ + +-#define E1000_CTRL 0x00000 /* Device Control - RW */ +-#define E1000_STATUS 0x00008 /* Device Status - RO */ +-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +-#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +-#define E1000_MDIC 0x00020 /* MDI Control - RW */ +-#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +-#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +-#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +-#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +-#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +-#define E1000_RCTL 0x00100 /* RX Control - RW */ +-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +-#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +-#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +-#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +-#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +-#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +-#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +-#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +-#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +-#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +-#define E1000_TCTL 0x00400 /* TX Control - RW */ +-#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +-#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +-#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +-#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +-#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +-#define E1000_PBS 0x01008 /* Packet Buffer Size */ +-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +-#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +-#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +-#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +-#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +-#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +-#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +-#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +-#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +-#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +-#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +-#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +-#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +-#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +-#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ ++#define E1000_CTRL 0x00000 /* Device Control - RW */ ++#define E1000_STATUS 0x00008 /* Device Status - RO */ ++#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ ++#define E1000_EERD 0x00014 /* EEPROM Read - RW */ ++#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ ++#define E1000_FLA 0x0001C /* Flash Access - RW */ ++#define E1000_MDIC 0x00020 /* MDI Control - RW */ ++#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ ++#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ ++#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ ++#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ ++#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ ++#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ ++#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ + #define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ + #define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ + #define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ ++#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ ++#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ ++#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ ++#define E1000_SCTL 0x00024 /* SerDes Control - RW */ ++#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ ++#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ ++#define E1000_FCT 0x00030 /* Flow Control Type - RW */ ++#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ ++#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ ++#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ ++#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ ++#define E1000_ICS 
0x000C8 /* Interrupt Cause Set - WO */ ++#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ ++#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ ++#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ ++#define E1000_RCTL 0x00100 /* Rx Control - RW */ ++#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ ++#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ ++#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ ++#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ ++#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) ++#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ ++#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ ++#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ ++#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ ++#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ ++#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ ++#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ ++#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ ++#define E1000_TCTL 0x00400 /* Tx Control - RW */ ++#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ ++#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ ++#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ ++#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ ++#define E1000_LEDMUX 0x08130 /* LED MUX Control */ ++#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ ++#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ ++#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ ++#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ ++#define E1000_PBS 0x01008 /* Packet Buffer Size */ ++#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ ++#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ ++#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ ++#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ ++#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ ++#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ ++#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ ++#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ ++#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ ++#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ ++#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ ++#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ ++#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ ++#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ ++#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ ++#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ ++#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ ++#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ ++#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ ++#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ ++#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ ++#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ ++#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ ++#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ ++#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ ++#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ ++#define E1000_ERT 
0x02008 /* Early Rx Threshold - RW */ ++#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ ++#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ ++#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ ++#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ ++#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ ++#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ ++#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ ++#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ ++#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ ++#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ ++/* Split and Replication Rx Control - RW */ ++#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ ++#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ ++#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ ++#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ ++#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ ++#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ ++#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ ++#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ ++#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ ++#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ ++#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ ++#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ ++#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ ++#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ ++#define E1000_I210_FLMNGCTL 0x12038 ++#define E1000_I210_FLMNGDATA 0x1203C ++#define E1000_I210_FLMNGCNT 0x12040 + +-/* IEEE 1588 TIMESYNCH */ +-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +-#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +-#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +-#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +-#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +-#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +-#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +-#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +-#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +-#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +-#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +-#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +-#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +-#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +-#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +-#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +-#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +-#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +-#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +-#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +-#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +-#define 
E1000_TSIM 0x0B674 /* Interrupt Mask Register */ ++#define E1000_I210_FLSWCTL 0x12048 ++#define E1000_I210_FLSWDATA 0x1204C ++#define E1000_I210_FLSWCNT 0x12050 + +-/* Filtering Registers */ +-#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +-#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +-#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +-#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +-#define E1000_SAQF0 E1000_SAQF(0) +-#define E1000_DAQF0 E1000_DAQF(0) +-#define E1000_SPQF0 E1000_SPQF(0) +-#define E1000_FTQF0 E1000_FTQF(0) +-#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +-#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ ++#define E1000_I210_FLA 0x1201C + +-#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) ++#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) ++#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +-/* DMA Coalescing registers */ +-#define E1000_DMACR 0x02508 /* Control Register */ +-#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +-#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +-#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +-#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +-#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ ++/* QAV Tx mode control register */ ++#define E1000_I210_TQAVCTRL 0x3570 + +-/* TX Rate Limit Registers */ +-#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +-#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +-#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ ++/* QAV Tx mode control register bitfields masks */ ++/* QAV enable */ ++#define E1000_TQAVCTRL_MODE (1 << 0) ++/* Fetching arbitration type */ ++#define E1000_TQAVCTRL_FETCH_ARB (1 << 4) ++/* Fetching timer enable */ ++#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) ++/* Launch arbitration type */ ++#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) ++/* Launch timer enable */ ++#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) ++/* SP waits for SR enable */ ++#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) ++/* Fetching timer correction */ ++#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 ++#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ ++ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) ++ ++/* High credit registers where _n can be 0 or 1. */ ++#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) ++ ++/* Queues fetch arbitration priority control register */ ++#define E1000_I210_TQAVARBCTRL 0x3574 ++/* Queues priority masks where _n and _p can be 0-3. */ ++#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) ++/* QAV Tx mode control registers where _n can be 0 or 1. */ ++#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) ++ ++/* QAV Tx mode control register bitfields masks */ ++#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ ++#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ ++#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. 
SR Tx mode */ + +-/* Split and Replication RX Control - RW */ +-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ ++/* Good transmitted packets counter registers */ ++#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +-/* Thermal sensor configuration and status registers */ +-#define E1000_THMJT 0x08100 /* Junction Temperature */ +-#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +-#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +-#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +-#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ ++/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ ++#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) ++ ++#define E1000_MMDAC 13 /* MMD Access Control */ ++#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + + /* Convenience macros + * +@@ -157,269 +199,442 @@ + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +-#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ +- : (0x0C000 + ((_n) * 0x40))) +-#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ +- : (0x0C004 + ((_n) * 0x40))) +-#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ +- : (0x0C008 + ((_n) * 0x40))) +-#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ +- : (0x0C00C + ((_n) * 0x40))) +-#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ +- : (0x0C010 + ((_n) * 0x40))) +-#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ +- : (0x0C018 + ((_n) * 0x40))) +-#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ +- : (0x0C028 + ((_n) * 0x40))) +-#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ +- : (0x0E000 + ((_n) * 0x40))) +-#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ +- : (0x0E004 + ((_n) * 0x40))) +-#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ +- : (0x0E008 + ((_n) * 0x40))) +-#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ +- : (0x0E010 + ((_n) * 0x40))) +-#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ +- : (0x0E018 + ((_n) * 0x40))) +-#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ +- : (0x0E028 + ((_n) * 0x40))) +-#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ +- (0x0C014 + ((_n) * 0x40))) ++#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ ++ (0x0C000 + ((_n) * 0x40))) ++#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ ++ (0x0C004 + ((_n) * 0x40))) ++#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ ++ (0x0C008 + ((_n) * 0x40))) ++#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ ++ (0x0C00C + ((_n) * 0x40))) ++#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ ++ (0x0C010 + ((_n) * 0x40))) ++#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ ++ (0x0C014 + ((_n) * 0x40))) + #define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +-#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ +- (0x0E014 + ((_n) * 0x40))) ++#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ ++ (0x0C018 + ((_n) * 0x40))) ++#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ ++ (0x0C028 + ((_n) * 0x40))) ++#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ ++ (0x0C030 + ((_n) * 0x40))) ++#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ ++ (0x0E000 + ((_n) * 0x40))) ++#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ ++ (0x0E004 + ((_n) * 0x40))) ++#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ ++ (0x0E008 + ((_n) * 0x40))) ++#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ ++ (0x0E010 + ((_n) * 0x40))) ++#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ ++ (0x0E014 + ((_n) * 0x40))) + #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +-#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ +- : (0x0E038 + ((_n) * 0x40))) +-#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ +- : (0x0E03C + ((_n) * 0x40))) +- +-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +-#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ ++#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ ++ (0x0E018 + ((_n) * 0x40))) ++#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ ++ (0x0E028 + ((_n) * 0x40))) ++#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ ++ (0x0E038 + ((_n) * 0x40))) ++#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ ++ (0x0E03C + ((_n) * 0x40))) ++#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) ++#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ ++#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ ++#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ ++#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) ++#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ ++ (0x054E0 + ((_i - 16) * 8))) ++#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ ++ (0x054E4 + ((_i - 16) * 8))) ++#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) ++#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) ++#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) ++#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) ++#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) ++#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) ++#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) ++#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) ++#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ ++#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ ++#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ ++/* Same as TXPBS, renamed for newer Si - RW */ ++#define E1000_ITPBS 0x03404 ++#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ ++#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ ++#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ ++#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ ++#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ ++#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ ++#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ ++#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ ++#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ ++#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ ++#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ ++#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ ++#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ ++/* DMA Tx Max Total Allow Size Reqs - RW */ ++#define E1000_DTXMXSZRQ 0x03540 ++#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ ++#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ ++#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ ++#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ ++#define E1000_SYMERRS 
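The queue-indexed convenience macros above pick the register bank from a single queue number: queues 0-3 use the original base with a 0x100 stride, higher queues use the second base with a 0x40 stride. A small worked example, using the E1000_RDT() definition shown in this hunk:

    /* Illustrative expansion of the queue-indexed macros:
     *   E1000_RDT(2) == 0x02818 + 2 * 0x100 == 0x02A18
     *   E1000_RDT(5) == 0x0C018 + 5 * 0x40  == 0x0C158
     */
    u32 rdt_q2 = E1000_RDT(2);
    u32 rdt_q5 = E1000_RDT(5);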
0x04008 /* Symbol Error Count - R/clr */ ++#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ ++#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ ++#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ ++#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ ++#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ ++#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ ++#define E1000_COLC 0x04028 /* Collision Count - R/clr */ ++#define E1000_DC 0x04030 /* Defer Count - R/clr */ ++#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ ++#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ ++#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ ++#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ ++#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ ++#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ ++#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ ++#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ ++#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ ++#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ ++#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ ++#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ ++#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ ++#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ ++#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ ++#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ ++#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ ++#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ ++#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ ++#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ ++#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ ++#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ ++#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ ++#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ ++#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ ++#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ ++#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ ++#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ ++#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ ++#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ ++#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ ++#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ ++#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ ++#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ ++#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ ++#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ ++#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ ++#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ ++#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ ++#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ ++#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ ++#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ ++#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ ++#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ ++#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx 
Count - R/clr */ ++#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ ++#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ ++#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ ++#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ ++#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ ++#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ ++#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ ++#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ ++#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ ++#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ ++#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ ++ ++/* Virtualization statistical counters */ ++#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) ++#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) ++#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) ++#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) ++#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) ++#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) ++#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) ++#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) ++#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) ++ ++/* LinkSec */ ++#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ ++#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ ++#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ ++#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ ++#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ ++#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ ++#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ ++#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ ++#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ ++#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ ++#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ ++#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ ++#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ ++#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ ++#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ ++#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ ++#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ ++#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ ++#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ ++#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ ++#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ ++#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ ++#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ ++#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ ++#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ ++#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ ++#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ ++#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ ++#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ ++#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ ++/* LinkSec Tx 128-bit Key 0 - WO */ ++#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) ++/* LinkSec Tx 128-bit Key 1 - WO */ ++#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) ++#define 
E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ ++#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ ++/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit ++ * key - RW. ++ */ ++#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +-#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +-#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +-#define E1000_DC 0x04030 /* Defer Count - R/clr */ +-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +-#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +-#define E1000_TOTL 
0x040C8 /* Total Octets TX Low - R/clr */ +-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +-/* Interrupt Cause Rx Packet Timer Expire Count */ +-#define E1000_ICRXPTC 0x04104 +-/* Interrupt Cause Rx Absolute Timer Expire Count */ +-#define E1000_ICRXATC 0x04108 +-/* Interrupt Cause Tx Packet Timer Expire Count */ +-#define E1000_ICTXPTC 0x0410C +-/* Interrupt Cause Tx Absolute Timer Expire Count */ +-#define E1000_ICTXATC 0x04110 +-/* Interrupt Cause Tx Queue Empty Count */ +-#define E1000_ICTXQEC 0x04118 +-/* Interrupt Cause Tx Queue Minimum Threshold Count */ +-#define E1000_ICTXQMTC 0x0411C +-/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +-#define E1000_ICRXDMTC 0x04120 +-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +-#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +-#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +-#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +-#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +-#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +-#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +-#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +-#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +-#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +-#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +-#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +-#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +-#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +-#define E1000_LENERRS 0x04138 /* Length Errors Count */ +-#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +-#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +-#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +-#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +-#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +-#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +-#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +-#define E1000_RA 0x05400 /* Receive Address - RW Array */ +-#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +-#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +-#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ +- (0x054E0 + ((_i - 16) * 8))) +-#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ +- (0x054E4 + ((_i - 16) * 8))) +-#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +-#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +-#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +-#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +-#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +-#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +-#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +-#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +-#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +-#define E1000_MANC 0x05820 /* Management Control - RW */ +-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +- +-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +-#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +-#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +-#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +-#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +-#define E1000_SWSM 0x05B50 /* SW Semaphore */ +-#define E1000_FWSM 0x05B54 /* FW Semaphore */ +-#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ ++#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ ++#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ ++#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ ++#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ ++/* IPSec Rx IPv4/v6 Address - RW */ ++#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) ++/* IPSec Rx 128-bit Key - RW */ ++#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) ++#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ ++#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ ++/* IPSec Tx 128-bit Key - RW */ ++#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) ++#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ ++#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ ++#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ ++#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ ++#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ ++#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ ++#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ ++#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ ++#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ ++#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ ++#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ ++#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ ++#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ ++#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ ++#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ ++#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ ++#define E1000_LENERRS 0x04138 /* Length Errors Count */ ++#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ ++#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ ++#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ ++#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ ++#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ ++#define E1000_PCS_LPABNP 0x04224 /* 
Link Partner Ability Next Pg - RW */ ++#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ ++#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ ++#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ ++#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ ++#define E1000_RA 0x05400 /* Receive Address - RW Array */ ++#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ ++#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ ++#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ ++#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ ++#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ ++#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ ++#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ ++#define E1000_WUC 0x05800 /* Wakeup Control - RW */ ++#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ ++#define E1000_WUS 0x05810 /* Wakeup Status - RO */ ++#define E1000_MANC 0x05820 /* Management Control - RW */ ++#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ ++#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ ++#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ ++#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ ++#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ ++#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ ++#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ ++#define E1000_HOST_IF 0x08800 /* Host Interface */ ++#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ ++/* Flexible Host Filter Table */ ++#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) ++/* Ext Flexible Host Filter Table */ ++#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) ++ ++#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ ++#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ ++/* Management Decision Filters */ ++#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) ++#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ ++#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ ++#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ ++#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ ++#define E1000_GCR 0x05B00 /* PCI-Ex Control */ ++#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ ++#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ ++#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ ++#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ ++#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ ++#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ ++#define E1000_SWSM 0x05B50 /* SW Semaphore */ ++#define E1000_FWSM 0x05B54 /* FW Semaphore */ ++/* Driver-only SW semaphore (not used by BOOT agents) */ ++#define E1000_SWSM2 0x05B58 ++#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ ++#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ ++#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ ++#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ ++#define E1000_HICR 0x08F00 /* Host Interface Control */ ++#define E1000_FWSTS 0x08F0C /* FW Status */ + + /* RSS registers */ +-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +-#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +-#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +-#define E1000_IMIRVP 0x05AC0 /* Immediate 
Interrupt RX VLAN Priority - RW */ +-/* MSI-X Allocation Register (_i) - RW */ +-#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +-/* Redirection Table - RW Array */ +-#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +-#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ +- ++#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ ++#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ ++#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ ++#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ ++#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ ++#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ ++#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ ++#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ ++#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ ++#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ + /* VT Registers */ +-#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +-#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +-#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +-#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +-#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +-#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +-#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +-#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +-#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +-#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +-#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +-#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ ++#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ ++#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ ++#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ ++#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ ++#define E1000_VFRE 0x00C8C /* VF Receive Enables */ ++#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ ++#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ ++#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ ++#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ ++#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ ++#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ ++#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ ++#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ ++#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ ++#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ ++#define E1000_MDFB 0x03558 /* Malicious Driver free block */ ++#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ ++#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ ++#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ ++#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ ++#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ + /* These act per VF so an array friendly macro is used */ +-#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +-#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +-#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +-#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +-#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +- +-struct e1000_hw; +- +-u32 igb_rd32(struct e1000_hw *hw, u32 reg); +- +-/* write operations, indexed using DWORDS */ +-#define wr32(reg, val) \ +-do { \ +- u8 __iomem 
*hw_addr = ACCESS_ONCE((hw)->hw_addr); \ +- if (!E1000_REMOVED(hw_addr)) \ +- writel((val), &hw_addr[(reg)]); \ +-} while (0) +- +-#define rd32(reg) (igb_rd32(hw, reg)) ++#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) ++#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) ++#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) ++#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) ++#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) ++/* VLAN Virtual Machine Filter - RW */ ++#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) ++#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) ++#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ ++#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ ++#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ ++#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ ++#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ ++#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ ++#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ ++#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ ++#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ ++#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ ++#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ ++#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ ++#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ ++#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ ++#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ ++#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ ++#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ ++#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ ++#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ ++#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +-#define wrfl() ((void)rd32(E1000_STATUS)) +- +-#define array_wr32(reg, offset, value) \ +- wr32((reg) + ((offset) << 2), (value)) +- +-#define array_rd32(reg, offset) \ +- (readl(hw->hw_addr + reg + ((offset) << 2))) ++/* Filtering Registers */ ++#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ ++#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ ++#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ ++#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ ++#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ ++#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ ++#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ ++ ++#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ ++#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ ++#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ ++#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ ++#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ ++/* Tx Desc plane TC Rate-scheduler config */ ++#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) ++/* Tx Packet plane TC Rate-Scheduler Config */ ++#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) ++/* Rx Packet plane TC Rate-Scheduler Config */ ++#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) ++/* Tx Desc Plane TC Rate-Scheduler Status */ ++#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) ++/* Tx Desc Plane TC Rate-Scheduler MMW 
*/ ++#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) ++/* Tx Packet plane TC Rate-Scheduler Status */ ++#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) ++/* Tx Packet plane TC Rate-scheduler MMW */ ++#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) ++/* Rx Packet plane TC Rate-Scheduler Status */ ++#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) ++/* Rx Packet plane TC Rate-Scheduler MMW */ ++#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) ++/* Tx Desc plane VM Rate-Scheduler MMW*/ ++#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) ++/* Tx BCN Rate-Scheduler MMW */ ++#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) ++#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ ++#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ ++#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ ++#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ ++#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ ++#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ ++#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ ++#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ ++#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ ++#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ ++#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ ++#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ ++#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ ++#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + + /* DMA Coalescing registers */ ++#define E1000_DMACR 0x02508 /* Control Register */ ++#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ ++#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ ++#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ ++#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ ++#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ + #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +-/* Energy Efficient Ethernet "EEE" register */ +-#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +-#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +-#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +-#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +-#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +-#define E1000_MMDAC 13 /* MMD Access Control */ +-#define E1000_MMDAAD 14 /* MMD Access Address/Data */ ++/* PCIe Parity Status Register */ ++#define E1000_PCIEERRSTS 0x05BA8 + +-/* Thermal Sensor Register */ ++#define E1000_PROXYS 0x5F64 /* Proxying Status */ ++#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ ++/* Thermal sensor configuration and status registers */ ++#define E1000_THMJT 0x08100 /* Junction Temperature */ ++#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ ++#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ ++#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ + #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + ++/* Energy Efficient Ethernet "EEE" registers */ ++#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ ++#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ ++#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ ++#define E1000_EEE_SU 0x0E34 /* EEE Setup */ ++#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ ++#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ ++ + /* OS2BMC Registers */ + #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ + #define E1000_B2OGPRC 
0x04158 /* BMC2OS packets received by host */ + #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ + #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +-#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +-#define E1000_I210_FLMNGCTL 0x12038 +-#define E1000_I210_FLMNGDATA 0x1203C +-#define E1000_I210_FLMNGCNT 0x12040 +- +-#define E1000_I210_FLSWCTL 0x12048 +-#define E1000_I210_FLSWDATA 0x1204C +-#define E1000_I210_FLSWCNT 0x12050 +- +-#define E1000_I210_FLA 0x1201C +- +-#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +-#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ +- +-#define E1000_REMOVED(h) unlikely(!(h)) +- + #endif +diff -Nu a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h +--- a/drivers/net/ethernet/intel/igb/igb.h 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb.h 2016-11-14 14:32:08.579567168 +0000 +@@ -1,107 +1,149 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + /* Linux PRO/1000 Ethernet Driver main header file */ + + #ifndef _IGB_H_ + #define _IGB_H_ + +-#include "e1000_mac.h" ++#include ++ ++#ifndef IGB_NO_LRO ++#include ++#endif ++ ++#include ++#include ++#include ++ ++#ifdef SIOCETHTOOL ++#include ++#endif ++ ++struct igb_adapter; ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++#define IGB_DCA ++#endif ++#ifdef IGB_DCA ++#include ++#endif ++ ++#include "kcompat.h" ++ ++#ifdef HAVE_SCTP ++#include ++#endif ++ ++#include "e1000_api.h" + #include "e1000_82575.h" ++#include "e1000_manage.h" ++#include "e1000_mbx.h" ++ ++#define IGB_ERR(args...) 
pr_err(KERN_ERR "igb: " args) + ++#define PFX "igb: " ++#define DPRINTK(nlevel, klevel, fmt, args...) \ ++ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ ++ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ ++ __func__ , ## args)) ++ ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#include ++#else + #include ++#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ + #include + #include +-#include +-#include ++#endif /* HAVE_PTP_1588_CLOCK */ ++ ++#ifdef HAVE_I2C_SUPPORT + #include + #include +-#include +-#include +- +-struct igb_adapter; +- +-#define E1000_PCS_CFG_IGN_SD 1 ++#endif /* HAVE_I2C_SUPPORT */ + + /* Interrupt defines */ +-#define IGB_START_ITR 648 /* ~6000 ints/sec */ +-#define IGB_4K_ITR 980 +-#define IGB_20K_ITR 196 +-#define IGB_70K_ITR 56 ++#define IGB_START_ITR 648 /* ~6000 ints/sec */ ++#define IGB_4K_ITR 980 ++#define IGB_20K_ITR 196 ++#define IGB_70K_ITR 56 ++ ++/* Interrupt modes, as used by the IntMode paramter */ ++#define IGB_INT_MODE_LEGACY 0 ++#define IGB_INT_MODE_MSI 1 ++#define IGB_INT_MODE_MSIX 2 + + /* TX/RX descriptor defines */ +-#define IGB_DEFAULT_TXD 256 +-#define IGB_DEFAULT_TX_WORK 128 +-#define IGB_MIN_TXD 80 +-#define IGB_MAX_TXD 4096 +- +-#define IGB_DEFAULT_RXD 256 +-#define IGB_MIN_RXD 80 +-#define IGB_MAX_RXD 4096 +- +-#define IGB_DEFAULT_ITR 3 /* dynamic */ +-#define IGB_MAX_ITR_USECS 10000 +-#define IGB_MIN_ITR_USECS 10 +-#define NON_Q_VECTORS 1 +-#define MAX_Q_VECTORS 8 +-#define MAX_MSIX_ENTRIES 10 ++#define IGB_DEFAULT_TXD 256 ++#define IGB_DEFAULT_TX_WORK 128 ++#define IGB_MIN_TXD 80 ++#define IGB_MAX_TXD 4096 ++ ++#define IGB_DEFAULT_RXD 256 ++#define IGB_MIN_RXD 80 ++#define IGB_MAX_RXD 4096 ++ ++#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */ ++#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */ ++ ++#define NON_Q_VECTORS 1 ++#define MAX_Q_VECTORS 10 + + /* Transmit and receive queues */ +-#define IGB_MAX_RX_QUEUES 8 +-#define IGB_MAX_RX_QUEUES_82575 4 +-#define IGB_MAX_RX_QUEUES_I211 2 +-#define IGB_MAX_TX_QUEUES 8 +-#define IGB_MAX_VF_MC_ENTRIES 30 +-#define IGB_MAX_VF_FUNCTIONS 8 +-#define IGB_MAX_VFTA_ENTRIES 128 +-#define IGB_82576_VF_DEV_ID 0x10CA +-#define IGB_I350_VF_DEV_ID 0x1520 +- +-/* NVM version defines */ +-#define IGB_MAJOR_MASK 0xF000 +-#define IGB_MINOR_MASK 0x0FF0 +-#define IGB_BUILD_MASK 0x000F +-#define IGB_COMB_VER_MASK 0x00FF +-#define IGB_MAJOR_SHIFT 12 +-#define IGB_MINOR_SHIFT 4 +-#define IGB_COMB_VER_SHFT 8 +-#define IGB_NVM_VER_INVALID 0xFFFF +-#define IGB_ETRACK_SHIFT 16 +-#define NVM_ETRACK_WORD 0x0042 +-#define NVM_COMB_VER_OFF 0x0083 +-#define NVM_COMB_VER_PTR 0x003d ++#define IGB_MAX_RX_QUEUES 16 ++#define IGB_MAX_RX_QUEUES_82575 4 ++#define IGB_MAX_RX_QUEUES_I211 2 ++#define IGB_MAX_TX_QUEUES 16 ++ ++#define IGB_MAX_VF_MC_ENTRIES 30 ++#define IGB_MAX_VF_FUNCTIONS 8 ++#define IGB_82576_VF_DEV_ID 0x10CA ++#define IGB_I350_VF_DEV_ID 0x1520 ++#define IGB_MAX_UTA_ENTRIES 128 ++#define MAX_EMULATION_MAC_ADDRS 16 ++#define OUI_LEN 3 ++#define IGB_MAX_VMDQ_QUEUES 8 + + struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; ++ u16 default_vf_vlan_id; + u16 vlans_enabled; ++ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN]; ++ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES]; + u32 flags; + unsigned long last_nack; ++#ifdef IFLA_VF_MAX + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 tx_rate; ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + bool spoofchk_enabled; ++#endif ++#endif + }; + + #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +@@ -125,31 +167,97 @@ + #define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) + #define IGB_TX_HTHRESH 1 + #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ +- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ +- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) ++ adapter->msix_entries) ? 1 : 4) + + /* this is the size past which hardware will drop packets when setting LPE=0 */ + #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + ++/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we ++ * reserve 2 more, and skb_shared_info adds an additional 384 more, ++ * this adds roughly 448 bytes of extra data meaning the smallest ++ * allocation we could have is 1K. ++ * i.e. RXBUFFER_512 --> size-1024 slab ++ */ + /* Supported Rx Buffer Sizes */ +-#define IGB_RXBUFFER_256 256 +-#define IGB_RXBUFFER_2048 2048 +-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 ++#define IGB_RXBUFFER_256 256 ++#define IGB_RXBUFFER_2048 2048 ++#define IGB_RXBUFFER_16384 16384 ++#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 ++#if MAX_SKB_FRAGS < 8 ++#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024) ++#else ++#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 ++#endif ++ ++ ++/* Packet Buffer allocations */ ++#define IGB_PBA_BYTES_SHIFT 0xA ++#define IGB_TX_HEAD_ADDR_SHIFT 7 ++#define IGB_PBA_TX_MASK 0xFFFF0000 ++ ++#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */ + + /* How many Rx Buffers do we bundle into one write to the hardware ? */ +-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ ++#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +-#define AUTO_ALL_MODES 0 +-#define IGB_EEPROM_APME 0x0400 ++#define IGB_EEPROM_APME 0x0400 ++#define AUTO_ALL_MODES 0 + + #ifndef IGB_MASTER_SLAVE + /* Switch to override PHY master/slave setting */ + #define IGB_MASTER_SLAVE e1000_ms_hw_default + #endif + +-#define IGB_MNG_VLAN_NONE -1 ++#define IGB_MNG_VLAN_NONE -1 ++ ++#ifndef IGB_NO_LRO ++#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/ ++struct igb_lro_stats { ++ u32 flushed; ++ u32 coal; ++}; ++ ++/* ++ * igb_lro_header - header format to be aggregated by LRO ++ * @iph: IP header without options ++ * @tcp: TCP header ++ * @ts: Optional TCP timestamp data in TCP options ++ * ++ * This structure relies on the check above that verifies that the header ++ * is IPv4 and does not contain any options. 
++ */ ++struct igb_lrohdr { ++ struct iphdr iph; ++ struct tcphdr th; ++ __be32 ts[0]; ++}; ++ ++struct igb_lro_list { ++ struct sk_buff_head active; ++ struct igb_lro_stats stats; ++}; ++ ++#endif /* IGB_NO_LRO */ ++struct igb_cb { ++#ifndef IGB_NO_LRO ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ union { /* Union defining head/tail partner */ ++ struct sk_buff *head; ++ struct sk_buff *tail; ++ }; ++#endif ++ __be32 tsecr; /* timestamp echo response */ ++ u32 tsval; /* timestamp value in host order */ ++ u32 next_seq; /* next expected sequence number */ ++ u16 free; /* 65521 minus total size */ ++ u16 mss; /* size of data portion of packet */ ++ u16 append_cnt; /* number of skb's appended */ ++#endif /* IGB_NO_LRO */ ++#ifdef HAVE_VLAN_RX_REGISTER ++ u16 vid; /* VLAN tag */ ++#endif ++}; ++#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb) + + enum igb_tx_flags { + /* cmd_type flags */ +@@ -163,30 +271,28 @@ + }; + + /* VLAN info */ +-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +-#define IGB_TX_FLAGS_VLAN_SHIFT 16 ++#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 ++#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +-/* The largest size we can write to the descriptor is 65535. In order to ++/* ++ * The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. + */ +-#define IGB_MAX_TXD_PWR 15 ++#define IGB_MAX_TXD_PWR 15 + #define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + + /* Tx Descriptors needed, worst case */ +-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +-#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +- +-/* EEPROM byte offsets */ +-#define IGB_SFF_8472_SWAP 0x5C +-#define IGB_SFF_8472_COMP 0x5E +- +-/* Bitmasks */ +-#define IGB_SFF_ADDRESSING_MODE 0x4 +-#define IGB_SFF_8472_UNSUP 0x00 ++#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) ++#ifndef MAX_SKB_FRAGS ++#define DESC_NEEDED 4 ++#elif (MAX_SKB_FRAGS < 16) ++#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) ++#else ++#define DESC_NEEDED (MAX_SKB_FRAGS + 4) ++#endif + + /* wrapper around a pointer to a socket buffer, +- * so a DMA handle can be stored along with the buffer +- */ ++ * so a DMA handle can be stored along with the buffer */ + struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; +@@ -202,15 +308,18 @@ + + struct igb_rx_buffer { + dma_addr_t dma; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ struct sk_buff *skb; ++#else + struct page *page; +- unsigned int page_offset; ++ u32 page_offset; ++#endif + }; + + struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; +- u64 restart_queue2; + }; + + struct igb_rx_queue_stats { +@@ -221,6 +330,18 @@ + u64 alloc_failed; + }; + ++struct igb_rx_packet_stats { ++ u64 ipv4_packets; /* IPv4 headers processed */ ++ u64 ipv4e_packets; /* IPv4E headers with extensions processed */ ++ u64 ipv6_packets; /* IPv6 headers processed */ ++ u64 ipv6e_packets; /* IPv6E headers with extensions processed */ ++ u64 tcp_packets; /* TCP headers processed */ ++ u64 udp_packets; /* UDP headers processed */ ++ u64 sctp_packets; /* SCTP headers processed */ ++ u64 nfs_packets; /* NFS headers processe */ ++ u64 other_packets; ++}; ++ + struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ +@@ -231,22 +352,22 @@ + }; + + struct igb_ring { +- struct igb_q_vector *q_vector; /* backlink to q_vector */ +- struct net_device *netdev; /* back 
pointer to net_device */ +- struct device *dev; /* device pointer for dma mapping */ ++ struct igb_q_vector *q_vector; /* backlink to q_vector */ ++ struct net_device *netdev; /* back pointer to net_device */ ++ struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; +- void *desc; /* descriptor ring memory */ +- unsigned long flags; /* ring specific flags */ +- void __iomem *tail; /* pointer to ring tail register */ ++ void *desc; /* descriptor ring memory */ ++ unsigned long flags; /* ring specific flags */ ++ void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ +- unsigned int size; /* length of desc. ring in bytes */ ++ unsigned int size; /* length of desc. ring in bytes */ + +- u16 count; /* number of desc. in the ring */ +- u8 queue_index; /* logical index of the ring*/ +- u8 reg_idx; /* physical index of the ring */ ++ u16 count; /* number of desc. in the ring */ ++ u8 queue_index; /* logical index of the ring*/ ++ u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; +@@ -257,16 +378,22 @@ + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; +- struct u64_stats_sync tx_syncp; +- struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { +- struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; +- struct u64_stats_sync rx_syncp; ++ struct igb_rx_packet_stats pkt_stats; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ u16 rx_buffer_len; ++#else ++ struct sk_buff *skb; ++#endif + }; + }; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ struct net_device *vmdq_netdev; ++ int vqueue_index; /* queue index for virtual netdev */ ++#endif + } ____cacheline_internodealigned_in_smp; + + struct igb_q_vector { +@@ -281,29 +408,57 @@ + struct igb_ring_container rx, tx; + + struct napi_struct napi; ++#ifndef IGB_NO_LRO ++ struct igb_lro_list lrolist; /* LRO list for queue vector*/ ++#endif + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; ++#ifndef HAVE_NETDEV_NAPI_LIST ++ struct net_device poll_dev; ++#endif + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; + }; + + enum e1000_ring_flags_t { ++#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES) ++ IGB_RING_FLAG_RX_CSUM, ++#endif + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, +- IGB_RING_FLAG_TX_DETECT_HANG ++ IGB_RING_FLAG_TX_DETECT_HANG, + }; + ++struct igb_mac_addr { ++ u8 addr[ETH_ALEN]; ++ u16 queue; ++ u16 state; /* bitmask */ ++}; ++#define IGB_MAC_STATE_DEFAULT 0x1 ++#define IGB_MAC_STATE_MODIFIED 0x2 ++#define IGB_MAC_STATE_IN_USE 0x4 ++ + #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +-#define IGB_RX_DESC(R, i) \ ++#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +-#define IGB_TX_DESC(R, i) \ ++#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +-#define IGB_TX_CTXTDESC(R, i) \ ++#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++#define netdev_ring(ring) \ ++ ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev)) ++#define ring_queue_index(ring) \ ++ ((ring->vmdq_netdev ? 
ring->vqueue_index : ring->queue_index)) ++#else ++#define netdev_ring(ring) (ring->netdev) ++#define ring_queue_index(ring) (ring->queue_index) ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ + /* igb_test_staterr - tests bits within Rx descriptor status and error fields */ + static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +@@ -312,16 +467,27 @@ + } + + /* igb_desc_unused - calculate if we have unused descriptors */ +-static inline int igb_desc_unused(struct igb_ring *ring) ++static inline u16 igb_desc_unused(const struct igb_ring *ring) + { +- if (ring->next_to_clean > ring->next_to_use) +- return ring->next_to_clean - ring->next_to_use - 1; ++ u16 ntc = ring->next_to_clean; ++ u16 ntu = ring->next_to_use; + +- return ring->count + ring->next_to_clean - ring->next_to_use - 1; ++ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; + } + +-#ifdef CONFIG_IGB_HWMON ++#ifdef CONFIG_BQL ++static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) ++{ ++ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); ++} ++#endif /* CONFIG_BQL */ + ++struct igb_therm_proc_data { ++ struct e1000_hw *hw; ++ struct e1000_thermal_diode_data *sensor_data; ++}; ++ ++#ifdef IGB_HWMON + #define IGB_HWMON_TYPE_LOC 0 + #define IGB_HWMON_TYPE_TEMP 1 + #define IGB_HWMON_TYPE_CAUTION 2 +@@ -335,69 +501,79 @@ + }; + + struct hwmon_buff { +- struct attribute_group group; +- const struct attribute_group *groups[2]; +- struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; +- struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; ++ struct device *device; ++ struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; + }; +-#endif +- ++#endif /* IGB_HWMON */ ++#ifdef ETHTOOL_GRXFHINDIR + #define IGB_RETA_SIZE 128 ++#endif /* ETHTOOL_GRXFHINDIR */ + + /* board specific private data structure */ + struct igb_adapter { ++#ifdef HAVE_VLAN_RX_REGISTER ++ /* vlgrp must be first member of structure */ ++ struct vlan_group *vlgrp; ++#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +- ++#endif + struct net_device *netdev; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; +- struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; ++ struct msix_entry *msix_entries; + +- /* Interrupt Throttle Rate */ +- u32 rx_itr_setting; +- u32 tx_itr_setting; +- u16 tx_itr; +- u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; +- struct igb_ring *tx_ring[16]; ++ struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; +- struct igb_ring *rx_ring[16]; +- +- u32 max_frame_size; +- u32 min_frame_size; ++ struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; ++ struct timer_list dma_err_timer; + struct timer_list phy_info_timer; +- + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; ++ u8 port_num; ++ ++ u8 __iomem *io_addr; /* for iounmap */ ++ ++ /* Interrupt Throttle Rate */ ++ u32 rx_itr_setting; ++ u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; ++ struct work_struct dma_err_task; + bool fc_autoneg; + u8 tx_timeout_factor; +- struct timer_list blink_timer; +- unsigned long led_status; ++ ++#ifdef DEBUG ++ bool tx_hang_detected; ++ bool disable_hw_reset; ++#endif ++ u32 max_frame_size; + + /* OS defined structs */ + struct pci_dev *pdev; +- +- spinlock_t stats64_lock; +- struct rtnl_link_stats64 stats64; ++#ifndef HAVE_NETDEV_STATS_IN_NETDEV ++ struct 
net_device_stats net_stats; ++#endif ++#ifndef IGB_NO_LRO ++ struct igb_lro_stats lro_stats; ++#endif + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; +@@ -405,9 +581,11 @@ + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + ++#ifdef ETHTOOL_TEST + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; ++#endif + + int msg_enable; + +@@ -416,15 +594,48 @@ + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ ++ u32 *config_space; + u16 tx_ring_count; + u16 rx_ring_count; +- unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; ++#ifdef IFLA_VF_MAX + int vf_rate_link_speed; ++#endif ++ u32 lli_port; ++ u32 lli_size; ++ unsigned int vfs_allocated_count; ++ /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */ ++ bool mdd; ++ int int_mode; + u32 rss_queues; ++ u32 tss_queues; ++ u32 vmdq_pools; ++ char fw_version[32]; + u32 wvbr; ++ struct igb_mac_addr *mac_table; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES]; ++#endif ++ int vferr_refcount; ++ int dmac; + u32 *shadow_vfta; + ++ /* External Thermal Sensor support flag */ ++ bool ets; ++#ifdef IGB_HWMON ++ struct hwmon_buff igb_hwmon_buff; ++#else /* IGB_HWMON */ ++#ifdef IGB_PROCFS ++ struct proc_dir_entry *eth_dir; ++ struct proc_dir_entry *info_dir; ++ struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS]; ++ struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS]; ++ bool old_lsc; ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ ++ u32 etrack_id; ++ ++#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; +@@ -439,39 +650,57 @@ + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; ++#endif /* HAVE_PTP_1588_CLOCK */ + +- char fw_version[32]; +-#ifdef CONFIG_IGB_HWMON +- struct hwmon_buff *igb_hwmon_buff; +- bool ets; +-#endif ++#ifdef HAVE_I2C_SUPPORT + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; +- u32 rss_indir_tbl_init; +- u8 rss_indir_tbl[IGB_RETA_SIZE]; +- ++#endif /* HAVE_I2C_SUPPORT */ + unsigned long link_check_timeout; ++ ++ int devrc; ++ + int copper_tries; +- struct e1000_info ei; + u16 eee_advert; ++#ifdef ETHTOOL_GRXFHINDIR ++ u32 rss_indir_tbl_init; ++ u8 rss_indir_tbl[IGB_RETA_SIZE]; ++#endif ++}; ++ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++struct igb_vmdq_adapter { ++#ifdef HAVE_VLAN_RX_REGISTER ++ /* vlgrp must be first member of structure */ ++ struct vlan_group *vlgrp; ++#else ++ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; ++#endif ++ struct igb_adapter *real_adapter; ++ struct net_device *vnetdev; ++ struct net_device_stats net_stats; ++ struct igb_ring *tx_ring; ++ struct igb_ring *rx_ring; + }; ++#endif + + #define IGB_FLAG_HAS_MSI (1 << 0) + #define IGB_FLAG_DCA_ENABLED (1 << 1) +-#define IGB_FLAG_QUAD_PORT_A (1 << 2) +-#define IGB_FLAG_QUEUE_PAIRS (1 << 3) +-#define IGB_FLAG_DMAC (1 << 4) +-#define IGB_FLAG_PTP (1 << 5) +-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) +-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) +-#define IGB_FLAG_WOL_SUPPORTED (1 << 8) +-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) +-#define IGB_FLAG_MEDIA_RESET (1 << 10) +-#define IGB_FLAG_MAS_CAPABLE (1 << 11) +-#define IGB_FLAG_MAS_ENABLE (1 << 12) +-#define IGB_FLAG_HAS_MSIX (1 << 13) +-#define IGB_FLAG_EEE (1 << 14) ++#define IGB_FLAG_LLI_PUSH (1 << 2) ++#define IGB_FLAG_QUAD_PORT_A (1 << 3) ++#define IGB_FLAG_QUEUE_PAIRS (1 
<< 4) ++#define IGB_FLAG_EEE (1 << 5) ++#define IGB_FLAG_DMAC (1 << 6) ++#define IGB_FLAG_DETECT_BAD_DMA (1 << 7) ++#define IGB_FLAG_PTP (1 << 8) ++#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9) ++#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10) ++#define IGB_FLAG_WOL_SUPPORTED (1 << 11) ++#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12) ++#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13) ++#define IGB_FLAG_MEDIA_RESET (1 << 14) ++#define IGB_FLAG_MAS_ENABLE (1 << 15) + + /* Media Auto Sense */ + #define IGB_MAS_ENABLE_0 0X0001 +@@ -479,13 +708,63 @@ + #define IGB_MAS_ENABLE_2 0X0004 + #define IGB_MAS_ENABLE_3 0X0008 + ++#define IGB_MIN_TXPBSIZE 20408 ++#define IGB_TX_BUF_4096 4096 ++ ++#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ ++ + /* DMA Coalescing defines */ +-#define IGB_MIN_TXPBSIZE 20408 +-#define IGB_TX_BUF_4096 4096 +-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ ++#define IGB_DMAC_DISABLE 0 ++#define IGB_DMAC_MIN 250 ++#define IGB_DMAC_500 500 ++#define IGB_DMAC_EN_DEFAULT 1000 ++#define IGB_DMAC_2000 2000 ++#define IGB_DMAC_3000 3000 ++#define IGB_DMAC_4000 4000 ++#define IGB_DMAC_5000 5000 ++#define IGB_DMAC_6000 6000 ++#define IGB_DMAC_7000 7000 ++#define IGB_DMAC_8000 8000 ++#define IGB_DMAC_9000 9000 ++#define IGB_DMAC_MAX 10000 ++ ++#define IGB_82576_TSYNC_SHIFT 19 ++#define IGB_82580_TSYNC_SHIFT 24 ++#define IGB_TS_HDR_LEN 16 ++ ++/* CEM Support */ ++#define FW_HDR_LEN 0x4 ++#define FW_CMD_DRV_INFO 0xDD ++#define FW_CMD_DRV_INFO_LEN 0x5 ++#define FW_CMD_RESERVED 0X0 ++#define FW_RESP_SUCCESS 0x1 ++#define FW_UNUSED_VER 0x0 ++#define FW_MAX_RETRIES 3 ++#define FW_STATUS_SUCCESS 0x1 ++#define FW_FAMILY_DRV_VER 0Xffffffff ++ ++#define IGB_MAX_LINK_TRIES 20 ++ ++struct e1000_fw_hdr { ++ u8 cmd; ++ u8 buf_len; ++ union { ++ u8 cmd_resv; ++ u8 ret_status; ++ } cmd_or_resp; ++ u8 checksum; ++}; ++ ++#pragma pack(push, 1) ++struct e1000_fw_drv_info { ++ struct e1000_fw_hdr hdr; ++ u8 port_num; ++ u32 drv_version; ++ u16 pad; /* end spacing to ensure length is mult. of dword */ ++ u8 pad2; /* end spacing to ensure length is mult. 
of dword2 */ ++}; ++#pragma pack(pop) + +-#define IGB_82576_TSYNC_SHIFT 19 +-#define IGB_TS_HDR_LEN 16 + enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, +@@ -493,85 +772,82 @@ + __IGB_PTP_TX_IN_PROGRESS, + }; + +-enum igb_boards { +- board_82575, +-}; +- + extern char igb_driver_name[]; + extern char igb_driver_version[]; + +-int igb_up(struct igb_adapter *); +-void igb_down(struct igb_adapter *); +-void igb_reinit_locked(struct igb_adapter *); +-void igb_reset(struct igb_adapter *); +-int igb_reinit_queues(struct igb_adapter *); +-void igb_write_rss_indir_tbl(struct igb_adapter *); +-int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +-int igb_setup_tx_resources(struct igb_ring *); +-int igb_setup_rx_resources(struct igb_ring *); +-void igb_free_tx_resources(struct igb_ring *); +-void igb_free_rx_resources(struct igb_ring *); +-void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +-void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +-void igb_setup_tctl(struct igb_adapter *); +-void igb_setup_rctl(struct igb_adapter *); +-netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); +-void igb_alloc_rx_buffers(struct igb_ring *, u16); +-void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); +-bool igb_has_link(struct igb_adapter *adapter); +-void igb_set_ethtool_ops(struct net_device *); +-void igb_power_up_link(struct igb_adapter *); +-void igb_set_fw_version(struct igb_adapter *); +-void igb_ptp_init(struct igb_adapter *adapter); +-void igb_ptp_stop(struct igb_adapter *adapter); +-void igb_ptp_reset(struct igb_adapter *adapter); +-void igb_ptp_rx_hang(struct igb_adapter *adapter); +-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, +- struct sk_buff *skb); ++extern int igb_open(struct net_device *netdev); ++extern int igb_close(struct net_device *netdev); ++extern int igb_up(struct igb_adapter *); ++extern void igb_down(struct igb_adapter *); ++extern void igb_reinit_locked(struct igb_adapter *); ++extern void igb_reset(struct igb_adapter *); ++extern int igb_reinit_queues(struct igb_adapter *); ++#ifdef ETHTOOL_SRXFHINDIR ++extern void igb_write_rss_indir_tbl(struct igb_adapter *); ++#endif ++extern int igb_set_spd_dplx(struct igb_adapter *, u16); ++extern int igb_setup_tx_resources(struct igb_ring *); ++extern int igb_setup_rx_resources(struct igb_ring *); ++extern void igb_free_tx_resources(struct igb_ring *); ++extern void igb_free_rx_resources(struct igb_ring *); ++extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); ++extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); ++extern void igb_setup_tctl(struct igb_adapter *); ++extern void igb_setup_rctl(struct igb_adapter *); ++extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); ++extern void igb_unmap_and_free_tx_resource(struct igb_ring *, ++ struct igb_tx_buffer *); ++extern void igb_alloc_rx_buffers(struct igb_ring *, u16); ++extern void igb_clean_rx_ring(struct igb_ring *); ++extern int igb_setup_queues(struct igb_adapter *adapter); ++extern void igb_update_stats(struct igb_adapter *); ++extern bool igb_has_link(struct igb_adapter *adapter); ++extern void igb_set_ethtool_ops(struct net_device *); ++extern void igb_check_options(struct igb_adapter *); ++extern void igb_power_up_link(struct igb_adapter *); 
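/*
 * A minimal standalone C sketch of the padding arithmetic behind the packed
 * e1000_fw_drv_info structure defined above.  The *_sketch names below are
 * stand-ins for illustration only, assuming the field sizes shown in the
 * patch: a 4-byte command header (FW_HDR_LEN), one port byte, a 32-bit driver
 * version, and the two "end spacing" pad fields.  That totals 12 bytes, a
 * whole number of dwords, which is what the pad comments are there to
 * guarantee.
 */
#include <stdint.h>

struct fw_hdr_sketch {			/* mirrors e1000_fw_hdr: four u8 fields */
	uint8_t cmd;
	uint8_t buf_len;
	uint8_t cmd_or_resp;		/* stands in for the cmd_resv/ret_status union */
	uint8_t checksum;
};

#pragma pack(push, 1)
struct fw_drv_info_sketch {		/* mirrors e1000_fw_drv_info when packed */
	struct fw_hdr_sketch hdr;	/* 4 bytes (FW_HDR_LEN)       */
	uint8_t  port_num;		/* 1 byte                     */
	uint32_t drv_version;		/* 4 bytes                    */
	uint16_t pad;			/* 2 bytes of end spacing     */
	uint8_t  pad2;			/* 1 byte of end spacing      */
};
#pragma pack(pop)

/* 4 + 1 + 4 + 2 + 1 = 12 bytes: three dwords.  The payload that follows the
 * header (port_num + drv_version) is the 5 bytes advertised as
 * FW_CMD_DRV_INFO_LEN.  pack(1) keeps the compiler from adding its own
 * padding, so the block handed to firmware is exactly these 12 bytes. */
_Static_assert(sizeof(struct fw_drv_info_sketch) == 12, "expect 12-byte block");
_Static_assert(sizeof(struct fw_drv_info_sketch) % 4 == 0, "dword multiple");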
++#ifdef HAVE_PTP_1588_CLOCK ++extern void igb_ptp_init(struct igb_adapter *adapter); ++extern void igb_ptp_stop(struct igb_adapter *adapter); ++extern void igb_ptp_reset(struct igb_adapter *adapter); ++extern void igb_ptp_tx_work(struct work_struct *work); ++extern void igb_ptp_rx_hang(struct igb_adapter *adapter); ++extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); ++extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, ++ struct sk_buff *skb); ++extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, ++ unsigned char *va, ++ struct sk_buff *skb); ++extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, ++ struct ifreq *ifr, int cmd); ++#endif /* HAVE_PTP_1588_CLOCK */ ++#ifdef ETHTOOL_OPS_COMPAT ++extern int ethtool_ioctl(struct ifreq *); ++#endif ++extern int igb_write_mc_addr_list(struct net_device *netdev); ++extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); ++extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); ++extern int igb_available_rars(struct igb_adapter *adapter); ++extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32); ++extern void igb_configure_vt_default_pool(struct igb_adapter *adapter); ++extern void igb_enable_vlan_tags(struct igb_adapter *adapter); ++#ifndef HAVE_VLAN_RX_REGISTER ++extern void igb_vlan_mode(struct net_device *, u32); ++#endif ++ ++#define E1000_PCS_CFG_IGN_SD 1 ++ + int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); + int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +-void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +-#ifdef CONFIG_IGB_HWMON ++#ifdef IGB_HWMON + void igb_sysfs_exit(struct igb_adapter *adapter); + int igb_sysfs_init(struct igb_adapter *adapter); +-#endif +-static inline s32 igb_reset_phy(struct e1000_hw *hw) +-{ +- if (hw->phy.ops.reset) +- return hw->phy.ops.reset(hw); +- +- return 0; +-} +- +-static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +-{ +- if (hw->phy.ops.read_reg) +- return hw->phy.ops.read_reg(hw, offset, data); +- +- return 0; +-} +- +-static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +-{ +- if (hw->phy.ops.write_reg) +- return hw->phy.ops.write_reg(hw, offset, data); +- +- return 0; +-} +- +-static inline s32 igb_get_phy_info(struct e1000_hw *hw) +-{ +- if (hw->phy.ops.get_phy_info) +- return hw->phy.ops.get_phy_info(hw); +- +- return 0; +-} +- +-static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +-{ +- return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +-} ++#else ++#ifdef IGB_PROCFS ++int igb_procfs_init(struct igb_adapter *adapter); ++void igb_procfs_exit(struct igb_adapter *adapter); ++int igb_procfs_topdir_init(void); ++void igb_procfs_topdir_exit(void); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + + #endif /* _IGB_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_debugfs.c b/drivers/net/ethernet/intel/igb/igb_debugfs.c +--- a/drivers/net/ethernet/intel/igb/igb_debugfs.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_debugfs.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,26 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "igb.h" ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,43 +1,50 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + /* ethtool support for igb */ + +-#include + #include +-#include +-#include +-#include +-#include ++#include ++ ++#ifdef SIOCETHTOOL + #include +-#include +-#include ++#ifdef CONFIG_PM_RUNTIME + #include ++#endif /* CONFIG_PM_RUNTIME */ + #include +-#include + + #include "igb.h" ++#include "igb_regtest.h" ++#include ++#ifdef ETHTOOL_GEEE ++#include ++#endif + ++#ifdef ETHTOOL_OPS_COMPAT ++#include "kcompat_ethtool.c" ++#endif ++#ifdef ETHTOOL_GSTATS + struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; +@@ -49,6 +56,7 @@ + .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ + } ++ + static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), +@@ -82,6 +90,10 @@ + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), ++#ifndef IGB_NO_LRO ++ IGB_STAT("lro_aggregated", lro_stats.coal), ++ IGB_STAT("lro_flushed", lro_stats.flushed), ++#endif /* IGB_LRO */ + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), +@@ -89,15 +101,18 @@ + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), ++#ifdef HAVE_PTP_1588_CLOCK + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), ++#endif /* HAVE_PTP_1588_CLOCK */ + }; + + #define IGB_NETDEV_STAT(_net_stat) { \ +- .stat_string = __stringify(_net_stat), \ +- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ +- .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ ++ .stat_string = #_net_stat, \ ++ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ ++ .stat_offset = offsetof(struct net_device_stats, _net_stat) \ + } ++ + static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), +@@ -110,15 +125,12 @@ + IGB_NETDEV_STAT(tx_heartbeat_errors) + }; + +-#define IGB_GLOBAL_STATS_LEN \ +- (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +-#define IGB_NETDEV_STATS_LEN \ +- (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) ++#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats) ++#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats) + #define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) +- +-#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +- ++#define IGB_TX_QUEUE_STATS_LEN \ ++ (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) + #define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ +@@ -127,23 +139,23 @@ + #define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + ++#endif /* ETHTOOL_GSTATS */ ++#ifdef ETHTOOL_TEST + static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" + }; ++ + #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) ++#endif /* ETHTOOL_TEST */ + + static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) + 
{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; +- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; +- u32 speed; + +- status = rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | +@@ -165,80 +177,85 @@ + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; ++ + } else { +- ecmd->supported = (SUPPORTED_FIBRE | +- SUPPORTED_1000baseKX_Full | ++ ecmd->supported = (SUPPORTED_1000baseT_Full | ++ SUPPORTED_100baseT_Full | ++ SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause); +- ecmd->advertising = (ADVERTISED_FIBRE | +- ADVERTISED_1000baseKX_Full); +- if (hw->mac.type == e1000_i354) { +- if ((hw->device_id == +- E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && +- !(status & E1000_STATUS_2P5_SKU_OVER)) { +- ecmd->supported |= SUPPORTED_2500baseX_Full; +- ecmd->supported &= +- ~SUPPORTED_1000baseKX_Full; +- ecmd->advertising |= ADVERTISED_2500baseX_Full; +- ecmd->advertising &= +- ~ADVERTISED_1000baseKX_Full; +- } +- } +- if (eth_flags->e100_base_fx) { +- ecmd->supported |= SUPPORTED_100baseT_Full; +- ecmd->advertising |= ADVERTISED_100baseT_Full; ++ if (hw->mac.type == e1000_i354) ++ ecmd->supported |= (SUPPORTED_2500baseX_Full); ++ ++ ecmd->advertising = ADVERTISED_FIBRE; ++ ++ switch (adapter->link_speed) { ++ case SPEED_2500: ++ ecmd->advertising = ADVERTISED_2500baseX_Full; ++ break; ++ case SPEED_1000: ++ ecmd->advertising = ADVERTISED_1000baseT_Full; ++ break; ++ case SPEED_100: ++ ecmd->advertising = ADVERTISED_100baseT_Full; ++ break; ++ default: ++ break; + } ++ + if (hw->mac.autoneg == 1) + ecmd->advertising |= ADVERTISED_Autoneg; + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } ++ + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + +- switch (hw->fc.requested_mode) { +- case e1000_fc_full: ++ if (hw->fc.requested_mode == e1000_fc_full) + ecmd->advertising |= ADVERTISED_Pause; +- break; +- case e1000_fc_rx_pause: ++ else if (hw->fc.requested_mode == e1000_fc_rx_pause) + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); +- break; +- case e1000_fc_tx_pause: ++ else if (hw->fc.requested_mode == e1000_fc_tx_pause) + ecmd->advertising |= ADVERTISED_Asym_Pause; +- break; +- default: ++ else + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); +- } ++ ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ + if (status & E1000_STATUS_LU) { +- if ((status & E1000_STATUS_2P5_SKU) && +- !(status & E1000_STATUS_2P5_SKU_OVER)) { +- speed = SPEED_2500; +- } else if (status & E1000_STATUS_SPEED_1000) { +- speed = SPEED_1000; +- } else if (status & E1000_STATUS_SPEED_100) { +- speed = SPEED_100; +- } else { +- speed = SPEED_10; +- } ++ if ((hw->mac.type == e1000_i354) && ++ (status & E1000_STATUS_2P5_SKU) && ++ !(status & E1000_STATUS_2P5_SKU_OVER)) ++ ethtool_cmd_speed_set(ecmd, SPEED_2500); ++ else if (status & E1000_STATUS_SPEED_1000) ++ ethtool_cmd_speed_set(ecmd, SPEED_1000); ++ else if (status & E1000_STATUS_SPEED_100) ++ ethtool_cmd_speed_set(ecmd, SPEED_100); ++ else ++ ethtool_cmd_speed_set(ecmd, SPEED_10); ++ + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; ++ + } else { +- speed = SPEED_UNKNOWN; +- ecmd->duplex = DUPLEX_UNKNOWN; ++ 
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); ++ ecmd->duplex = -1; + } +- ethtool_cmd_speed_set(ecmd, speed); ++ + if ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; ++#ifdef ETH_TP_MDI_X + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) +@@ -247,11 +264,14 @@ + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + ++#ifdef ETH_TP_MDI_AUTO + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + ++#endif ++#endif /* ETH_TP_MDI_X */ + return 0; + } + +@@ -260,16 +280,26 @@ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + ++ if (ecmd->duplex == DUPLEX_HALF) { ++ if (!hw->dev_spec._82575.eee_disable) ++ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n"); ++ hw->dev_spec._82575.eee_disable = true; ++ } else { ++ if (hw->dev_spec._82575.eee_disable) ++ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n"); ++ hw->dev_spec._82575.eee_disable = false; ++ } ++ + /* When SoL/IDER sessions are active, autoneg/speed/duplex +- * cannot be changed +- */ +- if (igb_check_reset_block(hw)) { +- dev_err(&adapter->pdev->dev, +- "Cannot change link characteristics when SoL/IDER is active.\n"); ++ * cannot be changed */ ++ if (e1000_check_reset_block(hw)) { ++ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + +- /* MDI setting is only allowed when autoneg enabled because ++#ifdef ETH_TP_MDI_AUTO ++ /* ++ * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ +@@ -284,6 +314,7 @@ + } + } + ++#endif /* ETH_TP_MDI_AUTO */ + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + +@@ -318,14 +349,13 @@ + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { +- u32 speed = ethtool_cmd_speed(ecmd); +- /* calling this overrides forced MDI setting */ +- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { ++ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + ++#ifdef ETH_TP_MDI_AUTO + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped +@@ -337,6 +367,7 @@ + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + ++#endif /* ETH_TP_MDI_AUTO */ + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); +@@ -353,7 +384,8 @@ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + +- /* If the link is not reported up to netdev, interrupts are disabled, ++ /* ++ * If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. 
Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly +@@ -391,10 +423,6 @@ + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + +- /* 100basefx does not support setting link flow control */ +- if (hw->dev_spec._82575.eth_flags.e100_base_fx) +- return -EINVAL; +- + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) +@@ -420,10 +448,18 @@ + + hw->fc.current_mode = hw->fc.requested_mode; + +- retval = ((hw->phy.media_type == e1000_media_type_copper) ? +- igb_force_mac_fc(hw) : igb_setup_link(hw)); ++ if (hw->phy.media_type == e1000_media_type_fiber) { ++ retval = hw->mac.ops.setup_link(hw); ++ /* implicit goto out */ ++ } else { ++ retval = igb_e1000_force_mac_fc(hw); ++ if (retval) ++ goto out; ++ e1000_set_fc_watermarks_generic(hw); ++ } + } + ++out: + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; + } +@@ -442,7 +478,7 @@ + + static int igb_get_regs_len(struct net_device *netdev) + { +-#define IGB_REGS_LEN 739 ++#define IGB_REGS_LEN 555 + return IGB_REGS_LEN * sizeof(u32); + } + +@@ -459,80 +495,78 @@ + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ +- regs_buff[0] = rd32(E1000_CTRL); +- regs_buff[1] = rd32(E1000_STATUS); +- regs_buff[2] = rd32(E1000_CTRL_EXT); +- regs_buff[3] = rd32(E1000_MDIC); +- regs_buff[4] = rd32(E1000_SCTL); +- regs_buff[5] = rd32(E1000_CONNSW); +- regs_buff[6] = rd32(E1000_VET); +- regs_buff[7] = rd32(E1000_LEDCTL); +- regs_buff[8] = rd32(E1000_PBA); +- regs_buff[9] = rd32(E1000_PBS); +- regs_buff[10] = rd32(E1000_FRTIMER); +- regs_buff[11] = rd32(E1000_TCPTIMER); ++ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL); ++ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS); ++ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC); ++ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL); ++ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW); ++ regs_buff[6] = E1000_READ_REG(hw, E1000_VET); ++ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL); ++ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA); ++ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS); ++ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER); ++ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER); + + /* NVM Register */ +- regs_buff[12] = rd32(E1000_EECD); ++ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the +- * same but EICS does not clear on read +- */ +- regs_buff[13] = rd32(E1000_EICS); +- regs_buff[14] = rd32(E1000_EICS); +- regs_buff[15] = rd32(E1000_EIMS); +- regs_buff[16] = rd32(E1000_EIMC); +- regs_buff[17] = rd32(E1000_EIAC); +- regs_buff[18] = rd32(E1000_EIAM); ++ * same but EICS does not clear on read */ ++ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS); ++ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS); ++ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS); ++ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC); ++ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC); ++ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM); + /* Reading ICS for ICR because they read the +- * same but ICS does not clear on read +- */ +- regs_buff[19] = rd32(E1000_ICS); +- regs_buff[20] = rd32(E1000_ICS); +- regs_buff[21] = rd32(E1000_IMS); +- regs_buff[22] = rd32(E1000_IMC); +- regs_buff[23] = rd32(E1000_IAC); +- regs_buff[24] = rd32(E1000_IAM); +- regs_buff[25] = rd32(E1000_IMIRVP); ++ * same but ICS does not clear on read */ ++ regs_buff[19] = E1000_READ_REG(hw, 
E1000_ICS); ++ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS); ++ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS); ++ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC); ++ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC); ++ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM); ++ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP); + + /* Flow Control */ +- regs_buff[26] = rd32(E1000_FCAL); +- regs_buff[27] = rd32(E1000_FCAH); +- regs_buff[28] = rd32(E1000_FCTTV); +- regs_buff[29] = rd32(E1000_FCRTL); +- regs_buff[30] = rd32(E1000_FCRTH); +- regs_buff[31] = rd32(E1000_FCRTV); ++ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL); ++ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH); ++ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV); ++ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL); ++ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH); ++ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV); + + /* Receive */ +- regs_buff[32] = rd32(E1000_RCTL); +- regs_buff[33] = rd32(E1000_RXCSUM); +- regs_buff[34] = rd32(E1000_RLPML); +- regs_buff[35] = rd32(E1000_RFCTL); +- regs_buff[36] = rd32(E1000_MRQC); +- regs_buff[37] = rd32(E1000_VT_CTL); ++ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL); ++ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM); ++ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML); ++ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL); ++ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC); ++ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL); + + /* Transmit */ +- regs_buff[38] = rd32(E1000_TCTL); +- regs_buff[39] = rd32(E1000_TCTL_EXT); +- regs_buff[40] = rd32(E1000_TIPG); +- regs_buff[41] = rd32(E1000_DTXCTL); ++ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL); ++ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT); ++ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG); ++ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL); + + /* Wake Up */ +- regs_buff[42] = rd32(E1000_WUC); +- regs_buff[43] = rd32(E1000_WUFC); +- regs_buff[44] = rd32(E1000_WUS); +- regs_buff[45] = rd32(E1000_IPAV); +- regs_buff[46] = rd32(E1000_WUPL); ++ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC); ++ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC); ++ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS); ++ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV); ++ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL); + + /* MAC */ +- regs_buff[47] = rd32(E1000_PCS_CFG0); +- regs_buff[48] = rd32(E1000_PCS_LCTL); +- regs_buff[49] = rd32(E1000_PCS_LSTAT); +- regs_buff[50] = rd32(E1000_PCS_ANADV); +- regs_buff[51] = rd32(E1000_PCS_LPAB); +- regs_buff[52] = rd32(E1000_PCS_NPTX); +- regs_buff[53] = rd32(E1000_PCS_LPABNP); ++ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0); ++ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL); ++ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT); ++ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV); ++ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB); ++ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX); ++ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; +@@ -598,112 +632,75 @@ + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) +- regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); ++ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) +- regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); ++ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) +- regs_buff[129 + i] = rd32(E1000_RDBAL(i)); ++ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i)); + for (i = 0; i < 4; i++) +- regs_buff[133 + i] = 
rd32(E1000_RDBAH(i)); ++ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i)); + for (i = 0; i < 4; i++) +- regs_buff[137 + i] = rd32(E1000_RDLEN(i)); ++ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i)); + for (i = 0; i < 4; i++) +- regs_buff[141 + i] = rd32(E1000_RDH(i)); ++ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i)); + for (i = 0; i < 4; i++) +- regs_buff[145 + i] = rd32(E1000_RDT(i)); ++ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i)); + for (i = 0; i < 4; i++) +- regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); ++ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) +- regs_buff[153 + i] = rd32(E1000_EITR(i)); ++ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i)); + for (i = 0; i < 8; i++) +- regs_buff[163 + i] = rd32(E1000_IMIR(i)); ++ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i)); + for (i = 0; i < 8; i++) +- regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); ++ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) +- regs_buff[179 + i] = rd32(E1000_RAL(i)); ++ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i)); + for (i = 0; i < 16; i++) +- regs_buff[195 + i] = rd32(E1000_RAH(i)); ++ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i)); + + for (i = 0; i < 4; i++) +- regs_buff[211 + i] = rd32(E1000_TDBAL(i)); ++ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i)); + for (i = 0; i < 4; i++) +- regs_buff[215 + i] = rd32(E1000_TDBAH(i)); ++ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i)); + for (i = 0; i < 4; i++) +- regs_buff[219 + i] = rd32(E1000_TDLEN(i)); ++ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i)); + for (i = 0; i < 4; i++) +- regs_buff[223 + i] = rd32(E1000_TDH(i)); ++ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i)); + for (i = 0; i < 4; i++) +- regs_buff[227 + i] = rd32(E1000_TDT(i)); ++ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i)); + for (i = 0; i < 4; i++) +- regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); ++ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) +- regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); ++ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) +- regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); ++ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) +- regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); ++ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) +- regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); ++ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) +- regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); ++ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) +- regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); ++ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) +- regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); ++ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) +- regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); ++ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) +- regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); +- +- regs_buff[547] = rd32(E1000_TDFH); +- regs_buff[548] = rd32(E1000_TDFT); +- regs_buff[549] = rd32(E1000_TDFHS); +- regs_buff[550] = rd32(E1000_TDFPC); ++ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i)); + ++ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH); ++ regs_buff[548] = E1000_READ_REG(hw, 
E1000_TDFT); ++ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS); ++ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC); + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } +- +- if (hw->mac.type != e1000_82576) +- return; +- for (i = 0; i < 12; i++) +- regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); +- for (i = 0; i < 4; i++) +- regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); +- +- for (i = 0; i < 12; i++) +- regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); +- for (i = 0; i < 12; i++) +- regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + static int igb_get_eeprom_len(struct net_device *netdev) +@@ -736,13 +733,13 @@ + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) +- ret_val = hw->nvm.ops.read(hw, first_word, +- last_word - first_word + 1, +- eeprom_buff); ++ ret_val = e1000_read_nvm(hw, first_word, ++ last_word - first_word + 1, ++ eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { +- ret_val = hw->nvm.ops.read(hw, first_word + i, 1, +- &eeprom_buff[i]); ++ ret_val = e1000_read_nvm(hw, first_word + i, 1, ++ &eeprom_buff[i]); + if (ret_val) + break; + } +@@ -750,7 +747,7 @@ + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) +- le16_to_cpus(&eeprom_buff[i]); ++ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); +@@ -772,11 +769,6 @@ + if (eeprom->len == 0) + return -EOPNOTSUPP; + +- if ((hw->mac.type >= e1000_i210) && +- !igb_get_flash_presence_i210(hw)) { +- return -EOPNOTSUPP; +- } +- + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + +@@ -791,19 +783,17 @@ + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { +- /* need read/modify/write of first changed EEPROM word +- * only the second byte of the word is being modified +- */ +- ret_val = hw->nvm.ops.read(hw, first_word, 1, ++ /* need read/modify/write of first changed EEPROM word */ ++ /* only the second byte of the word is being modified */ ++ ret_val = e1000_read_nvm(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { +- /* need read/modify/write of last changed EEPROM word +- * only the first byte of the word is being modified +- */ +- ret_val = hw->nvm.ops.read(hw, last_word, 1, +- &eeprom_buff[last_word - first_word]); ++ /* need read/modify/write of last changed EEPROM word */ ++ /* 
only the first byte of the word is being modified */ ++ ret_val = e1000_read_nvm(hw, last_word, 1, ++ &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ +@@ -813,16 +803,16 @@ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) +- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); ++ cpu_to_le16s(&eeprom_buff[i]); + +- ret_val = hw->nvm.ops.write(hw, first_word, +- last_word - first_word + 1, eeprom_buff); ++ ret_val = e1000_write_nvm(hw, first_word, ++ last_word - first_word + 1, eeprom_buff); + +- /* Update the checksum if nvm write succeeded */ ++ /* Update the checksum if write succeeded. ++ * and flush shadow RAM for 82573 controllers */ + if (ret_val == 0) +- hw->nvm.ops.update(hw); ++ e1000_update_nvm_checksum(hw); + +- igb_set_fw_version(adapter); + kfree(eeprom_buff); + return ret_val; + } +@@ -832,16 +822,14 @@ + { + struct igb_adapter *adapter = netdev_priv(netdev); + +- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); +- strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); +- +- /* EEPROM image version # is reported as firmware version # for +- * 82575 controllers +- */ +- strlcpy(drvinfo->fw_version, adapter->fw_version, +- sizeof(drvinfo->fw_version)); +- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), +- sizeof(drvinfo->bus_info)); ++ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); ++ strncpy(drvinfo->version, igb_driver_version, ++ sizeof(drvinfo->version) - 1); ++ ++ strncpy(drvinfo->fw_version, adapter->fw_version, ++ sizeof(drvinfo->fw_version) - 1); ++ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), ++ sizeof(drvinfo->bus_info) - 1); + drvinfo->n_stats = IGB_STATS_LEN; + drvinfo->testinfo_len = IGB_TEST_LEN; + drvinfo->regdump_len = igb_get_regs_len(netdev); +@@ -855,8 +843,12 @@ + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; ++ ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; + } + + static int igb_set_ringparam(struct net_device *netdev, +@@ -870,12 +862,12 @@ + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + +- new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); +- new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); ++ new_rx_count = min_t(u16, ring->rx_pending, (u32)IGB_MAX_RXD); ++ new_rx_count = max_t(u16, new_rx_count, (u16)IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + +- new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); +- new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); ++ new_tx_count = min_t(u16, ring->tx_pending, (u32)IGB_MAX_TXD); ++ new_tx_count = max_t(u16, new_tx_count, (u16)IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && +@@ -898,11 +890,11 @@ + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) +- temp_ring = vmalloc(adapter->num_tx_queues * +- sizeof(struct igb_ring)); ++ temp_ring = vmalloc(adapter->num_tx_queues ++ * sizeof(struct igb_ring)); + else +- temp_ring = vmalloc(adapter->num_rx_queues * +- sizeof(struct igb_ring)); ++ temp_ring = vmalloc(adapter->num_rx_queues ++ * sizeof(struct igb_ring)); + + if (!temp_ring) { + err = -ENOMEM; +@@ -911,9 +903,10 @@ + + igb_down(adapter); + +- /* We can't just free everything 
and then setup again, ++ /* ++ * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers +- * to the Tx and Rx ring structs. ++ * to the tx and rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { +@@ -975,224 +968,6 @@ + return err; + } + +-/* ethtool register test data */ +-struct igb_reg_test { +- u16 reg; +- u16 reg_offset; +- u16 array_len; +- u16 test_type; +- u32 mask; +- u32 write; +-}; +- +-/* In the hardware, registers are laid out either singly, in arrays +- * spaced 0x100 bytes apart, or in contiguous tables. We assume +- * most tests take place on arrays or single registers (handled +- * as a single-element array) and special-case the tables. +- * Table tests are always pattern tests. +- * +- * We also make provision for some required setup steps by specifying +- * registers to be written without any read-back testing. +- */ +- +-#define PATTERN_TEST 1 +-#define SET_READ_TEST 2 +-#define WRITE_NO_TEST 3 +-#define TABLE32_TEST 4 +-#define TABLE64_TEST_LO 5 +-#define TABLE64_TEST_HI 6 +- +-/* i210 reg test */ +-static struct igb_reg_test reg_test_i210[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- /* RDH is read-only for i210, only test RDT. */ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, +- 0x900FFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0, 0 } +-}; +- +-/* i350 reg test */ +-static struct igb_reg_test reg_test_i350[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- /* RDH is read-only for i350, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, +- 0xC3FFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 16, TABLE64_TEST_HI, +- 0xC3FFFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- +-/* 82580 reg test */ +-static struct igb_reg_test reg_test_82580[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- /* RDH is read-only for 82580, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, +- 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_LO, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_HI, +- 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, +- 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- +-/* 82576 reg test */ +-static struct igb_reg_test reg_test_82576[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- /* Enable all RX queues before testing. */ +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, +- E1000_RXDCTL_QUEUE_ENABLE }, +- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, +- E1000_RXDCTL_QUEUE_ENABLE }, +- /* RDH is read-only for 82576, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, +- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- +-/* 82575 register test */ +-static struct igb_reg_test reg_test_82575[] = { +- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, +- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- /* Enable all four RX queues before testing. */ +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, +- E1000_RXDCTL_QUEUE_ENABLE }, +- /* RDH is read-only for 82575, only test RDT. 
*/ +- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, +- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, +- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, +- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, +- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, +- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, +- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, +- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, +- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, +- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, +- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } +-}; +- + static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) + { +@@ -1201,13 +976,14 @@ + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { +- wr32(reg, (_test[pat] & write)); +- val = rd32(reg) & mask; ++ E1000_WRITE_REG(hw, reg, (_test[pat] & write)); ++ val = E1000_READ_REG(hw, reg) & mask; + if (val != (_test[pat] & write & mask)) { +- dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", +- reg, val, (_test[pat] & write & mask)); +- *data = reg; ++ E1000_REGISTER(hw, reg), val, (_test[pat] ++ & write & mask)); ++ *data = E1000_REGISTER(hw, reg); + return true; + } + } +@@ -1220,14 +996,13 @@ + { + struct e1000_hw *hw = &adapter->hw; + u32 val; +- +- wr32(reg, write & mask); +- val = rd32(reg); ++ E1000_WRITE_REG(hw, reg, write & mask); ++ val = E1000_READ_REG(hw, reg); + if ((write & mask) != (val & mask)) { +- dev_err(&adapter->pdev->dev, +- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "set/check reg %04X test failed:got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); +- *data = reg; ++ *data = E1000_REGISTER(hw, reg); + return true; + } + +@@ -1283,19 +1058,19 @@ + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. + */ +- before = rd32(E1000_STATUS); +- value = (rd32(E1000_STATUS) & toggle); +- wr32(E1000_STATUS, toggle); +- after = rd32(E1000_STATUS) & toggle; ++ before = E1000_READ_REG(hw, E1000_STATUS); ++ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle); ++ E1000_WRITE_REG(hw, E1000_STATUS, toggle); ++ after = E1000_READ_REG(hw, E1000_STATUS) & toggle; + if (value != after) { +- dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ +- wr32(E1000_STATUS, before); ++ E1000_WRITE_REG(hw, E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+@@ -1317,7 +1092,7 @@ + break; + case WRITE_NO_TEST: + writel(test->write, +- (adapter->hw.hw_addr + test->reg) ++ (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: +@@ -1346,24 +1121,11 @@ + + static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) + { +- struct e1000_hw *hw = &adapter->hw; +- + *data = 0; + +- /* Validate eeprom on all parts but flashless */ +- switch (hw->mac.type) { +- case e1000_i210: +- case e1000_i211: +- if (igb_get_flash_presence_i210(hw)) { +- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) +- *data = 2; +- } +- break; +- default: +- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) +- *data = 2; +- break; +- } ++ /* Validate NVM checksum */ ++ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) ++ *data = 2; + + return *data; + } +@@ -1373,7 +1135,7 @@ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + +- adapter->test_icr |= rd32(E1000_ICR); ++ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR); + + return IRQ_HANDLED; + } +@@ -1382,20 +1144,20 @@ + { + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; +- u32 mask, ics_mask, i = 0, shared_int = true; ++ u32 mask, ics_mask, i = 0, shared_int = TRUE; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + if (request_irq(adapter->msix_entries[0].vector, +- igb_test_intr, 0, netdev->name, adapter)) { ++ &igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { +- shared_int = false; ++ shared_int = FALSE; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; +@@ -1403,19 +1165,19 @@ + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { +- shared_int = false; +- } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, ++ shared_int = FALSE; ++ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } +- dev_info(&adapter->pdev->dev, "testing %s interrupt\n", +- (shared_int ? "shared" : "unshared")); ++ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n", ++ (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ +- wr32(E1000_IMC, ~0); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, ~0); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { +@@ -1430,9 +1192,11 @@ + break; + case e1000_i350: + case e1000_i354: ++ ics_mask = 0x77DCFED5; ++ break; + case e1000_i210: + case e1000_i211: +- ics_mask = 0x77DCFED5; ++ ics_mask = 0x774CFED5; + break; + default: + ics_mask = 0x7FFFFFFF; +@@ -1457,12 +1221,12 @@ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ +- wr32(E1000_ICR, ~0); ++ E1000_WRITE_REG(hw, E1000_ICR, ~0); + +- wr32(E1000_IMC, mask); +- wr32(E1000_ICS, mask); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, mask); ++ E1000_WRITE_REG(hw, E1000_ICS, mask); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; +@@ -1479,12 +1243,12 @@ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ +- wr32(E1000_ICR, ~0); ++ E1000_WRITE_REG(hw, E1000_ICR, ~0); + +- wr32(E1000_IMS, mask); +- wr32(E1000_ICS, mask); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMS, mask); ++ E1000_WRITE_REG(hw, E1000_ICS, mask); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; +@@ -1501,12 +1265,12 @@ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ +- wr32(E1000_ICR, ~0); ++ E1000_WRITE_REG(hw, E1000_ICR, ~0); + +- wr32(E1000_IMC, ~mask); +- wr32(E1000_ICS, ~mask); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, ~mask); ++ E1000_WRITE_REG(hw, E1000_ICS, ~mask); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 5; +@@ -1516,12 +1280,12 @@ + } + + /* Disable all the interrupts */ +- wr32(E1000_IMC, ~0); +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_REG(hw, E1000_IMC, ~0); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) ++ if (adapter->msix_entries) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); +@@ -1544,7 +1308,7 @@ + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; +- tx_ring->dev = &adapter->pdev->dev; ++ tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + +@@ -1558,17 +1322,20 @@ + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; +- rx_ring->dev = &adapter->pdev->dev; ++ rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN; ++#endif + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { +- ret_val = 3; ++ ret_val = 2; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ +- wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); ++ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); +@@ -1588,10 +1355,10 @@ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. 
*/ +- igb_write_phy_reg(hw, 29, 0x001F); +- igb_write_phy_reg(hw, 30, 0x8FFC); +- igb_write_phy_reg(hw, 29, 0x001A); +- igb_write_phy_reg(hw, 30, 0x8FF0); ++ igb_e1000_write_phy_reg(hw, 29, 0x001F); ++ igb_e1000_write_phy_reg(hw, 30, 0x8FFC); ++ igb_e1000_write_phy_reg(hw, 29, 0x001A); ++ igb_e1000_write_phy_reg(hw, 30, 0x8FF0); + } + + static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +@@ -1599,34 +1366,32 @@ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + +- hw->mac.autoneg = false; ++ hw->mac.autoneg = FALSE; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ +- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); ++ igb_e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ +- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ +- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ +- igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); +- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); ++ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } +- } else if (hw->phy.type == e1000_phy_82580) { ++ } else { + /* enable MII loopback */ +- igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); ++ if (hw->phy.type == e1000_phy_82580) ++ igb_e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041); + } + +- /* add small delay to avoid loopback test failure */ +- msleep(50); +- +- /* force 1000, set loopback */ +- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); ++ /* force 1000, set loopback */ ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ +- ctrl_reg = rd32(E1000_CTRL); ++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ +@@ -1637,7 +1402,7 @@ + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + +- wr32(E1000_CTRL, ctrl_reg); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+@@ -1659,64 +1424,64 @@ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + +- reg = rd32(E1000_CTRL_EXT); ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || +- (hw->device_id == E1000_DEV_ID_I354_SGMII) || +- (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || ++ (hw->device_id == E1000_DEV_ID_I354_SGMII) || ++ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ +- reg = rd32(E1000_MPHY_ADDR_CTL); ++ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | +- E1000_MPHY_PCS_CLK_REG_OFFSET; +- wr32(E1000_MPHY_ADDR_CTL, reg); ++ E1000_MPHY_PCS_CLK_REG_OFFSET; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); + +- reg = rd32(E1000_MPHY_DATA); ++ reg = E1000_READ_REG(hw, E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; +- wr32(E1000_MPHY_DATA, reg); ++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); + } + +- reg = rd32(E1000_RCTL); ++ reg = E1000_READ_REG(hw, E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; +- wr32(E1000_RCTL, reg); ++ E1000_WRITE_REG(hw, E1000_RCTL, reg); + +- wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); ++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + +- reg = rd32(E1000_CTRL); ++ reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; +- wr32(E1000_CTRL, reg); ++ E1000_WRITE_REG(hw, E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ +- reg = rd32(E1000_CONNSW); ++ reg = E1000_READ_REG(hw, E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; +- wr32(E1000_CONNSW, reg); ++ E1000_WRITE_REG(hw, E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on +- * 82580 and newer devices. 
++ * 82580 and newer devices + */ + if (hw->mac.type >= e1000_82580) { +- reg = rd32(E1000_PCS_CFG0); ++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; +- wr32(E1000_PCS_CFG0, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ +- reg = rd32(E1000_PCS_LCTL); ++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ +- wr32(E1000_PCS_LCTL, reg); ++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + return 0; + } +@@ -1731,36 +1496,37 @@ + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || +- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || +- (hw->device_id == E1000_DEV_ID_I354_SGMII)) { ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || ++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || ++ (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ +- reg = rd32(E1000_MPHY_ADDR_CTL); ++ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | +- E1000_MPHY_PCS_CLK_REG_OFFSET; +- wr32(E1000_MPHY_ADDR_CTL, reg); ++ E1000_MPHY_PCS_CLK_REG_OFFSET; ++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); + +- reg = rd32(E1000_MPHY_DATA); ++ reg = E1000_READ_REG(hw, E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; +- wr32(E1000_MPHY_DATA, reg); ++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); + } + +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + +- hw->mac.autoneg = true; +- igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); ++ hw->mac.autoneg = TRUE; ++ igb_e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; +- igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); +- igb_phy_sw_reset(hw); ++ if (hw->phy.type == I210_I_PHY_ID) ++ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); ++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg); ++ e1000_phy_commit(hw); + } + } +- + static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) + { +@@ -1779,19 +1545,25 @@ + + frame_size >>= 1; + ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ data = rx_buffer->skb->data; ++#else + data = kmap(rx_buffer->page); ++#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + ++#endif + return match; + } + +-static int igb_clean_test_rings(struct igb_ring *rx_ring, ++static u16 igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) + { +@@ -1806,13 +1578,17 @@ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { +- /* check Rx buffer */ ++ /* check rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ 
IGB_RX_HDR_LEN, ++#else + IGB_RX_BUFSZ, ++#endif + DMA_FROM_DEVICE); + + /* verify contents of skb */ +@@ -1822,14 +1598,18 @@ + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ IGB_RX_HDR_LEN, ++#else + IGB_RX_BUFSZ, ++#endif + DMA_FROM_DEVICE); + +- /* unmap buffer on Tx side */ ++ /* unmap buffer on tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + +- /* increment Rx/Tx next to clean counters */ ++ /* increment rx/tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; +@@ -1841,8 +1621,6 @@ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + +- netdev_tx_reset_queue(txring_txq(tx_ring)); +- + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; +@@ -1870,7 +1648,8 @@ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + +- /* Calculate the loop count based on the largest descriptor ring ++ /* ++ * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ +@@ -1897,7 +1676,7 @@ + break; + } + +- /* allow 200 milliseconds for packets to go from Tx to Rx */ ++ /* allow 200 milliseconds for packets to go from tx to rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); +@@ -1916,21 +1695,14 @@ + static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) + { + /* PHY loopback cannot be performed if SoL/IDER +- * sessions are active +- */ +- if (igb_check_reset_block(&adapter->hw)) { +- dev_err(&adapter->pdev->dev, ++ * sessions are active */ ++ if (e1000_check_reset_block(&adapter->hw)) { ++ dev_err(pci_dev_to_dev(adapter->pdev), + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + +- if (adapter->hw.mac.type == e1000_i354) { +- dev_info(&adapter->pdev->dev, +- "Loopback test not supported on i354.\n"); +- *data = 0; +- goto out; +- } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; +@@ -1938,6 +1710,7 @@ + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); ++ + igb_loopback_cleanup(adapter); + + err_loopback: +@@ -1948,32 +1721,39 @@ + + static int igb_link_test(struct igb_adapter *adapter, u64 *data) + { +- struct e1000_hw *hw = &adapter->hw; ++ u32 link; ++ int i, time; ++ + *data = 0; +- if (hw->phy.media_type == e1000_media_type_internal_serdes) { ++ time = 0; ++ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; +- +- hw->mac.serdes_has_link = false; ++ adapter->hw.mac.serdes_has_link = FALSE; + + /* On some blade server designs, link establishment +- * could take as long as 2-3 minutes +- */ ++ * could take as long as 2-3 minutes */ + do { +- hw->mac.ops.check_for_link(&adapter->hw); +- if (hw->mac.serdes_has_link) +- return *data; ++ igb_e1000_check_for_link(&adapter->hw); ++ if (adapter->hw.mac.serdes_has_link) ++ goto out; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { +- hw->mac.ops.check_for_link(&adapter->hw); +- if (hw->mac.autoneg) +- msleep(5000); +- +- if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ++ for (i = 0; i < IGB_MAX_LINK_TRIES; i++) { ++ link = igb_has_link(adapter); ++ if (link) { ++ goto out; ++ } else { ++ time++; ++ msleep(1000); ++ } ++ } ++ if (!link) + *data = 1; + } ++out: + return *data; + } + +@@ 
-1986,10 +1766,6 @@ + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); +- +- /* can't do offline tests on media switching devices */ +- if (adapter->hw.dev_spec._82575.mas_capable) +- eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + +@@ -1998,20 +1774,19 @@ + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + +- dev_info(&adapter->pdev->dev, "offline testing starting\n"); ++ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't +- * interfere with test result +- */ ++ * interfere with test result */ + if (igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ +- dev_close(netdev); ++ igb_close(netdev); + else + igb_reset(adapter); + +@@ -2027,8 +1802,10 @@ + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); ++ + /* power up link for loopback test */ + igb_power_up_link(adapter); ++ + if (igb_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +@@ -2038,15 +1815,15 @@ + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ +- adapter->hw.phy.autoneg_wait_to_complete = true; ++ adapter->hw.phy.autoneg_wait_to_complete = TRUE; + igb_reset(adapter); +- adapter->hw.phy.autoneg_wait_to_complete = false; ++ adapter->hw.phy.autoneg_wait_to_complete = FALSE; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) +- dev_open(netdev); ++ igb_open(netdev); + } else { +- dev_info(&adapter->pdev->dev, "online testing starting\n"); ++ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[4])) +@@ -2125,8 +1902,7 @@ + } + + /* bit defines for adapter->led_status */ +-#define IGB_LED_ON 0 +- ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID + static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) + { +@@ -2135,23 +1911,47 @@ + + switch (state) { + case ETHTOOL_ID_ACTIVE: +- igb_blink_led(hw); ++ e1000_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: +- igb_blink_led(hw); ++ igb_e1000_led_on(hw); + break; + case ETHTOOL_ID_OFF: +- igb_led_off(hw); ++ igb_e1000_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: +- igb_led_off(hw); +- clear_bit(IGB_LED_ON, &adapter->led_status); +- igb_cleanup_led(hw); ++ igb_e1000_led_off(hw); ++ igb_e1000_cleanup_led(hw); + break; + } + + return 0; + } ++#else ++static int igb_phys_id(struct net_device *netdev, u32 data) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; ++ unsigned long timeout; ++ ++ timeout = data * 1000; ++ ++ /* ++ * msleep_interruptable only accepts unsigned int so we are limited ++ * in how long a duration we can wait ++ */ ++ if (!timeout || timeout > UINT_MAX) ++ timeout = UINT_MAX; ++ ++ e1000_blink_led(hw); ++ msleep_interruptible(timeout); ++ ++ igb_e1000_led_off(hw); ++ igb_e1000_cleanup_led(hw); ++ ++ return 0; ++} ++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + + static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +@@ -2159,11 +1959,36 @@ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + ++ if (ec->rx_max_coalesced_frames || ++ ec->rx_coalesce_usecs_irq || 
++ ec->rx_max_coalesced_frames_irq || ++ ec->tx_max_coalesced_frames || ++ ec->tx_coalesce_usecs_irq || ++ ec->stats_block_coalesce_usecs || ++ ec->use_adaptive_rx_coalesce || ++ ec->use_adaptive_tx_coalesce || ++ ec->pkt_rate_low || ++ ec->rx_coalesce_usecs_low || ++ ec->rx_max_coalesced_frames_low || ++ ec->tx_coalesce_usecs_low || ++ ec->tx_max_coalesced_frames_low || ++ ec->pkt_rate_high || ++ ec->rx_coalesce_usecs_high || ++ ec->rx_max_coalesced_frames_high || ++ ec->tx_coalesce_usecs_high || ++ ec->tx_max_coalesced_frames_high || ++ ec->rate_sample_interval) { ++ netdev_err(netdev, "set_coalesce: invalid parameter"); ++ return -ENOTSUPP; ++ } ++ + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || +- (ec->rx_coalesce_usecs == 2)) ++ (ec->rx_coalesce_usecs == 2)) { ++ netdev_err(netdev, "set_coalesce: invalid setting"); + return -EINVAL; ++ } + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && +@@ -2174,11 +1999,12 @@ + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + ++ if (ec->tx_max_coalesced_frames_irq) ++ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; ++ + /* If ITR is disabled, disable DMAC */ +- if (ec->rx_coalesce_usecs == 0) { +- if (adapter->flags & IGB_FLAG_DMAC) +- adapter->flags &= ~IGB_FLAG_DMAC; +- } ++ if (ec->rx_coalesce_usecs == 0) ++ adapter->dmac = IGB_DMAC_DISABLE; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) +@@ -2219,6 +2045,8 @@ + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + ++ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; ++ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; +@@ -2237,6 +2065,7 @@ + return 0; + } + ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + static int igb_get_sset_count(struct net_device *netdev, int sset) + { + switch (sset) { +@@ -2248,19 +2077,32 @@ + return -ENOTSUPP; + } + } ++#else ++static int igb_get_stats_count(struct net_device *netdev) ++{ ++ return IGB_STATS_LEN; ++} ++ ++static int igb_diag_test_count(struct net_device *netdev) ++{ ++ return IGB_TEST_LEN; ++} ++#endif + + static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) + { + struct igb_adapter *adapter = netdev_priv(netdev); +- struct rtnl_link_stats64 *net_stats = &adapter->stats64; +- unsigned int start; +- struct igb_ring *ring; +- int i, j; ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats *net_stats = &netdev->stats; ++#else ++ struct net_device_stats *net_stats = &adapter->net_stats; ++#endif ++ u64 *queue_stat; ++ int i, j, k; + char *p; + +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, net_stats); ++ igb_update_stats(adapter); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; +@@ -2273,36 +2115,15 @@ + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { +- u64 restart2; +- +- ring = adapter->tx_ring[j]; +- do { +- start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +- data[i] = ring->tx_stats.packets; +- data[i+1] = ring->tx_stats.bytes; +- data[i+2] = ring->tx_stats.restart_queue; +- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +- do { +- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); +- restart2 = ring->tx_stats.restart_queue2; +- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); +- data[i+2] += restart2; +- +- i += IGB_TX_QUEUE_STATS_LEN; ++ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; ++ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) ++ data[i] = queue_stat[k]; + } + for (j = 0; j < adapter->num_rx_queues; j++) { +- ring = adapter->rx_ring[j]; +- do { +- start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +- data[i] = ring->rx_stats.packets; +- data[i+1] = ring->rx_stats.bytes; +- data[i+2] = ring->rx_stats.drops; +- data[i+3] = ring->rx_stats.csum_err; +- data[i+4] = ring->rx_stats.alloc_failed; +- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +- i += IGB_RX_QUEUE_STATS_LEN; ++ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; ++ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) ++ data[i] = queue_stat[k]; + } +- spin_unlock(&adapter->stats64_lock); + } + + static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +@@ -2347,22 +2168,19 @@ + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } +- /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ ++/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } + } + ++#ifdef HAVE_ETHTOOL_GET_TS_INFO + static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) + { + struct igb_adapter *adapter = netdev_priv(dev); + +- if (adapter->ptp_clock) +- info->phc_index = ptp_clock_index(adapter->ptp_clock); +- else +- info->phc_index = -1; +- + switch (adapter->hw.mac.type) { ++#ifdef HAVE_PTP_1588_CLOCK + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | +@@ -2383,6 +2201,11 @@ + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + ++ if (adapter->ptp_clock) ++ info->phc_index = ptp_clock_index(adapter->ptp_clock); ++ else ++ info->phc_index = -1; ++ + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); +@@ -2396,201 +2219,217 @@ + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | +- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; ++#endif /* HAVE_PTP_1588_CLOCK */ + default: + return -EOPNOTSUPP; + } + } ++#endif /* HAVE_ETHTOOL_GET_TS_INFO */ + +-static int igb_get_rss_hash_opts(struct igb_adapter *adapter, +- struct ethtool_rxnfc *cmd) ++#ifdef CONFIG_PM_RUNTIME ++static int igb_ethtool_begin(struct net_device *netdev) + { +- cmd->data = 0; ++ struct igb_adapter *adapter = netdev_priv(netdev); + +- /* Report default options for RSS on igb */ +- switch (cmd->flow_type) { +- case TCP_V4_FLOW: +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case UDP_V4_FLOW: +- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case SCTP_V4_FLOW: +- case AH_ESP_V4_FLOW: +- case AH_V4_FLOW: +- case ESP_V4_FLOW: +- 
case IPV4_FLOW: +- cmd->data |= RXH_IP_SRC | RXH_IP_DST; +- break; +- case TCP_V6_FLOW: +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case UDP_V6_FLOW: +- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) +- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* Fall through */ +- case SCTP_V6_FLOW: +- case AH_ESP_V6_FLOW: +- case AH_V6_FLOW: +- case ESP_V6_FLOW: +- case IPV6_FLOW: +- cmd->data |= RXH_IP_SRC | RXH_IP_DST; +- break; +- default: +- return -EINVAL; +- } ++ pm_runtime_get_sync(&adapter->pdev->dev); + + return 0; + } + +-static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +- u32 *rule_locs) ++static void igb_ethtool_complete(struct net_device *netdev) + { +- struct igb_adapter *adapter = netdev_priv(dev); +- int ret = -EOPNOTSUPP; ++ struct igb_adapter *adapter = netdev_priv(netdev); + +- switch (cmd->cmd) { +- case ETHTOOL_GRXRINGS: +- cmd->data = adapter->num_rx_queues; +- ret = 0; +- break; +- case ETHTOOL_GRXFH: +- ret = igb_get_rss_hash_opts(adapter, cmd); +- break; +- default: +- break; +- } ++ pm_runtime_put(&adapter->pdev->dev); ++} ++#endif /* CONFIG_PM_RUNTIME */ + +- return ret; ++#ifndef HAVE_NDO_SET_FEATURES ++static u32 igb_get_rx_csum(struct net_device *netdev) ++{ ++ return !!(netdev->features & NETIF_F_RXCSUM); + } + +-#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ +- IGB_FLAG_RSS_FIELD_IPV6_UDP) +-static int igb_set_rss_hash_opt(struct igb_adapter *adapter, +- struct ethtool_rxnfc *nfc) ++static int igb_set_rx_csum(struct net_device *netdev, u32 data) + { +- u32 flags = adapter->flags; ++ const u32 feature_list = NETIF_F_RXCSUM; + +- /* RSS does not support anything other than hashing +- * to queues on src and dst IPs and ports ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++ return 0; ++} ++ ++static int igb_set_tx_csum(struct net_device *netdev, u32 data) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++#ifdef NETIF_F_IPV6_CSUM ++ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#else ++ u32 feature_list = NETIF_F_IP_CSUM; ++#endif ++ ++ if (adapter->hw.mac.type >= e1000_82576) ++ feature_list |= NETIF_F_SCTP_CSUM; ++ ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++ return 0; ++} ++ ++#ifdef NETIF_F_TSO ++static int igb_set_tso(struct net_device *netdev, u32 data) ++{ ++#ifdef NETIF_F_TSO6 ++ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; ++#else ++ const u32 feature_list = NETIF_F_TSO; ++#endif ++ ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ if (!data) { ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct net_device *v_netdev; ++ int i; ++ ++ /* disable TSO on all VLANs if they're present */ ++ if (!adapter->vlgrp) ++ goto tso_out; ++ ++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { ++ v_netdev = vlan_group_get_device(adapter->vlgrp, i); ++ if (!v_netdev) ++ continue; ++ ++ v_netdev->features &= ~feature_list; ++ vlan_group_set_device(adapter->vlgrp, i, v_netdev); ++ } ++ } ++ ++tso_out: ++ ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ return 0; ++} ++ ++#endif /* NETIF_F_TSO */ ++#ifdef ETHTOOL_GFLAGS ++static int igb_set_flags(struct net_device *netdev, u32 data) ++{ ++ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ++ ETH_FLAG_RXHASH; ++#ifndef HAVE_VLAN_RX_REGISTER ++ u32 changed = netdev->features ^ data; ++#endif ++ int rc; ++#ifndef IGB_NO_LRO ++ ++ 
supported_flags |= ETH_FLAG_LRO; ++#endif ++ /* ++ * Since there is no support for separate tx vlan accel ++ * enabled make sure tx flag is cleared if rx is. + */ +- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | +- RXH_L4_B_0_1 | RXH_L4_B_2_3)) +- return -EINVAL; ++ if (!(data & ETH_FLAG_RXVLAN)) ++ data &= ~ETH_FLAG_TXVLAN; + +- switch (nfc->flow_type) { +- case TCP_V4_FLOW: +- case TCP_V6_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST) || +- !(nfc->data & RXH_L4_B_0_1) || +- !(nfc->data & RXH_L4_B_2_3)) +- return -EINVAL; +- break; +- case UDP_V4_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST)) +- return -EINVAL; +- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { +- case 0: +- flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; +- break; +- case (RXH_L4_B_0_1 | RXH_L4_B_2_3): +- flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; +- break; +- default: +- return -EINVAL; +- } +- break; +- case UDP_V6_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST)) +- return -EINVAL; +- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { +- case 0: +- flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; +- break; +- case (RXH_L4_B_0_1 | RXH_L4_B_2_3): +- flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; +- break; +- default: +- return -EINVAL; +- } +- break; +- case AH_ESP_V4_FLOW: +- case AH_V4_FLOW: +- case ESP_V4_FLOW: +- case SCTP_V4_FLOW: +- case AH_ESP_V6_FLOW: +- case AH_V6_FLOW: +- case ESP_V6_FLOW: +- case SCTP_V6_FLOW: +- if (!(nfc->data & RXH_IP_SRC) || +- !(nfc->data & RXH_IP_DST) || +- (nfc->data & RXH_L4_B_0_1) || +- (nfc->data & RXH_L4_B_2_3)) +- return -EINVAL; +- break; +- default: +- return -EINVAL; +- } +- +- /* if we changed something we need to update flags */ +- if (flags != adapter->flags) { +- struct e1000_hw *hw = &adapter->hw; +- u32 mrqc = rd32(E1000_MRQC); +- +- if ((flags & UDP_RSS_FLAGS) && +- !(adapter->flags & UDP_RSS_FLAGS)) +- dev_err(&adapter->pdev->dev, +- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); +- +- adapter->flags = flags; +- +- /* Perform hash on these packet types */ +- mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | +- E1000_MRQC_RSS_FIELD_IPV4_TCP | +- E1000_MRQC_RSS_FIELD_IPV6 | +- E1000_MRQC_RSS_FIELD_IPV6_TCP; +- +- mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | +- E1000_MRQC_RSS_FIELD_IPV6_UDP); +- +- if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) +- mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; +- +- if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) +- mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; +- +- wr32(E1000_MRQC, mrqc); +- } ++ rc = ethtool_op_set_flags(netdev, data, supported_flags); ++ if (rc) ++ return rc; ++#ifndef HAVE_VLAN_RX_REGISTER ++ ++ if (changed & ETH_FLAG_RXVLAN) ++ igb_vlan_mode(netdev, data); ++#endif + + return 0; + } + +-static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) ++#endif /* ETHTOOL_GFLAGS */ ++#endif /* HAVE_NDO_SET_FEATURES */ ++#ifdef ETHTOOL_SADV_COAL ++static int igb_set_adv_coal(struct net_device *netdev, ++ struct ethtool_value *edata) + { +- struct igb_adapter *adapter = netdev_priv(dev); +- int ret = -EOPNOTSUPP; ++ struct igb_adapter *adapter = netdev_priv(netdev); + +- switch (cmd->cmd) { +- case ETHTOOL_SRXFH: +- ret = igb_set_rss_hash_opt(adapter, cmd); ++ switch (edata->data) { ++ case IGB_DMAC_DISABLE: ++ adapter->dmac = edata->data; + break; +- default: ++ case IGB_DMAC_MIN: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_500: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_EN_DEFAULT: ++ adapter->dmac = edata->data; ++ break; ++ case 
IGB_DMAC_2000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_3000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_4000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_5000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_6000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_7000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_8000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_9000: ++ adapter->dmac = edata->data; ++ break; ++ case IGB_DMAC_MAX: ++ adapter->dmac = edata->data; + break; ++ default: ++ adapter->dmac = IGB_DMAC_DISABLE; ++ netdev_info(netdev, ++ "set_dmac: invalid setting, setting DMAC to %d\n", ++ adapter->dmac); + } ++ netdev_info(netdev, "%s: setting DMAC to %d\n", ++ netdev->name, adapter->dmac); ++ return 0; ++} + +- return ret; ++#endif /* ETHTOOL_SADV_COAL */ ++#ifdef ETHTOOL_GADV_COAL ++static void igb_get_dmac(struct net_device *netdev, ++ struct ethtool_value *edata) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ edata->data = adapter->dmac; ++ ++ return; + } ++#endif + ++#ifdef ETHTOOL_GEEE + static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) + { + struct igb_adapter *adapter = netdev_priv(netdev); +@@ -2604,17 +2443,18 @@ + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); ++ + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not supported on I354. */ + if (hw->mac.type == e1000_i354) { +- igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); ++ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + +- eeer = rd32(E1000_EEER); ++ eeer = E1000_READ_REG(hw, E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) +@@ -2627,19 +2467,20 @@ + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: +- ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, +- &phy_data); ++ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, ++ &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); ++ + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: +- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, +- E1000_EEE_LP_ADV_DEV_I210, +- &phy_data); ++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, ++ E1000_EEE_LP_ADV_DEV_I210, ++ &phy_data); + if (ret_val) + return -ENODATA; + +@@ -2656,7 +2497,8 @@ + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + +- /* Report correct negotiated EEE status for devices that ++ /* ++ * report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { +@@ -2668,60 +2510,59 @@ + + return 0; + } ++#endif + ++#ifdef ETHTOOL_SEEE + static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; ++ bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + +- memset(&eee_curr, 0, sizeof(struct ethtool_eee)); +- + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { +- 
dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + +- /* Tx LPI timer is not implemented currently */ ++ /* Tx LPI time is not implemented currently */ + if (edata->tx_lpi_timer) { +- dev_err(&adapter->pdev->dev, ++ dev_err(pci_dev_to_dev(adapter->pdev), + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + +- if (edata->advertised & +- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { +- dev_err(&adapter->pdev->dev, +- "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); ++ if (!edata->advertised || (edata->advertised & ++ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n"); + return -EINVAL; + } ++ adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); ++ adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { +- dev_err(&adapter->pdev->dev, +- "Setting EEE options are not supported with EEE disabled\n"); ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "Setting EEE options is not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); ++ + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; +- adapter->flags |= IGB_FLAG_EEE; +- if (hw->mac.type == e1000_i350) +- igb_set_eee_i350(hw); +- else +- igb_set_eee_i354(hw); + + /* reset link */ + if (netif_running(netdev)) +@@ -2730,109 +2571,232 @@ + igb_reset(adapter); + } + ++ if (hw->mac.type == e1000_i354) ++ ret_val = e1000_set_eee_i354(hw, adv1g_eee, adv100m_eee); ++ else ++ ret_val = e1000_set_eee_i350(hw, adv1g_eee, adv100m_eee); ++ ++ if (ret_val) { ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "Problem setting EEE advertisement options\n"); ++ return -EINVAL; ++ } ++ + return 0; + } ++#endif /* ETHTOOL_SEEE */ ++#ifdef ETHTOOL_GRXFH ++#ifdef ETHTOOL_GRXFHINDIR + +-static int igb_get_module_info(struct net_device *netdev, +- struct ethtool_modinfo *modinfo) ++static int igb_get_rss_hash_opts(struct igb_adapter *adapter, ++ struct ethtool_rxnfc *cmd) + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- u32 status = 0; +- u16 sff8472_rev, addr_mode; +- bool page_swap = false; +- +- if ((hw->phy.media_type == e1000_media_type_copper) || +- (hw->phy.media_type == e1000_media_type_unknown)) +- return -EOPNOTSUPP; ++ cmd->data = 0; + +- /* Check whether we support SFF-8472 or not */ +- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); +- if (status) +- return -EIO; +- +- /* addressing mode is not supported */ +- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); +- if (status) +- return -EIO; +- +- /* addressing mode is not supported */ +- if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { +- hw_dbg("Address change required to access page 0xA2, but not supported. 
Please report the module type to the driver maintainers.\n"); +- page_swap = true; +- } +- +- if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { +- /* We have an SFP, but it does not support SFF-8472 */ +- modinfo->type = ETH_MODULE_SFF_8079; +- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; +- } else { +- /* We have an SFP which supports a revision of SFF-8472 */ +- modinfo->type = ETH_MODULE_SFF_8472; +- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; ++ /* Report default options for RSS on igb */ ++ switch (cmd->flow_type) { ++ case TCP_V4_FLOW: ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case UDP_V4_FLOW: ++ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case SCTP_V4_FLOW: ++ case AH_ESP_V4_FLOW: ++ case AH_V4_FLOW: ++ case ESP_V4_FLOW: ++ case IPV4_FLOW: ++ cmd->data |= RXH_IP_SRC | RXH_IP_DST; ++ break; ++ case TCP_V6_FLOW: ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case UDP_V6_FLOW: ++ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) ++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ /* Fall through */ ++ case SCTP_V6_FLOW: ++ case AH_ESP_V6_FLOW: ++ case AH_V6_FLOW: ++ case ESP_V6_FLOW: ++ case IPV6_FLOW: ++ cmd->data |= RXH_IP_SRC | RXH_IP_DST; ++ break; ++ default: ++ return -EINVAL; + } + + return 0; + } + +-static int igb_get_module_eeprom(struct net_device *netdev, +- struct ethtool_eeprom *ee, u8 *data) ++#endif /* ETHTOOL_GRXFHINDIR */ ++static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ++#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS ++ void *rule_locs) ++#else ++ u32 *rule_locs) ++#endif + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- u32 status = 0; +- u16 *dataword; +- u16 first_word, last_word; +- int i = 0; ++ struct igb_adapter *adapter = netdev_priv(dev); ++ int ret = -EOPNOTSUPP; + +- if (ee->len == 0) +- return -EINVAL; ++ switch (cmd->cmd) { ++ case ETHTOOL_GRXRINGS: ++ cmd->data = adapter->num_rx_queues; ++ ret = 0; ++ break; ++#ifdef ETHTOOL_GRXFHINDIR ++ case ETHTOOL_GRXFHINDIR: ++ ret = igb_get_rss_hash_opts(adapter, cmd); ++ break; ++#endif /* ETHTOOL_GRXFHINDIR */ ++ default: ++ break; ++ } + +- first_word = ee->offset >> 1; +- last_word = (ee->offset + ee->len - 1) >> 1; ++ return ret; ++} + +- dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1), +- GFP_KERNEL); +- if (!dataword) +- return -ENOMEM; ++#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ ++ IGB_FLAG_RSS_FIELD_IPV6_UDP) ++static int igb_set_rss_hash_opt(struct igb_adapter *adapter, ++ struct ethtool_rxnfc *nfc) ++{ ++ u32 flags = adapter->flags; + +- /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ +- for (i = 0; i < last_word - first_word + 1; i++) { +- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); +- if (status) { +- /* Error occurred while reading module */ +- kfree(dataword); +- return -EIO; +- } ++ /* ++ * RSS does not support anything other than hashing ++ * to queues on src and dst IPs and ports ++ */ ++ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | ++ RXH_L4_B_0_1 | RXH_L4_B_2_3)) ++ return -EINVAL; + +- be16_to_cpus(&dataword[i]); ++ switch (nfc->flow_type) { ++ case TCP_V4_FLOW: ++ case TCP_V6_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & RXH_IP_DST) || ++ !(nfc->data & RXH_L4_B_0_1) || ++ !(nfc->data & RXH_L4_B_2_3)) ++ return -EINVAL; ++ break; ++ case UDP_V4_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & 
RXH_IP_DST)) ++ return -EINVAL; ++ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { ++ case 0: ++ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; ++ break; ++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3): ++ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; ++ break; ++ default: ++ return -EINVAL; ++ } ++ break; ++ case UDP_V6_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & RXH_IP_DST)) ++ return -EINVAL; ++ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { ++ case 0: ++ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; ++ break; ++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3): ++ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; ++ break; ++ default: ++ return -EINVAL; ++ } ++ break; ++ case AH_ESP_V4_FLOW: ++ case AH_V4_FLOW: ++ case ESP_V4_FLOW: ++ case SCTP_V4_FLOW: ++ case AH_ESP_V6_FLOW: ++ case AH_V6_FLOW: ++ case ESP_V6_FLOW: ++ case SCTP_V6_FLOW: ++ if (!(nfc->data & RXH_IP_SRC) || ++ !(nfc->data & RXH_IP_DST) || ++ (nfc->data & RXH_L4_B_0_1) || ++ (nfc->data & RXH_L4_B_2_3)) ++ return -EINVAL; ++ break; ++ default: ++ return -EINVAL; + } + +- memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); +- kfree(dataword); ++ /* if we changed something we need to update flags */ ++ if (flags != adapter->flags) { ++ struct e1000_hw *hw = &adapter->hw; ++ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC); + +- return 0; +-} ++ if ((flags & UDP_RSS_FLAGS) && ++ !(adapter->flags & UDP_RSS_FLAGS)) ++ DPRINTK(DRV, WARNING, ++ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); ++ ++ adapter->flags = flags; ++ ++ /* Perform hash on these packet types */ ++ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | ++ E1000_MRQC_RSS_FIELD_IPV4_TCP | ++ E1000_MRQC_RSS_FIELD_IPV6 | ++ E1000_MRQC_RSS_FIELD_IPV6_TCP; ++ ++ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | ++ E1000_MRQC_RSS_FIELD_IPV6_UDP); ++ ++ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) ++ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; ++ ++ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) ++ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; ++ ++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc); ++ } + +-static int igb_ethtool_begin(struct net_device *netdev) +-{ +- struct igb_adapter *adapter = netdev_priv(netdev); +- pm_runtime_get_sync(&adapter->pdev->dev); + return 0; + } + +-static void igb_ethtool_complete(struct net_device *netdev) ++static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- pm_runtime_put(&adapter->pdev->dev); ++ struct igb_adapter *adapter = netdev_priv(dev); ++ int ret = -EOPNOTSUPP; ++ ++ switch (cmd->cmd) { ++ case ETHTOOL_SRXFH: ++ ret = igb_set_rss_hash_opt(adapter, cmd); ++ break; ++ default: ++ break; ++ } ++ ++ return ret; + } + ++#endif /* ETHTOOL_GRXFH */ ++#ifdef ETHTOOL_GRXFHINDIR ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE + static u32 igb_get_rxfh_indir_size(struct net_device *netdev) + { + return IGB_RETA_SIZE; + } + ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) ++#ifdef HAVE_RXFH_HASHFUNC ++static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, ++ u8 *hfunc) ++#else + static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) ++#endif /* HAVE_RXFH_HASHFUNC */ ++#else ++static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir) ++#endif /* HAVE_ETHTOOL_GSRSSH */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + int i; +@@ -2843,6 +2807,22 @@ + return 0; + } + ++#else ++static int igb_get_rxfh_indir(struct net_device *netdev, ++ struct ethtool_rxfh_indir *indir) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ size_t copy_size = 
++ min_t(size_t, indir->size, ARRAY_SIZE(adapter->rss_indir_tbl)); ++ ++ indir->size = ARRAY_SIZE(adapter->rss_indir_tbl); ++ memcpy(indir->ring_index, adapter->rss_indir_tbl, ++ copy_size * sizeof(indir->ring_index[0])); ++ return 0; ++} ++#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ ++#endif /* ETHTOOL_GRXFHINDIR */ ++#ifdef ETHTOOL_SRXFHINDIR + void igb_write_rss_indir_tbl(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +@@ -2872,14 +2852,24 @@ + val |= adapter->rss_indir_tbl[i + j]; + } + +- wr32(reg, val << shift); ++ E1000_WRITE_REG(hw, reg, val << shift); + reg += 4; + i += 4; + } + } + ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) ++#ifdef HAVE_RXFH_HASHFUNC + static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, +- const u8 *key) ++ const u8 *key, const u8 hfunc) ++#else ++static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, ++ const u8 *key) ++#endif /* HAVE_RXFH_HASHFUNC */ ++#else ++static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) ++#endif /* HAVE_ETHTOOL_GSRSSH */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +@@ -2911,135 +2901,314 @@ + + return 0; + } ++#else ++static int igb_set_rxfh_indir(struct net_device *netdev, ++ const struct ethtool_rxfh_indir *indir) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ size_t i; ++ ++ if (indir->size != ARRAY_SIZE(adapter->rss_indir_tbl)) ++ return -EINVAL; ++ for (i = 0; i < ARRAY_SIZE(adapter->rss_indir_tbl); i++) ++ if (indir->ring_index[i] >= adapter->rss_queues) ++ return -EINVAL; + +-static unsigned int igb_max_channels(struct igb_adapter *adapter) ++ memcpy(adapter->rss_indir_tbl, indir->ring_index, ++ sizeof(adapter->rss_indir_tbl)); ++ igb_write_rss_indir_tbl(adapter); ++ return 0; ++} ++#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ ++#endif /* ETHTOOL_SRXFHINDIR */ ++#ifdef ETHTOOL_GCHANNELS ++ ++static unsigned int igb_max_rss_queues(struct igb_adapter *adapter) + { +- struct e1000_hw *hw = &adapter->hw; +- unsigned int max_combined = 0; ++ unsigned int max_rss_queues; + +- switch (hw->mac.type) { ++ /* Determine the maximum number of RSS queues supported. 
*/ ++ switch (adapter->hw.mac.type) { + case e1000_i211: +- max_combined = IGB_MAX_RX_QUEUES_I211; ++ max_rss_queues = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: +- max_combined = IGB_MAX_RX_QUEUES_82575; ++ max_rss_queues = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: +- if (!!adapter->vfs_allocated_count) { +- max_combined = 1; ++ /* I350 cannot do RSS and SR-IOV at the same time */ ++ if (adapter->vfs_allocated_count) { ++ max_rss_queues = 1; + break; + } + /* fall through */ + case e1000_82576: +- if (!!adapter->vfs_allocated_count) { +- max_combined = 2; ++ if (adapter->vfs_allocated_count) { ++ max_rss_queues = 2; + break; + } + /* fall through */ + case e1000_82580: +- case e1000_i354: + default: +- max_combined = IGB_MAX_RX_QUEUES; ++ max_rss_queues = IGB_MAX_RX_QUEUES; + break; + } + +- return max_combined; ++ return max_rss_queues; + } + +-static void igb_get_channels(struct net_device *netdev, ++static void igb_get_channels(struct net_device *dev, + struct ethtool_channels *ch) + { +- struct igb_adapter *adapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = netdev_priv(dev); + +- /* Report maximum channels */ +- ch->max_combined = igb_max_channels(adapter); ++ /* report maximum channels */ ++ ch->max_combined = igb_max_rss_queues(adapter); ++ ch->max_rx = ch->max_combined; ++ if (adapter->vfs_allocated_count) ++ ch->max_tx = 1; ++ else ++ ch->max_tx = ch->max_combined; + +- /* Report info for other vector */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ /* report info for other vector */ ++ if (adapter->msix_entries) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + +- ch->combined_count = adapter->rss_queues; ++ /* record RSS/TSS queues */ ++ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) { ++ if (adapter->num_rx_queues > adapter->num_tx_queues) { ++ ch->combined_count = adapter->num_tx_queues; ++ ch->rx_count = adapter->num_rx_queues - ++ adapter->num_tx_queues; ++ } else if (adapter->num_rx_queues < adapter->num_tx_queues) { ++ ch->combined_count = adapter->num_rx_queues; ++ ch->tx_count = adapter->num_tx_queues - ++ adapter->num_rx_queues; ++ } else { ++ ch->combined_count = adapter->num_rx_queues; ++ } ++ } else { ++ ch->rx_count = adapter->num_rx_queues; ++ ch->tx_count = adapter->num_tx_queues; ++ } + } ++#endif /* ETHTOOL_GCHANNELS */ ++#ifdef ETHTOOL_SCHANNELS + +-static int igb_set_channels(struct net_device *netdev, +- struct ethtool_channels *ch) ++static int igb_set_channels(struct net_device *dev, ++ struct ethtool_channels *ch) + { +- struct igb_adapter *adapter = netdev_priv(netdev); +- unsigned int count = ch->combined_count; +- unsigned int max_combined = 0; ++ struct igb_adapter *adapter = netdev_priv(dev); ++ unsigned int max_rss_queues; + +- /* Verify they are not requesting separate vectors */ +- if (!count || ch->rx_count || ch->tx_count) ++ /* we cannot support combined, Rx, and Tx vectors simultaneously */ ++ if (ch->combined_count && ch->rx_count && ch->tx_count) + return -EINVAL; + +- /* Verify other_count is valid and has not been changed */ +- if (ch->other_count != NON_Q_VECTORS) ++ /* ignore other_count since it is not changeable */ ++ ++ /* verify we have at least one channel in each direction */ ++ if (!ch->combined_count && (!ch->rx_count || !ch->tx_count)) + return -EINVAL; + +- /* Verify the number of channels doesn't exceed hw limits */ +- max_combined = igb_max_channels(adapter); +- if (count > max_combined) ++ /* verify number of Tx queues does not exceed 1 if SR-IOV is 
enabled */ ++ if (adapter->vfs_allocated_count && ++ ((ch->combined_count + ch->tx_count) > 1)) + return -EINVAL; + +- if (count != adapter->rss_queues) { +- adapter->rss_queues = count; +- igb_set_flag_queue_pairs(adapter, max_combined); ++ /* verify the number of channels does not exceed hardware limits */ ++ max_rss_queues = igb_max_rss_queues(adapter); ++ if (((ch->combined_count + ch->rx_count) > max_rss_queues) || ++ ((ch->combined_count + ch->tx_count) > max_rss_queues)) ++ return -EINVAL; + +- /* Hardware has to reinitialize queues and interrupts to +- * match the new configuration. ++ /* Determine if we need to pair queues. */ ++ switch (adapter->hw.mac.type) { ++ case e1000_82575: ++ case e1000_i211: ++ /* Device supports enough interrupts without queue pairing. */ ++ break; ++ case e1000_i350: ++ /* The PF has 3 interrupts and 1 queue pair w/ SR-IOV */ ++ if (adapter->vfs_allocated_count) ++ break; ++ case e1000_82576: ++ /* ++ * The PF has access to 6 interrupt vectors if the number of ++ * VFs is less than 7. If that is the case we don't have ++ * to pair up the queues. + */ +- return igb_reinit_queues(adapter); ++ if ((adapter->vfs_allocated_count > 0) && ++ (adapter->vfs_allocated_count < 7)) ++ break; ++ /* fall through */ ++ case e1000_82580: ++ case e1000_i210: ++ default: ++ /* verify we can support as many queues as requested */ ++ if ((ch->combined_count + ++ ch->rx_count + ch->tx_count) > MAX_Q_VECTORS) ++ return -EINVAL; ++ break; + } + +- return 0; ++ /* update configuration values */ ++ adapter->rss_queues = ch->combined_count + ch->rx_count; ++ if (ch->rx_count == ch->tx_count || adapter->vfs_allocated_count) ++ adapter->tss_queues = 0; ++ else ++ adapter->tss_queues = ch->combined_count + ch->tx_count; ++ ++ if (ch->combined_count) ++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS; ++ else ++ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; ++ ++ /* update queue configuration for adapter */ ++ return igb_setup_queues(adapter); + } + ++#endif /* ETHTOOL_SCHANNELS */ + static const struct ethtool_ops igb_ethtool_ops = { +- .get_settings = igb_get_settings, +- .set_settings = igb_set_settings, +- .get_drvinfo = igb_get_drvinfo, +- .get_regs_len = igb_get_regs_len, +- .get_regs = igb_get_regs, +- .get_wol = igb_get_wol, +- .set_wol = igb_set_wol, +- .get_msglevel = igb_get_msglevel, +- .set_msglevel = igb_set_msglevel, +- .nway_reset = igb_nway_reset, +- .get_link = igb_get_link, +- .get_eeprom_len = igb_get_eeprom_len, +- .get_eeprom = igb_get_eeprom, +- .set_eeprom = igb_set_eeprom, +- .get_ringparam = igb_get_ringparam, +- .set_ringparam = igb_set_ringparam, +- .get_pauseparam = igb_get_pauseparam, +- .set_pauseparam = igb_set_pauseparam, +- .self_test = igb_diag_test, +- .get_strings = igb_get_strings, +- .set_phys_id = igb_set_phys_id, +- .get_sset_count = igb_get_sset_count, +- .get_ethtool_stats = igb_get_ethtool_stats, +- .get_coalesce = igb_get_coalesce, +- .set_coalesce = igb_set_coalesce, +- .get_ts_info = igb_get_ts_info, +- .get_rxnfc = igb_get_rxnfc, +- .set_rxnfc = igb_set_rxnfc, ++ .get_settings = igb_get_settings, ++ .set_settings = igb_set_settings, ++ .get_drvinfo = igb_get_drvinfo, ++ .get_regs_len = igb_get_regs_len, ++ .get_regs = igb_get_regs, ++ .get_wol = igb_get_wol, ++ .set_wol = igb_set_wol, ++ .get_msglevel = igb_get_msglevel, ++ .set_msglevel = igb_set_msglevel, ++ .nway_reset = igb_nway_reset, ++ .get_link = igb_get_link, ++ .get_eeprom_len = igb_get_eeprom_len, ++ .get_eeprom = igb_get_eeprom, ++ .set_eeprom = igb_set_eeprom, ++ .get_ringparam = 
igb_get_ringparam, ++ .set_ringparam = igb_set_ringparam, ++ .get_pauseparam = igb_get_pauseparam, ++ .set_pauseparam = igb_set_pauseparam, ++ .self_test = igb_diag_test, ++ .get_strings = igb_get_strings, ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID ++ .set_phys_id = igb_set_phys_id, ++#else ++ .phys_id = igb_phys_id, ++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT ++ .get_sset_count = igb_get_sset_count, ++#else ++ .get_stats_count = igb_get_stats_count, ++ .self_test_count = igb_diag_test_count, ++#endif ++ .get_ethtool_stats = igb_get_ethtool_stats, ++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR ++ .get_perm_addr = ethtool_op_get_perm_addr, ++#endif ++ .get_coalesce = igb_get_coalesce, ++ .set_coalesce = igb_set_coalesce, ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef HAVE_ETHTOOL_GET_TS_INFO ++ .get_ts_info = igb_get_ts_info, ++#endif /* HAVE_ETHTOOL_GET_TS_INFO */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifdef CONFIG_PM_RUNTIME ++ .begin = igb_ethtool_begin, ++ .complete = igb_ethtool_complete, ++#endif /* CONFIG_PM_RUNTIME */ ++#ifndef HAVE_NDO_SET_FEATURES ++ .get_rx_csum = igb_get_rx_csum, ++ .set_rx_csum = igb_set_rx_csum, ++ .get_tx_csum = ethtool_op_get_tx_csum, ++ .set_tx_csum = igb_set_tx_csum, ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++#ifdef NETIF_F_TSO ++ .get_tso = ethtool_op_get_tso, ++ .set_tso = igb_set_tso, ++#endif ++#ifdef ETHTOOL_GFLAGS ++ .get_flags = ethtool_op_get_flags, ++ .set_flags = igb_set_flags, ++#endif /* ETHTOOL_GFLAGS */ ++#endif /* HAVE_NDO_SET_FEATURES */ ++#ifdef ETHTOOL_GADV_COAL ++ .get_advcoal = igb_get_adv_coal, ++ .set_advcoal = igb_set_dmac_coal, ++#endif /* ETHTOOL_GADV_COAL */ ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef ETHTOOL_GEEE + .get_eee = igb_get_eee, ++#endif ++#ifdef ETHTOOL_SEEE + .set_eee = igb_set_eee, +- .get_module_info = igb_get_module_info, +- .get_module_eeprom = igb_get_module_eeprom, ++#endif ++#ifdef ETHTOOL_GRXFHINDIR ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE + .get_rxfh_indir_size = igb_get_rxfh_indir_size, ++#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) + .get_rxfh = igb_get_rxfh, ++#else ++ .get_rxfh_indir = igb_get_rxfh_indir, ++#endif /* HAVE_ETHTOOL_GSRSSH */ ++#endif /* ETHTOOL_GRXFHINDIR */ ++#ifdef ETHTOOL_SRXFHINDIR ++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) + .set_rxfh = igb_set_rxfh, +- .get_channels = igb_get_channels, +- .set_channels = igb_set_channels, +- .begin = igb_ethtool_begin, +- .complete = igb_ethtool_complete, ++#else ++ .set_rxfh_indir = igb_set_rxfh_indir, ++#endif /* HAVE_ETHTOOL_GSRSSH */ ++#endif /* ETHTOOL_SRXFHINDIR */ ++#ifdef ETHTOOL_GCHANNELS ++ .get_channels = igb_get_channels, ++#endif /* ETHTOOL_GCHANNELS */ ++#ifdef ETHTOOL_SCHANNELS ++ .set_channels = igb_set_channels, ++#endif /* ETHTOOL_SCHANNELS */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifdef ETHTOOL_GRXFH ++ .get_rxnfc = igb_get_rxnfc, ++ .set_rxnfc = igb_set_rxnfc, ++#endif ++}; ++ ++#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++static const struct ethtool_ops_ext igb_ethtool_ops_ext = { ++ .size = sizeof(struct ethtool_ops_ext), ++ .get_ts_info = igb_get_ts_info, ++ .set_phys_id = igb_set_phys_id, ++ .get_eee = igb_get_eee, ++ .set_eee = igb_set_eee, ++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE ++ .get_rxfh_indir_size = igb_get_rxfh_indir_size, ++#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ ++ 
.get_rxfh_indir = igb_get_rxfh_indir, ++ .set_rxfh_indir = igb_set_rxfh_indir, ++ .get_channels = igb_get_channels, ++ .set_channels = igb_set_channels, + }; + + void igb_set_ethtool_ops(struct net_device *netdev) + { +- netdev->ethtool_ops = &igb_ethtool_ops; ++ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); ++ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext); + } ++#else ++void igb_set_ethtool_ops(struct net_device *netdev) ++{ ++ /* have to "undeclare" const on this struct to remove warnings */ ++#ifndef ETHTOOL_OPS_COMPAT ++ netdev->ethtool_ops = (struct ethtool_ops *)&igb_ethtool_ops; ++#else ++ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops); ++#endif /* SET_ETHTOOL_OPS */ ++} ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#endif /* SIOCETHTOOL */ ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c +--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,30 +1,31 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #include "igb.h" + #include "e1000_82575.h" + #include "e1000_hw.h" +- ++#ifdef IGB_HWMON + #include + #include + #include +@@ -34,28 +35,29 @@ + #include + #include + +-#ifdef CONFIG_IGB_HWMON ++#ifdef HAVE_I2C_SUPPORT + static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), + }; ++#endif /* HAVE_I2C_SUPPORT */ + + /* hwmon callback functions */ + static ssize_t igb_hwmon_show_location(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); + } + + static ssize_t igb_hwmon_show_temp(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + unsigned int value; + + /* reset the temp field */ +@@ -70,11 +72,11 @@ + } + + static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ +@@ -84,11 +86,11 @@ + } + + static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, +- struct device_attribute *attr, +- char *buf) ++ struct device_attribute *attr, ++ char *buf) + { + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, +- dev_attr); ++ dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ +@@ -107,35 +109,34 @@ + * the data structures we need to get the data to display. 
+ */ + static int igb_add_hwmon_attr(struct igb_adapter *adapter, +- unsigned int offset, int type) +-{ ++ unsigned int offset, int type) { + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + +- n_attr = adapter->igb_hwmon_buff->n_hwmon; +- igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; ++ n_attr = adapter->igb_hwmon_buff.n_hwmon; ++ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_label", offset + 1); ++ "temp%u_label", offset); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_input", offset + 1); ++ "temp%u_input", offset); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_max", offset + 1); ++ "temp%u_max", offset); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), +- "temp%u_crit", offset + 1); ++ "temp%u_crit", offset); + break; + default: + rc = -EPERM; +@@ -150,16 +151,30 @@ + igb_attr->dev_attr.attr.mode = S_IRUGO; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); ++ rc = device_create_file(&adapter->pdev->dev, ++ &igb_attr->dev_attr); ++ if (rc == 0) ++ ++adapter->igb_hwmon_buff.n_hwmon; + +- adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; +- +- ++adapter->igb_hwmon_buff->n_hwmon; +- +- return 0; ++ return rc; + } + + static void igb_sysfs_del_adapter(struct igb_adapter *adapter) + { ++ int i; ++ ++ if (adapter == NULL) ++ return; ++ ++ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) { ++ device_remove_file(&adapter->pdev->dev, ++ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr); ++ } ++ ++ kfree(adapter->igb_hwmon_buff.hwmon_list); ++ ++ if (adapter->igb_hwmon_buff.device) ++ hwmon_device_unregister(adapter->igb_hwmon_buff.device); + } + + /* called from igb_main.c */ +@@ -171,11 +186,13 @@ + /* called from igb_main.c */ + int igb_sysfs_init(struct igb_adapter *adapter) + { +- struct hwmon_buff *igb_hwmon; +- struct i2c_client *client; +- struct device *hwmon_dev; ++ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff; + unsigned int i; ++ int n_attrs; + int rc = 0; ++#ifdef HAVE_I2C_SUPPORT ++ struct i2c_client *client = NULL; ++#endif /* HAVE_I2C_SUPPORT */ + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) +@@ -183,16 +200,35 @@ + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); +- if (rc) ++ if (rc) ++ goto exit; ++#ifdef HAVE_I2C_SUPPORT ++ /* init i2c_client */ ++ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); ++ if (client == NULL) { ++ dev_info(&adapter->pdev->dev, ++ "Failed to create new i2c device..\n"); + goto exit; ++ } ++ adapter->i2c_client = client; ++#endif /* HAVE_I2C_SUPPORT */ + +- igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), +- GFP_KERNEL); +- if (!igb_hwmon) { ++ /* Allocation space for max attributes ++ * max num sensors * values (loc, temp, max, caution) ++ */ ++ n_attrs = E1000_MAX_SENSORS * 4; ++ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), ++ GFP_KERNEL); ++ if 
(!igb_hwmon->hwmon_list) { + rc = -ENOMEM; +- goto exit; ++ goto err; ++ } ++ ++ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev); ++ if (IS_ERR(igb_hwmon->device)) { ++ rc = PTR_ERR(igb_hwmon->device); ++ goto err; + } +- adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + +@@ -204,39 +240,11 @@ + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); ++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); ++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); ++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) +- goto exit; +- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); +- if (rc) +- goto exit; +- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); +- if (rc) +- goto exit; +- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); +- if (rc) +- goto exit; +- } +- +- /* init i2c_client */ +- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); +- if (client == NULL) { +- dev_info(&adapter->pdev->dev, +- "Failed to create new i2c device.\n"); +- rc = -ENODEV; +- goto exit; +- } +- adapter->i2c_client = client; +- +- igb_hwmon->groups[0] = &igb_hwmon->group; +- igb_hwmon->group.attrs = igb_hwmon->attrs; +- +- hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, +- client->name, +- igb_hwmon, +- igb_hwmon->groups); +- if (IS_ERR(hwmon_dev)) { +- rc = PTR_ERR(hwmon_dev); +- goto err; ++ goto err; + } + + goto exit; +@@ -246,4 +254,4 @@ + exit: + return rc; + } +-#endif ++#endif /* IGB_HWMON */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +--- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,113 +1,114 @@ +-/* Intel(R) Gigabit Ethernet Linux driver +- * Copyright(c) 2007-2014 Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- * +- * The full GNU General Public License is included in this distribution in +- * the file called "COPYING". +- * +- * Contact Information: +- * e1000-devel Mailing List +- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. + +-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ + + #include + #include + #include +-#include + #include + #include + #include +-#include +-#include ++#include ++#ifdef NETIF_F_TSO + #include ++#ifdef NETIF_F_TSO6 ++#include + #include +-#include ++#endif ++#endif ++#ifdef SIOCGMIIPHY + #include ++#endif ++#ifdef SIOCETHTOOL + #include +-#include ++#endif + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#ifdef CONFIG_PM_RUNTIME + #include +-#ifdef CONFIG_IGB_DCA +-#include +-#endif +-#include ++#endif /* CONFIG_PM_RUNTIME */ ++ ++#include + #include "igb.h" ++#include "igb_vmdq.h" ++ ++#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) \ ++ || defined(DEBUG_ITR) ++#define DRV_DEBUG "_debug" ++#else ++#define DRV_DEBUG ++#endif ++#define DRV_HW_PERF ++#define VERSION_SUFFIX + + #define MAJ 5 +-#define MIN 0 +-#define BUILD 5 +-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ +-__stringify(BUILD) "-k" ++#define MIN 3 ++#define BUILD 5.4 ++#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "."\ ++ __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF ++ + char igb_driver_name[] = "igb"; + char igb_driver_version[] = DRV_VERSION; + static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver"; + static const char igb_copyright[] = +- "Copyright (c) 2007-2014 Intel Corporation."; +- +-static const struct e1000_info *igb_info_tbl[] = { +- [board_82575] = &e1000_82575_info, +-}; ++ "Copyright (c) 2007-2015 Intel Corporation."; + + static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, +- { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_82576), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) }, ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) }, + /* required last entry */ + {0, } + }; +@@ -122,84 +123,114 @@ + static int igb_probe(struct pci_dev *, const struct pci_device_id *); + static void igb_remove(struct pci_dev *pdev); + static int igb_sw_init(struct igb_adapter *); +-static int igb_open(struct net_device *); +-static int igb_close(struct net_device *); + static void igb_configure(struct igb_adapter *); + static void igb_configure_tx(struct igb_adapter *); + static void igb_configure_rx(struct igb_adapter *); + static void igb_clean_all_tx_rings(struct igb_adapter *); + static void igb_clean_all_rx_rings(struct igb_adapter *); + static void igb_clean_tx_ring(struct igb_ring *); +-static void igb_clean_rx_ring(struct igb_ring *); + static void igb_set_rx_mode(struct net_device *); + static void igb_update_phy_info(unsigned long); + static void igb_watchdog(unsigned long); + static void igb_watchdog_task(struct work_struct *); ++static void igb_dma_err_task(struct work_struct *); ++static void igb_dma_err_timer(unsigned long data); + static netdev_tx_t 
igb_xmit_frame(struct sk_buff *skb, struct net_device *); +-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, +- struct rtnl_link_stats64 *stats); ++static struct net_device_stats *igb_get_stats(struct net_device *); + static int igb_change_mtu(struct net_device *, int); ++/* void igb_full_sync_mac_table(struct igb_adapter *adapter); */ + static int igb_set_mac(struct net_device *, void *); + static void igb_set_uta(struct igb_adapter *adapter); + static irqreturn_t igb_intr(int irq, void *); + static irqreturn_t igb_intr_msi(int irq, void *); + static irqreturn_t igb_msix_other(int irq, void *); ++static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8); + static irqreturn_t igb_msix_ring(int irq, void *); +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + static void igb_update_dca(struct igb_q_vector *); + static void igb_setup_dca(struct igb_adapter *); +-#endif /* CONFIG_IGB_DCA */ ++#endif /* IGB_DCA */ + static int igb_poll(struct napi_struct *, int); + static bool igb_clean_tx_irq(struct igb_q_vector *); + static bool igb_clean_rx_irq(struct igb_q_vector *, int); + static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); + static void igb_tx_timeout(struct net_device *); + static void igb_reset_task(struct work_struct *); +-static void igb_vlan_mode(struct net_device *netdev, +- netdev_features_t features); +-static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +-static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); ++#ifdef HAVE_VLAN_RX_REGISTER ++static void igb_vlan_mode(struct net_device *, struct vlan_group *); ++#endif ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++static int igb_vlan_rx_add_vid(struct net_device *, ++ __always_unused __be16 proto, u16); ++static int igb_vlan_rx_kill_vid(struct net_device *, ++ __always_unused __be16 proto, u16); ++#else ++static int igb_vlan_rx_add_vid(struct net_device *, u16); ++static int igb_vlan_rx_kill_vid(struct net_device *, u16); ++#endif ++#else ++static void igb_vlan_rx_add_vid(struct net_device *, u16); ++static void igb_vlan_rx_kill_vid(struct net_device *, u16); ++#endif + static void igb_restore_vlan(struct igb_adapter *); +-static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); + static void igb_ping_all_vfs(struct igb_adapter *); + static void igb_msg_task(struct igb_adapter *); + static void igb_vmm_control(struct igb_adapter *); + static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); + static void igb_restore_vf_multicasts(struct igb_adapter *adapter); ++static void igb_process_mdd_event(struct igb_adapter *); ++#ifdef IFLA_VF_MAX + static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); + static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos); +-static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, +- bool setting); ++ bool setting); ++#endif ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, ++ int min_tx_rate, int tx_rate); ++#else ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); + static void igb_check_vf_rate_limit(struct igb_adapter *); +- +-#ifdef CONFIG_PCI_IOV +-static int igb_vf_configure(struct 
igb_adapter *adapter, int vf); +-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); + #endif +- ++static int igb_vf_configure(struct igb_adapter *adapter, int vf); + #ifdef CONFIG_PM +-#ifdef CONFIG_PM_SLEEP +-static int igb_suspend(struct device *); +-#endif +-static int igb_resume(struct device *); ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS ++static int igb_suspend(struct device *dev); ++static int igb_resume(struct device *dev); + #ifdef CONFIG_PM_RUNTIME + static int igb_runtime_suspend(struct device *dev); + static int igb_runtime_resume(struct device *dev); + static int igb_runtime_idle(struct device *dev); +-#endif ++#endif /* CONFIG_PM_RUNTIME */ + static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) ++#ifdef CONFIG_PM_RUNTIME + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) ++#endif /* CONFIG_PM_RUNTIME */ + }; +-#endif ++#else ++static int igb_suspend(struct pci_dev *pdev, pm_message_t state); ++static int igb_resume(struct pci_dev *pdev); ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ ++#endif /* CONFIG_PM */ ++#ifndef USE_REBOOT_NOTIFIER + static void igb_shutdown(struct pci_dev *); +-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +-#ifdef CONFIG_IGB_DCA ++#else ++static int igb_notify_reboot(struct notifier_block *, unsigned long, void *); ++static struct notifier_block igb_notifier_reboot = { ++ .notifier_call = igb_notify_reboot, ++ .next = NULL, ++ .priority = 0 ++}; ++#endif ++#ifdef IGB_DCA + static int igb_notify_dca(struct notifier_block *, unsigned long, void *); + static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, +@@ -211,462 +242,87 @@ + /* for netdump / net console */ + static void igb_netpoll(struct net_device *); + #endif +-#ifdef CONFIG_PCI_IOV +-static unsigned int max_vfs; +-module_param(max_vfs, uint, 0); +-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +-#endif /* CONFIG_PCI_IOV */ + ++#ifdef HAVE_PCI_ERS + static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); + static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); + static void igb_io_resume(struct pci_dev *); + +-static const struct pci_error_handlers igb_err_handler = { ++static struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, + }; ++#endif + ++static void igb_init_fw(struct igb_adapter *adapter); + static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + + static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, +- .remove = igb_remove, ++ .remove = __devexit_p(igb_remove), + #ifdef CONFIG_PM ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + .driver.pm = &igb_pm_ops, +-#endif ++#else ++ .suspend = igb_suspend, ++ .resume = igb_resume, ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ ++#endif /* CONFIG_PM */ ++#ifndef USE_REBOOT_NOTIFIER + .shutdown = igb_shutdown, +- .sriov_configure = igb_pci_sriov_configure, ++#endif ++#ifdef HAVE_PCI_ERS + .err_handler = &igb_err_handler ++#endif + }; + ++/* u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); */ ++ + MODULE_AUTHOR("Intel Corporation, "); + MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + +-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +-static int debug = -1; +-module_param(debug, int, 0); 
+-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +- +-struct igb_reg_info { +- u32 ofs; +- char *name; +-}; +- +-static const struct igb_reg_info igb_reg_info_tbl[] = { +- +- /* General Registers */ +- {E1000_CTRL, "CTRL"}, +- {E1000_STATUS, "STATUS"}, +- {E1000_CTRL_EXT, "CTRL_EXT"}, +- +- /* Interrupt Registers */ +- {E1000_ICR, "ICR"}, +- +- /* RX Registers */ +- {E1000_RCTL, "RCTL"}, +- {E1000_RDLEN(0), "RDLEN"}, +- {E1000_RDH(0), "RDH"}, +- {E1000_RDT(0), "RDT"}, +- {E1000_RXDCTL(0), "RXDCTL"}, +- {E1000_RDBAL(0), "RDBAL"}, +- {E1000_RDBAH(0), "RDBAH"}, +- +- /* TX Registers */ +- {E1000_TCTL, "TCTL"}, +- {E1000_TDBAL(0), "TDBAL"}, +- {E1000_TDBAH(0), "TDBAH"}, +- {E1000_TDLEN(0), "TDLEN"}, +- {E1000_TDH(0), "TDH"}, +- {E1000_TDT(0), "TDT"}, +- {E1000_TXDCTL(0), "TXDCTL"}, +- {E1000_TDFH, "TDFH"}, +- {E1000_TDFT, "TDFT"}, +- {E1000_TDFHS, "TDFHS"}, +- {E1000_TDFPC, "TDFPC"}, +- +- /* List Terminator */ +- {} +-}; +- +-/* igb_regdump - register printout routine */ +-static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +-{ +- int n = 0; +- char rname[16]; +- u32 regs[8]; +- +- switch (reginfo->ofs) { +- case E1000_RDLEN(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDLEN(n)); +- break; +- case E1000_RDH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDH(n)); +- break; +- case E1000_RDT(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDT(n)); +- break; +- case E1000_RXDCTL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RXDCTL(n)); +- break; +- case E1000_RDBAL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDBAL(n)); +- break; +- case E1000_RDBAH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDBAH(n)); +- break; +- case E1000_TDBAL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_RDBAL(n)); +- break; +- case E1000_TDBAH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDBAH(n)); +- break; +- case E1000_TDLEN(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDLEN(n)); +- break; +- case E1000_TDH(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDH(n)); +- break; +- case E1000_TDT(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TDT(n)); +- break; +- case E1000_TXDCTL(0): +- for (n = 0; n < 4; n++) +- regs[n] = rd32(E1000_TXDCTL(n)); +- break; +- default: +- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); +- return; +- } +- +- snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); +- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], +- regs[2], regs[3]); +-} +- +-/* igb_dump - Print registers, Tx-rings and Rx-rings */ +-static void igb_dump(struct igb_adapter *adapter) +-{ +- struct net_device *netdev = adapter->netdev; +- struct e1000_hw *hw = &adapter->hw; +- struct igb_reg_info *reginfo; +- struct igb_ring *tx_ring; +- union e1000_adv_tx_desc *tx_desc; +- struct my_u0 { u64 a; u64 b; } *u0; +- struct igb_ring *rx_ring; +- union e1000_adv_rx_desc *rx_desc; +- u32 staterr; +- u16 i, n; +- +- if (!netif_msg_hw(adapter)) +- return; +- +- /* Print netdevice Info */ +- if (netdev) { +- dev_info(&adapter->pdev->dev, "Net device Info\n"); +- pr_info("Device Name state trans_start last_rx\n"); +- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, +- netdev->state, netdev->trans_start, netdev->last_rx); +- } +- +- /* Print Registers */ +- dev_info(&adapter->pdev->dev, "Register Dump\n"); +- pr_info(" Register Name Value\n"); +- for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; +- reginfo->name; reginfo++) { +- igb_regdump(hw, reginfo); +- } +- +- /* 
Print TX Ring Summary */ +- if (!netdev || !netif_running(netdev)) +- goto exit; +- +- dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); +- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); +- for (n = 0; n < adapter->num_tx_queues; n++) { +- struct igb_tx_buffer *buffer_info; +- tx_ring = adapter->tx_ring[n]; +- buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; +- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", +- n, tx_ring->next_to_use, tx_ring->next_to_clean, +- (u64)dma_unmap_addr(buffer_info, dma), +- dma_unmap_len(buffer_info, len), +- buffer_info->next_to_watch, +- (u64)buffer_info->time_stamp); +- } +- +- /* Print TX Rings */ +- if (!netif_msg_tx_done(adapter)) +- goto rx_ring_summary; +- +- dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); +- +- /* Transmit Descriptor Formats +- * +- * Advanced Transmit Descriptor +- * +--------------------------------------------------------------+ +- * 0 | Buffer Address [63:0] | +- * +--------------------------------------------------------------+ +- * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | +- * +--------------------------------------------------------------+ +- * 63 46 45 40 39 38 36 35 32 31 24 15 0 +- */ +- +- for (n = 0; n < adapter->num_tx_queues; n++) { +- tx_ring = adapter->tx_ring[n]; +- pr_info("------------------------------------\n"); +- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); +- pr_info("------------------------------------\n"); +- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); +- +- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { +- const char *next_desc; +- struct igb_tx_buffer *buffer_info; +- tx_desc = IGB_TX_DESC(tx_ring, i); +- buffer_info = &tx_ring->tx_buffer_info[i]; +- u0 = (struct my_u0 *)tx_desc; +- if (i == tx_ring->next_to_use && +- i == tx_ring->next_to_clean) +- next_desc = " NTC/U"; +- else if (i == tx_ring->next_to_use) +- next_desc = " NTU"; +- else if (i == tx_ring->next_to_clean) +- next_desc = " NTC"; +- else +- next_desc = ""; +- +- pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", +- i, le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- (u64)dma_unmap_addr(buffer_info, dma), +- dma_unmap_len(buffer_info, len), +- buffer_info->next_to_watch, +- (u64)buffer_info->time_stamp, +- buffer_info->skb, next_desc); +- +- if (netif_msg_pktdata(adapter) && buffer_info->skb) +- print_hex_dump(KERN_INFO, "", +- DUMP_PREFIX_ADDRESS, +- 16, 1, buffer_info->skb->data, +- dma_unmap_len(buffer_info, len), +- true); +- } +- } +- +- /* Print RX Rings Summary */ +-rx_ring_summary: +- dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); +- pr_info("Queue [NTU] [NTC]\n"); +- for (n = 0; n < adapter->num_rx_queues; n++) { +- rx_ring = adapter->rx_ring[n]; +- pr_info(" %5d %5X %5X\n", +- n, rx_ring->next_to_use, rx_ring->next_to_clean); +- } +- +- /* Print RX Rings */ +- if (!netif_msg_rx_status(adapter)) +- goto exit; +- +- dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); +- +- /* Advanced Receive Descriptor (Read) Format +- * 63 1 0 +- * +-----------------------------------------------------+ +- * 0 | Packet Buffer Address [63:1] |A0/NSE| +- * +----------------------------------------------+------+ +- * 8 | Header Buffer Address [63:1] | DD | +- * +-----------------------------------------------------+ +- * +- * +- * Advanced Receive Descriptor (Write-Back) Format +- * +- * 63 48 47 32 31 30 21 20 17 16 4 3 0 +- * +------------------------------------------------------+ +- * 0 | Packet 
IP |SPH| HDR_LEN | RSV|Packet| RSS | +- * | Checksum Ident | | | | Type | Type | +- * +------------------------------------------------------+ +- * 8 | VLAN Tag | Length | Extended Error | Extended Status | +- * +------------------------------------------------------+ +- * 63 48 47 32 31 20 19 0 +- */ +- +- for (n = 0; n < adapter->num_rx_queues; n++) { +- rx_ring = adapter->rx_ring[n]; +- pr_info("------------------------------------\n"); +- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); +- pr_info("------------------------------------\n"); +- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); +- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); +- +- for (i = 0; i < rx_ring->count; i++) { +- const char *next_desc; +- struct igb_rx_buffer *buffer_info; +- buffer_info = &rx_ring->rx_buffer_info[i]; +- rx_desc = IGB_RX_DESC(rx_ring, i); +- u0 = (struct my_u0 *)rx_desc; +- staterr = le32_to_cpu(rx_desc->wb.upper.status_error); +- +- if (i == rx_ring->next_to_use) +- next_desc = " NTU"; +- else if (i == rx_ring->next_to_clean) +- next_desc = " NTC"; +- else +- next_desc = ""; +- +- if (staterr & E1000_RXD_STAT_DD) { +- /* Descriptor Done */ +- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", +- "RWB", i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- next_desc); +- } else { +- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", +- "R ", i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- (u64)buffer_info->dma, +- next_desc); +- +- if (netif_msg_pktdata(adapter) && +- buffer_info->dma && buffer_info->page) { +- print_hex_dump(KERN_INFO, "", +- DUMP_PREFIX_ADDRESS, +- 16, 1, +- page_address(buffer_info->page) + +- buffer_info->page_offset, +- IGB_RX_BUFSZ, true); +- } +- } +- } +- } +- +-exit: +- return; +-} +- +-/** +- * igb_get_i2c_data - Reads the I2C SDA data bit +- * @hw: pointer to hardware structure +- * @i2cctl: Current value of I2CCTL register +- * +- * Returns the I2C data bit value +- **/ +-static int igb_get_i2c_data(void *data) ++static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add) + { +- struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); ++ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie; ++ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; ++ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); ++ u32 vfta; + +- return !!(i2cctl & E1000_I2C_DATA_IN); +-} ++ /* ++ * if this is the management vlan the only option is to add it in so ++ * that the management pass through will continue to work ++ */ ++ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && ++ (vid == mng_cookie->vlan_id)) ++ add = TRUE; + +-/** +- * igb_set_i2c_data - Sets the I2C data bit +- * @data: pointer to hardware structure +- * @state: I2C data value (0 or 1) to set +- * +- * Sets the I2C data bit +- **/ +-static void igb_set_i2c_data(void *data, int state) +-{ +- struct igb_adapter *adapter = (struct igb_adapter *)data; +- struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); ++ vfta = adapter->shadow_vfta[index]; + +- if (state) +- i2cctl |= E1000_I2C_DATA_OUT; ++ if (add) ++ vfta |= mask; + else +- i2cctl &= ~E1000_I2C_DATA_OUT; ++ vfta &= ~mask; + +- i2cctl &= ~E1000_I2C_DATA_OE_N; +- i2cctl |= E1000_I2C_CLK_OE_N; +- wr32(E1000_I2CPARAMS, i2cctl); +- wrfl(); +- +-} +- +-/** +- * igb_set_i2c_clk - Sets the I2C 
SCL clock +- * @data: pointer to hardware structure +- * @state: state to set clock +- * +- * Sets the I2C clock line to state +- **/ +-static void igb_set_i2c_clk(void *data, int state) +-{ +- struct igb_adapter *adapter = (struct igb_adapter *)data; +- struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); +- +- if (state) { +- i2cctl |= E1000_I2C_CLK_OUT; +- i2cctl &= ~E1000_I2C_CLK_OE_N; +- } else { +- i2cctl &= ~E1000_I2C_CLK_OUT; +- i2cctl &= ~E1000_I2C_CLK_OE_N; +- } +- wr32(E1000_I2CPARAMS, i2cctl); +- wrfl(); +-} +- +-/** +- * igb_get_i2c_clk - Gets the I2C SCL clock state +- * @data: pointer to hardware structure +- * +- * Gets the I2C clock state +- **/ +-static int igb_get_i2c_clk(void *data) +-{ +- struct igb_adapter *adapter = (struct igb_adapter *)data; +- struct e1000_hw *hw = &adapter->hw; +- s32 i2cctl = rd32(E1000_I2CPARAMS); +- +- return !!(i2cctl & E1000_I2C_CLK_IN); ++ igb_e1000_write_vfta(hw, index, vfta); ++ adapter->shadow_vfta[index] = vfta; + } + +-static const struct i2c_algo_bit_data igb_i2c_algo = { +- .setsda = igb_set_i2c_data, +- .setscl = igb_set_i2c_clk, +- .getsda = igb_get_i2c_data, +- .getscl = igb_get_i2c_clk, +- .udelay = 5, +- .timeout = 20, +-}; +- +-/** +- * igb_get_hw_dev - return device +- * @hw: pointer to hardware structure +- * +- * used by hardware layer to print debugging information +- **/ +-struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +-{ +- struct igb_adapter *adapter = hw->back; +- return adapter->netdev; +-} ++static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; ++module_param(debug, int, 0); ++MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)"); + + /** +- * igb_init_module - Driver Registration Routine ++ * igb_init_module - Driver Registration Routine + * +- * igb_init_module is the first routine called when the driver is +- * loaded. All it does is register with the PCI subsystem. ++ * igb_init_module is the first routine called when the driver is ++ * loaded. All it does is register with the PCI subsystem. + **/ + static int __init igb_init_module(void) + { +@@ -674,76 +330,89 @@ + + pr_info("%s - version %s\n", + igb_driver_string, igb_driver_version); ++ + pr_info("%s\n", igb_copyright); ++#ifdef IGB_HWMON ++/* only use IGB_PROCFS if IGB_HWMON is not defined */ ++#else ++#ifdef IGB_PROCFS ++ if (igb_procfs_topdir_init()) ++ pr_info("Procfs failed to initialize topdir\n"); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + dca_register_notify(&dca_notifier); + #endif + ret = pci_register_driver(&igb_driver); ++#ifdef USE_REBOOT_NOTIFIER ++ if (ret >= 0) ++ register_reboot_notifier(&igb_notifier_reboot); ++#endif + return ret; + } + + module_init(igb_init_module); + + /** +- * igb_exit_module - Driver Exit Cleanup Routine ++ * igb_exit_module - Driver Exit Cleanup Routine + * +- * igb_exit_module is called just before the driver is removed +- * from memory. ++ * igb_exit_module is called just before the driver is removed ++ * from memory. 
+ **/ + static void __exit igb_exit_module(void) + { +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + dca_unregister_notify(&dca_notifier); + #endif ++#ifdef USE_REBOOT_NOTIFIER ++ unregister_reboot_notifier(&igb_notifier_reboot); ++#endif + pci_unregister_driver(&igb_driver); ++ ++#ifdef IGB_HWMON ++/* only compile IGB_PROCFS if IGB_HWMON is not defined */ ++#else ++#ifdef IGB_PROCFS ++ igb_procfs_topdir_exit(); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + } + + module_exit(igb_exit_module); + + #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) + /** +- * igb_cache_ring_register - Descriptor ring to register mapping +- * @adapter: board private structure to initialize ++ * igb_cache_ring_register - Descriptor ring to register mapping ++ * @adapter: board private structure to initialize + * +- * Once we know the feature-set enabled for the device, we'll cache +- * the register offset the descriptor ring is assigned to. ++ * Once we know the feature-set enabled for the device, we'll cache ++ * the register offset the descriptor ring is assigned to. + **/ + static void igb_cache_ring_register(struct igb_adapter *adapter) + { + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + +- switch (adapter->hw.mac.type) { +- case e1000_82576: ++ if (adapter->hw.mac.type == e1000_82576) { + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. + * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ +- if (adapter->vfs_allocated_count) { ++ if ((adapter->rss_queues > 1) && adapter->vmdq_pools) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + +- Q_IDX_82576(i); ++ Q_IDX_82576(i); + } +- /* Fall through */ +- case e1000_82575: +- case e1000_82580: +- case e1000_i350: +- case e1000_i354: +- case e1000_i210: +- case e1000_i211: +- /* Fall through */ +- default: +- for (; i < adapter->num_rx_queues; i++) +- adapter->rx_ring[i]->reg_idx = rbase_offset + i; +- for (; j < adapter->num_tx_queues; j++) +- adapter->tx_ring[j]->reg_idx = rbase_offset + j; +- break; + } ++ for (; i < adapter->num_rx_queues; i++) ++ adapter->rx_ring[i]->reg_idx = rbase_offset + i; ++ for (; j < adapter->num_tx_queues; j++) ++ adapter->tx_ring[j]->reg_idx = rbase_offset + j; + } + +-u32 igb_rd32(struct e1000_hw *hw, u32 reg) ++u32 e1000_read_reg(struct e1000_hw *hw, u32 reg) + { + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); +@@ -757,6 +426,7 @@ + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; ++ + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); +@@ -765,6 +435,42 @@ + return value; + } + ++static void igb_configure_lli(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u16 port; ++ ++ /* LLI should only be enabled for MSI-X or MSI interrupts */ ++ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI)) ++ return; ++ ++ if (adapter->lli_port) { ++ /* use filter 0 for port */ ++ port = htons((u16)adapter->lli_port); ++ E1000_WRITE_REG(hw, E1000_IMIR(0), ++ (port | E1000_IMIR_PORT_IM_EN)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(0), ++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); ++ } ++ ++ if (adapter->flags & IGB_FLAG_LLI_PUSH) { ++ /* use filter 1 for push flag */ ++ 
E1000_WRITE_REG(hw, E1000_IMIR(1), ++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(1), ++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH)); ++ } ++ ++ if (adapter->lli_size) { ++ /* use filter 2 for size */ ++ E1000_WRITE_REG(hw, E1000_IMIR(2), ++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(2), ++ (adapter->lli_size | E1000_IMIREXT_CTRL_BP)); ++ } ++ ++} ++ + /** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure +@@ -780,7 +486,7 @@ + static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) + { +- u32 ivar = array_rd32(E1000_IVAR0, index); ++ u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); +@@ -788,7 +494,7 @@ + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + +- array_wr32(E1000_IVAR0, index, ivar); ++ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); + } + + #define IGB_N0_QUEUE -1 +@@ -816,13 +522,14 @@ + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; +- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) ++ if (!adapter->msix_entries && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; +- array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: +- /* 82576 uses a table that essentially consists of 2 columns ++ /* ++ * 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. +@@ -842,7 +549,8 @@ + case e1000_i354: + case e1000_i210: + case e1000_i211: +- /* On 82580 and newer adapters the scheme is similar to 82576 ++ /* ++ * On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the +@@ -871,11 +579,10 @@ + } + + /** +- * igb_configure_msix - Configure MSI-X hardware +- * @adapter: board private structure to initialize ++ * igb_configure_msix - Configure MSI-X hardware + * +- * igb_configure_msix sets up the hardware to properly +- * generate MSI-X interrupts. ++ * igb_configure_msix sets up the hardware to properly ++ * generate MSI-X interrupts. + **/ + static void igb_configure_msix(struct igb_adapter *adapter) + { +@@ -888,7 +595,7 @@ + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: +- tmp = rd32(E1000_CTRL_EXT); ++ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + +@@ -896,10 +603,11 @@ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + +- wr32(E1000_CTRL_EXT, tmp); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ +- array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); ++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++, ++ E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; +@@ -913,15 +621,15 @@ + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ +- wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | +- E1000_GPIE_PBA | E1000_GPIE_EIAME | +- E1000_GPIE_NSICR); ++ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | ++ E1000_GPIE_PBA | E1000_GPIE_EIAME | ++ E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = 1 << vector; + tmp = (vector++ | E1000_IVAR_VALID) << 8; + +- wr32(E1000_IVAR_MISC, tmp); ++ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ +@@ -933,24 +641,22 @@ + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + +- wrfl(); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_request_msix - Initialize MSI-X interrupts +- * @adapter: board private structure to initialize ++ * igb_request_msix - Initialize MSI-X interrupts + * +- * igb_request_msix allocates MSI-X vectors and requests interrupts from the +- * kernel. ++ * igb_request_msix allocates MSI-X vectors and requests interrupts from the ++ * kernel. + **/ + static int igb_request_msix(struct igb_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; +- struct e1000_hw *hw = &adapter->hw; + int i, err = 0, vector = 0, free_vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, +- igb_msix_other, 0, netdev->name, adapter); ++ &igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + +@@ -959,7 +665,7 @@ + + vector++; + +- q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); ++ q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, +@@ -997,11 +703,11 @@ + } + + /** +- * igb_free_q_vector - Free memory allocated for specific interrupt vector +- * @adapter: board private structure to initialize +- * @v_idx: Index of vector to be freed ++ * igb_free_q_vector - Free memory allocated for specific interrupt vector ++ * @adapter: board private structure to initialize ++ * @v_idx: Index of vector to be freed + * +- * This function frees the memory allocated to the q_vector. ++ * This function frees the memory allocated to the q_vector. + **/ + static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) + { +@@ -1013,6 +719,10 @@ + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); ++ ++#ifndef IGB_NO_LRO ++ __skb_queue_purge(&q_vector->lrolist.active); ++#endif + } + + /** +@@ -1027,8 +737,8 @@ + { + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + +- /* Coming from igb_set_interrupt_capability, the vectors are not yet +- * allocated. So, q_vector is NULL so we should stop here. 
++ /* if we're coming from igb_set_interrupt_capability, the vectors are ++ * not yet allocated + */ + if (!q_vector) + return; +@@ -1047,22 +757,25 @@ + { + int v_idx = adapter->num_q_vectors; + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) ++ if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); +- else if (adapter->flags & IGB_FLAG_HAS_MSI) ++ kfree(adapter->msix_entries); ++ adapter->msix_entries = NULL; ++ } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); ++ } + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); + } + + /** +- * igb_free_q_vectors - Free memory allocated for interrupt vectors +- * @adapter: board private structure to initialize ++ * igb_free_q_vectors - Free memory allocated for interrupt vectors ++ * @adapter: board private structure to initialize + * +- * This function frees the memory allocated to the q_vectors. In addition if +- * NAPI is enabled it will delete any references to the NAPI struct prior +- * to freeing the q_vector. ++ * This function frees the memory allocated to the q_vectors. In addition if ++ * NAPI is enabled it will delete any references to the NAPI struct prior ++ * to freeing the q_vector. + **/ + static void igb_free_q_vectors(struct igb_adapter *adapter) + { +@@ -1079,11 +792,10 @@ + } + + /** +- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts +- * @adapter: board private structure to initialize ++ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * +- * This function resets the device so that it has 0 Rx queues, Tx queues, and +- * MSI-X interrupts allocated. ++ * This function resets the device so that it has 0 rx queues, tx queues, and ++ * MSI-X interrupts allocated. + */ + static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) + { +@@ -1092,108 +804,306 @@ + } + + /** +- * igb_set_interrupt_capability - set MSI or MSI-X if supported +- * @adapter: board private structure to initialize +- * @msix: boolean value of MSIX capability ++ * igb_process_mdd_event ++ * @adapter - board private structure + * +- * Attempt to configure interrupts using the best available +- * capabilities of the hardware and kernel. +- **/ +-static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) ++ * Identify a malicious VF, disable the VF TX/RX queues and log a message. ++ */ ++static void igb_process_mdd_event(struct igb_adapter *adapter) + { +- int err; +- int numvecs, i; +- +- if (!msix) +- goto msi_only; +- adapter->flags |= IGB_FLAG_HAS_MSIX; +- +- /* Number of supported queues. 
*/ +- adapter->num_rx_queues = adapter->rss_queues; +- if (adapter->vfs_allocated_count) +- adapter->num_tx_queues = 1; +- else +- adapter->num_tx_queues = adapter->rss_queues; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 lvmmc, vfte, vfre, mdfb; ++ u8 vf_queue; + +- /* start with one vector for every Rx queue */ +- numvecs = adapter->num_rx_queues; ++ lvmmc = E1000_READ_REG(hw, E1000_LVMMC); ++ vf_queue = lvmmc >> 29; + +- /* if Tx handler is separate add 1 for every Tx queue */ +- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) +- numvecs += adapter->num_tx_queues; +- +- /* store the number of vectors reserved for queues */ +- adapter->num_q_vectors = numvecs; +- +- /* add 1 vector for link status interrupts */ +- numvecs++; +- for (i = 0; i < numvecs; i++) +- adapter->msix_entries[i].entry = i; +- +- err = pci_enable_msix_range(adapter->pdev, +- adapter->msix_entries, +- numvecs, +- numvecs); +- if (err > 0) ++ /* VF index cannot be bigger or equal to VFs allocated */ ++ if (vf_queue >= adapter->vfs_allocated_count) + return; + +- igb_reset_interrupt_capability(adapter); ++ netdev_info(adapter->netdev, ++ "VF %d misbehaved. VF queues are disabled. VM misbehavior code is 0x%x\n", ++ vf_queue, lvmmc); + +- /* If we can't do MSI-X, try MSI */ +-msi_only: +- adapter->flags &= ~IGB_FLAG_HAS_MSIX; +-#ifdef CONFIG_PCI_IOV +- /* disable SR-IOV for non MSI-X configurations */ +- if (adapter->vf_data) { +- struct e1000_hw *hw = &adapter->hw; +- /* disable iov and allow time for transactions to clear */ +- pci_disable_sriov(adapter->pdev); +- msleep(500); ++ /* Disable VFTE and VFRE related bits */ ++ vfte = E1000_READ_REG(hw, E1000_VFTE); ++ vfte &= ~(1 << vf_queue); ++ E1000_WRITE_REG(hw, E1000_VFTE, vfte); + +- kfree(adapter->vf_data); +- adapter->vf_data = NULL; +- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); +- wrfl(); +- msleep(100); +- dev_info(&adapter->pdev->dev, "IOV Disabled\n"); +- } +-#endif +- adapter->vfs_allocated_count = 0; +- adapter->rss_queues = 1; +- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; +- adapter->num_rx_queues = 1; +- adapter->num_tx_queues = 1; +- adapter->num_q_vectors = 1; +- if (!pci_enable_msi(adapter->pdev)) +- adapter->flags |= IGB_FLAG_HAS_MSI; +-} ++ vfre = E1000_READ_REG(hw, E1000_VFRE); ++ vfre &= ~(1 << vf_queue); ++ E1000_WRITE_REG(hw, E1000_VFRE, vfre); + +-static void igb_add_ring(struct igb_ring *ring, +- struct igb_ring_container *head) +-{ +- head->ring = ring; +- head->count++; ++ /* Disable MDFB related bit. Clear on write */ ++ mdfb = E1000_READ_REG(hw, E1000_MDFB); ++ mdfb |= (1 << vf_queue); ++ E1000_WRITE_REG(hw, E1000_MDFB, mdfb); ++ ++ /* Reset the specific VF */ ++ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST); + } + + /** +- * igb_alloc_q_vector - Allocate memory for a single interrupt vector +- * @adapter: board private structure to initialize +- * @v_count: q_vectors allocated on adapter, used for ring interleaving +- * @v_idx: index of vector in adapter struct +- * @txr_count: total number of Tx rings to allocate +- * @txr_idx: index of first Tx ring to allocate +- * @rxr_count: total number of Rx rings to allocate +- * @rxr_idx: index of first Rx ring to allocate ++ * igb_disable_mdd ++ * @adapter - board private structure + * +- * We allocate one q_vector. If allocation fails we return -ENOMEM. 
++ * Disable MDD behavior in the HW + **/ +-static int igb_alloc_q_vector(struct igb_adapter *adapter, +- int v_count, int v_idx, +- int txr_count, int txr_idx, +- int rxr_count, int rxr_idx) ++static void igb_disable_mdd(struct igb_adapter *adapter) + { +- struct igb_q_vector *q_vector; +- struct igb_ring *ring; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 reg; ++ ++ if ((hw->mac.type != e1000_i350) && ++ (hw->mac.type != e1000_i354)) ++ return; ++ ++ reg = E1000_READ_REG(hw, E1000_DTXCTL); ++ reg &= (~E1000_DTXCTL_MDP_EN); ++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); ++} ++ ++/** ++ * igb_enable_mdd ++ * @adapter - board private structure ++ * ++ * Enable the HW to detect malicious driver and sends an interrupt to ++ * the driver. ++ **/ ++static void igb_enable_mdd(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u32 reg; ++ ++ /* Only available on i350 device */ ++ if (hw->mac.type != e1000_i350) ++ return; ++ ++ reg = E1000_READ_REG(hw, E1000_DTXCTL); ++ reg |= E1000_DTXCTL_MDP_EN; ++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); ++} ++ ++/** ++ * igb_reset_sriov_capability - disable SR-IOV if enabled ++ * ++ * Attempt to disable single root IO virtualization capabilites present in the ++ * kernel. ++ **/ ++static void igb_reset_sriov_capability(struct igb_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ struct e1000_hw *hw = &adapter->hw; ++ ++ /* reclaim resources allocated to VFs */ ++ if (adapter->vf_data) { ++ if (!pci_vfs_assigned(pdev)) { ++ /* ++ * disable iov and allow time for transactions to ++ * clear ++ */ ++ pci_disable_sriov(pdev); ++ msleep(500); ++ ++ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n"); ++ } else { ++ dev_info(pci_dev_to_dev(pdev), ++ "IOV Not Disabled\n VF(s) are assigned to guests!\n"); ++ } ++ /* Disable Malicious Driver Detection */ ++ igb_disable_mdd(adapter); ++ ++ /* free vf data storage */ ++ kfree(adapter->vf_data); ++ adapter->vf_data = NULL; ++ ++ /* switch rings back to PF ownership */ ++ E1000_WRITE_REG(hw, E1000_IOVCTL, ++ E1000_IOVCTL_REUSE_VFQ); ++ E1000_WRITE_FLUSH(hw); ++ msleep(100); ++ } ++ ++ adapter->vfs_allocated_count = 0; ++} ++ ++/** ++ * igb_set_sriov_capability - setup SR-IOV if supported ++ * ++ * Attempt to enable single root IO virtualization capabilites present in the ++ * kernel. 
++ **/ ++static void igb_set_sriov_capability(struct igb_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ int old_vfs = 0; ++ int i; ++ ++ old_vfs = pci_num_vf(pdev); ++ if (old_vfs) { ++ dev_info(pci_dev_to_dev(pdev), ++ "%d pre-allocated VFs found - override max_vfs setting of %d\n", ++ old_vfs, adapter->vfs_allocated_count); ++ adapter->vfs_allocated_count = old_vfs; ++ } ++ /* no VFs requested, do nothing */ ++ if (!adapter->vfs_allocated_count) ++ return; ++ ++ /* allocate vf data storage */ ++ adapter->vf_data = kcalloc(adapter->vfs_allocated_count, ++ sizeof(struct vf_data_storage), ++ GFP_KERNEL); ++ ++ if (adapter->vf_data) { ++ if (!old_vfs) { ++ if (pci_enable_sriov(pdev, ++ adapter->vfs_allocated_count)) ++ goto err_out; ++ dev_warn(pci_dev_to_dev(pdev), ++ "SR-IOV has been enabled: configure port VLANs to keep your VFs secure\n"); ++ } ++ for (i = 0; i < adapter->vfs_allocated_count; i++) ++ igb_vf_configure(adapter, i); ++ ++ switch (adapter->hw.mac.type) { ++ case e1000_82576: ++ case e1000_i350: ++ /* Enable VM to VM loopback by default */ ++ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; ++ break; ++ default: ++ /* Currently no other hardware supports loopback */ ++ break; ++ } ++ ++ /* DMA Coalescing is not supported in IOV mode. */ ++ if (adapter->hw.mac.type >= e1000_i350) ++ adapter->dmac = IGB_DMAC_DISABLE; ++ if (adapter->hw.mac.type < e1000_i350) ++ adapter->flags |= IGB_FLAG_DETECT_BAD_DMA; ++ return; ++ ++ } ++ ++err_out: ++ kfree(adapter->vf_data); ++ adapter->vf_data = NULL; ++ adapter->vfs_allocated_count = 0; ++ dev_warn(pci_dev_to_dev(pdev), ++ "Failed to initialize SR-IOV virtualization\n"); ++} ++ ++/** ++ * igb_set_interrupt_capability - set MSI or MSI-X if supported ++ * ++ * Attempt to configure interrupts using the best available ++ * capabilities of the hardware and kernel. ++ **/ ++static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ int err; ++ int numvecs, i; ++ ++ if (!msix) ++ adapter->int_mode = IGB_INT_MODE_MSI; ++ ++ /* Number of supported queues. */ ++ adapter->num_rx_queues = adapter->rss_queues; ++ ++ if (adapter->vmdq_pools > 1) ++ adapter->num_rx_queues += adapter->vmdq_pools - 1; ++ ++#ifdef HAVE_TX_MQ ++ if (adapter->vmdq_pools) ++ adapter->num_tx_queues = adapter->vmdq_pools; ++ else ++ adapter->num_tx_queues = adapter->num_rx_queues; ++#else ++ adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools); ++#endif ++ ++ switch (adapter->int_mode) { ++ case IGB_INT_MODE_MSIX: ++ /* start with one vector for every Tx/Rx queue */ ++ numvecs = max_t(int, adapter->num_tx_queues, ++ adapter->num_rx_queues); ++ ++ /* if tx handler is separate make it 1 for every queue */ ++ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) ++ numvecs = adapter->num_tx_queues + ++ adapter->num_rx_queues; ++ ++ /* store the number of vectors reserved for queues */ ++ adapter->num_q_vectors = numvecs; ++ ++ /* add 1 vector for link status interrupts */ ++ numvecs++; ++ adapter->msix_entries = kcalloc(numvecs, ++ sizeof(struct msix_entry), ++ GFP_KERNEL); ++ if (adapter->msix_entries) { ++ for (i = 0; i < numvecs; i++) ++ adapter->msix_entries[i].entry = i; ++ ++ err = pci_enable_msix(pdev, ++ adapter->msix_entries, numvecs); ++ if (err == 0) ++ break; ++ } ++ /* MSI-X failed, so fall through and try MSI */ ++ dev_warn(pci_dev_to_dev(pdev), ++ "Failed to initialize MSI-X interrupts. 
Falling back to MSI interrupts.\n"); ++ igb_reset_interrupt_capability(adapter); ++ case IGB_INT_MODE_MSI: ++ if (!pci_enable_msi(pdev)) ++ adapter->flags |= IGB_FLAG_HAS_MSI; ++ else ++ dev_warn(pci_dev_to_dev(pdev), ++ "Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); ++ /* Fall through */ ++ case IGB_INT_MODE_LEGACY: ++ /* disable advanced features and set number of queues to 1 */ ++ igb_reset_sriov_capability(adapter); ++ adapter->vmdq_pools = 0; ++ adapter->rss_queues = 1; ++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS; ++ adapter->num_rx_queues = 1; ++ adapter->num_tx_queues = 1; ++ adapter->num_q_vectors = 1; ++ /* Don't do anything; this is system default */ ++ break; ++ } ++} ++ ++static void igb_add_ring(struct igb_ring *ring, ++ struct igb_ring_container *head) ++{ ++ head->ring = ring; ++ head->count++; ++} ++ ++/** ++ * igb_alloc_q_vector - Allocate memory for a single interrupt vector ++ * @adapter: board private structure to initialize ++ * @v_count: q_vectors allocated on adapter, used for ring interleaving ++ * @v_idx: index of vector in adapter struct ++ * @txr_count: total number of Tx rings to allocate ++ * @txr_idx: index of first Tx ring to allocate ++ * @rxr_count: total number of Rx rings to allocate ++ * @rxr_idx: index of first Rx ring to allocate ++ * ++ * We allocate one q_vector. If allocation fails we return -ENOMEM. ++ **/ ++static int igb_alloc_q_vector(struct igb_adapter *adapter, ++ unsigned int v_count, unsigned int v_idx, ++ unsigned int txr_count, unsigned int txr_idx, ++ unsigned int rxr_count, unsigned int rxr_idx) ++{ ++ struct igb_q_vector *q_vector; ++ struct igb_ring *ring; + int ring_count, size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ +@@ -1206,17 +1116,18 @@ + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; +- if (!q_vector) { +- q_vector = kzalloc(size, GFP_KERNEL); +- } else if (size > ksize(q_vector)) { +- kfree_rcu(q_vector, rcu); ++ if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); +- } else { ++ else + memset(q_vector, 0, size); +- } + if (!q_vector) + return -ENOMEM; + ++#ifndef IGB_NO_LRO ++ /* initialize LRO */ ++ __skb_queue_head_init(&q_vector->lrolist.active); ++ ++#endif + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); +@@ -1229,7 +1140,7 @@ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ +- q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); ++ q_vector->itr_register = adapter->io_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ +@@ -1265,9 +1176,6 @@ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + +- u64_stats_init(&ring->tx_syncp); +- u64_stats_init(&ring->tx_syncp2); +- + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + +@@ -1286,22 +1194,23 @@ + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + ++#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES) ++ /* enable rx checksum */ ++ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); ++ ++#endif + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + +- /* On i350, i354, i210, and i211, loopback VLAN packets +- * have the tag byte-swapped. 
+- */ +- if (adapter->hw.mac.type >= e1000_i350) ++ if ((adapter->hw.mac.type == e1000_i350) || ++ (adapter->hw.mac.type == e1000_i354)) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + +- u64_stats_init(&ring->rx_syncp); +- + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } +@@ -1309,13 +1218,12 @@ + return 0; + } + +- + /** +- * igb_alloc_q_vectors - Allocate memory for interrupt vectors +- * @adapter: board private structure to initialize ++ * igb_alloc_q_vectors - Allocate memory for interrupt vectors ++ * @adapter: board private structure to initialize + * +- * We allocate one q_vector per queue interrupt. If allocation fails we +- * return -ENOMEM. ++ * We allocate one q_vector per queue interrupt. If allocation fails we ++ * return -ENOMEM. + **/ + static int igb_alloc_q_vectors(struct igb_adapter *adapter) + { +@@ -1370,11 +1278,9 @@ + } + + /** +- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors +- * @adapter: board private structure to initialize +- * @msix: boolean value of MSIX capability ++ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * +- * This function initializes the interrupts and allocates all of the queues. ++ * This function initializes the interrupts and allocates all of the queues. + **/ + static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) + { +@@ -1385,7 +1291,7 @@ + + err = igb_alloc_q_vectors(adapter); + if (err) { +- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); ++ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + +@@ -1399,11 +1305,10 @@ + } + + /** +- * igb_request_irq - initialize interrupts +- * @adapter: board private structure to initialize ++ * igb_request_irq - initialize interrupts + * +- * Attempts to configure interrupts using the best available +- * capabilities of the hardware and kernel. ++ * Attempts to configure interrupts using the best available ++ * capabilities of the hardware and kernel. 
+ **/ + static int igb_request_irq(struct igb_adapter *adapter) + { +@@ -1411,7 +1316,7 @@ + struct pci_dev *pdev = adapter->pdev; + int err = 0; + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; +@@ -1420,10 +1325,10 @@ + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); ++ igb_reset_sriov_capability(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; +- + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); +@@ -1432,7 +1337,7 @@ + igb_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGB_FLAG_HAS_MSI) { +- err = request_irq(pdev->irq, igb_intr_msi, 0, ++ err = request_irq(pdev->irq, &igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; +@@ -1442,11 +1347,11 @@ + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + +- err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, ++ err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) +- dev_err(&pdev->dev, "Error %d getting interrupt\n", ++ dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n", + err); + + request_done: +@@ -1455,7 +1360,7 @@ + + static void igb_free_irq(struct igb_adapter *adapter) + { +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); +@@ -1469,64 +1374,76 @@ + } + + /** +- * igb_irq_disable - Mask off interrupt generation on the NIC +- * @adapter: board private structure ++ * igb_irq_disable - Mask off interrupt generation on the NIC ++ * @adapter: board private structure + **/ + static void igb_irq_disable(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + +- /* we need to be careful when disabling interrupts. The VFs are also ++ /* ++ * we need to be careful when disabling interrupts. 
The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { +- u32 regval = rd32(E1000_EIAM); ++ if (adapter->msix_entries) { ++ u32 regval = E1000_READ_REG(hw, E1000_EIAM); + +- wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); +- wr32(E1000_EIMC, adapter->eims_enable_mask); +- regval = rd32(E1000_EIAC); +- wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); +- } ++ E1000_WRITE_REG(hw, E1000_EIAM, regval ++ & ~adapter->eims_enable_mask); ++ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask); ++ regval = E1000_READ_REG(hw, E1000_EIAC); ++ E1000_WRITE_REG(hw, E1000_EIAC, regval ++ & ~adapter->eims_enable_mask); ++ } ++ ++ E1000_WRITE_REG(hw, E1000_IAM, 0); ++ E1000_WRITE_REG(hw, E1000_IMC, ~0); ++ E1000_WRITE_FLUSH(hw); + +- wr32(E1000_IAM, 0); +- wr32(E1000_IMC, ~0); +- wrfl(); +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { +- int i; ++ if (adapter->msix_entries) { ++ int vector = 0, i; ++ ++ synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) +- synchronize_irq(adapter->msix_entries[i].vector); ++ synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } + } + + /** +- * igb_irq_enable - Enable default interrupt generation settings +- * @adapter: board private structure ++ * igb_irq_enable - Enable default interrupt generation settings ++ * @adapter: board private structure + **/ + static void igb_irq_enable(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ if (adapter->msix_entries) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; +- u32 regval = rd32(E1000_EIAC); ++ u32 regval = E1000_READ_REG(hw, E1000_EIAC); + +- wr32(E1000_EIAC, regval | adapter->eims_enable_mask); +- regval = rd32(E1000_EIAM); +- wr32(E1000_EIAM, regval | adapter->eims_enable_mask); +- wr32(E1000_EIMS, adapter->eims_enable_mask); ++ E1000_WRITE_REG(hw, E1000_EIAC, regval ++ | adapter->eims_enable_mask); ++ regval = E1000_READ_REG(hw, E1000_EIAM); ++ E1000_WRITE_REG(hw, E1000_EIAM, regval ++ | adapter->eims_enable_mask); ++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { +- wr32(E1000_MBVFIMR, 0xFF); ++ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; ++ if (adapter->mdd) ++ if ((adapter->hw.mac.type == e1000_i350) || ++ (adapter->hw.mac.type == e1000_i354)) ++ ims |= E1000_IMS_MDDET; + } +- wr32(E1000_IMS, ims); ++ E1000_WRITE_REG(hw, E1000_IMS, ims); + } else { +- wr32(E1000_IMS, IMS_ENABLE_MASK | ++ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); +- wr32(E1000_IAM, IMS_ENABLE_MASK | ++ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } + } +@@ -1539,7 +1456,7 @@ + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ +- igb_vfta_set(hw, vid, true); ++ igb_vfta_set(adapter, vid, TRUE); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; +@@ -1547,19 +1464,24 @@ + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && ++#ifdef HAVE_VLAN_RX_REGISTER ++ !vlan_group_get_device(adapter->vlgrp, old_vid)) { ++#else + !test_bit(old_vid, adapter->active_vlans)) { ++#endif + /* remove VID from filter table */ +- igb_vfta_set(hw, old_vid, false); ++ igb_vfta_set(adapter, old_vid, 
FALSE); + } + } + + /** +- * igb_release_hw_control - release control of the h/w to f/w +- * @adapter: address of board private structure ++ * igb_release_hw_control - release control of the h/w to f/w ++ * @adapter: address of board private structure ++ * ++ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. ++ * For ASF and Pass Through versions of f/w this means that the ++ * driver is no longer loaded. + * +- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. +- * For ASF and Pass Through versions of f/w this means that the +- * driver is no longer loaded. + **/ + static void igb_release_hw_control(struct igb_adapter *adapter) + { +@@ -1567,18 +1489,19 @@ + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- wr32(E1000_CTRL_EXT, ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } + + /** +- * igb_get_hw_control - get control of the h/w from f/w +- * @adapter: address of board private structure ++ * igb_get_hw_control - get control of the h/w from f/w ++ * @adapter: address of board private structure ++ * ++ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. ++ * For ASF and Pass Through versions of f/w this means that ++ * the driver is loaded. + * +- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. +- * For ASF and Pass Through versions of f/w this means that +- * the driver is loaded. + **/ + static void igb_get_hw_control(struct igb_adapter *adapter) + { +@@ -1586,14 +1509,14 @@ + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ +- ctrl_ext = rd32(E1000_CTRL_EXT); +- wr32(E1000_CTRL_EXT, ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } + + /** +- * igb_configure - configure the hardware for RX and TX +- * @adapter: private board structure ++ * igb_configure - configure the hardware for RX and TX ++ * @adapter: private board structure + **/ + static void igb_configure(struct igb_adapter *adapter) + { +@@ -1612,7 +1535,13 @@ + igb_configure_tx(adapter); + igb_configure_rx(adapter); + +- igb_rx_fifo_flush_82575(&adapter->hw); ++ e1000_rx_fifo_flush_82575(&adapter->hw); ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++ if (adapter->num_tx_queues > 1) ++ netdev->features |= NETIF_F_MULTI_QUEUE; ++ else ++ netdev->features &= ~NETIF_F_MULTI_QUEUE; ++#endif + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure +@@ -1625,45 +1554,42 @@ + } + + /** +- * igb_power_up_link - Power up the phy/serdes link +- * @adapter: address of board private structure ++ * igb_power_up_link - Power up the phy/serdes link ++ * @adapter: address of board private structure + **/ + void igb_power_up_link(struct igb_adapter *adapter) + { +- igb_reset_phy(&adapter->hw); ++ igb_e1000_phy_hw_reset(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) +- igb_power_up_phy_copper(&adapter->hw); ++ igb_e1000_power_up_phy(&adapter->hw); + else +- igb_power_up_serdes_link_82575(&adapter->hw); +- +- igb_setup_link(&adapter->hw); ++ e1000_power_up_fiber_serdes_link(&adapter->hw); + } + + /** +- * igb_power_down_link - Power down the phy/serdes link +- * @adapter: address of board private structure ++ * igb_power_down_link - Power down the phy/serdes link ++ * @adapter: address of board private structure + */ + static void igb_power_down_link(struct igb_adapter *adapter) + { + if (adapter->hw.phy.media_type == e1000_media_type_copper) +- 
igb_power_down_phy_copper_82575(&adapter->hw); ++ e1000_power_down_phy(&adapter->hw); + else +- igb_shutdown_serdes_link_82575(&adapter->hw); ++ e1000_shutdown_fiber_serdes_link(&adapter->hw); + } + +-/** +- * Detect and switch function for Media Auto Sense +- * @adapter: address of the board private structure +- **/ ++/* Detect and switch function for Media Auto Sense */ + static void igb_check_swap_media(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; ++ bool link; + +- ctrl_ext = rd32(E1000_CTRL_EXT); +- connsw = rd32(E1000_CONNSW); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ connsw = E1000_READ_REG(hw, E1000_CONNSW); ++ link = igb_has_link(adapter); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. +@@ -1674,10 +1600,10 @@ + swap_now = true; + } else if (!(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ +- if (adapter->copper_tries < 4) { ++ if (adapter->copper_tries < 3) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; +- wr32(E1000_CONNSW, connsw); ++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; +@@ -1685,143 +1611,263 @@ + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; +- wr32(E1000_CONNSW, connsw); ++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); + } + } + } + +- if (!swap_now) +- return; +- +- switch (hw->phy.media_type) { +- case e1000_media_type_copper: +- netdev_info(adapter->netdev, +- "MAS: changing media to fiber/serdes\n"); +- ctrl_ext |= +- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; +- adapter->flags |= IGB_FLAG_MEDIA_RESET; +- adapter->copper_tries = 0; +- break; +- case e1000_media_type_internal_serdes: +- case e1000_media_type_fiber: +- netdev_info(adapter->netdev, +- "MAS: changing media to copper\n"); +- ctrl_ext &= +- ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; +- adapter->flags |= IGB_FLAG_MEDIA_RESET; +- break; +- default: +- /* shouldn't get here during regular operation */ +- netdev_err(adapter->netdev, +- "AMS: Invalid media type found, returning\n"); +- break; ++ if (swap_now) { ++ switch (hw->phy.media_type) { ++ case e1000_media_type_copper: ++ dev_info(pci_dev_to_dev(adapter->pdev), ++ "%s:MAS: changing media to fiber/serdes\n", ++ adapter->netdev->name); ++ ctrl_ext |= ++ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ++ adapter->flags |= IGB_FLAG_MEDIA_RESET; ++ adapter->copper_tries = 0; ++ break; ++ case e1000_media_type_internal_serdes: ++ case e1000_media_type_fiber: ++ dev_info(pci_dev_to_dev(adapter->pdev), ++ "%s:MAS: changing media to copper\n", ++ adapter->netdev->name); ++ ctrl_ext &= ++ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ++ adapter->flags |= IGB_FLAG_MEDIA_RESET; ++ break; ++ default: ++ /* shouldn't get here during regular operation */ ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "%s:AMS: Invalid media type found, returning\n", ++ adapter->netdev->name); ++ break; ++ } ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } +- wr32(E1000_CTRL_EXT, ctrl_ext); + } + +-/** +- * igb_up - Open the interface and prepare it to handle traffic +- * @adapter: board private structure +- **/ +-int igb_up(struct igb_adapter *adapter) ++#ifdef HAVE_I2C_SUPPORT ++/* igb_get_i2c_data - Reads the I2C SDA data bit ++ * @hw: pointer to hardware structure ++ * @i2cctl: Current value of I2CCTL register ++ * ++ * Returns the I2C data bit value ++ */ ++static int igb_get_i2c_data(void *data) + { ++ struct igb_adapter 
*adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; +- int i; +- +- /* hardware has been reset, we need to reload some things */ +- igb_configure(adapter); ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- clear_bit(__IGB_DOWN, &adapter->state); ++ return !!(i2cctl & E1000_I2C_DATA_IN); ++} + +- for (i = 0; i < adapter->num_q_vectors; i++) +- napi_enable(&(adapter->q_vector[i]->napi)); ++/* igb_set_i2c_data - Sets the I2C data bit ++ * @data: pointer to hardware structure ++ * @state: I2C data value (0 or 1) to set ++ * ++ * Sets the I2C data bit ++ */ ++static void igb_set_i2c_data(void *data, int state) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ struct e1000_hw *hw = &adapter->hw; ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- if (adapter->flags & IGB_FLAG_HAS_MSIX) +- igb_configure_msix(adapter); ++ if (state) ++ i2cctl |= E1000_I2C_DATA_OUT; + else +- igb_assign_vector(adapter->q_vector[0], 0); +- +- /* Clear any pending interrupts. */ +- rd32(E1000_ICR); +- igb_irq_enable(adapter); +- +- /* notify VFs that reset has been completed */ +- if (adapter->vfs_allocated_count) { +- u32 reg_data = rd32(E1000_CTRL_EXT); ++ i2cctl &= ~E1000_I2C_DATA_OUT; + +- reg_data |= E1000_CTRL_EXT_PFRSTD; +- wr32(E1000_CTRL_EXT, reg_data); +- } ++ i2cctl &= ~E1000_I2C_DATA_OE_N; ++ i2cctl |= E1000_I2C_CLK_OE_N; + +- netif_tx_start_all_queues(adapter->netdev); ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); ++ E1000_WRITE_FLUSH(hw); + +- /* start the watchdog. */ +- hw->mac.get_link_status = 1; +- schedule_work(&adapter->watchdog_task); ++} + +- if ((adapter->flags & IGB_FLAG_EEE) && +- (!hw->dev_spec._82575.eee_disable)) +- adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; ++/* igb_set_i2c_clk - Sets the I2C SCL clock ++ * @data: pointer to hardware structure ++ * @state: state to set clock ++ * ++ * Sets the I2C clock line to state ++ */ ++static void igb_set_i2c_clk(void *data, int state) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ struct e1000_hw *hw = &adapter->hw; ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- return 0; ++ if (state) { ++ i2cctl |= E1000_I2C_CLK_OUT; ++ i2cctl &= ~E1000_I2C_CLK_OE_N; ++ } else { ++ i2cctl &= ~E1000_I2C_CLK_OUT; ++ i2cctl &= ~E1000_I2C_CLK_OE_N; ++ } ++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); ++ E1000_WRITE_FLUSH(hw); + } + +-void igb_down(struct igb_adapter *adapter) ++/* igb_get_i2c_clk - Gets the I2C SCL clock state ++ * @data: pointer to hardware structure ++ * ++ * Gets the I2C clock state ++ */ ++static int igb_get_i2c_clk(void *data) + { +- struct net_device *netdev = adapter->netdev; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; +- u32 tctl, rctl; +- int i; ++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + +- /* signal that we're down so the interrupt handler does not +- * reschedule our watchdog timer +- */ +- set_bit(__IGB_DOWN, &adapter->state); ++ return !!(i2cctl & E1000_I2C_CLK_IN); ++} ++ ++static const struct i2c_algo_bit_data igb_i2c_algo = { ++ .setsda = igb_set_i2c_data, ++ .setscl = igb_set_i2c_clk, ++ .getsda = igb_get_i2c_data, ++ .getscl = igb_get_i2c_clk, ++ .udelay = 5, ++ .timeout = 20, ++}; ++ ++/* igb_init_i2c - Init I2C interface ++ * @adapter: pointer to adapter structure ++ * ++ */ ++static s32 igb_init_i2c(struct igb_adapter *adapter) ++{ ++ s32 status = E1000_SUCCESS; ++ ++ /* I2C interface supported on i350 devices */ ++ if (adapter->hw.mac.type != e1000_i350) ++ 
return E1000_SUCCESS; ++ ++ /* Initialize the i2c bus which is controlled by the registers. ++ * This bus will use the i2c_algo_bit structue that implements ++ * the protocol through toggling of the 4 bits in the register. ++ */ ++ adapter->i2c_adap.owner = THIS_MODULE; ++ adapter->i2c_algo = igb_i2c_algo; ++ adapter->i2c_algo.data = adapter; ++ adapter->i2c_adap.algo_data = &adapter->i2c_algo; ++ adapter->i2c_adap.dev.parent = &adapter->pdev->dev; ++ strlcpy(adapter->i2c_adap.name, "igb BB", ++ sizeof(adapter->i2c_adap.name)); ++ status = i2c_bit_add_bus(&adapter->i2c_adap); ++ return status; ++} ++ ++#endif /* HAVE_I2C_SUPPORT */ ++/** ++ * igb_up - Open the interface and prepare it to handle traffic ++ * @adapter: board private structure ++ **/ ++int igb_up(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ int i; ++ ++ /* hardware has been reset, we need to reload some things */ ++ igb_configure(adapter); ++ ++ clear_bit(__IGB_DOWN, &adapter->state); ++ ++ for (i = 0; i < adapter->num_q_vectors; i++) ++ napi_enable(&(adapter->q_vector[i]->napi)); ++ ++ if (adapter->msix_entries) ++ igb_configure_msix(adapter); ++ else ++ igb_assign_vector(adapter->q_vector[0], 0); ++ ++ igb_configure_lli(adapter); ++ ++ /* Clear any pending interrupts. */ ++ E1000_READ_REG(hw, E1000_ICR); ++ igb_irq_enable(adapter); ++ ++ /* notify VFs that reset has been completed */ ++ if (adapter->vfs_allocated_count) { ++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ ++ reg_data |= E1000_CTRL_EXT_PFRSTD; ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); ++ } ++ ++ netif_tx_start_all_queues(adapter->netdev); ++ ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ schedule_work(&adapter->dma_err_task); ++ /* start the watchdog. */ ++ hw->mac.get_link_status = 1; ++ schedule_work(&adapter->watchdog_task); ++ ++ if ((adapter->flags & IGB_FLAG_EEE) && ++ (!hw->dev_spec._82575.eee_disable)) ++ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; ++ ++ return 0; ++} ++ ++void igb_down(struct igb_adapter *adapter) ++{ ++ struct net_device *netdev = adapter->netdev; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 tctl, rctl; ++ int i; ++ ++ /* signal that we're down so the interrupt handler does not ++ * reschedule our watchdog timer ++ */ ++ set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ +- rctl = rd32(E1000_RCTL); +- wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + ++ netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ +- tctl = rd32(E1000_TCTL); ++ tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_EN; +- wr32(E1000_TCTL, tctl); ++ E1000_WRITE_REG(hw, E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ +- wrfl(); +- usleep_range(10000, 11000); ++ E1000_WRITE_FLUSH(hw); ++ usleep_range(10000, 20000); ++ ++ for (i = 0; i < adapter->num_q_vectors; i++) ++ napi_disable(&(adapter->q_vector[i]->napi)); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + +- for (i = 0; i < adapter->num_q_vectors; i++) { +- napi_synchronize(&(adapter->q_vector[i]->napi)); +- napi_disable(&(adapter->q_vector[i]->napi)); +- } +- +- + del_timer_sync(&adapter->watchdog_timer); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ del_timer_sync(&adapter->dma_err_timer); + del_timer_sync(&adapter->phy_info_timer); + +- netif_carrier_off(netdev); +- + 
/* record the stats before reset*/ +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, &adapter->stats64); +- spin_unlock(&adapter->stats64_lock); ++ igb_update_stats(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + ++#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); ++#else ++ igb_reset(adapter); ++#endif + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +-#ifdef CONFIG_IGB_DCA +- ++#ifdef IGB_DCA + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); + #endif +@@ -1837,35 +1883,26 @@ + clear_bit(__IGB_RESETTING, &adapter->state); + } + +-/** igb_enable_mas - Media Autosense re-enable after swap ++/** ++ * igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +-static s32 igb_enable_mas(struct igb_adapter *adapter) ++void igb_enable_mas(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 connsw; +- s32 ret_val = 0; + +- connsw = rd32(E1000_CONNSW); +- if (!(hw->phy.media_type == e1000_media_type_copper)) +- return ret_val; ++ connsw = E1000_READ_REG(hw, E1000_CONNSW); + + /* configure for SerDes media detect */ +- if (!(connsw & E1000_CONNSW_SERDESD)) { ++ if ((hw->phy.media_type == e1000_media_type_copper) && ++ (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; +- wr32(E1000_CONNSW, connsw); +- wrfl(); +- } else if (connsw & E1000_CONNSW_SERDESD) { +- /* already SerDes, no need to enable anything */ +- return ret_val; +- } else { +- netdev_info(adapter->netdev, +- "MAS: Unable to configure feature, disabling..\n"); +- adapter->flags &= ~IGB_FLAG_MAS_ENABLE; ++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); ++ E1000_WRITE_FLUSH(hw); + } +- return ret_val; + } + + void igb_reset(struct igb_adapter *adapter) +@@ -1881,13 +1918,13 @@ + */ + switch (mac->type) { + case e1000_i350: +- case e1000_i354: + case e1000_82580: +- pba = rd32(E1000_RXPBS); +- pba = igb_rxpbs_adjust_82580(pba); ++ case e1000_i354: ++ pba = E1000_READ_REG(hw, E1000_RXPBS); ++ pba = e1000_rxpbs_adjust_82580(pba); + break; + case e1000_82576: +- pba = rd32(E1000_RXPBS); ++ pba = E1000_READ_REG(hw, E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: +@@ -1901,7 +1938,7 @@ + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && + (mac->type < e1000_82576)) { + /* adjust PBA for jumbo frames */ +- wr32(E1000_PBA, pba); ++ E1000_WRITE_REG(hw, E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, +@@ -1910,12 +1947,12 @@ + * one full receive packet and is similarly rounded up and + * expressed in KB. 
+ */ +- pba = rd32(E1000_PBA); ++ pba = E1000_READ_REG(hw, E1000_PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; +- /* the Tx fifo also stores 16 bytes of information about the Tx ++ /* the tx fifo also stores 16 bytes of information about the tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + +@@ -1936,13 +1973,13 @@ + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + +- /* if short on Rx space, Rx wins and must trump Tx ++ /* if short on rx space, rx wins and must trump tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } +- wr32(E1000_PBA, pba); ++ E1000_WRITE_REG(hw, E1000_PBA, pba); + } + + /* flow control settings */ +@@ -1965,6 +2002,10 @@ + if (adapter->vfs_allocated_count) { + int i; + ++ /* ++ * Clear all flags except indication that the PF has set ++ * the VF MAC addresses administratively ++ */ + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + +@@ -1972,116 +2013,334 @@ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ +- wr32(E1000_VFRE, 0); +- wr32(E1000_VFTE, 0); ++ E1000_WRITE_REG(hw, E1000_VFRE, 0); ++ E1000_WRITE_REG(hw, E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ +- hw->mac.ops.reset_hw(hw); +- wr32(E1000_WUC, 0); ++ igb_e1000_reset_hw(hw); ++ E1000_WRITE_REG(hw, E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { +- /* need to resetup here after media swap */ +- adapter->ei.get_invariants(hw); ++ e1000_setup_init_funcs(hw, TRUE); ++ igb_check_options(adapter); ++ igb_e1000_get_bus_info(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } +- if (adapter->flags & IGB_FLAG_MAS_ENABLE) { +- if (igb_enable_mas(adapter)) +- dev_err(&pdev->dev, +- "Error enabling Media Auto Sense\n"); ++ if ((mac->type == e1000_82575) && ++ (adapter->flags & IGB_FLAG_MAS_ENABLE)) { ++ igb_enable_mas(adapter); + } +- if (hw->mac.ops.init_hw(hw)) +- dev_err(&pdev->dev, "Hardware Error\n"); ++ if (igb_e1000_init_hw(hw)) ++ dev_err(pci_dev_to_dev(pdev), "Hardware Error\n"); + +- /* Flow control settings reset on hardware reset, so guarantee flow ++ /* ++ * Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) +- igb_force_mac_fc(hw); ++ igb_e1000_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +-#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ +- if (!test_bit(__IGB_DOWN, &adapter->state)) { +- if (mac->type == e1000_i350 && hw->bus.func == 0) { +- /* If present, re-initialize the external thermal sensor +- * interface. +- */ +- if (adapter->ets) +- mac->ops.init_thermal_sensor_thresh(hw); +- } ++ if (mac->type == e1000_i350 && hw->bus.func == 0) { ++ /* ++ * If present, re-initialize the external thermal sensor ++ * interface. 
++ */ ++ if (adapter->ets) ++ e1000_set_i2c_bb(hw); ++ e1000_init_thermal_sensor_thresh(hw); + } +-#endif +- /* Re-establish EEE setting */ ++ ++ /*Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: +- igb_set_eee_i350(hw); ++ e1000_set_eee_i350(hw, true, true); + break; + case e1000_i354: +- igb_set_eee_i354(hw); ++ e1000_set_eee_i354(hw, true, true); + break; + default: + break; + } + } ++ + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ +- wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); ++ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + ++#ifdef HAVE_PTP_1588_CLOCK + /* Re-enable PTP, where applicable. */ + igb_ptp_reset(adapter); ++#endif /* HAVE_PTP_1588_CLOCK */ + +- igb_get_phy_info(hw); ++ e1000_get_phy_info(hw); ++ ++ adapter->devrc++; + } + ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++static u32 igb_fix_features(struct net_device *netdev, ++ u32 features) ++#else + static netdev_features_t igb_fix_features(struct net_device *netdev, +- netdev_features_t features) ++ netdev_features_t features) ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + { +- /* Since there is no support for separate Rx/Tx vlan accel +- * enable/disable make sure Tx flag is always in same state as Rx. ++ /* ++ * Since there is no support for separate tx vlan accel ++ * enabled make sure tx flag is cleared if rx is. + */ +- if (features & NETIF_F_HW_VLAN_CTAG_RX) +- features |= NETIF_F_HW_VLAN_CTAG_TX; +- else ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; ++#else ++ if (!(features & NETIF_F_HW_VLAN_RX)) ++ features &= ~NETIF_F_HW_VLAN_TX; ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++ ++#ifndef IGB_NO_LRO ++ /* If Rx checksum is disabled, then LRO should also be disabled */ ++ if (!(features & NETIF_F_RXCSUM)) ++ features &= ~NETIF_F_LRO; + ++#endif + return features; + } + + static int igb_set_features(struct net_device *netdev, +- netdev_features_t features) ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ u32 features) ++#else ++ netdev_features_t features) ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + { + netdev_features_t changed = netdev->features ^ features; ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + struct igb_adapter *adapter = netdev_priv(netdev); ++#endif + ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + if (changed & NETIF_F_HW_VLAN_CTAG_RX) ++#else ++ if (changed & NETIF_F_HW_VLAN_RX) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++ netdev->features = features; ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_vlan_mode(netdev, adapter->vlgrp); ++#else + igb_vlan_mode(netdev, features); ++#endif + +- if (!(changed & NETIF_F_RXALL)) ++ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + netdev->features = features; + +- if (netif_running(netdev)) +- igb_reinit_locked(adapter); +- else +- igb_reset(adapter); ++ return 0; ++} ++#endif /* HAVE_NDO_SET_FEATURES */ ++ ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr, ++#ifdef HAVE_NDO_FDB_ADD_VID ++ u16 vid, ++#endif /* HAVE_NDO_FDB_ADD_VID */ ++ u16 flags) ++#else /* USE_CONST_DEV_UC_CHAR */ ++static int igb_ndo_fdb_add(struct ndmsg *ndm, ++ struct net_device *dev, ++ unsigned char *addr, ++ u16 flags) ++#endif /* 
USE_CONST_DEV_UC_CHAR */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ struct e1000_hw *hw = &adapter->hw; ++ int err; ++ ++ if (!(adapter->vfs_allocated_count)) ++ return -EOPNOTSUPP; ++ ++ /* Hardware does not support aging addresses so if a ++ * ndm_state is given only allow permanent addresses ++ */ ++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", ++ igb_driver_name); ++ return -EINVAL; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { ++ u32 rar_uc_entries = hw->mac.rar_entry_count - ++ (adapter->vfs_allocated_count + 1); ++ ++ if (netdev_uc_count(dev) < rar_uc_entries) ++ err = dev_uc_add_excl(dev, addr); ++ else ++ err = -ENOMEM; ++ } else if (is_multicast_ether_addr(addr)) { ++ err = dev_mc_add_excl(dev, addr); ++ } else { ++ err = -EINVAL; ++ } ++ ++ /* Only return duplicate errors if NLM_F_EXCL is set */ ++ if (err == -EEXIST && !(flags & NLM_F_EXCL)) ++ err = 0; ++ ++ return err; ++} ++ ++#ifndef USE_DEFAULT_FDB_DEL_DUMP ++#ifdef USE_CONST_DEV_UC_CHAR ++static int igb_ndo_fdb_del(struct ndmsg *ndm, ++ struct net_device *dev, ++ const unsigned char *addr) ++#else ++static int igb_ndo_fdb_del(struct ndmsg *ndm, ++ struct net_device *dev, ++ unsigned char *addr) ++#endif /* USE_CONST_DEV_UC_CHAR */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ int err = -EOPNOTSUPP; ++ ++ if (ndm->ndm_state & NUD_PERMANENT) { ++ pr_info("%s: FDB only supports static addresses\n", ++ igb_driver_name); ++ return -EINVAL; ++ } ++ ++ if (adapter->vfs_allocated_count) { ++ if (is_unicast_ether_addr(addr)) ++ err = dev_uc_del(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_del(dev, addr); ++ else ++ err = -EINVAL; ++ } ++ ++ return err; ++} ++ ++static int igb_ndo_fdb_dump(struct sk_buff *skb, ++ struct netlink_callback *cb, ++ struct net_device *dev, ++ int idx) ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ ++ if (adapter->vfs_allocated_count) ++ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); ++ ++ return idx; ++} ++#endif /* USE_DEFAULT_FDB_DEL_DUMP */ ++#ifdef HAVE_BRIDGE_ATTRIBS ++#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++static int igb_ndo_bridge_setlink(struct net_device *dev, ++ struct nlmsghdr *nlh, ++ u16 flags) ++#else ++static int igb_ndo_bridge_setlink(struct net_device *dev, ++ struct nlmsghdr *nlh) ++#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ struct e1000_hw *hw = &adapter->hw; ++ struct nlattr *attr, *br_spec; ++ int rem; ++ ++ if (!(adapter->vfs_allocated_count)) ++ return -EOPNOTSUPP; ++ ++ switch (adapter->hw.mac.type) { ++ case e1000_82576: ++ case e1000_i350: ++ case e1000_i354: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); ++ ++ nla_for_each_nested(attr, br_spec, rem) { ++ __u16 mode; ++ ++ if (nla_type(attr) != IFLA_BRIDGE_MODE) ++ continue; ++ ++ mode = nla_get_u16(attr); ++ if (mode == BRIDGE_MODE_VEPA) { ++ e1000_vmdq_set_loopback_pf(hw, 0); ++ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE; ++ } else if (mode == BRIDGE_MODE_VEB) { ++ e1000_vmdq_set_loopback_pf(hw, 1); ++ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; ++ } else ++ return -EINVAL; ++ ++ netdev_info(adapter->netdev, "enabling bridge mode: %s\n", ++ mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); ++ } + + return 0; + } + ++#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev, u32 filter_mask, ++ int nlflags) ++#elif defined(HAVE_BRIDGE_FILTER) ++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev, u32 filter_mask) ++#else ++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev) ++#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ ++{ ++ struct igb_adapter *adapter = netdev_priv(dev); ++ u16 mode; ++ ++ if (!(adapter->vfs_allocated_count)) ++ return -EOPNOTSUPP; ++ ++ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE) ++ mode = BRIDGE_MODE_VEB; ++ else ++ mode = BRIDGE_MODE_VEPA; ++#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, ++ filter_mask, NULL); ++#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); ++#elif defined(NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS) ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); ++#else ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); ++#endif /* NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS */ ++} ++#endif /* HAVE_BRIDGE_ATTRIBS */ ++#endif /* HAVE_FDB_OPS */ ++#ifdef HAVE_NET_DEVICE_OPS + static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, +- .ndo_get_stats64 = igb_get_stats64, ++ .ndo_get_stats = igb_get_stats, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, +@@ -2090,60 +2349,190 @@ + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, ++#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = igb_ndo_set_vf_bw, +- .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, ++#else ++ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, ++#endif /*HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + .ndo_get_vf_config = igb_ndo_get_vf_config, ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ++ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, ++#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ ++#endif /* IFLA_VF_MAX */ + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igb_netpoll, + #endif ++#ifdef HAVE_VLAN_RX_REGISTER ++ .ndo_vlan_rx_register = igb_vlan_mode, ++#endif ++#ifdef HAVE_FDB_OPS ++ .ndo_fdb_add = igb_ndo_fdb_add, ++#ifndef USE_DEFAULT_FDB_DEL_DUMP ++ .ndo_fdb_del = igb_ndo_fdb_del, ++ .ndo_fdb_dump = igb_ndo_fdb_dump, ++#endif ++#ifdef HAVE_BRIDGE_ATTRIBS ++ .ndo_bridge_setlink = igb_ndo_bridge_setlink, ++ .ndo_bridge_getlink = igb_ndo_bridge_getlink, ++#endif /* HAVE_BRIDGE_ATTRIBS */ ++#endif ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++}; ++ ++/* RHEL6 keeps these operations in a separate structure */ ++static const struct net_device_ops_ext igb_netdev_ops_ext = { ++ .size = sizeof(struct net_device_ops_ext), ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, ++#endif /* HAVE_NDO_SET_FEATURES */ + }; + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++static const struct net_device_ops igb_vmdq_ops = { ++ .ndo_open = &igb_vmdq_open, ++ .ndo_stop = &igb_vmdq_close, ++ .ndo_start_xmit = &igb_vmdq_xmit_frame, ++ .ndo_get_stats = 
&igb_vmdq_get_stats, ++ .ndo_set_rx_mode = &igb_vmdq_set_rx_mode, ++ .ndo_validate_addr = eth_validate_addr, ++ .ndo_set_mac_address = &igb_vmdq_set_mac, ++ .ndo_change_mtu = &igb_vmdq_change_mtu, ++ .ndo_tx_timeout = &igb_vmdq_tx_timeout, ++ .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register, ++ .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid, ++ .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid, ++}; ++ ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++#endif /* HAVE_NET_DEVICE_OPS */ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev) ++{ ++#ifdef HAVE_NET_DEVICE_OPS ++ vnetdev->netdev_ops = &igb_vmdq_ops; ++#else ++ dev->open = &igb_vmdq_open; ++ dev->stop = &igb_vmdq_close; ++ dev->hard_start_xmit = &igb_vmdq_xmit_frame; ++ dev->get_stats = &igb_vmdq_get_stats; ++#ifdef HAVE_SET_RX_MODE ++ dev->set_rx_mode = &igb_vmdq_set_rx_mode; ++#endif ++ dev->set_multicast_list = &igb_vmdq_set_rx_mode; ++ dev->set_mac_address = &igb_vmdq_set_mac; ++ dev->change_mtu = &igb_vmdq_change_mtu; ++#ifdef HAVE_TX_TIMEOUT ++ dev->tx_timeout = &igb_vmdq_tx_timeout; ++#endif ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++ dev->vlan_rx_register = &igb_vmdq_vlan_rx_register; ++ dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid; ++ dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid; ++#endif ++#endif /* HAVE_NET_DEVICE_OPS */ ++ igb_vmdq_set_ethtool_ops(vnetdev); ++ vnetdev->watchdog_timeo = 5 * HZ; ++ ++} ++ ++int igb_init_vmdq_netdevs(struct igb_adapter *adapter) ++{ ++ int pool, err = 0, base_queue; ++ struct net_device *vnetdev; ++ struct igb_vmdq_adapter *vmdq_adapter; ++ ++ for (pool = 1; pool < adapter->vmdq_pools; pool++) { ++ int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues); ++ ++ base_queue = pool * qpp; ++ vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter)); ++ if (!vnetdev) { ++ err = -ENOMEM; ++ break; ++ } ++ ++ vmdq_adapter = netdev_priv(vnetdev); ++ vmdq_adapter->vnetdev = vnetdev; ++ vmdq_adapter->real_adapter = adapter; ++ vmdq_adapter->rx_ring = adapter->rx_ring[base_queue]; ++ vmdq_adapter->tx_ring = adapter->tx_ring[base_queue]; ++ igb_assign_vmdq_netdev_ops(vnetdev); ++ snprintf(vnetdev->name, IFNAMSIZ, "%sv%d", ++ adapter->netdev->name, pool); ++ vnetdev->features = adapter->netdev->features; ++#ifdef HAVE_NETDEV_VLAN_FEATURES ++ vnetdev->vlan_features = adapter->netdev->vlan_features; ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ adapter->vmdq_netdev[pool-1] = vnetdev; ++ err = register_netdev(vnetdev); ++ if (err) ++ break; ++ } ++ return err; ++} ++ ++int igb_remove_vmdq_netdevs(struct igb_adapter *adapter) ++{ ++ int pool, err = 0; ++ ++ for (pool = 1; pool < adapter->vmdq_pools; pool++) { ++ unregister_netdev(adapter->vmdq_netdev[pool-1]); ++ free_netdev(adapter->vmdq_netdev[pool-1]); ++ adapter->vmdq_netdev[pool-1] = NULL; ++ } ++ return err; ++} ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ + /** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct ++ * + **/ +-void igb_set_fw_version(struct igb_adapter *adapter) ++static void igb_set_fw_version(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + +- igb_get_fw_version(hw, &fw); ++ e1000_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: +- if (!(igb_get_flash_presence_i210(hw))) { ++ if (!(e1000_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, +- sizeof(adapter->fw_version), +- "%2d.%2d-%d", +- fw.invm_major, fw.invm_minor, 
+- fw.invm_img_type); ++ sizeof(adapter->fw_version), ++ "%2d.%2d-%d", ++ fw.invm_major, fw.invm_minor, fw.invm_img_type); + break; + } + /* fall through */ + default: +- /* if option is rom valid, display its version too */ ++ /* if option rom is valid, display its version too*/ + if (fw.or_valid) { + snprintf(adapter->fw_version, +- sizeof(adapter->fw_version), +- "%d.%d, 0x%08x, %d.%d.%d", +- fw.eep_major, fw.eep_minor, fw.etrack_id, +- fw.or_major, fw.or_build, fw.or_patch); ++ sizeof(adapter->fw_version), ++ "%d.%d, 0x%08x, %d.%d.%d", ++ fw.eep_major, fw.eep_minor, fw.etrack_id, ++ fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ +- } else if (fw.etrack_id != 0X0000) { ++ } else { ++ if (fw.etrack_id != 0X0000) { ++ snprintf(adapter->fw_version, ++ sizeof(adapter->fw_version), ++ "%d.%d, 0x%08x", ++ fw.eep_major, fw.eep_minor, fw.etrack_id); ++ } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), +- "%d.%d, 0x%08x", +- fw.eep_major, fw.eep_minor, fw.etrack_id); +- } else { +- snprintf(adapter->fw_version, +- sizeof(adapter->fw_version), +- "%d.%d.%d", +- fw.eep_major, fw.eep_minor, fw.eep_build); ++ "%d.%d.%d", ++ fw.eep_major, fw.eep_minor, fw.eep_build); ++ } + } + break; + } +@@ -2159,126 +2548,130 @@ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + +- hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); ++ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: +- if (eeprom_data & IGB_MAS_ENABLE_0) { ++ if (eeprom_data & IGB_MAS_ENABLE_0) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + case E1000_FUNC_1: +- if (eeprom_data & IGB_MAS_ENABLE_1) { ++ if (eeprom_data & IGB_MAS_ENABLE_1) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + case E1000_FUNC_2: +- if (eeprom_data & IGB_MAS_ENABLE_2) { ++ if (eeprom_data & IGB_MAS_ENABLE_2) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + case E1000_FUNC_3: +- if (eeprom_data & IGB_MAS_ENABLE_3) { ++ if (eeprom_data & IGB_MAS_ENABLE_3) + adapter->flags |= IGB_FLAG_MAS_ENABLE; +- netdev_info(adapter->netdev, +- "MAS: Enabling Media Autosense for port %d\n", +- hw->bus.func); +- } + break; + default: + /* Shouldn't get here */ +- netdev_err(adapter->netdev, +- "MAS: Invalid port configuration, returning\n"); ++ dev_err(pci_dev_to_dev(adapter->pdev), ++ "%s:AMS: Invalid port configuration, returning\n", ++ adapter->netdev->name); + break; + } + } + +-/** +- * igb_init_i2c - Init I2C interface +- * @adapter: pointer to adapter structure +- **/ +-static s32 igb_init_i2c(struct igb_adapter *adapter) ++void igb_rar_set(struct igb_adapter *adapter, u32 index) + { +- s32 status = 0; ++ u32 rar_low, rar_high; ++ struct e1000_hw *hw = &adapter->hw; ++ u8 *addr = adapter->mac_table[index].addr; ++ /* HW expects these in little endian so we reverse the byte order ++ * from network order (big endian) to little endian ++ */ ++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); ++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + +- /* I2C interface supported on i350 devices */ +- if (adapter->hw.mac.type != e1000_i350) +- return 0; ++ /* Indicate to hardware the Address is Valid. 
*/ ++ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) ++ rar_high |= E1000_RAH_AV; + +- /* Initialize the i2c bus which is controlled by the registers. +- * This bus will use the i2c_algo_bit structue that implements +- * the protocol through toggling of the 4 bits in the register. +- */ +- adapter->i2c_adap.owner = THIS_MODULE; +- adapter->i2c_algo = igb_i2c_algo; +- adapter->i2c_algo.data = adapter; +- adapter->i2c_adap.algo_data = &adapter->i2c_algo; +- adapter->i2c_adap.dev.parent = &adapter->pdev->dev; +- strlcpy(adapter->i2c_adap.name, "igb BB", +- sizeof(adapter->i2c_adap.name)); +- status = i2c_bit_add_bus(&adapter->i2c_adap); +- return status; ++ if (hw->mac.type == e1000_82575) ++ rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; ++ else ++ rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; ++ ++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); ++ E1000_WRITE_FLUSH(hw); ++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); ++ E1000_WRITE_FLUSH(hw); + } + + /** +- * igb_probe - Device Initialization Routine +- * @pdev: PCI device information struct +- * @ent: entry in igb_pci_tbl ++ * igb_probe - Device Initialization Routine ++ * @pdev: PCI device information struct ++ * @ent: entry in igb_pci_tbl + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + * +- * igb_probe initializes an adapter identified by a pci_dev structure. +- * The OS initialization, configuring of the adapter private structure, +- * and a hardware reset occur. ++ * igb_probe initializes an adapter identified by a pci_dev structure. ++ * The OS initialization, configuring of the adapter private structure, ++ * and a hardware reset occur. + **/ +-static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++static int igb_probe(struct pci_dev *pdev, ++ const struct pci_device_id *ent) + { + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; ++ u8 pba_str[E1000_PBANUM_LENGTH]; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ +- const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + int err, pci_using_dac; +- u8 part_str[E1000_PBANUM_LENGTH]; +- +- /* Catch broken hardware that put the wrong VF device ID in +- * the PCIe SR-IOV capability. 
+- */ +- if (pdev->is_virtfn) { +- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", +- pci_name(pdev), pdev->vendor, pdev->device); +- return -EINVAL; +- } ++ static int cards_found; ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ u32 hw_features; ++#else ++ netdev_features_t hw_features; ++#endif ++#endif + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; +- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); + if (!err) { +- pci_using_dac = 1; ++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), ++ DMA_BIT_MASK(64)); ++ if (!err) ++ pci_using_dac = 1; + } else { +- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { +- dev_err(&pdev->dev, +- "No usable DMA configuration, aborting\n"); +- goto err_dma; ++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), ++ DMA_BIT_MASK(32)); ++ if (err) { ++ IGB_ERR( ++ "No usable DMA configuration, aborting\n"); ++ goto err_dma; ++ } + } + } + +- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, +- IORESOURCE_MEM), ++#ifndef HAVE_ASPM_QUIRKS ++ /* 82575 requires that the pci-e link partner disable the L0s state */ ++ switch (pdev->device) { ++ case E1000_DEV_ID_82575EB_COPPER: ++ case E1000_DEV_ID_82575EB_FIBER_SERDES: ++ case E1000_DEV_ID_82575GB_QUAD_COPPER: ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); ++ default: ++ break; ++ } ++ ++#endif /* HAVE_ASPM_QUIRKS */ ++ err = pci_request_selected_regions(pdev, ++ pci_select_bars(pdev, ++ IORESOURCE_MEM), + igb_driver_name); + if (err) + goto err_pci_reg; +@@ -2286,14 +2679,18 @@ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); +- pci_save_state(pdev); + + err = -ENOMEM; ++#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); ++#else ++ netdev = alloc_etherdev(sizeof(struct igb_adapter)); ++#endif /* HAVE_TX_MQ */ + if (!netdev) + goto err_alloc_etherdev; + ++ SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); +@@ -2302,158 +2699,225 @@ + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; +- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); ++ adapter->port_num = hw->bus.func; ++ adapter->msg_enable = (1 << debug) - 1; + ++#ifdef HAVE_PCI_ERS ++ err = pci_save_state(pdev); ++ if (err) ++ goto err_ioremap; ++#endif + err = -EIO; +- hw->hw_addr = pci_iomap(pdev, 0, 0); +- if (!hw->hw_addr) ++ adapter->io_addr = ioremap(pci_resource_start(pdev, 0), ++ pci_resource_len(pdev, 0)); ++ if (!adapter->io_addr) + goto err_ioremap; ++ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ ++ hw->hw_addr = adapter->io_addr; + ++#ifdef HAVE_NET_DEVICE_OPS + netdev->netdev_ops = &igb_netdev_ops; ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ set_netdev_ops_ext(netdev, &igb_netdev_ops_ext); ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++#else /* HAVE_NET_DEVICE_OPS */ ++ netdev->open = &igb_open; ++ netdev->stop = &igb_close; ++ netdev->get_stats = &igb_get_stats; ++#ifdef HAVE_SET_RX_MODE ++ netdev->set_rx_mode = &igb_set_rx_mode; ++#endif ++ netdev->set_multicast_list = &igb_set_rx_mode; ++ netdev->set_mac_address = &igb_set_mac; ++ netdev->change_mtu = &igb_change_mtu; ++ netdev->do_ioctl = &igb_ioctl; ++#ifdef HAVE_TX_TIMEOUT ++ netdev->tx_timeout = &igb_tx_timeout; ++#endif ++ netdev->vlan_rx_register = igb_vlan_mode; ++ 
netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid; ++ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = igb_netpoll; ++#endif ++ netdev->hard_start_xmit = &igb_xmit_frame; ++#endif /* HAVE_NET_DEVICE_OPS */ + igb_set_ethtool_ops(netdev); ++#ifdef HAVE_TX_TIMEOUT + netdev->watchdog_timeo = 5 * HZ; ++#endif + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + +- netdev->mem_start = pci_resource_start(pdev, 0); +- netdev->mem_end = pci_resource_end(pdev, 0); +- +- /* PCI config space info */ +- hw->vendor_id = pdev->vendor; +- hw->device_id = pdev->device; +- hw->revision_id = pdev->revision; +- hw->subsystem_vendor_id = pdev->subsystem_vendor; +- hw->subsystem_device_id = pdev->subsystem_device; +- +- /* Copy the default MAC, PHY and NVM function pointers */ +- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); +- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); +- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); +- /* Initialize skew-specific constants */ +- err = ei->get_invariants(hw); +- if (err) +- goto err_sw_init; ++ adapter->bd_number = cards_found; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + +- igb_get_bus_info_pcie(hw); ++ igb_e1000_get_bus_info(hw); + +- hw->phy.autoneg_wait_to_complete = false; ++ hw->phy.autoneg_wait_to_complete = FALSE; ++ hw->mac.adaptive_ifs = FALSE; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; +- hw->phy.disable_polarity_correction = false; ++ hw->phy.disable_polarity_correction = FALSE; + hw->phy.ms_type = e1000_ms_hw_default; + } + +- if (igb_check_reset_block(hw)) +- dev_info(&pdev->dev, ++ if (e1000_check_reset_block(hw)) ++ dev_info(pci_dev_to_dev(pdev), + "PHY reset is blocked due to SOL/IDER session.\n"); + +- /* features is initialized to 0 in allocation, it might have bits ++ /* ++ * features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. 
+ */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM | ++#ifdef NETIF_F_IPV6_CSUM + NETIF_F_IPV6_CSUM | ++#endif ++#ifdef NETIF_F_TSO + NETIF_F_TSO | ++#ifdef NETIF_F_TSO6 + NETIF_F_TSO6 | ++#endif ++#endif /* NETIF_F_TSO */ ++#ifdef NETIF_F_RXHASH + NETIF_F_RXHASH | ++#endif + NETIF_F_RXCSUM | ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; ++#else ++ NETIF_F_HW_VLAN_RX | ++ NETIF_F_HW_VLAN_TX; ++#endif ++ ++ if (hw->mac.type >= e1000_82576) ++ netdev->features |= NETIF_F_SCTP_CSUM; + ++#ifdef HAVE_NDO_SET_FEATURES + /* copy netdev features into list of user selectable features */ +- netdev->hw_features |= netdev->features; +- netdev->hw_features |= NETIF_F_RXALL; ++#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ hw_features = netdev->hw_features; ++ ++ /* give us the option of enabling LRO later */ ++ hw_features |= NETIF_F_LRO; ++ ++#else ++ hw_features = get_netdev_hw_features(netdev); ++ ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++ hw_features |= netdev->features; ++ ++#else ++#ifdef NETIF_F_GRO ++ ++ /* this is only needed on kernels prior to 2.6.39 */ ++ netdev->features |= NETIF_F_GRO; ++#endif /* NETIF_F_GRO */ ++#endif /* HAVE_NDO_SET_FEATURES */ + + /* set this bit last since it cannot be part of hw_features */ ++#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ++#endif /* NETIF_F_HW_FLAN_CTAG_FILTER */ ++#ifdef NETIF_F_HW_VLAN_TX ++ netdev->features |= NETIF_F_HW_VLAN_FILTER; ++#endif /* NETIF_F_HW_VLAN_TX */ ++ ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ set_netdev_hw_features(netdev, hw_features); ++#else ++ netdev->hw_features = hw_features; ++#endif ++#endif + ++#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + +- netdev->priv_flags |= IFF_SUPP_NOFCS; +- +- if (pci_using_dac) { ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; +- netdev->vlan_features |= NETIF_F_HIGHDMA; +- } + +- if (hw->mac.type >= e1000_82576) { +- netdev->hw_features |= NETIF_F_SCTP_CSUM; +- netdev->features |= NETIF_F_SCTP_CSUM; +- } +- +- netdev->priv_flags |= IFF_UNICAST_FLT; +- +- adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); ++ adapter->en_mng_pt = igb_e1000_enable_mng_pass_thru(hw); ++#ifdef DEBUG ++ if (adapter->dmac != IGB_DMAC_DISABLE) ++ netdev_info(netdev, "%s: DMA Coalescing is enabled..\n", ++ netdev->name); ++#endif + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ +- hw->mac.ops.reset_hw(hw); ++ igb_e1000_reset_hw(hw); + +- /* make sure the NVM is good , i211/i210 parts can have special NVM +- * that doesn't contain a checksum +- */ +- switch (hw->mac.type) { +- case e1000_i210: +- case e1000_i211: +- if (igb_get_flash_presence_i210(hw)) { +- if (hw->nvm.ops.validate(hw) < 0) { +- dev_err(&pdev->dev, +- "The NVM Checksum Is Not Valid\n"); +- err = -EIO; +- goto err_eeprom; +- } +- } +- break; +- default: +- if (hw->nvm.ops.validate(hw) < 0) { +- dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); +- err = -EIO; +- goto err_eeprom; +- } +- break; ++ /* make sure the NVM is good */ ++ if (e1000_validate_nvm_checksum(hw) < 0) { ++ dev_err(pci_dev_to_dev(pdev), ++ "The NVM Checksum Is Not Valid\n"); ++ err = -EIO; ++ goto err_eeprom; + } + + /* copy the MAC address out of the NVM */ +- if (hw->mac.ops.read_mac_addr(hw)) +- dev_err(&pdev->dev, "NVM Read Error\n"); +- ++ if 
(igb_e1000_read_mac_addr(hw)) ++ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n"); + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); ++#ifdef ETHTOOL_GPERMADDR ++ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + ++ if (!is_valid_ether_addr(netdev->perm_addr)) { ++#else + if (!is_valid_ether_addr(netdev->dev_addr)) { +- dev_err(&pdev->dev, "Invalid MAC Address\n"); ++#endif ++ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + ++ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len); ++ adapter->mac_table[0].queue = adapter->vfs_allocated_count; ++ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT ++ | IGB_MAC_STATE_IN_USE); ++ igb_rar_set(adapter, 0); ++ + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { +- wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); +- wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); ++ E1000_WRITE_REG(hw, E1000_RXPBS, I210_RXPBSIZE_DEFAULT); ++ E1000_WRITE_REG(hw, E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + +- setup_timer(&adapter->watchdog_timer, igb_watchdog, ++ /* Check if Media Autosense is enabled */ ++ if (hw->mac.type == e1000_82580) ++ igb_init_mas(adapter); ++ setup_timer(&adapter->watchdog_timer, &igb_watchdog, + (unsigned long) adapter); +- setup_timer(&adapter->phy_info_timer, igb_update_phy_info, ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer, ++ (unsigned long) adapter); ++ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; +@@ -2463,19 +2927,19 @@ + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + +- igb_validate_mdi_setting(hw); ++ igb_e1000_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + +- /* Check the NVM for wake support on non-port A ports */ ++ /* Check the NVM for wake support for non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) +- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); ++ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; +@@ -2494,7 +2958,7 @@ + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ +- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) ++ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: +@@ -2509,9 +2973,7 @@ + global_quad_port_a = 0; + break; + default: +- /* If the device can't wake, don't set software support */ +- if (!device_can_wakeup(&adapter->pdev->dev)) +- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; ++ break; + } + + /* initialize the wol settings based on the eeprom settings */ +@@ -2525,145 +2987,185 @@ + adapter->wol = 0; + } + +- device_set_wakeup_enable(&adapter->pdev->dev, ++ /* Some vendors want the ability to Use the 
EEPROM setting as ++ * enable/disable only, and not for capability ++ */ ++ if (((hw->mac.type == e1000_i350) || ++ (hw->mac.type == e1000_i354)) && ++ (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { ++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; ++ adapter->wol = 0; ++ } ++ if (hw->mac.type == e1000_i350) { ++ if (((pdev->subsystem_device == 0x5001) || ++ (pdev->subsystem_device == 0x5002)) && ++ (hw->bus.func == 0)) { ++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; ++ adapter->wol = 0; ++ } ++ if (pdev->subsystem_device == 0x1F52) ++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; ++ } ++ ++ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); ++ adapter->devrc = 0; + ++#ifdef HAVE_I2C_SUPPORT + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } ++#endif /* HAVE_I2C_SUPPORT */ + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); + +- strcpy(netdev->name, "eth%d"); ++ strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ err = igb_init_vmdq_netdevs(adapter); ++ if (err) ++ goto err_register; ++#endif + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +-#ifdef CONFIG_IGB_DCA +- if (dca_add_requester(&pdev->dev) == 0) { ++#ifdef IGB_DCA ++ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; +- dev_info(&pdev->dev, "DCA enabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); + igb_setup_dca(adapter); + } + + #endif +-#ifdef CONFIG_IGB_HWMON ++#ifdef HAVE_PTP_1588_CLOCK ++ /* do hw tstamp init after resetting */ ++ igb_ptp_init(adapter); ++#endif /* HAVE_PTP_1588_CLOCK */ ++ ++ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n"); ++ /* print bus type/speed/width info */ ++ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ", ++ netdev->name, ++ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" : ++ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" : ++ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"), ++ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : ++ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : ++ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : ++ (hw->mac.type == e1000_i354) ? "integrated" : "unknown")); ++ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); ++ ++ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH); ++ if (ret_val) ++ strcpy(pba_str, "Unknown"); ++ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name, ++ pba_str); ++ + /* Initialize the thermal sensor on i350 devices. */ +- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { +- u16 ets_word; ++ if (hw->mac.type == e1000_i350) { ++ if (hw->bus.func == 0) { ++ u16 ets_word; + +- /* Read the NVM to determine if this i350 device supports an +- * external thermal sensor. 
+- */ +- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); +- if (ets_word != 0x0000 && ets_word != 0xFFFF) +- adapter->ets = true; +- else +- adapter->ets = false; +- if (igb_sysfs_init(adapter)) +- dev_err(&pdev->dev, +- "failed to allocate sysfs resources\n"); +- } else { +- adapter->ets = false; +- } +-#endif +- /* Check if Media Autosense is enabled */ +- adapter->ei = *ei; +- if (hw->dev_spec._82575.mas_capable) +- igb_init_mas(adapter); ++ /* ++ * Read the NVM to determine if this i350 device ++ * supports an external thermal sensor. ++ */ ++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word); ++ if (ets_word != 0x0000 && ets_word != 0xFFFF) ++ adapter->ets = true; ++ else ++ adapter->ets = false; ++ } ++#ifdef IGB_HWMON + +- /* do hw tstamp init after resetting */ +- igb_ptp_init(adapter); ++ igb_sysfs_init(adapter); ++#else ++#ifdef IGB_PROCFS + +- dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); +- /* print bus type/speed/width info, not applicable to i354 */ +- if (hw->mac.type != e1000_i354) { +- dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", +- netdev->name, +- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : +- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : +- "unknown"), +- ((hw->bus.width == e1000_bus_width_pcie_x4) ? +- "Width x4" : +- (hw->bus.width == e1000_bus_width_pcie_x2) ? +- "Width x2" : +- (hw->bus.width == e1000_bus_width_pcie_x1) ? +- "Width x1" : "unknown"), netdev->dev_addr); +- } +- +- if ((hw->mac.type >= e1000_i210 || +- igb_get_flash_presence_i210(hw))) { +- ret_val = igb_read_part_string(hw, part_str, +- E1000_PBANUM_LENGTH); ++ igb_procfs_init(adapter); ++#endif /* IGB_PROCFS */ ++#endif /* IGB_HWMON */ + } else { +- ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; ++ adapter->ets = false; + } + +- if (ret_val) +- strcpy(part_str, "Unknown"); +- dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); +- dev_info(&pdev->dev, +- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", +- (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : +- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", +- adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ +- err = igb_set_eee_i350(hw); ++ err = e1000_set_eee_i350(hw, true, true); + if ((!err) && +- (!hw->dev_spec._82575.eee_disable)) { ++ (adapter->flags & IGB_FLAG_EEE)) + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; +- adapter->flags |= IGB_FLAG_EEE; +- } + break; + case e1000_i354: +- if ((rd32(E1000_CTRL_EXT) & +- E1000_CTRL_EXT_LINK_MODE_SGMII)) { +- err = igb_set_eee_i354(hw); ++ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) & ++ (E1000_CTRL_EXT_LINK_MODE_SGMII)) { ++ err = e1000_set_eee_i354(hw, true, true); + if ((!err) && +- (!hw->dev_spec._82575.eee_disable)) { ++ (adapter->flags & IGB_FLAG_EEE)) + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; +- adapter->flags |= IGB_FLAG_EEE; +- } + } + break; + default: + break; + } + } ++ ++ /* send driver version info to firmware */ ++ if ((hw->mac.type >= e1000_i350) && ++ (e1000_get_flash_presence_i210(hw))) ++ igb_init_fw(adapter); ++ ++#ifndef IGB_NO_LRO ++ if (netdev->features & NETIF_F_LRO) ++ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled\n"); ++ else ++ dev_info(pci_dev_to_dev(pdev), "LRO is disabled\n"); ++#endif ++ dev_info(pci_dev_to_dev(pdev), ++ "Using %s interrupts. 
%d rx queue(s), %d tx queue(s)\n", ++ adapter->msix_entries ? "MSI-X" : ++ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", ++ adapter->num_rx_queues, adapter->num_tx_queues); ++ ++ cards_found++; ++ + pm_runtime_put_noidle(&pdev->dev); + return 0; + + err_register: + igb_release_hw_control(adapter); ++#ifdef HAVE_I2C_SUPPORT + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); ++#endif /* HAVE_I2C_SUPPORT */ + err_eeprom: +- if (!igb_check_reset_block(hw)) +- igb_reset_phy(hw); ++ if (!e1000_check_reset_block(hw)) ++ igb_e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + err_sw_init: ++ kfree(adapter->shadow_vfta); + igb_clear_interrupt_scheme(adapter); +- pci_iounmap(pdev, hw->hw_addr); ++ igb_reset_sriov_capability(adapter); ++ iounmap(adapter->io_addr); + err_ioremap: + free_netdev(netdev); + err_alloc_etherdev: +@@ -2674,117 +3176,28 @@ + pci_disable_device(pdev); + return err; + } +- +-#ifdef CONFIG_PCI_IOV +-static int igb_disable_sriov(struct pci_dev *pdev) +-{ +- struct net_device *netdev = pci_get_drvdata(pdev); +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- +- /* reclaim resources allocated to VFs */ +- if (adapter->vf_data) { +- /* disable iov and allow time for transactions to clear */ +- if (pci_vfs_assigned(pdev)) { +- dev_warn(&pdev->dev, +- "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); +- return -EPERM; +- } else { +- pci_disable_sriov(pdev); +- msleep(500); +- } +- +- kfree(adapter->vf_data); +- adapter->vf_data = NULL; +- adapter->vfs_allocated_count = 0; +- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); +- wrfl(); +- msleep(100); +- dev_info(&pdev->dev, "IOV Disabled\n"); +- +- /* Re-enable DMA Coalescing flag since IOV is turned off */ +- adapter->flags |= IGB_FLAG_DMAC; +- } +- +- return 0; +-} +- +-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +-{ +- struct net_device *netdev = pci_get_drvdata(pdev); +- struct igb_adapter *adapter = netdev_priv(netdev); +- int old_vfs = pci_num_vf(pdev); +- int err = 0; +- int i; +- +- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { +- err = -EPERM; +- goto out; +- } +- if (!num_vfs) +- goto out; +- +- if (old_vfs) { +- dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", +- old_vfs, max_vfs); +- adapter->vfs_allocated_count = old_vfs; +- } else +- adapter->vfs_allocated_count = num_vfs; +- +- adapter->vf_data = kcalloc(adapter->vfs_allocated_count, +- sizeof(struct vf_data_storage), GFP_KERNEL); +- +- /* if allocation failed then we do not support SR-IOV */ +- if (!adapter->vf_data) { +- adapter->vfs_allocated_count = 0; +- dev_err(&pdev->dev, +- "Unable to allocate memory for VF Data Storage\n"); +- err = -ENOMEM; +- goto out; +- } +- +- /* only call pci_enable_sriov() if no VFs are allocated already */ +- if (!old_vfs) { +- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); +- if (err) +- goto err_out; +- } +- dev_info(&pdev->dev, "%d VFs allocated\n", +- adapter->vfs_allocated_count); +- for (i = 0; i < adapter->vfs_allocated_count; i++) +- igb_vf_configure(adapter, i); +- +- /* DMA Coalescing is not supported in IOV mode. 
*/ +- adapter->flags &= ~IGB_FLAG_DMAC; +- goto out; +- +-err_out: +- kfree(adapter->vf_data); +- adapter->vf_data = NULL; +- adapter->vfs_allocated_count = 0; +-out: +- return err; +-} +- +-#endif +-/** ++#ifdef HAVE_I2C_SUPPORT ++/* + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure +- **/ ++ * ++ */ + static void igb_remove_i2c(struct igb_adapter *adapter) + { ++ + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); + } ++#endif /* HAVE_I2C_SUPPORT */ + + /** +- * igb_remove - Device Removal Routine +- * @pdev: PCI device information struct ++ * igb_remove - Device Removal Routine ++ * @pdev: PCI device information struct + * +- * igb_remove is called by the PCI subsystem to alert the driver +- * that it should release a PCI device. The could be caused by a +- * Hot-Plug event, or because the driver is going to be removed from +- * memory. ++ * igb_remove is called by the PCI subsystem to alert the driver ++ * that it should release a PCI device. The could be caused by a ++ * Hot-Plug event, or because the driver is going to be removed from ++ * memory. + **/ + static void igb_remove(struct pci_dev *pdev) + { +@@ -2793,30 +3206,39 @@ + struct e1000_hw *hw = &adapter->hw; + + pm_runtime_get_noresume(&pdev->dev); +-#ifdef CONFIG_IGB_HWMON +- igb_sysfs_exit(adapter); +-#endif ++#ifdef HAVE_I2C_SUPPORT + igb_remove_i2c(adapter); ++#endif /* HAVE_I2C_SUPPORT */ ++#ifdef HAVE_PTP_1588_CLOCK + igb_ptp_stop(adapter); +- /* The watchdog timer may be rescheduled, so explicitly +- * disable watchdog from being rescheduled. ++#endif /* HAVE_PTP_1588_CLOCK */ ++ ++ /* flush_scheduled work may reschedule our watchdog task, so ++ * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__IGB_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ del_timer_sync(&adapter->dma_err_timer); + del_timer_sync(&adapter->phy_info_timer); + +- cancel_work_sync(&adapter->reset_task); +- cancel_work_sync(&adapter->watchdog_task); ++ flush_scheduled_work(); + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { +- dev_info(&pdev->dev, "DCA disabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); + dca_remove_requester(&pdev->dev); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; +- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); ++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); + } + #endif + ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ igb_remove_vmdq_netdevs(adapter); ++#endif ++ ++ igb_reset_sriov_capability(adapter); ++ + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
+ */
+@@ -2826,16 +3248,21 @@
+
+ igb_clear_interrupt_scheme(adapter);
+
+-#ifdef CONFIG_PCI_IOV
+- igb_disable_sriov(pdev);
+-#endif
+-
+- pci_iounmap(pdev, hw->hw_addr);
++ if (adapter->io_addr)
++ iounmap(adapter->io_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
++#ifdef IGB_HWMON
++ igb_sysfs_exit(adapter);
++#else
++#ifdef IGB_PROCFS
++ igb_procfs_exit(adapter);
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
++ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
+
+@@ -2845,110 +3272,12 @@
+ }
+
+ /**
+- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+- * @adapter: board private structure to initialize
+- *
+- * This function initializes the vf specific data storage and then attempts to
+- * allocate the VFs. The reason for ordering it this way is because it is much
+- * mor expensive time wise to disable SR-IOV than it is to allocate and free
+- * the memory for the VFs.
+- **/
+-static void igb_probe_vfs(struct igb_adapter *adapter)
+-{
+-#ifdef CONFIG_PCI_IOV
+- struct pci_dev *pdev = adapter->pdev;
+- struct e1000_hw *hw = &adapter->hw;
+-
+- /* Virtualization features not supported on i210 family. */
+- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+- return;
+-
+- pci_sriov_set_totalvfs(pdev, 7);
+- igb_pci_enable_sriov(pdev, max_vfs);
+-
+-#endif /* CONFIG_PCI_IOV */
+-}
+-
+-static void igb_init_queue_configuration(struct igb_adapter *adapter)
+-{
+- struct e1000_hw *hw = &adapter->hw;
+- u32 max_rss_queues;
+-
+- /* Determine the maximum number of RSS queues supported. */
+- switch (hw->mac.type) {
+- case e1000_i211:
+- max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+- break;
+- case e1000_82575:
+- case e1000_i210:
+- max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+- break;
+- case e1000_i350:
+- /* I350 cannot do RSS and SR-IOV at the same time */
+- if (!!adapter->vfs_allocated_count) {
+- max_rss_queues = 1;
+- break;
+- }
+- /* fall through */
+- case e1000_82576:
+- if (!!adapter->vfs_allocated_count) {
+- max_rss_queues = 2;
+- break;
+- }
+- /* fall through */
+- case e1000_82580:
+- case e1000_i354:
+- default:
+- max_rss_queues = IGB_MAX_RX_QUEUES;
+- break;
+- }
+-
+- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+-
+- igb_set_flag_queue_pairs(adapter, max_rss_queues);
+-}
+-
+-void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+- const u32 max_rss_queues)
+-{
+- struct e1000_hw *hw = &adapter->hw;
+-
+- /* Determine if we need to pair queues. */
+- switch (hw->mac.type) {
+- case e1000_82575:
+- case e1000_i211:
+- /* Device supports enough interrupts without queue pairing. */
+- break;
+- case e1000_82576:
+- /* If VFs are going to be allocated with RSS queues then we
+- * should pair the queues in order to conserve interrupts due
+- * to limited supply.
+- */
+- if ((adapter->rss_queues > 1) &&
+- (adapter->vfs_allocated_count > 6))
+- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+- /* fall through */
+- case e1000_82580:
+- case e1000_i350:
+- case e1000_i354:
+- case e1000_i210:
+- default:
+- /* If rss_queues > half of max_rss_queues, pair the queues in
+- * order to conserve interrupts due to limited supply.
+- */ +- if (adapter->rss_queues > (max_rss_queues / 2)) +- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; +- break; +- } +-} +- +-/** +- * igb_sw_init - Initialize general software structures (struct igb_adapter) +- * @adapter: board private structure to initialize ++ * igb_sw_init - Initialize general software structures (struct igb_adapter) ++ * @adapter: board private structure to initialize + * +- * igb_sw_init initializes the Adapter private data structure. +- * Fields are initialized based on PCI device information and +- * OS network device settings (MTU size). ++ * igb_sw_init initializes the Adapter private data structure. ++ * Fields are initialized based on PCI device information and ++ * OS network device settings (MTU size). + **/ + static int igb_sw_init(struct igb_adapter *adapter) + { +@@ -2956,84 +3285,78 @@ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + ++ /* PCI config space info */ ++ ++ hw->vendor_id = pdev->vendor; ++ hw->device_id = pdev->device; ++ hw->subsystem_vendor_id = pdev->subsystem_vendor; ++ hw->subsystem_device_id = pdev->subsystem_device; ++ ++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); ++ + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + +- /* set default ITR values */ +- adapter->rx_itr_setting = IGB_DEFAULT_ITR; +- adapter->tx_itr_setting = IGB_DEFAULT_ITR; +- + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + +- VLAN_HLEN; +- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; ++ VLAN_HLEN; + +- spin_lock_init(&adapter->stats64_lock); +-#ifdef CONFIG_PCI_IOV +- switch (hw->mac.type) { +- case e1000_82576: +- case e1000_i350: +- if (max_vfs > 7) { +- dev_warn(&pdev->dev, +- "Maximum of 7 VFs per PF, using max\n"); +- max_vfs = adapter->vfs_allocated_count = 7; +- } else +- adapter->vfs_allocated_count = max_vfs; +- if (adapter->vfs_allocated_count) +- dev_warn(&pdev->dev, +- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); +- break; +- default: +- break; ++ /* Initialize the hardware-specific values */ ++ if (e1000_setup_init_funcs(hw, TRUE)) { ++ dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n"); ++ return -EIO; + } +-#endif /* CONFIG_PCI_IOV */ + +- igb_init_queue_configuration(adapter); ++ igb_check_options(adapter); ++ ++ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) * ++ hw->mac.rar_entry_count, ++ GFP_ATOMIC); + + /* Setup and initialize a copy of the hw vlan table array */ +- adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), +- GFP_ATOMIC); ++ adapter->shadow_vfta = kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES, ++ GFP_ATOMIC); ++ ++ /* These calls may decrease the number of queues */ ++ if (hw->mac.type < e1000_i210) ++ igb_set_sriov_capability(adapter); + +- /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { +- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); ++ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + +- igb_probe_vfs(adapter); +- + /* Explicitly disable IRQ since the NIC can be in any state. 
*/ + igb_irq_disable(adapter); + +- if (hw->mac.type >= e1000_i350) +- adapter->flags &= ~IGB_FLAG_DMAC; +- + set_bit(__IGB_DOWN, &adapter->state); + return 0; + } + + /** +- * igb_open - Called when a network interface is made active +- * @netdev: network interface device structure ++ * igb_open - Called when a network interface is made active ++ * @netdev: network interface device structure + * +- * Returns 0 on success, negative value on failure ++ * Returns 0 on success, negative value on failure + * +- * The open entry point is called when a network interface is made +- * active by the system (IFF_UP). At this point all resources needed +- * for transmit and receive operations are allocated, the interrupt +- * handler is registered with the OS, the watchdog timer is started, +- * and the stack is notified that the interface is ready. ++ * The open entry point is called when a network interface is made ++ * active by the system (IFF_UP). At this point all resources needed ++ * for transmit and receive operations are allocated, the interrupt ++ * handler is registered with the OS, the watchdog timer is started, ++ * and the stack is notified that the interface is ready. + **/ + static int __igb_open(struct net_device *netdev, bool resuming) + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; ++#ifdef CONFIG_PM_RUNTIME + struct pci_dev *pdev = adapter->pdev; ++#endif /* CONFIG_PM_RUNTIME */ + int err; + int i; + +@@ -3043,8 +3366,10 @@ + return -EBUSY; + } + ++#ifdef CONFIG_PM_RUNTIME + if (!resuming) + pm_runtime_get_sync(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ + + netif_carrier_off(netdev); + +@@ -3072,12 +3397,12 @@ + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ +- err = netif_set_real_num_tx_queues(adapter->netdev, +- adapter->num_tx_queues); +- if (err) +- goto err_set_queues; ++ netif_set_real_num_tx_queues(netdev, ++ adapter->vmdq_pools ? 1 : ++ adapter->num_tx_queues); + +- err = netif_set_real_num_rx_queues(adapter->netdev, ++ err = netif_set_real_num_rx_queues(netdev, ++ adapter->vmdq_pools ? 1 : + adapter->num_rx_queues); + if (err) + goto err_set_queues; +@@ -3087,30 +3412,31 @@ + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); ++ igb_configure_lli(adapter); + + /* Clear any pending interrupts. */ +- rd32(E1000_ICR); ++ E1000_READ_REG(hw, E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { +- u32 reg_data = rd32(E1000_CTRL_EXT); ++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; +- wr32(E1000_CTRL_EXT, reg_data); ++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(netdev); + +- if (!resuming) +- pm_runtime_put(&pdev->dev); ++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) ++ schedule_work(&adapter->dma_err_task); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + +- return 0; ++ return E1000_SUCCESS; + + err_set_queues: + igb_free_irq(adapter); +@@ -3122,59 +3448,72 @@ + igb_free_all_tx_resources(adapter); + err_setup_tx: + igb_reset(adapter); ++ ++#ifdef CONFIG_PM_RUNTIME + if (!resuming) + pm_runtime_put(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ + + return err; + } + +-static int igb_open(struct net_device *netdev) ++int igb_open(struct net_device *netdev) + { + return __igb_open(netdev, false); + } + + /** +- * igb_close - Disables a network interface +- * @netdev: network interface device structure ++ * igb_close - Disables a network interface ++ * @netdev: network interface device structure + * +- * Returns 0, this is not allowed to fail ++ * Returns 0, this is not allowed to fail + * +- * The close entry point is called when an interface is de-activated +- * by the OS. The hardware is still under the driver's control, but +- * needs to be disabled. A global MAC reset is issued to stop the +- * hardware, and all transmit and receive resources are freed. ++ * The close entry point is called when an interface is de-activated ++ * by the OS. The hardware is still under the driver's control, but ++ * needs to be disabled. A global MAC reset is issued to stop the ++ * hardware, and all transmit and receive resources are freed. + **/ + static int __igb_close(struct net_device *netdev, bool suspending) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++#ifdef CONFIG_PM_RUNTIME + struct pci_dev *pdev = adapter->pdev; ++#endif /* CONFIG_PM_RUNTIME */ + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + ++#ifdef CONFIG_PM_RUNTIME + if (!suspending) + pm_runtime_get_sync(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ + + igb_down(adapter); ++ ++ igb_release_hw_control(adapter); ++ + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + ++#ifdef CONFIG_PM_RUNTIME + if (!suspending) + pm_runtime_put_sync(&pdev->dev); ++#endif /* CONFIG_PM_RUNTIME */ ++ + return 0; + } + +-static int igb_close(struct net_device *netdev) ++int igb_close(struct net_device *netdev) + { + return __igb_close(netdev, false); + } + + /** +- * igb_setup_tx_resources - allocate Tx resources (Descriptors) +- * @tx_ring: tx descriptor ring (for a specific queue) to setup ++ * igb_setup_tx_resources - allocate Tx resources (Descriptors) ++ * @tx_ring: tx descriptor ring (for a specific queue) to setup + * +- * Return 0 on success, negative on failure ++ * Return 0 on success, negative on failure + **/ + int igb_setup_tx_resources(struct igb_ring *tx_ring) + { +@@ -3182,7 +3521,6 @@ + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; +- + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; +@@ -3193,6 +3531,7 @@ + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); ++ + if (!tx_ring->desc) + goto err; + +@@ -3203,17 +3542,17 @@ + + err: + vfree(tx_ring->tx_buffer_info); +- tx_ring->tx_buffer_info = NULL; +- dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); ++ dev_err(dev, ++ "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + + /** +- * igb_setup_all_tx_resources - wrapper to allocate Tx resources +- * (Descriptors) for all queues +- * @adapter: board private structure ++ * igb_setup_all_tx_resources - wrapper to allocate Tx resources ++ * (Descriptors) for all queues ++ * @adapter: board private 
structure + * +- * Return 0 on success, negative on failure ++ * Return 0 on success, negative on failure + **/ + static int igb_setup_all_tx_resources(struct igb_adapter *adapter) + { +@@ -3223,7 +3562,7 @@ + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); +@@ -3235,8 +3574,8 @@ + } + + /** +- * igb_setup_tctl - configure the transmit control registers +- * @adapter: Board private structure ++ * igb_setup_tctl - configure the transmit control registers ++ * @adapter: Board private structure + **/ + void igb_setup_tctl(struct igb_adapter *adapter) + { +@@ -3244,28 +3583,45 @@ + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ +- wr32(E1000_TXDCTL(0), 0); ++ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ +- tctl = rd32(E1000_TCTL); ++ tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + +- igb_config_collision_dist(hw); ++ igb_e1000_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + +- wr32(E1000_TCTL, tctl); ++ E1000_WRITE_REG(hw, E1000_TCTL, tctl); ++} ++ ++static u32 igb_tx_wthresh(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ ++ switch (hw->mac.type) { ++ case e1000_i354: ++ return 4; ++ case e1000_82576: ++ if (adapter->msix_entries) ++ return 1; ++ default: ++ break; ++ } ++ ++ return 16; + } + + /** +- * igb_configure_tx_ring - Configure transmit ring after Reset +- * @adapter: board private structure +- * @ring: tx ring to configure ++ * igb_configure_tx_ring - Configure transmit ring after Reset ++ * @adapter: board private structure ++ * @ring: tx ring to configure + * +- * Configure a transmit ring after a reset. ++ * Configure a transmit ring after a reset. 
+ **/ + void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +@@ -3276,33 +3632,33 @@ + int reg_idx = ring->reg_idx; + + /* disable the queue */ +- wr32(E1000_TXDCTL(reg_idx), 0); +- wrfl(); ++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0); ++ E1000_WRITE_FLUSH(hw); + mdelay(10); + +- wr32(E1000_TDLEN(reg_idx), +- ring->count * sizeof(union e1000_adv_tx_desc)); +- wr32(E1000_TDBAL(reg_idx), +- tdba & 0x00000000ffffffffULL); +- wr32(E1000_TDBAH(reg_idx), tdba >> 32); ++ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx), ++ ring->count * sizeof(union e1000_adv_tx_desc)); ++ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx), ++ tdba & 0x00000000ffffffffULL); ++ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32); + +- ring->tail = hw->hw_addr + E1000_TDT(reg_idx); +- wr32(E1000_TDH(reg_idx), 0); ++ ring->tail = adapter->io_addr + E1000_TDT(reg_idx); ++ E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; +- txdctl |= IGB_TX_WTHRESH << 16; ++ txdctl |= igb_tx_wthresh(adapter) << 16; + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; +- wr32(E1000_TXDCTL(reg_idx), txdctl); ++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl); + } + + /** +- * igb_configure_tx - Configure transmit Unit after Reset +- * @adapter: board private structure ++ * igb_configure_tx - Configure transmit Unit after Reset ++ * @adapter: board private structure + * +- * Configure the Tx unit of the MAC after a reset. ++ * Configure the Tx unit of the MAC after a reset. + **/ + static void igb_configure_tx(struct igb_adapter *adapter) + { +@@ -3313,28 +3669,30 @@ + } + + /** +- * igb_setup_rx_resources - allocate Rx resources (Descriptors) +- * @rx_ring: Rx descriptor ring (for a specific queue) to setup ++ * igb_setup_rx_resources - allocate Rx resources (Descriptors) ++ * @rx_ring: rx descriptor ring (for a specific queue) to setup + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + **/ + int igb_setup_rx_resources(struct igb_ring *rx_ring) + { + struct device *dev = rx_ring->dev; +- int size; ++ int size, desc_len; + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; +- + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + ++ desc_len = sizeof(union e1000_adv_rx_desc); ++ + /* Round up to nearest 4K */ +- rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); ++ rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); ++ + if (!rx_ring->desc) + goto err; + +@@ -3347,16 +3705,17 @@ + err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; +- dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); ++ dev_err(dev, ++ "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + + /** +- * igb_setup_all_rx_resources - wrapper to allocate Rx resources +- * (Descriptors) for all queues +- * @adapter: board private structure ++ * igb_setup_all_rx_resources - wrapper to allocate Rx resources ++ * (Descriptors) for all queues ++ * @adapter: board private structure + * +- * Return 0 on success, negative on failure ++ * Return 0 on success, negative on failure + **/ + static int igb_setup_all_rx_resources(struct igb_adapter *adapter) + { +@@ -3366,7 +3725,7 @@ + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { +- 
dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); +@@ -3378,14 +3737,17 @@ + } + + /** +- * igb_setup_mrqc - configure the multiple receive queue control registers +- * @adapter: Board private structure ++ * igb_setup_mrqc - configure the multiple receive queue control registers ++ * @adapter: Board private structure + **/ + static void igb_setup_mrqc(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; ++#ifndef ETHTOOL_SRXFHINDIR ++ u32 shift = 0, shift2 = 0; ++#endif /* ETHTOOL_SRXFHINDIR */ + static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, + 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, + 0xA32DCB77, 0x0CF23080, 0x3BB7426A, +@@ -3393,33 +3755,72 @@ + + /* Fill out hash function seeds */ + for (j = 0; j < 10; j++) +- wr32(E1000_RSSRK(j), rsskey[j]); ++ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]); + + num_rx_queues = adapter->rss_queues; + +- switch (hw->mac.type) { +- case e1000_82576: ++#ifdef ETHTOOL_SRXFHINDIR ++ if (hw->mac.type == e1000_82576) { + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; +- break; +- default: +- break; + } +- + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = +- (j * num_rx_queues) / IGB_RETA_SIZE; ++ (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); ++#else ++ /* 82575 and 82576 supports 2 RSS queues for VMDq */ ++ switch (hw->mac.type) { ++ case e1000_82575: ++ if (adapter->vmdq_pools) { ++ shift = 2; ++ shift2 = 6; ++ } ++ shift = 6; ++ break; ++ case e1000_82576: ++ /* 82576 supports 2 RSS queues for SR-IOV */ ++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) { ++ shift = 3; ++ num_rx_queues = 2; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ /* ++ * Populate the redirection table 4 entries at a time. To do this ++ * we are generating the results for n and n+2 and then interleaving ++ * those with the results with n+1 and n+3. ++ */ ++ for (j = 0; j < 32; j++) { ++ /* first pass generates n and n+2 */ ++ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; ++ u32 reta = (base & 0x07800780) >> (7 - shift); ++ ++ /* second pass generates n+1 and n+3 */ ++ base += 0x00010001 * num_rx_queues; ++ reta |= (base & 0x07800780) << (1 + shift); ++ ++ /* generate 2nd table for 82575 based parts */ ++ if (shift2) ++ reta |= (0x01010101 * num_rx_queues) << shift2; ++ ++ E1000_WRITE_REG(hw, E1000_RETA(j), reta); ++ } ++#endif /* ETHTOOL_SRXFHINDIR */ + +- /* Disable raw packet checksumming so that RSS hash is placed in ++ /* ++ * Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ +- rxcsum = rd32(E1000_RXCSUM); ++ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) +@@ -3427,7 +3828,7 @@ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ +- wr32(E1000_RXCSUM, rxcsum); ++ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses +@@ -3447,40 +3848,39 @@ + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ +- if (adapter->vfs_allocated_count) { ++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ +- u32 vtctl = rd32(E1000_VT_CTL); ++ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; +- wr32(E1000_VT_CTL, vtctl); ++ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { +- if (hw->mac.type != e1000_i211) +- mrqc |= E1000_MRQC_ENABLE_RSS_4Q; ++ mrqc |= E1000_MRQC_ENABLE_RSS_4Q; + } + igb_vmm_control(adapter); + +- wr32(E1000_MRQC, mrqc); ++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + } + + /** +- * igb_setup_rctl - configure the receive control registers +- * @adapter: Board private structure ++ * igb_setup_rctl - configure the receive control registers ++ * @adapter: Board private structure + **/ + void igb_setup_rctl(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); +@@ -3488,7 +3888,8 @@ + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + +- /* enable stripping of CRC. It's unlikely this will break BMC ++ /* ++ * enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ +@@ -3501,7 +3902,7 @@ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ +- wr32(E1000_RXDCTL(0), 0); ++ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0); + + /* Attention!!! For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking +@@ -3509,27 +3910,10 @@ + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ +- wr32(E1000_QDE, ALL_QUEUES); +- } +- +- /* This is useful for sniffing bad packets. */ +- if (adapter->netdev->features & NETIF_F_RXALL) { +- /* UPE and MPE will be handled by normal PROMISC logic +- * in e1000e_set_rx_mode +- */ +- rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ +- E1000_RCTL_BAM | /* RX All Bcast Pkts */ +- E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ +- +- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ +- E1000_RCTL_DPF | /* Allow filtered pause */ +- E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ +- /* Do not mess with E1000_CTRL_VME, it affects transmit as well, +- * and that breaks VLANs. 
+- */ ++ E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES); + } + +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } + + static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, +@@ -3543,21 +3927,31 @@ + */ + if (vfn < adapter->vfs_allocated_count && + adapter->vf_data[vfn].vlans_enabled) +- size += VLAN_TAG_SIZE; ++ size += VLAN_HLEN; + +- vmolr = rd32(E1000_VMOLR(vfn)); ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (vfn >= adapter->vfs_allocated_count) { ++ int queue = vfn - adapter->vfs_allocated_count; ++ struct igb_vmdq_adapter *vadapter; ++ ++ vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]); ++ if (vadapter->vlgrp) ++ size += VLAN_HLEN; ++ } ++#endif ++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; +- wr32(E1000_VMOLR(vfn), vmolr); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + + return 0; + } + + /** +- * igb_rlpml_set - set maximum receive packet size +- * @adapter: board private structure ++ * igb_rlpml_set - set maximum receive packet size ++ * @adapter: board private structure + * +- * Configure maximum receivable packet size. ++ * Configure maximum receivable packet size. + **/ + static void igb_rlpml_set(struct igb_adapter *adapter) + { +@@ -3565,9 +3959,13 @@ + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + +- if (pf_id) { +- igb_set_vf_rlpml(adapter, max_frame_size, pf_id); +- /* If we're in VMDQ or SR-IOV mode, then set global RLPML ++ if (adapter->vmdq_pools && hw->mac.type != e1000_82575) { ++ int i; ++ ++ for (i = 0; i < adapter->vmdq_pools; i++) ++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i); ++ /* ++ * If we're in VMDQ or SR-IOV mode, then set global RLPML + * to our max jumbo frame size, in case we need to enable + * jumbo frames on one of the rings later. + * This will not pass over-length frames into the default +@@ -3575,56 +3973,73 @@ + */ + max_frame_size = MAX_JUMBO_FRAME_SIZE; + } ++ /* Set VF RLPML for the PF device. 
*/ ++ if (adapter->vfs_allocated_count) ++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id); + +- wr32(E1000_RLPML, max_frame_size); ++ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size); + } + ++static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, ++ int vfn, bool enable) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u32 val; ++ void __iomem *reg; ++ ++ if (hw->mac.type < e1000_82576) ++ return; ++ ++ if (hw->mac.type == e1000_i350) ++ reg = hw->hw_addr + E1000_DVMOLR(vfn); ++ else ++ reg = hw->hw_addr + E1000_VMOLR(vfn); ++ ++ val = readl(reg); ++ if (enable) ++ val |= E1000_VMOLR_STRVLAN; ++ else ++ val &= ~(E1000_VMOLR_STRVLAN); ++ writel(val, reg); ++} + static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) + { + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + +- /* This register exists only on 82576 and newer so if we are older then ++ /* ++ * This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + +- vmolr = rd32(E1000_VMOLR(vfn)); +- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ +- if (hw->mac.type == e1000_i350) { +- u32 dvmolr; ++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); + +- dvmolr = rd32(E1000_DVMOLR(vfn)); +- dvmolr |= E1000_DVMOLR_STRVLAN; +- wr32(E1000_DVMOLR(vfn), dvmolr); +- } + if (aupe) +- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ ++ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ +- vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); ++ vmolr &= ~E1000_VMOLR_RSSE; + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ +- /* for VMDq only allow the VFs and pool 0 to accept broadcast and +- * multicast packets +- */ +- if (vfn <= adapter->vfs_allocated_count) +- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + +- wr32(E1000_VMOLR(vfn), vmolr); ++ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ ++ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */ ++ ++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + } + + /** +- * igb_configure_rx_ring - Configure a receive ring after Reset +- * @adapter: board private structure +- * @ring: receive ring to be configured ++ * igb_configure_rx_ring - Configure a receive ring after Reset ++ * @adapter: board private structure ++ * @ring: receive ring to be configured + * +- * Configure the Rx unit of the MAC after a reset. ++ * Configure the Rx unit of the MAC after a reset. + **/ + void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +@@ -3634,32 +4049,67 @@ + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ /* ++ * RLPML prevents us from receiving a frame larger than max_frame so ++ * it is safe to just set the rx_buffer_len to max_frame without the ++ * risk of an skb over panic. 
++ */
++ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
++ MAXIMUM_ETHERNET_VLAN_SIZE);
++
++#endif
+ /* disable the queue */
+- wr32(E1000_RXDCTL(reg_idx), 0);
++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
+
+ /* Set DMA base address registers */
+- wr32(E1000_RDBAL(reg_idx),
+- rdba & 0x00000000ffffffffULL);
+- wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+- wr32(E1000_RDLEN(reg_idx),
+- ring->count * sizeof(union e1000_adv_rx_desc));
++ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
++ rdba & 0x00000000ffffffffULL);
++ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
++ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
++ ring->count * sizeof(union e1000_adv_rx_desc));
+
+ /* initialize head and tail */
+- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+- wr32(E1000_RDH(reg_idx), 0);
++ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
++ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
++ /* reset next-to- use/clean to place SW in sync with hardwdare */
++ ring->next_to_clean = 0;
++ ring->next_to_use = 0;
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ ring->next_to_alloc = 0;
++
++#endif
+ /* set descriptor configuration */
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
++ E1000_SRRCTL_BSIZEPKT_SHIFT;
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
++#ifdef HAVE_PTP_1588_CLOCK
+ if (hw->mac.type >= e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+- /* Only set Drop Enable if we are supporting multiple queues */
+- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
++#endif /* HAVE_PTP_1588_CLOCK */
++ /*
++ * We should set the drop enable bit if:
++ * SR-IOV is enabled
++ * or
++ * Flow Control is disabled and number of RX queues > 1
++ *
++ * This allows us to avoid head of line blocking for security
++ * and performance reasons.
++ */
++ if (adapter->vfs_allocated_count ||
++ (adapter->num_rx_queues > 1 &&
++ (hw->fc.requested_mode == e1000_fc_none ||
++ hw->fc.requested_mode == e1000_fc_rx_pause)))
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+- wr32(E1000_SRRCTL(reg_idx), srrctl);
++ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
+
+ /* set filtering for VMDQ pools */
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
+@@ -3670,14 +4120,14 @@
+
+ /* enable receive descriptor fetching */
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+- wr32(E1000_RXDCTL(reg_idx), rxdctl);
++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
+ }
+
+ /**
+- * igb_configure_rx - Configure receive Unit after Reset
+- * @adapter: board private structure
++ * igb_configure_rx - Configure receive Unit after Reset
++ * @adapter: board private structure
+ *
+- * Configure the Rx unit of the MAC after a reset.
++ * Configure the Rx unit of the MAC after a reset.
+ **/ + static void igb_configure_rx(struct igb_adapter *adapter) + { +@@ -3698,10 +4148,10 @@ + } + + /** +- * igb_free_tx_resources - Free Tx Resources per Queue +- * @tx_ring: Tx descriptor ring for a specific queue ++ * igb_free_tx_resources - Free Tx Resources per Queue ++ * @tx_ring: Tx descriptor ring for a specific queue + * +- * Free all transmit software resources ++ * Free all transmit software resources + **/ + void igb_free_tx_resources(struct igb_ring *tx_ring) + { +@@ -3721,10 +4171,10 @@ + } + + /** +- * igb_free_all_tx_resources - Free Tx Resources for All Queues +- * @adapter: board private structure ++ * igb_free_all_tx_resources - Free Tx Resources for All Queues ++ * @adapter: board private structure + * +- * Free all transmit software resources ++ * Free all transmit software resources + **/ + static void igb_free_all_tx_resources(struct igb_adapter *adapter) + { +@@ -3746,9 +4196,9 @@ + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, +- dma_unmap_addr(tx_buffer, dma), +- dma_unmap_len(tx_buffer, len), +- DMA_TO_DEVICE); ++ dma_unmap_addr(tx_buffer, dma), ++ dma_unmap_len(tx_buffer, len), ++ DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; +@@ -3757,8 +4207,8 @@ + } + + /** +- * igb_clean_tx_ring - Free Tx Buffers +- * @tx_ring: ring to be cleaned ++ * igb_clean_tx_ring - Free Tx Buffers ++ * @tx_ring: ring to be cleaned + **/ + static void igb_clean_tx_ring(struct igb_ring *tx_ring) + { +@@ -3788,8 +4238,8 @@ + } + + /** +- * igb_clean_all_tx_rings - Free Tx Buffers for all queues +- * @adapter: board private structure ++ * igb_clean_all_tx_rings - Free Tx Buffers for all queues ++ * @adapter: board private structure + **/ + static void igb_clean_all_tx_rings(struct igb_adapter *adapter) + { +@@ -3800,10 +4250,10 @@ + } + + /** +- * igb_free_rx_resources - Free Rx Resources +- * @rx_ring: ring to clean the resources from ++ * igb_free_rx_resources - Free Rx Resources ++ * @rx_ring: ring to clean the resources from + * +- * Free all receive software resources ++ * Free all receive software resources + **/ + void igb_free_rx_resources(struct igb_ring *rx_ring) + { +@@ -3823,10 +4273,10 @@ + } + + /** +- * igb_free_all_rx_resources - Free Rx Resources for All Queues +- * @adapter: board private structure ++ * igb_free_all_rx_resources - Free Rx Resources for All Queues ++ * @adapter: board private structure + * +- * Free all receive software resources ++ * Free all receive software resources + **/ + static void igb_free_all_rx_resources(struct igb_adapter *adapter) + { +@@ -3837,25 +4287,40 @@ + } + + /** +- * igb_clean_rx_ring - Free Rx Buffers per Queue +- * @rx_ring: ring to free buffers from ++ * igb_clean_rx_ring - Free Rx Buffers per Queue ++ * @rx_ring: ring to free buffers from + **/ +-static void igb_clean_rx_ring(struct igb_ring *rx_ring) ++void igb_clean_rx_ring(struct igb_ring *rx_ring) + { + unsigned long size; + u16 i; + ++ if (!rx_ring->rx_buffer_info) ++ return; ++ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + +- if (!rx_ring->rx_buffer_info) +- return; +- ++#endif + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ if (buffer_info->dma) { ++ dma_unmap_single(rx_ring->dev, ++ buffer_info->dma, ++ rx_ring->rx_buffer_len, ++ DMA_FROM_DEVICE); ++ buffer_info->dma = 0; ++ } + ++ 
if (buffer_info->skb) { ++ dev_kfree_skb(buffer_info->skb); ++ buffer_info->skb = NULL; ++ } ++#else + if (!buffer_info->page) + continue; + +@@ -3866,6 +4331,7 @@ + __free_page(buffer_info->page); + + buffer_info->page = NULL; ++#endif + } + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; +@@ -3880,8 +4346,8 @@ + } + + /** +- * igb_clean_all_rx_rings - Free Rx Buffers for all queues +- * @adapter: board private structure ++ * igb_clean_all_rx_rings - Free Rx Buffers for all queues ++ * @adapter: board private structure + **/ + static void igb_clean_all_rx_rings(struct igb_adapter *adapter) + { +@@ -3892,11 +4358,11 @@ + } + + /** +- * igb_set_mac - Change the Ethernet Address of the NIC +- * @netdev: network interface device structure +- * @p: pointer to an address structure ++ * igb_set_mac - Change the Ethernet Address of the NIC ++ * @netdev: network interface device structure ++ * @p: pointer to an address structure + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + **/ + static int igb_set_mac(struct net_device *netdev, void *p) + { +@@ -3910,60 +4376,155 @@ + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + +- /* set the correct pool for the new PF MAC address in entry 0 */ +- igb_rar_set_qsel(adapter, hw->mac.addr, 0, +- adapter->vfs_allocated_count); ++ /* set the correct pool for the new PF MAC address in entry 0 */ ++ igb_rar_set_qsel(adapter, hw->mac.addr, 0, ++ adapter->vfs_allocated_count); ++ ++ return 0; ++} ++ ++/** ++ * igb_write_mc_addr_list - write multicast addresses to MTA ++ * @netdev: network interface device structure ++ * ++ * Writes multicast address list to the MTA hash table. ++ * Returns: -ENOMEM on failure ++ * 0 on no addresses written ++ * X on writing X addresses to MTA ++ **/ ++int igb_write_mc_addr_list(struct net_device *netdev) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *ha; ++#endif ++ u8 *mta_list; ++ int i, count; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ int vm; ++#endif ++ count = netdev_mc_count(netdev); ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ for (vm = 1; vm < adapter->vmdq_pools; vm++) { ++ if (!adapter->vmdq_netdev[vm]) ++ break; ++ if (!netif_running(adapter->vmdq_netdev[vm])) ++ continue; ++ count += netdev_mc_count(adapter->vmdq_netdev[vm]); ++ } ++#endif ++ ++ if (!count) { ++ e1000_update_mc_addr_list(hw, NULL, 0); ++ return 0; ++ } ++ mta_list = kzalloc(count * 6, GFP_ATOMIC); ++ if (!mta_list) ++ return -ENOMEM; ++ ++ /* The shared function expects a packed array of only addresses. 
*/ ++ i = 0; ++ netdev_for_each_mc_addr(ha, netdev) ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); ++#else ++ memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); ++#endif ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ for (vm = 1; vm < adapter->vmdq_pools; vm++) { ++ if (!adapter->vmdq_netdev[vm]) ++ break; ++ if (!netif_running(adapter->vmdq_netdev[vm]) || ++ !netdev_mc_count(adapter->vmdq_netdev[vm])) ++ continue; ++ netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm]) ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ memcpy(mta_list + (i++ * ETH_ALEN), ++ ha->addr, ETH_ALEN); ++#else ++ memcpy(mta_list + (i++ * ETH_ALEN), ++ ha->dmi_addr, ETH_ALEN); ++#endif ++ } ++#endif ++ e1000_update_mc_addr_list(hw, mta_list, i); ++ kfree(mta_list); ++ ++ return count; ++} ++ ++void igb_full_sync_mac_table(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ int i; + +- return 0; ++ for (i = 0; i < hw->mac.rar_entry_count; i++) ++ igb_rar_set(adapter, i); + } + +-/** +- * igb_write_mc_addr_list - write multicast addresses to MTA +- * @netdev: network interface device structure +- * +- * Writes multicast address list to the MTA hash table. +- * Returns: -ENOMEM on failure +- * 0 on no addresses written +- * X on writing X addresses to MTA +- **/ +-static int igb_write_mc_addr_list(struct net_device *netdev) ++void igb_sync_mac_table(struct igb_adapter *adapter) + { +- struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- struct netdev_hw_addr *ha; +- u8 *mta_list; + int i; + +- if (netdev_mc_empty(netdev)) { +- /* nothing to program, so clear mc list */ +- igb_update_mc_addr_list(hw, NULL, 0); +- igb_restore_vf_multicasts(adapter); +- return 0; ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED) ++ igb_rar_set(adapter, i); ++ adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED); + } ++} + +- mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); +- if (!mta_list) +- return -ENOMEM; ++int igb_available_rars(struct igb_adapter *adapter) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ int i, count = 0; + +- /* The shared function expects a packed array of only addresses. */ +- i = 0; +- netdev_for_each_mc_addr(ha, netdev) +- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (adapter->mac_table[i].state == 0) ++ count++; ++ } ++ return count; ++} + +- igb_update_mc_addr_list(hw, mta_list, i); +- kfree(mta_list); ++static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, ++ u8 qsel) ++{ ++ u32 rar_low, rar_high; ++ struct e1000_hw *hw = &adapter->hw; ++ ++ /* HW expects these in little endian so we reverse the byte order ++ * from network order (big endian) to little endian ++ */ ++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); ++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); ++ ++ /* Indicate to hardware the Address is Valid. 
*/ ++ rar_high |= E1000_RAH_AV; ++ ++ if (hw->mac.type == e1000_82575) ++ rar_high |= E1000_RAH_POOL_1 * qsel; ++ else ++ rar_high |= E1000_RAH_POOL_1 << qsel; + +- return netdev_mc_count(netdev); ++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); ++ E1000_WRITE_FLUSH(hw); ++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); ++ E1000_WRITE_FLUSH(hw); + } + ++#ifdef HAVE_SET_RX_MODE + /** +- * igb_write_uc_addr_list - write unicast addresses to RAR table +- * @netdev: network interface device structure ++ * igb_write_uc_addr_list - write unicast addresses to RAR table ++ * @netdev: network interface device structure + * +- * Writes unicast address list to the RAR table. +- * Returns: -ENOMEM on failure/insufficient address space +- * 0 on no addresses written +- * X on writing X addresses to the RAR table ++ * Writes unicast address list to the RAR table. ++ * Returns: -ENOMEM on failure/insufficient address space ++ * 0 on no addresses written ++ * X on writing X addresses to the RAR table + **/ + static int igb_write_uc_addr_list(struct net_device *netdev) + { +@@ -3974,39 +4535,48 @@ + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ +- if (netdev_uc_count(netdev) > rar_entries) ++ if (netdev_uc_count(netdev) > igb_available_rars(adapter)) + return -ENOMEM; +- + if (!netdev_uc_empty(netdev) && rar_entries) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; +- ++#else ++ struct dev_mc_list *ha; ++#endif + netdev_for_each_uc_addr(ha, netdev) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST + if (!rar_entries) + break; + igb_rar_set_qsel(adapter, ha->addr, + rar_entries--, + vfn); ++#else ++ igb_rar_set_qsel(adapter, ha->da_addr, ++ rar_entries--, ++ vfn); ++#endif + count++; + } + } ++ + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) { +- wr32(E1000_RAH(rar_entries), 0); +- wr32(E1000_RAL(rar_entries), 0); ++ E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0); ++ E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0); + } +- wrfl(); +- ++ E1000_WRITE_FLUSH(hw); + return count; + } + ++#endif /* HAVE_SET_RX_MODE */ + /** +- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set +- * @netdev: network interface device structure ++ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set ++ * @netdev: network interface device structure + * +- * The set_rx_mode entry point is called whenever the unicast or multicast +- * address lists or the network interface flags are updated. This routine is +- * responsible for configuring the hardware for proper unicast, multicast, +- * promiscuous mode, and all-multi behavior. ++ * The set_rx_mode entry point is called whenever the unicast or multicast ++ * address lists or the network interface flags are updated. This routine is ++ * responsible for configuring the hardware for proper unicast, multicast, ++ * promiscuous mode, and all-multi behavior. 
+ **/ + static void igb_set_rx_mode(struct net_device *netdev) + { +@@ -4017,23 +4587,24 @@ + int count; + + /* Check for Promiscuous and All Multicast modes */ +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* clear the effected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); + + if (netdev->flags & IFF_PROMISC) { +- /* retain VLAN HW filtering if in VT mode */ +- if (adapter->vfs_allocated_count) +- rctl |= E1000_RCTL_VFE; + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); ++ /* retain VLAN HW filtering if in VT mode */ ++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) ++ rctl |= E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { +- /* Write addresses to the MTA, if the attempt fails ++ /* ++ * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ +@@ -4045,7 +4616,9 @@ + vmolr |= E1000_VMOLR_ROMPE; + } + } +- /* Write addresses to available RAR registers, if there is not ++#ifdef HAVE_SET_RX_MODE ++ /* ++ * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ +@@ -4054,21 +4627,23 @@ + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } ++#endif /* HAVE_SET_RX_MODE */ + rctl |= E1000_RCTL_VFE; + } +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + +- /* In order to support SR-IOV and eventually VMDq it is necessary to set ++ /* ++ * In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ +- if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) ++ if (hw->mac.type < e1000_82576) + return; + +- vmolr |= rd32(E1000_VMOLR(vfn)) & +- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); +- wr32(E1000_VMOLR(vfn), vmolr); ++ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) & ++ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + igb_restore_vf_multicasts(adapter); + } + +@@ -4080,7 +4655,7 @@ + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: +- wvbr = rd32(E1000_WVBR); ++ wvbr = E1000_READ_REG(hw, E1000_WVBR); + if (!wvbr) + return; + break; +@@ -4100,15 +4675,34 @@ + if (!adapter->wvbr) + return; + +- for (j = 0; j < adapter->vfs_allocated_count; j++) { +- if (adapter->wvbr & (1 << j) || +- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { +- dev_warn(&adapter->pdev->dev, +- "Spoof event(s) detected on VF %d\n", j); +- adapter->wvbr &= +- ~((1 << j) | +- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); ++ switch (adapter->hw.mac.type) { ++ case e1000_82576: ++ for (j = 0; j < adapter->vfs_allocated_count; j++) { ++ if (adapter->wvbr & (1 << j) || ++ adapter->wvbr & (1 << (j ++ + IGB_STAGGERED_QUEUE_OFFSET))) { ++ DPRINTK(DRV, WARNING, ++ "Spoof event(s) detected on VF %d\n", ++ j); ++ adapter->wvbr &= ++ ~((1 << j) | ++ (1 << (j + ++ IGB_STAGGERED_QUEUE_OFFSET))); ++ } ++ } ++ break; ++ case e1000_i350: ++ for (j = 0; j < adapter->vfs_allocated_count; j++) { ++ if (adapter->wvbr & (1 << j)) { ++ DPRINTK(DRV, WARNING, ++ "Spoof event(s) detected on VF %d\n", ++ j); ++ adapter->wvbr &= ~(1 << j); ++ } + } ++ break; ++ default: ++ break; + } + } + +@@ -4118,21 +4712,22 @@ + static void igb_update_phy_info(unsigned long data) + { + struct igb_adapter *adapter = (struct igb_adapter *) data; +- igb_get_phy_info(&adapter->hw); ++ ++ e1000_get_phy_info(&adapter->hw); + } + + /** +- * igb_has_link - check shared code for link and determine up/down +- * @adapter: pointer to driver private info ++ * igb_has_link - check shared code for link and determine up/down ++ * @adapter: pointer to driver private info + **/ + bool igb_has_link(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +- bool link_active = false; ++ bool link_active = FALSE; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay +- * false until the e1000_check_for_link establishes link ++ * false until the igb_e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { +@@ -4140,11 +4735,11 @@ + if (!hw->mac.get_link_status) + return true; + case e1000_media_type_internal_serdes: +- hw->mac.ops.check_for_link(hw); ++ igb_e1000_check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; +- default: + case e1000_media_type_unknown: ++ default: + break; + } + +@@ -4162,27 +4757,9 @@ + return link_active; + } + +-static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +-{ +- bool ret = false; +- u32 ctrl_ext, thstat; +- +- /* check for thermal sensor event on i350 copper only */ +- if (hw->mac.type == e1000_i350) { +- thstat = rd32(E1000_THSTAT); +- ctrl_ext = rd32(E1000_CTRL_EXT); +- +- if ((hw->phy.media_type == e1000_media_type_copper) && +- !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) +- ret = !!(thstat & event); +- } +- +- return ret; +-} +- + /** +- * igb_watchdog - Timer Call-back +- * @data: pointer to adapter cast into an unsigned long ++ * igb_watchdog - Timer Call-back ++ * @data: pointer to adapter cast into an unsigned long + **/ + static void igb_watchdog(unsigned long data) + { +@@ -4197,29 +4774,28 @@ + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; +- struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; +- u32 link; ++ u32 thstat, ctrl_ext, link; + int i; + u32 connsw; + + link = igb_has_link(adapter); + +- if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { +- if (time_after(jiffies, (adapter->link_check_timeout + HZ))) +- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; +- else +- link = false; +- } +- + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { +- connsw = rd32(E1000_CONNSW); ++ connsw = E1000_READ_REG(hw, E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } ++ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { ++ if (time_after(jiffies, (adapter->link_check_timeout + HZ))) ++ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; ++ else ++ link = FALSE; ++ } ++ + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { +@@ -4227,48 +4803,29 @@ + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } ++ + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + +- hw->mac.ops.get_speed_and_duplex(hw, +- &adapter->link_speed, +- &adapter->link_duplex); ++ igb_e1000_get_speed_and_duplex(hw, ++ &adapter->link_speed, ++ &adapter->link_duplex); + +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, +- "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", +- netdev->name, +- adapter->link_speed, +- adapter->link_duplex == FULL_DUPLEX ? +- "Full" : "Half", +- (ctrl & E1000_CTRL_TFCE) && +- (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : +- (ctrl & E1000_CTRL_RFCE) ? "RX" : +- (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); +- +- /* disable EEE if enabled */ +- if ((adapter->flags & IGB_FLAG_EEE) && +- (adapter->link_duplex == HALF_DUPLEX)) { +- dev_info(&adapter->pdev->dev, +- "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex.\n"); +- adapter->hw.dev_spec._82575.eee_disable = true; +- adapter->flags &= ~IGB_FLAG_EEE; +- } +- +- /* check if SmartSpeed worked */ +- igb_check_downshift(hw); +- if (phy->speed_downgraded) +- netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); +- +- /* check for thermal sensor event */ +- if (igb_thermal_sensor_event(hw, +- E1000_THSTAT_LINK_THROTTLE)) +- netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); +- ++ "igb: %s NIC Link is Up %d Mbps %s, Flow Control: %s\n", ++ netdev->name, ++ adapter->link_speed, ++ adapter->link_duplex == FULL_DUPLEX ? ++ "Full Duplex" : "Half Duplex", ++ ((ctrl & E1000_CTRL_TFCE) && ++ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : ++ ((ctrl & E1000_CTRL_RFCE) ? "RX" : ++ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { +@@ -4278,12 +4835,17 @@ + case SPEED_100: + /* maybe add some timeout factor ? */ + break; ++ default: ++ break; + } + + netif_carrier_on(netdev); ++ netif_tx_wake_all_queues(netdev); + + igb_ping_all_vfs(adapter); ++#ifdef IFLA_VF_MAX + igb_check_vf_rate_limit(adapter); ++#endif /* IFLA_VF_MAX */ + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) +@@ -4294,17 +4856,33 @@ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; +- +- /* check for thermal sensor event */ +- if (igb_thermal_sensor_event(hw, +- E1000_THSTAT_PWR_DOWN)) { +- netdev_err(netdev, "The network adapter was stopped because it overheated\n"); ++ /* check for thermal sensor event on i350 */ ++ if (hw->mac.type == e1000_i350) { ++ thstat = E1000_READ_REG(hw, E1000_THSTAT); ++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ++ if ((hw->phy.media_type == ++ e1000_media_type_copper) && ++ !(ctrl_ext & ++ E1000_CTRL_EXT_LINK_MODE_SGMII)) { ++ if (thstat & E1000_THSTAT_PWR_DOWN) { ++ netdev_err(netdev, ++ "igb: %s The network adapter was stopped because it overheated.\n", ++ netdev->name); ++ } ++ if (thstat & ++ E1000_THSTAT_LINK_THROTTLE) { ++ netdev_err(netdev, ++ "igb: %s The network adapter supported link speed was downshifted because it overheated.\n", ++ netdev->name); ++ } ++ } + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); ++ netif_tx_stop_all_queues(netdev); + + igb_ping_all_vfs(adapter); + +@@ -4312,7 +4890,6 @@ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); +- + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); +@@ -4328,6 +4905,7 @@ + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { ++ hw->mac.ops.power_up_serdes(hw); + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); +@@ -4337,12 +4915,11 @@ + } + } + +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, &adapter->stats64); +- spin_unlock(&adapter->stats64_lock); ++ igb_update_stats(adapter); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; ++ + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got 
queued Tx work that's never going +@@ -4361,19 +4938,18 @@ + set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + +- /* Cause software interrupt to ensure Rx ring is cleaned */ +- if (adapter->flags & IGB_FLAG_HAS_MSIX) { ++ /* Cause software interrupt to ensure rx ring is cleaned */ ++ if (adapter->msix_entries) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; +- wr32(E1000_EICS, eics); ++ E1000_WRITE_REG(hw, E1000_EICS, eics); + } else { +- wr32(E1000_ICS, E1000_ICS_RXDMT0); ++ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0); + } + + igb_spoof_check(adapter); +- igb_ptp_rx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { +@@ -4386,6 +4962,70 @@ + } + } + ++static void igb_dma_err_task(struct work_struct *work) ++{ ++ struct igb_adapter *adapter = container_of(work, ++ struct igb_adapter, ++ dma_err_task); ++ int vf; ++ struct e1000_hw *hw = &adapter->hw; ++ struct net_device *netdev = adapter->netdev; ++ u32 hgptc; ++ u32 ciaa, ciad; ++ ++ hgptc = E1000_READ_REG(hw, E1000_HGPTC); ++ if (hgptc) /* If incrementing then no need for the check below */ ++ goto dma_timer_reset; ++ /* ++ * Check to see if a bad DMA write target from an errant or ++ * malicious VF has caused a PCIe error. If so then we can ++ * issue a VFLR to the offending VF(s) and then resume without ++ * requesting a full slot reset. ++ */ ++ ++ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { ++ ciaa = (vf << 16) | 0x80000000; ++ /* 32 bit read so align, we really want status at offset 6 */ ++ ciaa |= PCI_COMMAND; ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ ciad = E1000_READ_REG(hw, E1000_CIAD); ++ ciaa &= 0x7FFFFFFF; ++ /* disable debug mode asap after reading data */ ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ /* Get the upper 16 bits which will be the PCI status reg */ ++ ciad >>= 16; ++ if (ciad & (PCI_STATUS_REC_MASTER_ABORT | ++ PCI_STATUS_REC_TARGET_ABORT | ++ PCI_STATUS_SIG_SYSTEM_ERROR)) { ++ netdev_err(netdev, "VF %d suffered error\n", vf); ++ /* Issue VFLR */ ++ ciaa = (vf << 16) | 0x80000000; ++ ciaa |= 0xA8; ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ ciad = 0x00008000; /* VFLR */ ++ E1000_WRITE_REG(hw, E1000_CIAD, ciad); ++ ciaa &= 0x7FFFFFFF; ++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); ++ } ++ } ++dma_timer_reset: ++ /* Reset the timer */ ++ if (!test_bit(__IGB_DOWN, &adapter->state)) ++ mod_timer(&adapter->dma_err_timer, ++ round_jiffies(jiffies + HZ / 10)); ++} ++ ++/** ++ * igb_dma_err_timer - Timer Call-back ++ * @data: pointer to adapter cast into an unsigned long ++ **/ ++static void igb_dma_err_timer(unsigned long data) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ /* Do the rest outside of interrupt context */ ++ schedule_work(&adapter->dma_err_task); ++} ++ + enum latency_range { + lowest_latency = 0, + low_latency = 1, +@@ -4394,19 +5034,20 @@ + }; + + /** +- * igb_update_ring_itr - update the dynamic ITR value based on packet size +- * @q_vector: pointer to q_vector ++ * igb_update_ring_itr - update the dynamic ITR value based on packet size + * +- * Stores a new ITR value based on strictly on packet size. This +- * algorithm is less sophisticated than that used in igb_update_itr, +- * due to the difficulty of synchronizing statistics across multiple +- * receive rings. 
The divisors and thresholds used by this function +- * were determined based on theoretical maximum wire speed and testing +- * data, in order to minimize response time while increasing bulk +- * throughput. +- * This functionality is controlled by ethtool's coalescing settings. +- * NOTE: This function is called only when operating in a multiqueue +- * receive environment. ++ * Stores a new ITR value based on strictly on packet size. This ++ * algorithm is less sophisticated than that used in igb_update_itr, ++ * due to the difficulty of synchronizing statistics across multiple ++ * receive rings. The divisors and thresholds used by this function ++ * were determined based on theoretical maximum wire speed and testing ++ * data, in order to minimize response time while increasing bulk ++ * throughput. ++ * This functionality is controlled by the InterruptThrottleRate module ++ * parameter (see igb_param.c) ++ * NOTE: This function is called only when operating in a multiqueue ++ * receive environment. ++ * @q_vector: pointer to q_vector + **/ + static void igb_update_ring_itr(struct igb_q_vector *q_vector) + { +@@ -4418,9 +5059,13 @@ + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. + */ +- if (adapter->link_speed != SPEED_1000) { ++ switch (adapter->link_speed) { ++ case SPEED_10: ++ case SPEED_100: + new_val = IGB_4K_ITR; + goto set_itr_val; ++ default: ++ break; + } + + packets = q_vector->rx.total_packets; +@@ -4467,20 +5112,20 @@ + } + + /** +- * igb_update_itr - update the dynamic ITR value based on statistics +- * @q_vector: pointer to q_vector +- * @ring_container: ring info to update the itr for +- * +- * Stores a new ITR value based on packets and byte +- * counts during the last interrupt. The advantage of per interrupt +- * computation is faster updates and more accurate ITR for the current +- * traffic pattern. Constants in this function were computed +- * based on theoretical maximum wire speed and thresholds were set based +- * on testing data as well as attempting to minimize response time +- * while increasing bulk throughput. +- * This functionality is controlled by ethtool's coalescing settings. +- * NOTE: These calculations are only valid when operating in a single- +- * queue environment. ++ * igb_update_itr - update the dynamic ITR value based on statistics ++ * Stores a new ITR value based on packets and byte ++ * counts during the last interrupt. The advantage of per interrupt ++ * computation is faster updates and more accurate ITR for the current ++ * traffic pattern. Constants in this function were computed ++ * based on theoretical maximum wire speed and thresholds were set based ++ * on testing data as well as attempting to minimize response time ++ * while increasing bulk throughput. ++ * this functionality is controlled by the InterruptThrottleRate module ++ * parameter (see igb_param.c) ++ * NOTE: These calculations are only valid when operating in a single- ++ * queue environment. 
++ * @q_vector: pointer to q_vector ++ * @ring_container: ring info to update the itr for + **/ + static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +@@ -4504,12 +5149,13 @@ + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ +- if (bytes/packets > 8000) ++ if (bytes/packets > 8000) { + itrval = bulk_latency; +- else if ((packets < 10) || ((bytes/packets) > 1200)) ++ } else if ((packets < 10) || ((bytes/packets) > 1200)) { + itrval = bulk_latency; +- else if ((packets > 35)) ++ } else if ((packets > 35)) { + itrval = lowest_latency; ++ } + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { +@@ -4541,10 +5187,14 @@ + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ +- if (adapter->link_speed != SPEED_1000) { ++ switch (adapter->link_speed) { ++ case SPEED_10: ++ case SPEED_100: + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; ++ default: ++ break; + } + + igb_update_itr(q_vector, &q_vector->tx); +@@ -4580,9 +5230,9 @@ + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? +- max((new_itr * q_vector->itr_val) / +- (new_itr + (q_vector->itr_val >> 2)), +- new_itr) : new_itr; ++ max((new_itr * q_vector->itr_val) / ++ (new_itr + (q_vector->itr_val >> 2)), ++ new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR +@@ -4594,8 +5244,8 @@ + } + } + +-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, +- u32 type_tucmd, u32 mss_l4len_idx) ++void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, ++ u32 type_tucmd, u32 mss_l4len_idx) + { + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; +@@ -4622,26 +5272,32 @@ + struct igb_tx_buffer *first, + u8 *hdr_len) + { ++#ifdef NETIF_F_TSO + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; +- int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) ++#endif /* NETIF_F_TSO */ + return 0; ++#ifdef NETIF_F_TSO + +- err = skb_cow_head(skb, 0); +- if (err < 0) +- return err; ++ if (skb_header_cloned(skb)) { ++ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++ ++ if (err) ++ return err; ++ } + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); ++ + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, +@@ -4652,6 +5308,7 @@ + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; ++#ifdef NETIF_F_TSO6 + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, +@@ -4659,6 +5316,7 @@ + 0, IPPROTO_TCP, 0); + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; ++#endif + } + + /* compute header lengths */ +@@ -4681,6 +5339,7 @@ + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; ++#endif /* NETIF_F_TSO */ + } + + static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +@@ -4694,38 +5353,42 @@ + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) + return; + } else { +- u8 l4_hdr = 0; ++ u8 nexthdr = 0; + + switch (first->protocol) { +- case 
htons(ETH_P_IP): ++ case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; +- l4_hdr = ip_hdr(skb)->protocol; ++ nexthdr = ip_hdr(skb)->protocol; + break; +- case htons(ETH_P_IPV6): ++#ifdef NETIF_F_IPV6_CSUM ++ case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); +- l4_hdr = ipv6_hdr(skb)->nexthdr; ++ nexthdr = ipv6_hdr(skb)->nexthdr; + break; ++#endif + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, +- "partial checksum but proto=%x!\n", +- first->protocol); ++ "partial checksum but proto=%x!\n", ++ first->protocol); + } + break; + } + +- switch (l4_hdr) { ++ switch (nexthdr) { + case IPPROTO_TCP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + E1000_ADVTXD_L4LEN_SHIFT; + break; ++#ifdef HAVE_SCTP + case IPPROTO_SCTP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + E1000_ADVTXD_L4LEN_SHIFT; + break; ++#endif + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + E1000_ADVTXD_L4LEN_SHIFT; +@@ -4733,8 +5396,8 @@ + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, +- "partial checksum but l4 proto=%x!\n", +- l4_hdr); ++ "partial checksum but l4 proto=%x!\n", ++ nexthdr); + } + break; + } +@@ -4773,9 +5436,6 @@ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + +- /* insert frame checksum */ +- cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); +- + return cmd_type; + } + +@@ -4882,11 +5542,11 @@ + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); +- + /* set the timestamp */ + first->time_stamp = jiffies; + +- /* Force memory writes to complete before letting h/w know there ++ /* ++ * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * +@@ -4907,7 +5567,7 @@ + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail +- * at a time, it synchronizes IO on IA64/Altix systems ++ * at a time, it syncronizes IO on IA64/Altix systems + */ + mmiowb(); + +@@ -4932,9 +5592,12 @@ + + static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) + { +- struct net_device *netdev = tx_ring->netdev; ++ struct net_device *netdev = netdev_ring(tx_ring); + +- netif_stop_subqueue(netdev, tx_ring->queue_index); ++ if (netif_is_multiqueue(netdev)) ++ netif_stop_subqueue(netdev, ring_queue_index(tx_ring)); ++ else ++ netif_stop_queue(netdev); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); +@@ -4949,11 +5612,12 @@ + return -EBUSY; + + /* A reprieve! 
*/ +- netif_wake_subqueue(netdev, tx_ring->queue_index); ++ if (netif_is_multiqueue(netdev)) ++ netif_wake_subqueue(netdev, ring_queue_index(tx_ring)); ++ else ++ netif_wake_queue(netdev); + +- u64_stats_update_begin(&tx_ring->tx_syncp2); +- tx_ring->tx_stats.restart_queue2++; +- u64_stats_update_end(&tx_ring->tx_syncp2); ++ tx_ring->tx_stats.restart_queue++; + + return 0; + } +@@ -4971,25 +5635,26 @@ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; ++#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD ++ unsigned short f; ++#endif + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + +- /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, ++ /* ++ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { +- unsigned short f; +- +- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) +- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +- } else { +- count += skb_shinfo(skb)->nr_frags; +- } +- ++#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD ++ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) ++ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); ++#else ++ count += skb_shinfo(skb)->nr_frags; ++#endif + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; +@@ -5001,12 +5666,21 @@ + first->bytecount = skb->len; + first->gso_segs = 1; + ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef SKB_SHARED_TX_IS_UNION ++ if (unlikely(skb_tx(skb)->hardware)) { ++#else + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { ++#endif + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { ++#ifdef SKB_SHARED_TX_IS_UNION ++ skb_tx(skb)->in_progress = 1; ++#else + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++#endif + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); +@@ -5015,12 +5689,11 @@ + schedule_work(&adapter->ptp_tx_work); + } + } +- ++#endif /* HAVE_PTP_1588_CLOCK */ + skb_tx_timestamp(skb); +- +- if (vlan_tx_tag_present(skb)) { ++ if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; +- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); ++ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ +@@ -5035,6 +5708,10 @@ + + igb_tx_map(tx_ring, first, hdr_len); + ++#ifndef HAVE_TRANS_START_IN_QUEUE ++ netdev_ring(tx_ring)->trans_start = jiffies; ++ ++#endif + /* Make sure there is space in the ring for the next send. */ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + +@@ -5046,6 +5723,7 @@ + return NETDEV_TX_OK; + } + ++#ifdef HAVE_TX_MQ + static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) + { +@@ -5056,6 +5734,9 @@ + + return adapter->tx_ring[r_idx]; + } ++#else ++#define igb_tx_queue_mapping(_adapter, _skb) ((_adapter)->tx_ring[0]) ++#endif + + static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +@@ -5072,22 +5753,22 @@ + return NETDEV_TX_OK; + } + +- /* The minimum packet size with TCTL.PSP set is 17 so pad the skb ++ /* ++ * The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
+ */ +- if (unlikely(skb->len < 17)) { +- if (skb_pad(skb, 17 - skb->len)) ++ if (skb->len < 17) { ++ if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; +- skb_set_tail_pointer(skb, 17); + } + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); + } + + /** +- * igb_tx_timeout - Respond to a Tx Hang +- * @netdev: network interface device structure ++ * igb_tx_timeout - Respond to a Tx Hang ++ * @netdev: network interface device structure + **/ + static void igb_tx_timeout(struct net_device *netdev) + { +@@ -5101,59 +5782,64 @@ + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); +- wr32(E1000_EICS, +- (adapter->eims_enable_mask & ~adapter->eims_other)); ++ E1000_WRITE_REG(hw, E1000_EICS, ++ (adapter->eims_enable_mask & ~adapter->eims_other)); + } + + static void igb_reset_task(struct work_struct *work) + { + struct igb_adapter *adapter; ++ + adapter = container_of(work, struct igb_adapter, reset_task); + +- igb_dump(adapter); +- netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); + } + + /** +- * igb_get_stats64 - Get System Network Statistics +- * @netdev: network interface device structure +- * @stats: rtnl_link_stats64 pointer ++ * igb_get_stats - Get System Network Statistics ++ * @netdev: network interface device structure ++ * ++ * Returns the address of the device statistics structure. ++ * The statistics are updated here and also from the timer callback. + **/ +-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) ++static struct net_device_stats *igb_get_stats(struct net_device *netdev) + { + struct igb_adapter *adapter = netdev_priv(netdev); + +- spin_lock(&adapter->stats64_lock); +- igb_update_stats(adapter, &adapter->stats64); +- memcpy(stats, &adapter->stats64, sizeof(*stats)); +- spin_unlock(&adapter->stats64_lock); ++ if (!test_bit(__IGB_RESETTING, &adapter->state)) ++ igb_update_stats(adapter); + +- return stats; ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ /* only return the current stats */ ++ return &netdev->stats; ++#else ++ /* only return the current stats */ ++ return &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + } + + /** +- * igb_change_mtu - Change the Maximum Transfer Unit +- * @netdev: network interface device structure +- * @new_mtu: new value for maximum frame size ++ * igb_change_mtu - Change the Maximum Transfer Unit ++ * @netdev: network interface device structure ++ * @new_mtu: new value for maximum frame size + * +- * Returns 0 on success, negative on failure ++ * Returns 0 on success, negative on failure + **/ + static int igb_change_mtu(struct net_device *netdev, int new_mtu) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { +- dev_err(&pdev->dev, "Invalid MTU setting\n"); ++ dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n"); + return -EINVAL; + } + + #define MAX_STD_JUMBO_FRAME_SIZE 9238 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { +- dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); ++ dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n"); + return -EINVAL; + } + +@@ -5170,9 +5856,10 @@ + if (netif_running(netdev)) + igb_down(adapter); + +- dev_info(&pdev->dev, "changing MTU from %d to %d\n", ++ dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n", + 
netdev->mtu, new_mtu); + netdev->mtu = new_mtu; ++ hw->dev_spec._82575.mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); +@@ -5185,53 +5872,74 @@ + } + + /** +- * igb_update_stats - Update the board statistics counters +- * @adapter: board private structure ++ * igb_update_stats - Update the board statistics counters ++ * @adapter: board private structure + **/ +-void igb_update_stats(struct igb_adapter *adapter, +- struct rtnl_link_stats64 *net_stats) ++ ++void igb_update_stats(struct igb_adapter *adapter) + { ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats *net_stats = &adapter->netdev->stats; ++#else ++ struct net_device_stats *net_stats = &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct e1000_hw *hw = &adapter->hw; ++#ifdef HAVE_PCI_ERS + struct pci_dev *pdev = adapter->pdev; ++#endif + u32 reg, mpc; + u16 phy_tmp; + int i; + u64 bytes, packets; +- unsigned int start; +- u64 _bytes, _packets; ++#ifndef IGB_NO_LRO ++ u32 flushed = 0, coal = 0; ++ struct igb_q_vector *q_vector; ++#endif + + #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + +- /* Prevent stats update while adapter is being reset, or if the pci ++ /* ++ * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; ++#ifdef HAVE_PCI_ERS + if (pci_channel_offline(pdev)) + return; + ++#endif ++#ifndef IGB_NO_LRO ++ for (i = 0; i < adapter->num_q_vectors; i++) { ++ q_vector = adapter->q_vector[i]; ++ if (!q_vector) ++ continue; ++ flushed += q_vector->lrolist.stats.flushed; ++ coal += q_vector->lrolist.stats.coal; ++ } ++ adapter->lro_stats.flushed = flushed; ++ adapter->lro_stats.coal = coal; ++ ++#endif + bytes = 0; + packets = 0; +- +- rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; +- u32 rqdpc = rd32(E1000_RQDPC(i)); +- if (hw->mac.type >= e1000_i210) +- wr32(E1000_RQDPC(i), 0); ++ u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF; + +- if (rqdpc) { +- ring->rx_stats.drops += rqdpc; +- net_stats->rx_fifo_errors += rqdpc; ++ if (hw->mac.type >= e1000_i210) ++ E1000_WRITE_REG(hw, E1000_RQDPC(i), 0); ++ ring->rx_stats.drops += rqdpc_tmp; ++ net_stats->rx_fifo_errors += rqdpc_tmp; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (!ring->vmdq_netdev) { ++ bytes += ring->rx_stats.bytes; ++ packets += ring->rx_stats.packets; + } +- +- do { +- start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +- _bytes = ring->rx_stats.bytes; +- _packets = ring->rx_stats.packets; +- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +- bytes += _bytes; +- packets += _packets; ++#else ++ bytes += ring->rx_stats.bytes; ++ packets += ring->rx_stats.packets; ++#endif + } + + net_stats->rx_bytes = bytes; +@@ -5241,98 +5949,98 @@ + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; +- do { +- start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +- _bytes = ring->tx_stats.bytes; +- _packets = ring->tx_stats.packets; +- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +- bytes += _bytes; +- packets += _packets; ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (!ring->vmdq_netdev) { ++ bytes += ring->tx_stats.bytes; ++ packets += ring->tx_stats.packets; ++ } ++#else ++ bytes += ring->tx_stats.bytes; ++ packets += ring->tx_stats.packets; ++#endif + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; +- rcu_read_unlock(); + + /* read stats registers */ +- adapter->stats.crcerrs += 
rd32(E1000_CRCERRS); +- adapter->stats.gprc += rd32(E1000_GPRC); +- adapter->stats.gorc += rd32(E1000_GORCL); +- rd32(E1000_GORCH); /* clear GORCL */ +- adapter->stats.bprc += rd32(E1000_BPRC); +- adapter->stats.mprc += rd32(E1000_MPRC); +- adapter->stats.roc += rd32(E1000_ROC); +- +- adapter->stats.prc64 += rd32(E1000_PRC64); +- adapter->stats.prc127 += rd32(E1000_PRC127); +- adapter->stats.prc255 += rd32(E1000_PRC255); +- adapter->stats.prc511 += rd32(E1000_PRC511); +- adapter->stats.prc1023 += rd32(E1000_PRC1023); +- adapter->stats.prc1522 += rd32(E1000_PRC1522); +- adapter->stats.symerrs += rd32(E1000_SYMERRS); +- adapter->stats.sec += rd32(E1000_SEC); ++ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); ++ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC); ++ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL); ++ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */ ++ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC); ++ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC); ++ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC); ++ ++ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64); ++ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127); ++ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255); ++ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511); ++ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023); ++ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522); ++ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS); ++ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC); + +- mpc = rd32(E1000_MPC); ++ mpc = E1000_READ_REG(hw, E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; +- adapter->stats.scc += rd32(E1000_SCC); +- adapter->stats.ecol += rd32(E1000_ECOL); +- adapter->stats.mcc += rd32(E1000_MCC); +- adapter->stats.latecol += rd32(E1000_LATECOL); +- adapter->stats.dc += rd32(E1000_DC); +- adapter->stats.rlec += rd32(E1000_RLEC); +- adapter->stats.xonrxc += rd32(E1000_XONRXC); +- adapter->stats.xontxc += rd32(E1000_XONTXC); +- adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); +- adapter->stats.xofftxc += rd32(E1000_XOFFTXC); +- adapter->stats.fcruc += rd32(E1000_FCRUC); +- adapter->stats.gptc += rd32(E1000_GPTC); +- adapter->stats.gotc += rd32(E1000_GOTCL); +- rd32(E1000_GOTCH); /* clear GOTCL */ +- adapter->stats.rnbc += rd32(E1000_RNBC); +- adapter->stats.ruc += rd32(E1000_RUC); +- adapter->stats.rfc += rd32(E1000_RFC); +- adapter->stats.rjc += rd32(E1000_RJC); +- adapter->stats.tor += rd32(E1000_TORH); +- adapter->stats.tot += rd32(E1000_TOTH); +- adapter->stats.tpr += rd32(E1000_TPR); +- +- adapter->stats.ptc64 += rd32(E1000_PTC64); +- adapter->stats.ptc127 += rd32(E1000_PTC127); +- adapter->stats.ptc255 += rd32(E1000_PTC255); +- adapter->stats.ptc511 += rd32(E1000_PTC511); +- adapter->stats.ptc1023 += rd32(E1000_PTC1023); +- adapter->stats.ptc1522 += rd32(E1000_PTC1522); +- +- adapter->stats.mptc += rd32(E1000_MPTC); +- adapter->stats.bptc += rd32(E1000_BPTC); +- +- adapter->stats.tpt += rd32(E1000_TPT); +- adapter->stats.colc += rd32(E1000_COLC); +- +- adapter->stats.algnerrc += rd32(E1000_ALGNERRC); +- /* read internal phy specific stats */ +- reg = rd32(E1000_CTRL_EXT); ++ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC); ++ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL); ++ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC); ++ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL); ++ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC); ++ adapter->stats.rlec += 
E1000_READ_REG(hw, E1000_RLEC); ++ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC); ++ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC); ++ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC); ++ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); ++ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC); ++ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC); ++ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL); ++ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */ ++ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC); ++ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC); ++ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC); ++ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC); ++ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH); ++ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH); ++ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR); ++ ++ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64); ++ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127); ++ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255); ++ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511); ++ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); ++ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); ++ ++ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC); ++ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC); ++ ++ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT); ++ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC); ++ ++ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); ++ /* read internal phy sepecific stats */ ++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { +- adapter->stats.rxerrc += rd32(E1000_RXERRC); ++ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) +- adapter->stats.tncrs += rd32(E1000_TNCRS); ++ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS); + } ++ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC); ++ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + +- adapter->stats.tsctc += rd32(E1000_TSCTC); +- adapter->stats.tsctfc += rd32(E1000_TSCTFC); +- +- adapter->stats.iac += rd32(E1000_IAC); +- adapter->stats.icrxoc += rd32(E1000_ICRXOC); +- adapter->stats.icrxptc += rd32(E1000_ICRXPTC); +- adapter->stats.icrxatc += rd32(E1000_ICRXATC); +- adapter->stats.ictxptc += rd32(E1000_ICTXPTC); +- adapter->stats.ictxatc += rd32(E1000_ICTXATC); +- adapter->stats.ictxqec += rd32(E1000_ICTXQEC); +- adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); +- adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); ++ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC); ++ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); ++ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); ++ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); ++ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); ++ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); ++ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); ++ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); ++ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; +@@ -5365,24 +6073,20 @@ + /* Phy Stats */ + if (hw->phy.media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && +- 
(!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { ++ (!igb_e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + } + + /* Management Stats */ +- adapter->stats.mgptc += rd32(E1000_MGTPTC); +- adapter->stats.mgprc += rd32(E1000_MGTPRC); +- adapter->stats.mgpdc += rd32(E1000_MGTPDC); +- +- /* OS2BMC Stats */ +- reg = rd32(E1000_MANC); +- if (reg & E1000_MANC_EN_BMC2OS) { +- adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); +- adapter->stats.o2bspc += rd32(E1000_O2BSPC); +- adapter->stats.b2ospc += rd32(E1000_B2OSPC); +- adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); ++ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC); ++ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC); ++ if (hw->mac.type > e1000_82580) { ++ adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC); ++ adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC); ++ adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC); ++ adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC); + } + } + +@@ -5390,7 +6094,7 @@ + { + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; +- u32 icr = rd32(E1000_ICR); ++ u32 icr = E1000_READ_REG(hw, E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) +@@ -5417,18 +6121,24 @@ + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + ++#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { +- u32 tsicr = rd32(E1000_TSICR); ++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ +- wr32(E1000_TSICR, E1000_TSICR_TXTS); ++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ + +- wr32(E1000_EIMS, adapter->eims_other); ++ /* Check for MDD event */ ++ if (icr & E1000_ICR_MDDET) ++ igb_process_mdd_event(adapter); ++ ++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; + } +@@ -5465,7 +6175,7 @@ + return IRQ_HANDLED; + } + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +@@ -5474,9 +6184,10 @@ + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) +- txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; ++ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576; + +- /* We can enable relaxed ordering for reads, but not writes when ++ /* ++ * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ +@@ -5484,7 +6195,7 @@ + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + +- wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); ++ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); + } + + static void igb_update_rx_dca(struct igb_adapter *adapter, +@@ -5495,16 +6206,17 @@ + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) +- rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; ++ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576; + +- /* We can enable relaxed ordering for reads, but not writes when ++ /* ++ * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + +- wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); ++ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); + } + + static void igb_update_dca(struct igb_q_vector *q_vector) +@@ -5535,7 +6247,7 @@ + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ +- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); ++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; +@@ -5556,9 +6268,9 @@ + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; +- if (dca_add_requester(dev) == 0) { ++ if (dca_add_requester(dev) == E1000_SUCCESS) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; +- dev_info(&pdev->dev, "DCA enabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } +@@ -5569,14 +6281,15 @@ + * hanging around in the sysfs model + */ + dca_remove_requester(dev); +- dev_info(&pdev->dev, "DCA disabled\n"); ++ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; +- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); ++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, ++ E1000_DCA_CTRL_DCA_DISABLE); + } + break; + } + +- return 0; ++ return E1000_SUCCESS; + } + + static int igb_notify_dca(struct notifier_block *nb, unsigned long event, +@@ -5585,27 +6298,29 @@ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, +- __igb_notify_dca); ++ __igb_notify_dca); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; + } +-#endif /* CONFIG_IGB_DCA */ ++#endif /* IGB_DCA */ + +-#ifdef CONFIG_PCI_IOV + static int igb_vf_configure(struct igb_adapter *adapter, int vf) + { + unsigned char mac_addr[ETH_ALEN]; + +- eth_zero_addr(mac_addr); ++ random_ether_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + ++#ifdef IFLA_VF_MAX ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; ++#endif ++#endif + +- return 0; ++ return true; + } + +-#endif + static void igb_ping_all_vfs(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +@@ -5616,26 +6331,71 @@ + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; +- igb_write_mbx(hw, &ping, 1, i); ++ e1000_write_mbx(hw, &ping, 1, i); + } + } + ++/** ++ * igb_mta_set_ - Set multicast filter table address ++ * @adapter: pointer to the adapter structure ++ * @hash_value: determines the MTA register and bit to set ++ * ++ * The multicast table address is a register array of 32-bit registers. ++ * The hash_value is used to determine what register the bit is in, the ++ * current value is read, the new bit is OR'd in and the new value is ++ * written back into the register. ++ **/ ++void igb_mta_set(struct igb_adapter *adapter, u32 hash_value) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ u32 hash_bit, hash_reg, mta; ++ ++ /* ++ * The MTA is a register array of 32-bit registers. It is ++ * treated like an array of (32*mta_reg_count) bits. We want to ++ * set bit BitArray[hash_value]. So we figure out what register ++ * the bit is in, read it, OR in the new bit, then write ++ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a ++ * mask to bits 31:5 of the hash value which gives us the ++ * register we're modifying. 
The hash bit within that register ++ * is determined by the lower 5 bits of the hash value. ++ */ ++ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); ++ hash_bit = hash_value & 0x1F; ++ ++ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); ++ ++ mta |= (1 << hash_bit); ++ ++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); ++ E1000_WRITE_FLUSH(hw); ++} ++ + static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) + { ++ + struct e1000_hw *hw = &adapter->hw; +- u32 vmolr = rd32(E1000_VMOLR(vf)); ++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + ++#ifdef IGB_ENABLE_VF_PROMISC ++ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) { ++ vmolr |= E1000_VMOLR_ROPE; ++ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC; ++ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST; ++ } ++#endif + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { +- /* if we have hashes and we are clearing a multicast promisc ++ /* ++ * if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ +@@ -5646,17 +6406,18 @@ + + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) +- igb_mta_set(hw, vf_data->vf_mc_hashes[j]); ++ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); + } + } + +- wr32(E1000_VMOLR(vf), vmolr); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; ++ + } + + static int igb_set_vf_multicasts(struct igb_adapter *adapter, +@@ -5694,7 +6455,7 @@ + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { +- u32 vmolr = rd32(E1000_VMOLR(i)); ++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); + + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + +@@ -5706,9 +6467,9 @@ + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) +- igb_mta_set(hw, vf_data->vf_mc_hashes[j]); ++ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); + } +- wr32(E1000_VMOLR(i), vmolr); ++ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); + } + } + +@@ -5716,13 +6477,14 @@ + { + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, reg, vid; ++ u16 vlan_default; + int i; + + pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + + /* remove the vf from the pool */ + reg &= ~pool_mask; +@@ -5732,16 +6494,20 @@ + (reg & E1000_VLVF_VLANID_ENABLE)) { + reg = 0; + vid = reg & E1000_VLVF_VLANID_MASK; +- igb_vfta_set(hw, vid, false); ++ igb_vfta_set(adapter, vid, FALSE); + } + +- wr32(E1000_VLVF(i), reg); ++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); + } + + adapter->vf_data[vf].vlans_enabled = 0; ++ ++ vlan_default = adapter->vf_data[vf].default_vf_vlan_id; ++ if (vlan_default) ++ igb_vlvf_set(adapter, vlan_default, true, vf); + } + +-static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) ++s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) + { + struct e1000_hw *hw = &adapter->hw; + 
u32 reg, i; +@@ -5751,12 +6517,12 @@ + return -1; + + /* we only need to do this if VMDq is enabled */ +- if (!adapter->vfs_allocated_count) ++ if (!adapter->vmdq_pools) + return -1; + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; +@@ -5769,7 +6535,7 @@ + * one without the enable bit set + */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if (!(reg & E1000_VLVF_VLANID_ENABLE)) + break; + } +@@ -5781,26 +6547,26 @@ + /* if !enabled we need to set this up in vfta */ + if (!(reg & E1000_VLVF_VLANID_ENABLE)) { + /* add VID to filter table */ +- igb_vfta_set(hw, vid, true); ++ igb_vfta_set(adapter, vid, TRUE); + reg |= E1000_VLVF_VLANID_ENABLE; + } + reg &= ~E1000_VLVF_VLANID_MASK; + reg |= vid; +- wr32(E1000_VLVF(i), reg); ++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) +- return 0; ++ return E1000_SUCCESS; + + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + +- reg = rd32(E1000_VMOLR(vf)); ++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size += 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; +- wr32(E1000_VMOLR(vf), reg); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); + } + + adapter->vf_data[vf].vlans_enabled++; +@@ -5812,38 +6578,40 @@ + /* if pool is empty then remove entry from vfta */ + if (!(reg & E1000_VLVF_POOLSEL_MASK)) { + reg = 0; +- igb_vfta_set(hw, vid, false); ++ igb_vfta_set(adapter, vid, FALSE); + } +- wr32(E1000_VLVF(i), reg); ++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) +- return 0; ++ return E1000_SUCCESS; + + adapter->vf_data[vf].vlans_enabled--; + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + +- reg = rd32(E1000_VMOLR(vf)); ++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size -= 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; +- wr32(E1000_VMOLR(vf), reg); ++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); + } + } + } +- return 0; ++ return E1000_SUCCESS; + } + ++#ifdef IFLA_VF_MAX + static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) + { + struct e1000_hw *hw = &adapter->hw; + + if (vid) +- wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); ++ E1000_WRITE_REG(hw, E1000_VMVIR(vf), ++ (vid | E1000_VMVIR_VLANA_DEFAULT)); + else +- wr32(E1000_VMVIR(vf), 0); ++ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); + } + + static int igb_ndo_set_vf_vlan(struct net_device *netdev, +@@ -5852,7 +6620,9 @@ + int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); + +- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) ++ /* VLAN IDs accepted range 0-4094 */ ++ if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1) ++ || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = igb_vlvf_set(adapter, vlan, !!vlan, vf); +@@ -5862,6 +6632,7 @@ + igb_set_vmolr(adapter, vf, !vlan); + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; ++ igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { +@@ -5871,10 +6642,14 @@ + "Bring the PF device up 
before attempting to use the VF device.\n"); + } + } else { ++ if (adapter->vf_data[vf].pf_vlan) ++ dev_info(&adapter->pdev->dev, ++ "Clearing VLAN on VF %d\n", vf); + igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, +- false, vf); ++ false, vf); + igb_set_vmvir(adapter, vlan, vf); + igb_set_vmolr(adapter, vf, true); ++ igb_set_vf_vlan_strip(adapter, vf, false); + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + } +@@ -5882,6 +6657,36 @@ + return err; + } + ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE ++static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, ++ bool setting) ++{ ++ struct igb_adapter *adapter = netdev_priv(netdev); ++ struct e1000_hw *hw = &adapter->hw; ++ u32 dtxswc, reg_offset; ++ ++ if (!adapter->vfs_allocated_count) ++ return -EOPNOTSUPP; ++ ++ if (vf >= adapter->vfs_allocated_count) ++ return -EINVAL; ++ ++ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; ++ dtxswc = E1000_READ_REG(hw, reg_offset); ++ if (setting) ++ dtxswc |= ((1 << vf) | ++ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); ++ else ++ dtxswc &= ~((1 << vf) | ++ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); ++ E1000_WRITE_REG(hw, reg_offset, dtxswc); ++ ++ adapter->vf_data[vf].spoofchk_enabled = setting; ++ return E1000_SUCCESS; ++} ++#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ ++#endif /* IFLA_VF_MAX */ ++ + static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) + { + struct e1000_hw *hw = &adapter->hw; +@@ -5890,7 +6695,7 @@ + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { +- reg = rd32(E1000_VLVF(i)); ++ reg = E1000_READ_REG(hw, E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; +@@ -5909,6 +6714,11 @@ + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int err = 0; + ++ if (vid) ++ igb_set_vf_vlan_strip(adapter, vf, true); ++ else ++ igb_set_vf_vlan_strip(adapter, vf, false); ++ + /* If in promiscuous mode we need to make sure the PF also has + * the VLAN filter set. + */ +@@ -5928,6 +6738,7 @@ + */ + if (!add && (adapter->netdev->flags & IFF_PROMISC)) { + u32 vlvf, bits; ++ + int regndx = igb_find_vlvf_entry(adapter, vid); + + if (regndx < 0) +@@ -5935,7 +6746,7 @@ + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ +- vlvf = bits = rd32(E1000_VLVF(regndx)); ++ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx)); + bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + /* If the filter was removed then ensure PF pool bit +@@ -5943,7 +6754,9 @@ + * because the PF is in promiscuous mode. 
+ */ + if ((vlvf & VLAN_VID_MASK) == vid && ++#ifndef HAVE_VLAN_RX_REGISTER + !test_bit(vid, adapter->active_vlans) && ++#endif + !bits) + igb_vlvf_set(adapter, vid, add, + adapter->vfs_allocated_count); +@@ -5955,7 +6768,9 @@ + + static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) + { +- /* clear flags - except flag that indicates PF has set the MAC */ ++ struct e1000_hw *hw = &adapter->hw; ++ ++ /* clear flags except flag that the PF has set the MAC */ + adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; + adapter->vf_data[vf].last_nack = jiffies; + +@@ -5964,27 +6779,40 @@ + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); ++#ifdef IFLA_VF_MAX + if (adapter->vf_data[vf].pf_vlan) + igb_ndo_set_vf_vlan(adapter->netdev, vf, + adapter->vf_data[vf].pf_vlan, + adapter->vf_data[vf].pf_qos); + else + igb_clear_vf_vfta(adapter, vf); ++#endif + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); ++ ++ /* ++ * Reset the VFs TDWBAL and TDWBAH registers which are not ++ * cleared by a VFLR ++ */ ++ E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0); ++ E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0); ++ if (hw->mac.type == e1000_82576) { ++ E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0); ++ E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0); ++ } + } + + static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) + { + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + +- /* clear mac address as we were hotplug removed/added */ ++ /* generate a new mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) +- eth_zero_addr(vf_mac); ++ random_ether_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +@@ -6005,25 +6833,26 @@ + igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + + /* enable transmit and receive for vf */ +- reg = rd32(E1000_VFTE); +- wr32(E1000_VFTE, reg | (1 << vf)); +- reg = rd32(E1000_VFRE); +- wr32(E1000_VFRE, reg | (1 << vf)); ++ reg = E1000_READ_REG(hw, E1000_VFTE); ++ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf)); ++ reg = E1000_READ_REG(hw, E1000_VFRE); ++ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; +- memcpy(addr, vf_mac, ETH_ALEN); +- igb_write_mbx(hw, msgbuf, 3, vf); ++ memcpy(addr, vf_mac, 6); ++ e1000_write_mbx(hw, msgbuf, 3, vf); + } + + static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) + { +- /* The VF MAC Address is stored in a packed array of bytes ++ /* ++ * The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ +- unsigned char *addr = (char *)&msg[1]; ++ unsigned char *addr = (unsigned char *)&msg[1]; + int err = -1; + + if (is_valid_ether_addr(addr)) +@@ -6041,7 +6870,7 @@ + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { +- igb_write_mbx(hw, &msg, 1, vf); ++ e1000_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } + } +@@ -6054,45 +6883,47 @@ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + +- retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); ++ retval = 
e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + + if (retval) { +- /* if receive failed revoke VF CTS stats and restart init */ +- dev_err(&pdev->dev, "Error receiving message from VF\n"); +- vf_data->flags &= ~IGB_VF_FLAG_CTS; +- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) +- return; +- goto out; ++ dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n"); ++ return; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + return; + +- /* until the vf completes a reset it should not be ++ /* ++ * until the vf completes a reset it should not be + * allowed to start any configuration. + */ ++ + if (msgbuf[0] == E1000_VF_RESET) { + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { +- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) +- return; +- retval = -1; +- goto out; ++ msgbuf[0] = E1000_VT_MSGTYPE_NACK; ++ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) { ++ e1000_write_mbx(hw, msgbuf, 1, vf); ++ vf_data->last_nack = jiffies; ++ } ++ return; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = -EINVAL; ++#ifndef IGB_DISABLE_VF_MAC_SET + if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + else +- dev_warn(&pdev->dev, +- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", +- vf); ++ DPRINTK(DRV, INFO, ++ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", ++ vf); ++#endif + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); +@@ -6105,28 +6936,31 @@ + break; + case E1000_VF_SET_VLAN: + retval = -1; ++#ifdef IFLA_VF_MAX + if (vf_data->pf_vlan) +- dev_warn(&pdev->dev, +- "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", +- vf); ++ DPRINTK(DRV, INFO, ++ "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", ++ vf); + else ++#endif + retval = igb_set_vf_vlan(adapter, msgbuf, vf); + break; + default: +- dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); +- retval = -1; ++ dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", ++ msgbuf[0]); ++ retval = -E1000_ERR_MBX; + break; + } + +- msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +-out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + +- igb_write_mbx(hw, msgbuf, 1, vf); ++ msgbuf[0] |= E1000_VT_MSGTYPE_CTS; ++ ++ e1000_write_mbx(hw, msgbuf, 1, vf); + } + + static void igb_msg_task(struct igb_adapter *adapter) +@@ -6136,15 +6970,15 @@ + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ +- if (!igb_check_for_rst(hw, vf)) ++ if (!e1000_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ +- if (!igb_check_for_msg(hw, vf)) ++ if (!e1000_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ +- if (!igb_check_for_ack(hw, vf)) ++ if (!e1000_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } + } +@@ -6169,17 +7003,17 @@ + return; + + /* we only need to do this if VMDq is enabled */ +- if (!adapter->vfs_allocated_count) ++ if (!adapter->vmdq_pools) + return; + + for (i = 0; i < hw->mac.uta_reg_count; i++) +- array_wr32(E1000_UTA, i, 
~0); ++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0); + } + + /** +- * igb_intr_msi - Interrupt Handler +- * @irq: interrupt number +- * @data: pointer to a network interface device structure ++ * igb_intr_msi - Interrupt Handler ++ * @irq: interrupt number ++ * @data: pointer to a network interface device structure + **/ + static irqreturn_t igb_intr_msi(int irq, void *data) + { +@@ -6187,7 +7021,7 @@ + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ +- u32 icr = rd32(E1000_ICR); ++ u32 icr = E1000_READ_REG(hw, E1000_ICR); + + igb_write_itr(q_vector); + +@@ -6205,16 +7039,18 @@ + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + ++#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { +- u32 tsicr = rd32(E1000_TSICR); ++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ +- wr32(E1000_TSICR, E1000_TSICR_TXTS); ++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ + + napi_schedule(&q_vector->napi); + +@@ -6222,9 +7058,9 @@ + } + + /** +- * igb_intr - Legacy Interrupt Handler +- * @irq: interrupt number +- * @data: pointer to a network interface device structure ++ * igb_intr - Legacy Interrupt Handler ++ * @irq: interrupt number ++ * @data: pointer to a network interface device structure + **/ + static irqreturn_t igb_intr(int irq, void *data) + { +@@ -6234,7 +7070,7 @@ + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ +- u32 icr = rd32(E1000_ICR); ++ u32 icr = E1000_READ_REG(hw, E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt +@@ -6259,23 +7095,25 @@ + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + ++#ifdef HAVE_PTP_1588_CLOCK + if (icr & E1000_ICR_TS) { +- u32 tsicr = rd32(E1000_TSICR); ++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ +- wr32(E1000_TSICR, E1000_TSICR_TXTS); ++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; + } + +-static void igb_ring_irq_enable(struct igb_q_vector *q_vector) ++void igb_ring_irq_enable(struct igb_q_vector *q_vector) + { + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; +@@ -6289,26 +7127,25 @@ + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { +- if (adapter->flags & IGB_FLAG_HAS_MSIX) +- wr32(E1000_EIMS, q_vector->eims_value); ++ if (adapter->msix_entries) ++ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } + } + + /** +- * igb_poll - NAPI Rx polling callback +- * @napi: napi polling structure +- * @budget: count of how many packets we should handle ++ * igb_poll - NAPI Rx polling callback ++ * @napi: napi polling structure ++ * @budget: count of how many packets we should handle + **/ + static int igb_poll(struct napi_struct *napi, int budget) + { + struct igb_q_vector *q_vector = container_of(napi, +- struct igb_q_vector, +- napi); ++ struct igb_q_vector, napi); + bool clean_complete = true; + +-#ifdef CONFIG_IGB_DCA ++#ifdef IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + 
igb_update_dca(q_vector); + #endif +@@ -6318,6 +7155,12 @@ + if (q_vector->rx.ring) + clean_complete &= igb_clean_rx_irq(q_vector, budget); + ++#ifndef HAVE_NETDEV_NAPI_LIST ++ /* if netdev is disabled we need to stop polling */ ++ if (!netif_running(q_vector->adapter->netdev)) ++ clean_complete = true; ++ ++#endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; +@@ -6330,10 +7173,9 @@ + } + + /** +- * igb_clean_tx_irq - Reclaim resources after transmit completes +- * @q_vector: pointer to q_vector containing needed info +- * +- * returns true if ring is completely cleaned ++ * igb_clean_tx_irq - Reclaim resources after transmit completes ++ * @q_vector: pointer to q_vector containing needed info ++ * returns TRUE if ring is completely cleaned + **/ + static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) + { +@@ -6426,16 +7268,20 @@ + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); ++ + i += tx_ring->count; + tx_ring->next_to_clean = i; +- u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; +- u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + ++#ifdef DEBUG ++ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) && ++ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) { ++#else + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { ++#endif + struct e1000_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the +@@ -6444,10 +7290,23 @@ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + +- (adapter->tx_timeout_factor * HZ)) && +- !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { ++ (adapter->tx_timeout_factor * HZ)) ++ && !(E1000_READ_REG(hw, E1000_STATUS) & ++ E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ ++#ifdef DEBUG ++ adapter->tx_hang_detected = TRUE; ++ if (adapter->disable_hw_reset) { ++ DPRINTK(DRV, WARNING, ++ "Deactivating netdev watchdog timer\n"); ++ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer)) ++ dev_put(netdev_ring(tx_ring)); ++#ifndef HAVE_NET_DEVICE_OPS ++ netdev_ring(tx_ring)->tx_timeout = NULL; ++#endif ++ } ++#endif /* DEBUG */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" +@@ -6461,7 +7320,7 @@ + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, +- rd32(E1000_TDH(tx_ring->reg_idx)), ++ E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, +@@ -6469,8 +7328,11 @@ + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); +- netif_stop_subqueue(tx_ring->netdev, +- tx_ring->queue_index); ++ if (netif_is_multiqueue(netdev_ring(tx_ring))) ++ netif_stop_subqueue(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)); ++ else ++ netif_stop_queue(netdev_ring(tx_ring)); + + /* we are about to reset, no point in enabling stuff */ + return true; +@@ -6479,33 +7341,63 @@ + + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && +- netif_carrier_ok(tx_ring->netdev) && +- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { ++ netif_carrier_ok(netdev_ring(tx_ring)) && ++ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); +- if (__netif_subqueue_stopped(tx_ring->netdev, +- tx_ring->queue_index) && +- !(test_bit(__IGB_DOWN, &adapter->state))) { +- netif_wake_subqueue(tx_ring->netdev, +- tx_ring->queue_index); +- +- u64_stats_update_begin(&tx_ring->tx_syncp); +- tx_ring->tx_stats.restart_queue++; +- u64_stats_update_end(&tx_ring->tx_syncp); ++ if (netif_is_multiqueue(netdev_ring(tx_ring))) { ++ if (__netif_subqueue_stopped(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)) && ++ !(test_bit(__IGB_DOWN, &adapter->state))) { ++ netif_wake_subqueue(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)); ++ tx_ring->tx_stats.restart_queue++; ++ } ++ } else { ++ if (netif_queue_stopped(netdev_ring(tx_ring)) && ++ !(test_bit(__IGB_DOWN, &adapter->state))) { ++ netif_wake_queue(netdev_ring(tx_ring)); ++ tx_ring->tx_stats.restart_queue++; ++ } + } + } + + return !!budget; + } + ++#ifdef HAVE_VLAN_RX_REGISTER ++/** ++ * igb_receive_skb - helper function to handle rx indications ++ * @q_vector: structure containing interrupt and ring information ++ * @skb: packet to send up ++ **/ ++static void igb_receive_skb(struct igb_q_vector *q_vector, ++ struct sk_buff *skb) ++{ ++ struct vlan_group **vlgrp = netdev_priv(skb->dev); ++ ++ if (IGB_CB(skb)->vid) { ++ if (*vlgrp) { ++ vlan_gro_receive(&q_vector->napi, *vlgrp, ++ IGB_CB(skb)->vid, skb); ++ } else { ++ dev_kfree_skb_any(skb); ++ } ++ } else { ++ napi_gro_receive(&q_vector->napi, skb); ++ } ++} ++ ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + /** +- * igb_reuse_rx_page - page flip buffer and store it back on the ring +- * @rx_ring: rx descriptor ring to store buffers on +- * @old_buff: donor buffer to have page reused ++ * igb_reuse_rx_page - page flip buffer and store it back on the ring ++ * @rx_ring: rx descriptor ring to store buffers on ++ * @old_buff: donor buffer to have page reused + * +- * Synchronizes page for reuse by the adapter ++ * Synchronizes page for reuse by the adapter + **/ + static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +@@ -6545,39 +7437,34 @@ + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IGB_RX_BUFSZ; + +- /* since we are the only owner of the page and we need to +- * increment it, just set the value to 2 in order to avoid +- * an unnecessary locked operation +- */ +- atomic_set(&page->_count, 2); + #else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + return false; ++#endif + + /* bump ref count on page before it is given to the stack */ + get_page(page); +-#endif + + return true; + } + + /** +- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff +- * @rx_ring: rx descriptor ring to transact packets on +- * @rx_buffer: buffer containing page to add +- * @rx_desc: descriptor containing length of buffer written by hardware +- * @skb: sk_buff to place the data into +- * +- * This function will add the data contained in rx_buffer->page to the skb. +- * This is done either through a direct copy if the data in the buffer is +- * less than the skb header size, otherwise it will just attach the page as +- * a frag to the skb. 
++ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff ++ * @rx_ring: rx descriptor ring to transact packets on ++ * @rx_buffer: buffer containing page to add ++ * @rx_desc: descriptor containing length of buffer written by hardware ++ * @skb: sk_buff to place the data into ++ * ++ * This function will add the data contained in rx_buffer->page to the skb. ++ * This is done either through a direct copy if the data in the buffer is ++ * less than the skb header size, otherwise it will just attach the page as ++ * a frag to the skb. + * +- * The function will then update the page offset if necessary and return +- * true if the buffer can be reused by the adapter. ++ * The function will then update the page offset if necessary and return ++ * true if the buffer can be reused by the adapter. + **/ + static bool igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, +@@ -6585,22 +7472,27 @@ + struct sk_buff *skb) + { + struct page *page = rx_buffer->page; ++ unsigned char *va = page_address(page) + rx_buffer->page_offset; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + #if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; + #else +- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); ++ unsigned int truesize = SKB_DATA_ALIGN(size); + #endif ++ unsigned int pull_len; + +- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { +- unsigned char *va = page_address(page) + rx_buffer->page_offset; ++ if (unlikely(skb_is_nonlinear(skb))) ++ goto add_tail_frag; + +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { +- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); +- va += IGB_TS_HDR_LEN; +- size -= IGB_TS_HDR_LEN; +- } ++#ifdef HAVE_PTP_1588_CLOCK ++ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { ++ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); ++ va += IGB_TS_HDR_LEN; ++ size -= IGB_TS_HDR_LEN; ++ } ++#endif /* HAVE_PTP_1588_CLOCK */ + ++ if (likely(size <= IGB_RX_HDR_LEN)) { + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ +@@ -6612,8 +7504,21 @@ + return false; + } + ++ /* we need the header to contain the greater of either ETH_HLEN or ++ * 60 bytes if the skb->len is less than 60 for skb_pad. 
++ */ ++ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); ++ ++ /* align pull length to size of long to optimize memcpy performance */ ++ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); ++ ++ /* update all of the pointers */ ++ va += pull_len; ++ size -= pull_len; ++ ++add_tail_frag: + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, +- rx_buffer->page_offset, size, truesize); ++ (unsigned long)va & ~PAGE_MASK, size, truesize); + + return igb_can_reuse_rx_page(rx_buffer, page, truesize); + } +@@ -6648,7 +7553,8 @@ + return NULL; + } + +- /* we will be copying header into skb->data in ++ /* ++ * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ +@@ -6672,72 +7578,606 @@ + PAGE_SIZE, DMA_FROM_DEVICE); + } + +- /* clear contents of rx_buffer */ +- rx_buffer->page = NULL; ++ /* clear contents of rx_buffer */ ++ rx_buffer->page = NULL; ++ ++ return skb; ++} ++ ++#endif ++static inline void igb_rx_checksum(struct igb_ring *ring, ++ union e1000_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ skb_checksum_none_assert(skb); ++ ++ /* Ignore Checksum bit is set */ ++ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) ++ return; ++ ++ /* Rx checksum disabled via ethtool */ ++ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) ++ return; ++ ++ /* TCP/UDP checksum error bit is set */ ++ if (igb_test_staterr(rx_desc, ++ E1000_RXDEXT_STATERR_TCPE | ++ E1000_RXDEXT_STATERR_IPE)) { ++ /* ++ * work around errata with sctp packets where the TCPE aka ++ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) ++ * packets, (aka let the stack check the crc32c) ++ */ ++ if (!((skb->len == 60) && ++ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) ++ ring->rx_stats.csum_err++; ++ ++ /* let the stack verify checksum errors */ ++ return; ++ } ++ /* It must be a TCP or UDP packet with a valid checksum */ ++ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | ++ E1000_RXD_STAT_UDPCS)) ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++#ifdef NETIF_F_RXHASH ++static inline void igb_rx_hash(struct igb_ring *ring, ++ union e1000_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ if (netdev_ring(ring)->features & NETIF_F_RXHASH) ++ skb_set_hash(skb, ++ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), ++ PKT_HASH_TYPE_L3); ++} ++ ++#endif ++#ifndef IGB_NO_LRO ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++/** ++ * igb_merge_active_tail - merge active tail into lro skb ++ * @tail: pointer to active tail in frag_list ++ * ++ * This function merges the length and data of an active tail into the ++ * skb containing the frag_list. It resets the tail's pointer to the head, ++ * but it leaves the heads pointer to tail intact. ++ **/ ++static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail) ++{ ++ struct sk_buff *head = IGB_CB(tail)->head; ++ ++ if (!head) ++ return tail; ++ ++ head->len += tail->len; ++ head->data_len += tail->len; ++ head->truesize += tail->len; ++ ++ IGB_CB(tail)->head = NULL; ++ ++ return head; ++} ++ ++/** ++ * igb_add_active_tail - adds an active tail into the skb frag_list ++ * @head: pointer to the start of the skb ++ * @tail: pointer to active tail to add to frag_list ++ * ++ * This function adds an active tail to the end of the frag list. This tail ++ * will still be receiving data so we cannot yet ad it's stats to the main ++ * skb. That is done via igb_merge_active_tail. 
++ **/ ++static inline void igb_add_active_tail(struct sk_buff *head, ++ struct sk_buff *tail) ++{ ++ struct sk_buff *old_tail = IGB_CB(head)->tail; ++ ++ if (old_tail) { ++ igb_merge_active_tail(old_tail); ++ old_tail->next = tail; ++ } else { ++ skb_shinfo(head)->frag_list = tail; ++ } ++ ++ IGB_CB(tail)->head = head; ++ IGB_CB(head)->tail = tail; ++ ++ IGB_CB(head)->append_cnt++; ++} ++ ++/** ++ * igb_close_active_frag_list - cleanup pointers on a frag_list skb ++ * @head: pointer to head of an active frag list ++ * ++ * This function will clear the frag_tail_tracker pointer on an active ++ * frag_list and returns true if the pointer was actually set ++ **/ ++static inline bool igb_close_active_frag_list(struct sk_buff *head) ++{ ++ struct sk_buff *tail = IGB_CB(head)->tail; ++ ++ if (!tail) ++ return false; ++ ++ igb_merge_active_tail(tail); ++ ++ IGB_CB(head)->tail = NULL; ++ ++ return true; ++} ++ ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++/** ++ * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled ++ * @adapter: board private structure ++ * @rx_desc: pointer to the rx descriptor ++ * @skb: pointer to the skb to be merged ++ * ++ **/ ++static inline bool igb_can_lro(struct igb_ring *rx_ring, ++ union e1000_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ struct iphdr *iph = (struct iphdr *)skb->data; ++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; ++ ++ /* verify hardware indicates this is IPv4/TCP */ ++ if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) || ++ !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4)))) ++ return false; ++ ++ /* .. and LRO is enabled */ ++ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO)) ++ return false; ++ ++ /* .. and we are not in promiscuous mode */ ++ if (netdev_ring(rx_ring)->flags & IFF_PROMISC) ++ return false; ++ ++ /* .. and the header is large enough for us to read IP/TCP fields */ ++ if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr))) ++ return false; ++ ++ /* .. and there are no VLANs on packet */ ++ if (skb->protocol != htons(ETH_P_IP)) ++ return false; ++ ++ /* .. and we are version 4 with no options */ ++ if (*(u8 *)iph != 0x45) ++ return false; ++ ++ /* .. and the packet is not fragmented */ ++ if (iph->frag_off & htons(IP_MF | IP_OFFSET)) ++ return false; ++ ++ /* .. and that next header is TCP */ ++ if (iph->protocol != IPPROTO_TCP) ++ return false; ++ ++ return true; ++} ++ ++static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb) ++{ ++ return (struct igb_lrohdr *)skb->data; ++} ++ ++/** ++ * igb_lro_flush - Indicate packets to upper layer. ++ * ++ * Update IP and TCP header part of head skb if more than one ++ * skb's chained and indicate packets to upper layer. 
++ **/ ++static void igb_lro_flush(struct igb_q_vector *q_vector, ++ struct sk_buff *skb) ++{ ++ struct igb_lro_list *lrolist = &q_vector->lrolist; ++ ++ __skb_unlink(skb, &lrolist->active); ++ ++ if (IGB_CB(skb)->append_cnt) { ++ struct igb_lrohdr *lroh = igb_lro_hdr(skb); ++ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ /* close any active lro contexts */ ++ igb_close_active_frag_list(skb); ++ ++#endif ++ /* incorporate ip header and re-calculate checksum */ ++ lroh->iph.tot_len = ntohs(skb->len); ++ lroh->iph.check = 0; ++ ++ /* header length is 5 since we know no options exist */ ++ lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); ++ ++ /* clear TCP checksum to indicate we are an LRO frame */ ++ lroh->th.check = 0; ++ ++ /* incorporate latest timestamp into the tcp header */ ++ if (IGB_CB(skb)->tsecr) { ++ lroh->ts[2] = IGB_CB(skb)->tsecr; ++ lroh->ts[1] = htonl(IGB_CB(skb)->tsval); ++ } ++#ifdef NETIF_F_GSO ++ ++#ifdef NAPI_GRO_CB ++ NAPI_GRO_CB(skb)->data_offset = 0; ++#endif ++ skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss; ++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; ++#endif ++ } ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, skb); ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++ lrolist->stats.flushed++; ++} ++ ++static void igb_lro_flush_all(struct igb_q_vector *q_vector) ++{ ++ struct igb_lro_list *lrolist = &q_vector->lrolist; ++ struct sk_buff *skb, *tmp; ++ ++ skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) ++ igb_lro_flush(q_vector, skb); ++} ++ ++/* ++ * igb_lro_header_ok - Main LRO function. ++ **/ ++static void igb_lro_header_ok(struct sk_buff *skb) ++{ ++ struct igb_lrohdr *lroh = igb_lro_hdr(skb); ++ u16 opt_bytes, data_len; ++ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ IGB_CB(skb)->tail = NULL; ++#endif ++ IGB_CB(skb)->tsecr = 0; ++ IGB_CB(skb)->append_cnt = 0; ++ IGB_CB(skb)->mss = 0; ++ ++ /* ensure that the checksum is valid */ ++ if (skb->ip_summed != CHECKSUM_UNNECESSARY) ++ return; ++ ++ /* If we see CE codepoint in IP header, packet is not mergeable */ ++ if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) ++ return; ++ ++ /* ensure no bits set besides ack or psh */ ++ if (lroh->th.fin || lroh->th.syn || lroh->th.rst || ++ lroh->th.urg || lroh->th.ece || lroh->th.cwr || ++ !lroh->th.ack) ++ return; ++ ++ /* store the total packet length */ ++ data_len = ntohs(lroh->iph.tot_len); ++ ++ /* remove any padding from the end of the skb */ ++ __pskb_trim(skb, data_len); ++ ++ /* remove header length from data length */ ++ data_len -= sizeof(struct igb_lrohdr); ++ ++ /* ++ * check for timestamps. 
Since the only option we handle are timestamps, ++ * we only have to handle the simple case of aligned timestamps ++ */ ++ opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); ++ if (opt_bytes != 0) { ++ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || ++ !pskb_may_pull(skb, sizeof(struct igb_lrohdr) + ++ TCPOLEN_TSTAMP_ALIGNED) || ++ (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | ++ (TCPOPT_NOP << 16) | ++ (TCPOPT_TIMESTAMP << 8) | ++ TCPOLEN_TIMESTAMP)) || ++ (lroh->ts[2] == 0)) { ++ return; ++ } ++ ++ IGB_CB(skb)->tsval = ntohl(lroh->ts[1]); ++ IGB_CB(skb)->tsecr = lroh->ts[2]; ++ ++ data_len -= TCPOLEN_TSTAMP_ALIGNED; ++ } ++ ++ /* record data_len as mss for the packet */ ++ IGB_CB(skb)->mss = data_len; ++ IGB_CB(skb)->next_seq = ntohl(lroh->th.seq); ++} ++ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) ++{ ++ struct skb_shared_info *sh_info; ++ struct skb_shared_info *new_skb_info; ++ unsigned int data_len; ++ ++ sh_info = skb_shinfo(lro_skb); ++ new_skb_info = skb_shinfo(new_skb); ++ ++ /* copy frags into the last skb */ ++ memcpy(sh_info->frags + sh_info->nr_frags, ++ new_skb_info->frags, ++ new_skb_info->nr_frags * sizeof(skb_frag_t)); ++ ++ /* copy size data over */ ++ sh_info->nr_frags += new_skb_info->nr_frags; ++ data_len = IGB_CB(new_skb)->mss; ++ lro_skb->len += data_len; ++ lro_skb->data_len += data_len; ++ lro_skb->truesize += data_len; ++ ++ /* wipe record of data from new_skb */ ++ new_skb_info->nr_frags = 0; ++ new_skb->len = new_skb->data_len = 0; ++ dev_kfree_skb_any(new_skb); ++} ++ ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++/** ++ * igb_lro_receive - if able, queue skb into lro chain ++ * @q_vector: structure containing interrupt and ring information ++ * @new_skb: pointer to current skb being checked ++ * ++ * Checks whether the skb given is eligible for LRO and if that's ++ * fine chains it to the existing lro_skb based on flowid. If an LRO for ++ * the flow doesn't exist create one. 
++ **/ ++static void igb_lro_receive(struct igb_q_vector *q_vector, ++ struct sk_buff *new_skb) ++{ ++ struct sk_buff *lro_skb; ++ struct igb_lro_list *lrolist = &q_vector->lrolist; ++ struct igb_lrohdr *lroh = igb_lro_hdr(new_skb); ++ __be32 saddr = lroh->iph.saddr; ++ __be32 daddr = lroh->iph.daddr; ++ __be32 tcp_ports = *(__be32 *)&lroh->th; ++ u16 data_len; ++#ifdef HAVE_VLAN_RX_REGISTER ++ u16 vid = IGB_CB(new_skb)->vid; ++#else ++ u16 vid = new_skb->vlan_tci; ++#endif ++ ++ igb_lro_header_ok(new_skb); ++ ++ /* ++ * we have a packet that might be eligible for LRO, ++ * so see if it matches anything we might expect ++ */ ++ skb_queue_walk(&lrolist->active, lro_skb) { ++ if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports || ++ igb_lro_hdr(lro_skb)->iph.saddr != saddr || ++ igb_lro_hdr(lro_skb)->iph.daddr != daddr) ++ continue; ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ if (IGB_CB(lro_skb)->vid != vid) ++#else ++ if (lro_skb->vlan_tci != vid) ++#endif ++ continue; ++ ++ /* out of order packet */ ++ if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) { ++ igb_lro_flush(q_vector, lro_skb); ++ IGB_CB(new_skb)->mss = 0; ++ break; ++ } ++ ++ /* TCP timestamp options have changed */ ++ if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) { ++ igb_lro_flush(q_vector, lro_skb); ++ break; ++ } ++ ++ /* make sure timestamp values are increasing */ ++ if (IGB_CB(lro_skb)->tsecr && ++ IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) { ++ igb_lro_flush(q_vector, lro_skb); ++ IGB_CB(new_skb)->mss = 0; ++ break; ++ } ++ ++ data_len = IGB_CB(new_skb)->mss; ++ ++ /* Check for all of the above below ++ * malformed header ++ * no tcp data ++ * resultant packet would be too large ++ * new skb is larger than our current mss ++ * data would remain in header ++ * we would consume more frags then the sk_buff contains ++ * ack sequence numbers changed ++ * window size has changed ++ */ ++ if (data_len == 0 || ++ data_len > IGB_CB(lro_skb)->mss || ++ data_len > IGB_CB(lro_skb)->free || ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ data_len != new_skb->data_len || ++ skb_shinfo(new_skb)->nr_frags >= ++ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) || ++#endif ++ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || ++ igb_lro_hdr(lro_skb)->th.window != lroh->th.window) { ++ igb_lro_flush(q_vector, lro_skb); ++ break; ++ } ++ ++ /* Remove IP and TCP header*/ ++ skb_pull(new_skb, new_skb->len - data_len); ++ ++ /* update timestamp and timestamp echo response */ ++ IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval; ++ IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr; ++ ++ /* update sequence and free space */ ++ IGB_CB(lro_skb)->next_seq += data_len; ++ IGB_CB(lro_skb)->free -= data_len; ++ ++ /* update append_cnt */ ++ IGB_CB(lro_skb)->append_cnt++; ++ ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ /* if header is empty pull pages into current skb */ ++ igb_merge_frags(lro_skb, new_skb); ++#else ++ /* chain this new skb in frag_list */ ++ igb_add_active_tail(lro_skb, new_skb); ++#endif ++ ++ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh || ++ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) { ++ igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; ++ igb_lro_flush(q_vector, lro_skb); ++ } ++ ++ lrolist->stats.coal++; ++ return; ++ } ++ ++ if (IGB_CB(new_skb)->mss && !lroh->th.psh) { ++ /* if we are at capacity flush the tail */ ++ if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) { ++ lro_skb = skb_peek_tail(&lrolist->active); ++ if (lro_skb) ++ igb_lro_flush(q_vector, lro_skb); ++ } ++ ++ /* update sequence 
and free space */ ++ IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss; ++ IGB_CB(new_skb)->free = 65521 - new_skb->len; + +- return skb; ++ /* .. and insert at the front of the active list */ ++ __skb_queue_head(&lrolist->active, new_skb); ++ ++ lrolist->stats.coal++; ++ return; ++ } ++ ++ /* packet not handled by any of the above, pass it to the stack */ ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, new_skb); ++#else ++ napi_gro_receive(&q_vector->napi, new_skb); ++#endif + } + +-static inline void igb_rx_checksum(struct igb_ring *ring, ++#endif /* IGB_NO_LRO */ ++/** ++ * igb_process_skb_fields - Populate skb header fields from Rx descriptor ++ * @rx_ring: rx descriptor ring packet is being transacted on ++ * @rx_desc: pointer to the EOP Rx descriptor ++ * @skb: pointer to current skb being populated ++ * ++ * This function checks the ring, descriptor, and packet information in ++ * order to populate the hash, checksum, VLAN, timestamp, protocol, and ++ * other fields within the skb. ++ **/ ++static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- skb_checksum_none_assert(skb); ++ struct net_device *dev = rx_ring->netdev; ++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; ++ bool notype; + +- /* Ignore Checksum bit is set */ +- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) +- return; ++#ifdef NETIF_F_RXHASH ++ igb_rx_hash(rx_ring, rx_desc, skb); + +- /* Rx checksum disabled via ethtool */ +- if (!(ring->netdev->features & NETIF_F_RXCSUM)) +- return; ++#endif ++ igb_rx_checksum(rx_ring, rx_desc, skb); + +- /* TCP/UDP checksum error bit is set */ +- if (igb_test_staterr(rx_desc, +- E1000_RXDEXT_STATERR_TCPE | +- E1000_RXDEXT_STATERR_IPE)) { +- /* work around errata with sctp packets where the TCPE aka +- * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) +- * packets, (aka let the stack check the crc32c) +- */ +- if (!((skb->len == 60) && +- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { +- u64_stats_update_begin(&ring->rx_syncp); +- ring->rx_stats.csum_err++; +- u64_stats_update_end(&ring->rx_syncp); +- } +- /* let the stack verify checksum errors */ +- return; ++ /* update packet type stats */ ++ switch (pkt_info & E1000_RXDADV_PKTTYPE_ILMASK) { ++ case E1000_RXDADV_PKTTYPE_IPV4: ++ rx_ring->pkt_stats.ipv4_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_IPV4_EX: ++ rx_ring->pkt_stats.ipv4e_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_IPV6: ++ rx_ring->pkt_stats.ipv6_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_IPV6_EX: ++ rx_ring->pkt_stats.ipv6e_packets++; ++ break; ++ default: ++ notype = true; ++ break; + } +- /* It must be a TCP or UDP packet with a valid checksum */ +- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | +- E1000_RXD_STAT_UDPCS)) +- skb->ip_summed = CHECKSUM_UNNECESSARY; + +- dev_dbg(ring->dev, "cksum success: bits %08X\n", +- le32_to_cpu(rx_desc->wb.upper.status_error)); +-} ++ switch (pkt_info & E1000_RXDADV_PKTTYPE_TLMASK) { ++ case E1000_RXDADV_PKTTYPE_TCP: ++ rx_ring->pkt_stats.tcp_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_UDP: ++ rx_ring->pkt_stats.udp_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_SCTP: ++ rx_ring->pkt_stats.sctp_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_NFS: ++ rx_ring->pkt_stats.nfs_packets++; ++ break; ++ case E1000_RXDADV_PKTTYPE_NONE: ++ if (notype) ++ rx_ring->pkt_stats.other_packets++; ++ break; ++ default: ++ break; ++ } + +-static inline void igb_rx_hash(struct igb_ring *ring, +- 
union e1000_adv_rx_desc *rx_desc, +- struct sk_buff *skb) +-{ +- if (ring->netdev->features & NETIF_F_RXHASH) +- skb_set_hash(skb, +- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), +- PKT_HASH_TYPE_L3); ++#ifdef HAVE_PTP_1588_CLOCK ++ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && ++ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) ++ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); ++ ++#endif /* HAVE_PTP_1588_CLOCK */ ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ++#else ++ if ((dev->features & NETIF_F_HW_VLAN_RX) && ++#endif ++ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { ++ u16 vid = 0; ++ ++ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && ++ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) ++ vid = be16_to_cpu(rx_desc->wb.upper.vlan); ++ else ++ vid = le16_to_cpu(rx_desc->wb.upper.vlan); ++#ifdef HAVE_VLAN_RX_REGISTER ++ IGB_CB(skb)->vid = vid; ++ } else { ++ IGB_CB(skb)->vid = 0; ++#else ++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); ++#endif ++ } ++ ++ skb_record_rx_queue(skb, rx_ring->queue_index); ++ ++ skb->protocol = eth_type_trans(skb, dev); + } + + /** +- * igb_is_non_eop - process handling of non-EOP buffers +- * @rx_ring: Rx ring being processed +- * @rx_desc: Rx descriptor for current buffer +- * @skb: current socket buffer containing buffer in progress +- * +- * This function updates next to clean. If the buffer is an EOP buffer +- * this function exits returning false, otherwise it will place the +- * sk_buff in the next buffer to be chained and return true indicating +- * that this is in fact a non-EOP buffer. ++ * igb_is_non_eop - process handling of non-EOP buffers ++ * @rx_ring: Rx ring being processed ++ * @rx_desc: Rx descriptor for current buffer ++ * ++ * This function updates next to clean. If the buffer is an EOP buffer ++ * this function exits returning false, otherwise it will place the ++ * sk_buff in the next buffer to be chained and return true indicating ++ * that this is in fact a non-EOP buffer. + **/ + static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +@@ -6756,200 +8196,134 @@ + return true; + } + +-/** +- * igb_get_headlen - determine size of header for LRO/GRO +- * @data: pointer to the start of the headers +- * @max_len: total length of section to find headers in +- * +- * This function is meant to determine the length of headers that will +- * be recognized by hardware for LRO, and GRO offloads. The main +- * motivation of doing this is to only perform one pull for IPv4 TCP +- * packets so that we can do basic things like calculating the gso_size +- * based on the average data per packet. 
+- **/ +-static unsigned int igb_get_headlen(unsigned char *data, +- unsigned int max_len) +-{ +- union { +- unsigned char *network; +- /* l2 headers */ +- struct ethhdr *eth; +- struct vlan_hdr *vlan; +- /* l3 headers */ +- struct iphdr *ipv4; +- struct ipv6hdr *ipv6; +- } hdr; +- __be16 protocol; +- u8 nexthdr = 0; /* default to not TCP */ +- u8 hlen; +- +- /* this should never happen, but better safe than sorry */ +- if (max_len < ETH_HLEN) +- return max_len; +- +- /* initialize network frame pointer */ +- hdr.network = data; +- +- /* set first protocol and move network header forward */ +- protocol = hdr.eth->h_proto; +- hdr.network += ETH_HLEN; +- +- /* handle any vlan tag if present */ +- if (protocol == htons(ETH_P_8021Q)) { +- if ((hdr.network - data) > (max_len - VLAN_HLEN)) +- return max_len; +- +- protocol = hdr.vlan->h_vlan_encapsulated_proto; +- hdr.network += VLAN_HLEN; +- } +- +- /* handle L3 protocols */ +- if (protocol == htons(ETH_P_IP)) { +- if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) +- return max_len; +- +- /* access ihl as a u8 to avoid unaligned access on ia64 */ +- hlen = (hdr.network[0] & 0x0F) << 2; +- +- /* verify hlen meets minimum size requirements */ +- if (hlen < sizeof(struct iphdr)) +- return hdr.network - data; +- +- /* record next protocol if header is present */ +- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) +- nexthdr = hdr.ipv4->protocol; +- } else if (protocol == htons(ETH_P_IPV6)) { +- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) +- return max_len; +- +- /* record next protocol */ +- nexthdr = hdr.ipv6->nexthdr; +- hlen = sizeof(struct ipv6hdr); +- } else { +- return hdr.network - data; +- } ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++/* igb_clean_rx_irq -- * legacy */ ++static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) ++{ ++ struct igb_ring *rx_ring = q_vector->rx.ring; ++ unsigned int total_bytes = 0, total_packets = 0; ++ u16 cleaned_count = igb_desc_unused(rx_ring); + +- /* relocate pointer to start of L4 header */ +- hdr.network += hlen; ++ do { ++ struct igb_rx_buffer *rx_buffer; ++ union e1000_adv_rx_desc *rx_desc; ++ struct sk_buff *skb; ++ u16 ntc; + +- /* finally sort out TCP */ +- if (nexthdr == IPPROTO_TCP) { +- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) +- return max_len; ++ /* return some buffers to hardware, one at a time is too slow */ ++ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { ++ igb_alloc_rx_buffers(rx_ring, cleaned_count); ++ cleaned_count = 0; ++ } + +- /* access doff as a u8 to avoid unaligned access on ia64 */ +- hlen = (hdr.network[12] & 0xF0) >> 2; ++ ntc = rx_ring->next_to_clean; ++ rx_desc = IGB_RX_DESC(rx_ring, ntc); ++ rx_buffer = &rx_ring->rx_buffer_info[ntc]; + +- /* verify hlen meets minimum size requirements */ +- if (hlen < sizeof(struct tcphdr)) +- return hdr.network - data; ++ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) ++ break; + +- hdr.network += hlen; +- } else if (nexthdr == IPPROTO_UDP) { +- if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) +- return max_len; ++ /* ++ * This memory barrier is needed to keep us from reading ++ * any other fields out of the rx_desc until we know the ++ * RXD_STAT_DD bit is set ++ */ ++ rmb(); + +- hdr.network += sizeof(struct udphdr); +- } ++ skb = rx_buffer->skb; + +- /* If everything has gone correctly hdr.network should be the +- * data section of the packet and will be the end of the header. +- * If not then it probably represents the end of the last recognized +- * header. 
+- */ +- if ((hdr.network - data) < max_len) +- return hdr.network - data; +- else +- return max_len; +-} ++ prefetch(skb->data); + +-/** +- * igb_pull_tail - igb specific version of skb_pull_tail +- * @rx_ring: rx descriptor ring packet is being transacted on +- * @rx_desc: pointer to the EOP Rx descriptor +- * @skb: pointer to current skb being adjusted +- * +- * This function is an igb specific version of __pskb_pull_tail. The +- * main difference between this version and the original function is that +- * this function can make several assumptions about the state of things +- * that allow for significant optimizations versus the standard function. +- * As a result we can do things like drop a frag and maintain an accurate +- * truesize for the skb. +- */ +-static void igb_pull_tail(struct igb_ring *rx_ring, +- union e1000_adv_rx_desc *rx_desc, +- struct sk_buff *skb) +-{ +- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; +- unsigned char *va; +- unsigned int pull_len; ++ /* pull the header of the skb in */ ++ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length)); + +- /* it is valid to use page_address instead of kmap since we are +- * working with pages allocated out of the lomem pool per +- * alloc_page(GFP_ATOMIC) +- */ +- va = skb_frag_address(frag); ++ /* clear skb reference in buffer info structure */ ++ rx_buffer->skb = NULL; + +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { +- /* retrieve timestamp from buffer */ +- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); ++ cleaned_count++; + +- /* update pointers to remove timestamp header */ +- skb_frag_size_sub(frag, IGB_TS_HDR_LEN); +- frag->page_offset += IGB_TS_HDR_LEN; +- skb->data_len -= IGB_TS_HDR_LEN; +- skb->len -= IGB_TS_HDR_LEN; ++ BUG_ON(igb_is_non_eop(rx_ring, rx_desc)); + +- /* move va to start of packet data */ +- va += IGB_TS_HDR_LEN; +- } ++ dma_unmap_single(rx_ring->dev, rx_buffer->dma, ++ rx_ring->rx_buffer_len, ++ DMA_FROM_DEVICE); ++ rx_buffer->dma = 0; + +- /* we need the header to contain the greater of either ETH_HLEN or +- * 60 bytes if the skb->len is less than 60 for skb_pad. 
+- */ +- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); ++ if (igb_test_staterr(rx_desc, ++ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { ++ dev_kfree_skb_any(skb); ++ continue; ++ } + +- /* align pull length to size of long to optimize memcpy performance */ +- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); ++ total_bytes += skb->len; + +- /* update all of the pointers */ +- skb_frag_size_sub(frag, pull_len); +- frag->page_offset += pull_len; +- skb->data_len -= pull_len; +- skb->tail += pull_len; ++ /* populate checksum, timestamp, VLAN, and protocol */ ++ igb_process_skb_fields(rx_ring, rx_desc, skb); ++ ++#ifndef IGB_NO_LRO ++ if (igb_can_lro(rx_ring, rx_desc, skb)) ++ igb_lro_receive(q_vector, skb); ++ else ++#endif ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, skb); ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++ ++#ifndef NETIF_F_GRO ++ netdev_ring(rx_ring)->last_rx = jiffies; ++ ++#endif ++ /* update budget accounting */ ++ total_packets++; ++ } while (likely(total_packets < budget)); ++ ++ rx_ring->rx_stats.packets += total_packets; ++ rx_ring->rx_stats.bytes += total_bytes; ++ q_vector->rx.total_packets += total_packets; ++ q_vector->rx.total_bytes += total_bytes; ++ ++ if (cleaned_count) ++ igb_alloc_rx_buffers(rx_ring, cleaned_count); ++ ++#ifndef IGB_NO_LRO ++ igb_lro_flush_all(q_vector); ++ ++#endif /* IGB_NO_LRO */ ++ return (total_packets < budget); + } ++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + + /** +- * igb_cleanup_headers - Correct corrupted or empty headers +- * @rx_ring: rx descriptor ring packet is being transacted on +- * @rx_desc: pointer to the EOP Rx descriptor +- * @skb: pointer to current skb being fixed ++ * igb_cleanup_headers - Correct corrupted or empty headers ++ * @rx_ring: rx descriptor ring packet is being transacted on ++ * @rx_desc: pointer to the EOP Rx descriptor ++ * @skb: pointer to current skb being fixed + * +- * Address the case where we are pulling data in on pages only +- * and as such no data is present in the skb header. ++ * Address the case where we are pulling data in on pages only ++ * and as such no data is present in the skb header. + * +- * In addition if skb is not at least 60 bytes we need to pad it so that +- * it is large enough to qualify as a valid Ethernet frame. ++ * In addition if skb is not at least 60 bytes we need to pad it so that ++ * it is large enough to qualify as a valid Ethernet frame. + * +- * Returns true if an error was encountered and skb was freed. ++ * Returns true if an error was encountered and skb was freed. 
+ **/ + static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { ++ + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; ++ + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + +- /* place header in linear portion of buffer */ +- if (skb_is_nonlinear(skb)) +- igb_pull_tail(rx_ring, rx_desc, skb); +- + /* if skb_pad returns an error the skb was freed */ + if (unlikely(skb->len < 60)) { + int pad_len = 60 - skb->len; +@@ -6962,56 +8336,15 @@ + return false; + } + +-/** +- * igb_process_skb_fields - Populate skb header fields from Rx descriptor +- * @rx_ring: rx descriptor ring packet is being transacted on +- * @rx_desc: pointer to the EOP Rx descriptor +- * @skb: pointer to current skb being populated +- * +- * This function checks the ring, descriptor, and packet information in +- * order to populate the hash, checksum, VLAN, timestamp, protocol, and +- * other fields within the skb. +- **/ +-static void igb_process_skb_fields(struct igb_ring *rx_ring, +- union e1000_adv_rx_desc *rx_desc, +- struct sk_buff *skb) +-{ +- struct net_device *dev = rx_ring->netdev; +- +- igb_rx_hash(rx_ring, rx_desc, skb); +- +- igb_rx_checksum(rx_ring, rx_desc, skb); +- +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && +- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) +- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); +- +- if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && +- igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { +- u16 vid; +- +- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && +- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) +- vid = be16_to_cpu(rx_desc->wb.upper.vlan); +- else +- vid = le16_to_cpu(rx_desc->wb.upper.vlan); +- +- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); +- } +- +- skb_record_rx_queue(skb, rx_ring->queue_index); +- +- skb->protocol = eth_type_trans(skb, rx_ring->netdev); +-} +- +-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ++/* igb_clean_rx_irq -- * packet split */ ++static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) + { + struct igb_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); + +- while (likely(total_packets < budget)) { ++ do { + union e1000_adv_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ +@@ -7025,7 +8358,8 @@ + if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) + break; + +- /* This memory barrier is needed to keep us from reading ++ /* ++ * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ +@@ -7056,31 +8390,89 @@ + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + +- napi_gro_receive(&q_vector->napi, skb); ++#ifndef IGB_NO_LRO ++ if (igb_can_lro(rx_ring, rx_desc, skb)) ++ igb_lro_receive(q_vector, skb); ++ else ++#endif ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_receive_skb(q_vector, skb); ++#else ++ napi_gro_receive(&q_vector->napi, skb); ++#endif ++#ifndef NETIF_F_GRO ++ ++ netdev_ring(rx_ring)->last_rx = jiffies; ++#endif + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; +- } ++ } while (likely(total_packets < budget)); + + /* place incomplete 
frames back on ring for completion */ + rx_ring->skb = skb; + +- u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; +- u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + +- return total_packets < budget; ++#ifndef IGB_NO_LRO ++ igb_lro_flush_all(q_vector); ++ ++#endif /* IGB_NO_LRO */ ++ return (total_packets < budget); ++} ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ ++ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, ++ struct igb_rx_buffer *bi) ++{ ++ struct sk_buff *skb = bi->skb; ++ dma_addr_t dma = bi->dma; ++ ++ if (dma) ++ return true; ++ ++ if (likely(!skb)) { ++ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), ++ rx_ring->rx_buffer_len); ++ bi->skb = skb; ++ if (!skb) { ++ rx_ring->rx_stats.alloc_failed++; ++ return false; ++ } ++ ++ /* initialize skb for ring */ ++ skb_record_rx_queue(skb, ring_queue_index(rx_ring)); ++ } ++ ++ dma = dma_map_single(rx_ring->dev, skb->data, ++ rx_ring->rx_buffer_len, DMA_FROM_DEVICE); ++ ++ /* if mapping failed free memory back to system since ++ * there isn't much point in holding memory we can't use ++ */ ++ if (dma_mapping_error(rx_ring->dev, dma)) { ++ dev_kfree_skb_any(skb); ++ bi->skb = NULL; ++ ++ rx_ring->rx_stats.alloc_failed++; ++ return false; ++ } ++ ++ bi->dma = dma; ++ return true; + } + ++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) + { +@@ -7092,7 +8484,7 @@ + return true; + + /* alloc new page for storage */ +- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); ++ page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; +@@ -7101,7 +8493,8 @@ + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + +- /* if mapping failed free memory back to system since ++ /* ++ * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { +@@ -7118,9 +8511,10 @@ + return true; + } + ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + /** +- * igb_alloc_rx_buffers - Replace used receive buffers; packet split +- * @adapter: address of board private structure ++ * igb_alloc_rx_buffers - Replace used receive buffers; packet split ++ * @adapter: address of board private structure + **/ + void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) + { +@@ -7137,13 +8531,22 @@ + i -= rx_ring->count; + + do { ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ if (!igb_alloc_mapped_skb(rx_ring, bi)) ++#else + if (!igb_alloc_mapped_page(rx_ring, bi)) ++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ + break; + +- /* Refresh the desc even if buffer_addrs didn't change ++ /* ++ * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ ++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT ++ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); ++#else + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); ++#endif + + rx_desc++; + bi++; +@@ -7166,10 +8569,13 @@ + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + +- /* Force memory writes to complete before letting h/w ++#endif ++ /* ++ * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). +@@ -7179,6 +8585,7 @@ + } + } + ++#ifdef SIOCGMIIPHY + /** + * igb_mii_ioctl - + * @netdev: +@@ -7198,17 +8605,20 @@ + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: +- if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, +- &data->val_out)) ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, ++ &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } +- return 0; ++ return E1000_SUCCESS; + } + ++#endif + /** + * igb_ioctl - + * @netdev: +@@ -7218,156 +8628,295 @@ + static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) + { + switch (cmd) { ++#ifdef SIOCGMIIPHY + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); ++#endif ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef SIOCGHWTSTAMP + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); ++#endif + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); ++#endif /* HAVE_PTP_1588_CLOCK */ ++#ifdef ETHTOOL_OPS_COMPAT ++ case SIOCETHTOOL: ++ return ethtool_ioctl(ifr); ++#endif + default: + return -EOPNOTSUPP; + } + } + +-void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) ++void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); + } + +-void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) ++void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); + } + +-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) ++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; ++ u16 cap_offset; + +- if (pcie_capability_read_word(adapter->pdev, reg, value)) ++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); ++ if (!cap_offset) + return -E1000_ERR_CONFIG; + +- return 0; ++ pci_read_config_word(adapter->pdev, cap_offset + reg, value); ++ ++ return E1000_SUCCESS; + } + +-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) ++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) + { + struct igb_adapter *adapter = hw->back; ++ u16 cap_offset; + +- if (pcie_capability_write_word(adapter->pdev, reg, *value)) ++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); ++ if (!cap_offset) + return -E1000_ERR_CONFIG; + +- return 0; ++ pci_write_config_word(adapter->pdev, cap_offset + reg, *value); ++ ++ return E1000_SUCCESS; + } + +-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) ++#ifdef HAVE_VLAN_RX_REGISTER ++static void igb_vlan_mode(struct net_device *netdev, struct vlan_group 
*vlgrp) ++#else ++void igb_vlan_mode(struct net_device *netdev, u32 features) ++#endif /* HAVE_VLAN_RX_REGISTER */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; +- bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); ++ bool enable; ++ int i; ++#ifdef HAVE_VLAN_RX_REGISTER ++ enable = !!vlgrp; ++ igb_irq_disable(adapter); ++ ++ adapter->vlgrp = vlgrp; ++ ++ if (!test_bit(__IGB_DOWN, &adapter->state)) ++ igb_irq_enable(adapter); ++#else ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); ++#else ++ enable = !!(features & NETIF_F_HW_VLAN_RX); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ + + if (enable) { + /* enable VLAN tag insert/strip */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_VME; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Disable CFI check */ +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ +- ctrl = rd32(E1000_CTRL); ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ++ } ++ ++#ifndef CONFIG_IGB_VMDQ_NETDEV ++ for (i = 0; i < adapter->vmdq_pools; i++) { ++ igb_set_vf_vlan_strip(adapter, ++ adapter->vfs_allocated_count + i, ++ enable); ++ } ++ ++#else ++ igb_set_vf_vlan_strip(adapter, ++ adapter->vfs_allocated_count, ++ enable); ++ ++ for (i = 1; i < adapter->vmdq_pools; i++) { ++#ifdef HAVE_VLAN_RX_REGISTER ++ struct igb_vmdq_adapter *vadapter; ++ ++ vadapter = netdev_priv(adapter->vmdq_netdev[i-1]); ++ ++ enable = !!vadapter->vlgrp; ++#else ++ struct net_device *vnetdev; ++ ++ vnetdev = adapter->vmdq_netdev[i-1]; ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX); ++#else ++ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ ++ igb_set_vf_vlan_strip(adapter, ++ adapter->vfs_allocated_count + i, ++ enable); + } + ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ + igb_rlpml_set(adapter); + } + ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + static int igb_vlan_rx_add_vid(struct net_device *netdev, +- __be16 proto, u16 vid) ++ __always_unused __be16 proto, u16 vid) ++#else ++static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#else ++static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + { + struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* attempt to add filter to vlvf array */ +- igb_vlvf_set(adapter, vid, true, pf_id); ++ igb_vlvf_set(adapter, vid, TRUE, pf_id); + + /* add the filter since PF can receive vlans w/o entry in vlvf */ +- igb_vfta_set(hw, vid, true); ++ igb_vfta_set(adapter, vid, TRUE); ++#ifndef HAVE_NETDEV_VLAN_FEATURES + +- set_bit(vid, adapter->active_vlans); ++ /* Copy feature flags from netdev to the vlan netdev for this vid. ++ * This allows things like TSO to bubble down to our vlan device. ++ * There is no need to update netdev for vlan 0 (DCB), since it ++ * wouldn't has v_netdev. 
++ */ ++ if (adapter->vlgrp) { ++ struct vlan_group *vlgrp = adapter->vlgrp; ++ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); + ++ if (v_netdev) { ++ v_netdev->features |= netdev->features; ++ vlan_group_set_device(vlgrp, vid, v_netdev); ++ } ++ } ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++#ifndef HAVE_VLAN_RX_REGISTER ++ ++ set_bit(vid, adapter->active_vlans); ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } + ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + static int igb_vlan_rx_kill_vid(struct net_device *netdev, +- __be16 proto, u16 vid) ++ __always_unused __be16 proto, u16 vid) ++#else ++static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#else ++static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + { + struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + s32 err; + ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_irq_disable(adapter); ++ ++ vlan_group_set_device(adapter->vlgrp, vid, NULL); ++ ++ if (!test_bit(__IGB_DOWN, &adapter->state)) ++ igb_irq_enable(adapter); ++ ++#endif /* HAVE_VLAN_RX_REGISTER */ + /* remove vlan from VLVF table array */ +- err = igb_vlvf_set(adapter, vid, false, pf_id); ++ err = igb_vlvf_set(adapter, vid, FALSE, pf_id); + + /* if vid was not present in VLVF just remove it from table */ + if (err) +- igb_vfta_set(hw, vid, false); ++ igb_vfta_set(adapter, vid, FALSE); ++#ifndef HAVE_VLAN_RX_REGISTER + + clear_bit(vid, adapter->active_vlans); +- ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + } + + static void igb_restore_vlan(struct igb_adapter *adapter) + { ++#ifdef HAVE_VLAN_RX_REGISTER ++ igb_vlan_mode(adapter->netdev, adapter->vlgrp); ++ ++ if (adapter->vlgrp) { ++ u16 vid; ++ ++ for (vid = 0; vid < VLAN_N_VID; vid++) { ++ if (!vlan_group_get_device(adapter->vlgrp, vid)) ++ continue; ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ igb_vlan_rx_add_vid(adapter->netdev, ++ htons(ETH_P_8021Q), vid); ++#else ++ igb_vlan_rx_add_vid(adapter->netdev, vid); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++ } ++ } ++#else + u16 vid; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) +- igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ igb_vlan_rx_add_vid(adapter->netdev, ++ htons(ETH_P_8021Q), vid); ++#else ++ igb_vlan_rx_add_vid(adapter->netdev, vid); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ + } + +-int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) ++int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) + { + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + +- /* Make sure dplx is at most 1 bit and lsb of speed is not set +- * for the switch() below to work +- */ +- if ((spd & 1) || (dplx & ~1)) +- goto err_inval; +- +- /* Fiber NIC's only allow 1000 gbps Full duplex +- * and 100Mbps Full duplex for 100baseFx sfp ++ /* SerDes device's does not support 10Mbps Full/duplex ++ * and 100Mbps Half duplex + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { +- switch (spd + dplx) { ++ switch (spddplx) { + case SPEED_10 + 
DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: +- goto err_inval; ++ dev_err(pci_dev_to_dev(pdev), ++ "Unsupported Speed/Duplex configuration\n"); ++ return -EINVAL; + default: + break; + } + } + +- switch (spd + dplx) { ++ switch (spddplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; +@@ -7386,17 +8935,52 @@ + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: +- goto err_inval; ++ dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n"); ++ return -EINVAL; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; ++} + +-err_inval: +- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); +- return -EINVAL; ++/* This function should only be called if RTNL lock is held */ ++int igb_setup_queues(struct igb_adapter *adapter) ++{ ++ struct net_device *dev = adapter->netdev; ++ int err; ++ ++ if (adapter->rss_queues == adapter->num_rx_queues) { ++ if (adapter->tss_queues) { ++ if (adapter->tss_queues == adapter->num_tx_queues) ++ return 0; ++ } else if (adapter->vfs_allocated_count || ++ adapter->rss_queues == adapter->num_tx_queues) { ++ return 0; ++ } ++ } ++ ++ /* ++ * Hardware has to reinitialize queues and interrupts to ++ * match the new configuration. Unfortunately, the hardware ++ * is not flexible enough to do this dynamically. ++ */ ++ if (netif_running(dev)) ++ igb_close(dev); ++ ++ igb_clear_interrupt_scheme(adapter); ++ ++ err = igb_init_interrupt_scheme(adapter, true); ++ if (err) { ++ dev_close(dev); ++ return err; ++ } ++ ++ if (netif_running(dev)) ++ err = igb_open(dev); ++ ++ return err; + } + + static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, +@@ -7413,6 +8997,10 @@ + + netif_device_detach(netdev); + ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if (status & E1000_STATUS_LU) ++ wufc &= ~E1000_WUFC_LNKC; ++ + if (netif_running(netdev)) + __igb_close(netdev, true); + +@@ -7424,37 +9012,31 @@ + return retval; + #endif + +- status = rd32(E1000_STATUS); +- if (status & E1000_STATUS_LU) +- wufc &= ~E1000_WUFC_LNKC; +- + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { +- rctl = rd32(E1000_RCTL); ++ rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; +- wr32(E1000_RCTL, rctl); ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } + +- ctrl = rd32(E1000_CTRL); +- /* advertise wake from D3Cold */ +- #define E1000_CTRL_ADVD3WUC 0x00100000 ++ ctrl = E1000_READ_REG(hw, E1000_CTRL); + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; +- wr32(E1000_CTRL, ctrl); ++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ +- igb_disable_pcie_master(hw); ++ e1000_disable_pcie_master(hw); + +- wr32(E1000_WUC, E1000_WUC_PME_EN); +- wr32(E1000_WUFC, wufc); ++ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN); ++ E1000_WRITE_REG(hw, E1000_WUFC, wufc); + } else { +- wr32(E1000_WUC, 0); +- wr32(E1000_WUFC, 0); ++ E1000_WRITE_REG(hw, E1000_WUC, 0); ++ E1000_WRITE_REG(hw, E1000_WUFC, 0); + } + + *enable_wake = wufc || adapter->en_mng_pt; +@@ -7474,12 +9056,17 @@ + } + + #ifdef CONFIG_PM +-#ifdef CONFIG_PM_SLEEP ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + static int igb_suspend(struct device *dev) ++#else ++static int igb_suspend(struct pci_dev *pdev, pm_message_t state) ++#endif 
/* HAVE_SYSTEM_SLEEP_PM_OPS */ + { ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS ++ struct pci_dev *pdev = to_pci_dev(dev); ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + int retval; + bool wake; +- struct pci_dev *pdev = to_pci_dev(dev); + + retval = __igb_shutdown(pdev, &wake, 0); + if (retval) +@@ -7494,11 +9081,16 @@ + + return 0; + } +-#endif /* CONFIG_PM_SLEEP */ + ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + static int igb_resume(struct device *dev) ++#else ++static int igb_resume(struct pci_dev *pdev) ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + { ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + struct pci_dev *pdev = to_pci_dev(dev); ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +@@ -7510,7 +9102,7 @@ + + err = pci_enable_device_mem(pdev); + if (err) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "igb: Cannot enable PCI device from suspend\n"); + return err; + } +@@ -7520,18 +9112,18 @@ + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { +- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); ++ dev_err(pci_dev_to_dev(pdev), ++ "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + +- /* let the f/w know that the h/w is now under the control of the +- * driver. ++ /* let the f/w know that the h/w is now under the control of the driver. + */ + igb_get_hw_control(adapter); + +- wr32(E1000_WUS, ~0); ++ E1000_WRITE_REG(hw, E1000_WUS, ~0); + + if (netdev->flags & IFF_UP) { + rtnl_lock(); +@@ -7542,10 +9134,12 @@ + } + + netif_device_attach(netdev); ++ + return 0; + } + + #ifdef CONFIG_PM_RUNTIME ++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS + static int igb_runtime_idle(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); +@@ -7582,91 +9176,51 @@ + { + return igb_resume(dev); + } ++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ + #endif /* CONFIG_PM_RUNTIME */ +-#endif ++#endif /* CONFIG_PM */ + +-static void igb_shutdown(struct pci_dev *pdev) ++#ifdef USE_REBOOT_NOTIFIER ++/* only want to do this for 2.4 kernels? 
*/ ++static int igb_notify_reboot(struct notifier_block *nb, unsigned long event, ++ void *p) + { ++ struct pci_dev *pdev = NULL; + bool wake; + +- __igb_shutdown(pdev, &wake, 0); +- +- if (system_state == SYSTEM_POWER_OFF) { +- pci_wake_from_d3(pdev, wake); +- pci_set_power_state(pdev, PCI_D3hot); ++ switch (event) { ++ case SYS_DOWN: ++ case SYS_HALT: ++ case SYS_POWER_OFF: ++ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { ++ if (pci_dev_driver(pdev) == &igb_driver) { ++ __igb_shutdown(pdev, &wake, 0); ++ if (event == SYS_POWER_OFF) { ++ pci_wake_from_d3(pdev, wake); ++ pci_set_power_state(pdev, PCI_D3hot); ++ } ++ } ++ } + } ++ return NOTIFY_DONE; + } +- +-#ifdef CONFIG_PCI_IOV +-static int igb_sriov_reinit(struct pci_dev *dev) ++#else ++static void igb_shutdown(struct pci_dev *pdev) + { +- struct net_device *netdev = pci_get_drvdata(dev); +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct pci_dev *pdev = adapter->pdev; ++ bool wake = false; + +- rtnl_lock(); +- +- if (netif_running(netdev)) +- igb_close(netdev); +- else +- igb_reset(adapter); +- +- igb_clear_interrupt_scheme(adapter); +- +- igb_init_queue_configuration(adapter); ++ __igb_shutdown(pdev, &wake, 0); + +- if (igb_init_interrupt_scheme(adapter, true)) { +- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); +- return -ENOMEM; ++ if (system_state == SYSTEM_POWER_OFF) { ++ pci_wake_from_d3(pdev, wake); ++ pci_set_power_state(pdev, PCI_D3hot); + } +- +- if (netif_running(netdev)) +- igb_open(netdev); +- +- rtnl_unlock(); +- +- return 0; +-} +- +-static int igb_pci_disable_sriov(struct pci_dev *dev) +-{ +- int err = igb_disable_sriov(dev); +- +- if (!err) +- err = igb_sriov_reinit(dev); +- +- return err; +-} +- +-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +-{ +- int err = igb_enable_sriov(dev, num_vfs); +- +- if (err) +- goto out; +- +- err = igb_sriov_reinit(dev); +- if (!err) +- return num_vfs; +- +-out: +- return err; +-} +- +-#endif +-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +-{ +-#ifdef CONFIG_PCI_IOV +- if (num_vfs == 0) +- return igb_pci_disable_sriov(dev); +- else +- return igb_pci_enable_sriov(dev, num_vfs); +-#endif +- return 0; + } ++#endif /* USE_REBOOT_NOTIFIER */ + + #ifdef CONFIG_NET_POLL_CONTROLLER +-/* Polling 'interrupt' - used by things like netconsole to send skbs ++/* ++ * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +@@ -7679,8 +9233,8 @@ + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; +- if (adapter->flags & IGB_FLAG_HAS_MSIX) +- wr32(E1000_EIMC, q_vector->eims_value); ++ if (adapter->msix_entries) ++ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value); + else + igb_irq_disable(adapter); + napi_schedule(&q_vector->napi); +@@ -7688,20 +9242,98 @@ + } + #endif /* CONFIG_NET_POLL_CONTROLLER */ + ++#ifdef HAVE_PCI_ERS ++#define E1000_DEV_ID_82576_VF 0x10CA + /** +- * igb_io_error_detected - called when PCI error is detected +- * @pdev: Pointer to PCI device +- * @state: The current pci connection state ++ * igb_io_error_detected - called when PCI error is detected ++ * @pdev: Pointer to PCI device ++ * @state: The current pci connection state + * +- * This function is called after a PCI bus error affecting +- * this device has been detected. +- **/ ++ * This function is called after a PCI bus error affecting ++ * this device has been detected. 
++ */ + static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + ++#ifdef CONFIG_PCI_IOV ++ struct pci_dev *bdev, *vfdev; ++ u32 dw0, dw1, dw2, dw3; ++ int vf, pos; ++ u16 req_id, pf_func; ++ ++ if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA)) ++ goto skip_bad_vf_detection; ++ ++ bdev = pdev->bus->self; ++ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) ++ bdev = bdev->bus->self; ++ ++ if (!bdev) ++ goto skip_bad_vf_detection; ++ ++ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); ++ if (!pos) ++ goto skip_bad_vf_detection; ++ ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); ++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); ++ ++ req_id = dw1 >> 16; ++ /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */ ++ if (!(req_id & 0x0080)) ++ goto skip_bad_vf_detection; ++ ++ pf_func = req_id & 0x01; ++ if ((pf_func & 1) == (pdev->devfn & 1)) { ++ ++ vf = (req_id & 0x7F) >> 1; ++ dev_err(pci_dev_to_dev(pdev), ++ "VF %d has caused a PCIe error\n", vf); ++ dev_err(pci_dev_to_dev(pdev), ++ "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2:\n%8.8x\tdw3: %8.8x\n", ++ dw0, dw1, dw2, dw3); ++ ++ /* Find the pci device of the offending VF */ ++ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, ++ E1000_DEV_ID_82576_VF, NULL); ++ while (vfdev) { ++ if (vfdev->devfn == (req_id & 0xFF)) ++ break; ++ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, ++ E1000_DEV_ID_82576_VF, vfdev); ++ } ++ /* ++ * There's a slim chance the VF could have been hot plugged, ++ * so if it is no longer present we don't need to issue the ++ * VFLR. Just clean up the AER in that case. ++ */ ++ if (vfdev) { ++ dev_err(pci_dev_to_dev(pdev), ++ "Issuing VFLR to VF %d\n", vf); ++ pci_write_config_dword(vfdev, 0xA8, 0x00008000); ++ } ++ ++ pci_cleanup_aer_uncorrect_error_status(pdev); ++ } ++ ++ /* ++ * Even though the error may have occurred on the other port ++ * we still need to increment the vf error reference count for ++ * both ports because the I/O resume function will be called ++ * for both of them. ++ */ ++ adapter->vferr_refcount++; ++ ++ return PCI_ERS_RESULT_RECOVERED; ++ ++skip_bad_vf_detection: ++#endif /* CONFIG_PCI_IOV */ ++ + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) +@@ -7716,22 +9348,21 @@ + } + + /** +- * igb_io_slot_reset - called after the pci bus has been reset. +- * @pdev: Pointer to PCI device ++ * igb_io_slot_reset - called after the pci bus has been reset. ++ * @pdev: Pointer to PCI device + * +- * Restart the card from scratch, as if from a cold-boot. Implementation +- * resembles the first-half of the igb_resume routine. +- **/ ++ * Restart the card from scratch, as if from a cold-boot. Implementation ++ * resembles the first-half of the igb_resume routine. 
++ */ + static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; +- int err; + + if (pci_enable_device_mem(pdev)) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { +@@ -7742,77 +9373,91 @@ + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + +- igb_reset(adapter); +- wr32(E1000_WUS, ~0); ++ schedule_work(&adapter->reset_task); ++ E1000_WRITE_REG(hw, E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + +- err = pci_cleanup_aer_uncorrect_error_status(pdev); +- if (err) { +- dev_err(&pdev->dev, +- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", +- err); +- /* non-fatal, continue */ +- } ++ pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; + } + + /** +- * igb_io_resume - called when traffic can start flowing again. +- * @pdev: Pointer to PCI device ++ * igb_io_resume - called when traffic can start flowing again. ++ * @pdev: Pointer to PCI device + * +- * This callback is called when the error recovery driver tells us that +- * its OK to resume normal operation. Implementation resembles the +- * second-half of the igb_resume routine. ++ * This callback is called when the error recovery driver tells us that ++ * its OK to resume normal operation. Implementation resembles the ++ * second-half of the igb_resume routine. + */ + static void igb_io_resume(struct pci_dev *pdev) + { + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + ++ if (adapter->vferr_refcount) { ++ dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n"); ++ adapter->vferr_refcount--; ++ return; ++ } ++ + if (netif_running(netdev)) { + if (igb_up(adapter)) { +- dev_err(&pdev->dev, "igb_up failed after reset\n"); ++ dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + +- /* let the f/w know that the h/w is now under the control of the +- * driver. ++ /* let the f/w know that the h/w is now under the control of the driver. + */ + igb_get_hw_control(adapter); + } + +-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, +- u8 qsel) ++#endif /* HAVE_PCI_ERS */ ++ ++int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) + { +- u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; ++ int i; + +- /* HW expects these in little endian so we reverse the byte order +- * from network order (big endian) to little endian +- */ +- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | +- ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); +- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); +- +- /* Indicate to hardware the Address is Valid. 
*/ +- rar_high |= E1000_RAH_AV; +- +- if (hw->mac.type == e1000_82575) +- rar_high |= E1000_RAH_POOL_1 * qsel; +- else +- rar_high |= E1000_RAH_POOL_1 << qsel; ++ if (is_zero_ether_addr(addr)) ++ return 0; + +- wr32(E1000_RAL(index), rar_low); +- wrfl(); +- wr32(E1000_RAH(index), rar_high); +- wrfl(); ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) ++ continue; ++ adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED | ++ IGB_MAC_STATE_IN_USE); ++ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); ++ adapter->mac_table[i].queue = queue; ++ igb_sync_mac_table(adapter); ++ return 0; ++ } ++ return -ENOMEM; + } ++int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) ++{ ++ /* search table for addr, if found, set to 0 and sync */ ++ int i; ++ struct e1000_hw *hw = &adapter->hw; + ++ if (is_zero_ether_addr(addr)) ++ return 0; ++ for (i = 0; i < hw->mac.rar_entry_count; i++) { ++ if (!ether_addr_equal(addr, adapter->mac_table[i].addr) && ++ adapter->mac_table[i].queue == queue) { ++ adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED; ++ memset(adapter->mac_table[i].addr, 0, ETH_ALEN); ++ adapter->mac_table[i].queue = 0; ++ igb_sync_mac_table(adapter); ++ return 0; ++ } ++ } ++ return -ENOMEM; ++} + static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) + { +@@ -7829,15 +9474,17 @@ + return 0; + } + ++#ifdef IFLA_VF_MAX + static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++ + if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + return -EINVAL; + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, +- "Reload the VF driver to make this change effective."); ++ "Reload the VF driver to make this change effective.\n"); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); +@@ -7854,13 +9501,15 @@ + return 100; + case SPEED_1000: + return 1000; ++ case SPEED_2500: ++ return 2500; + default: + return 0; + } + } + + static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, +- int link_speed) ++ int link_speed) + { + int rf_dec, rf_int; + u32 bcnrc_val; +@@ -7869,23 +9518,23 @@ + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); +- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / +- tx_rate; ++ rf_dec = (rf_dec * (1<vf_rate_link_speed == 0) || +- (adapter->hw.mac.type != e1000_82576)) ++ (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); +@@ -7903,7 +9552,7 @@ + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, +- "Link speed has been changed. VF Transmit rate is disabled\n"); ++ "Link speed has been changed. 
VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { +@@ -7911,13 +9560,16 @@ + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, +- adapter->vf_data[i].tx_rate, +- actual_link_speed); ++ adapter->vf_data[i].tx_rate, actual_link_speed); + } + } + +-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, +- int min_tx_rate, int max_tx_rate) ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, ++ int max_tx_rate) ++#else ++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + { + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +@@ -7926,105 +9578,137 @@ + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + +- if (min_tx_rate) +- return -EINVAL; +- + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || +- (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || +- (max_tx_rate < 0) || +- (max_tx_rate > actual_link_speed)) ++ (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) || ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++ (max_tx_rate < 0) || (max_tx_rate > actual_link_speed)) ++#else ++ (tx_rate < 0) || (tx_rate > actual_link_speed)) ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); ++#else ++ adapter->vf_data[vf].tx_rate = (u16)tx_rate; ++ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + + return 0; + } + +-static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, +- bool setting) +-{ +- struct igb_adapter *adapter = netdev_priv(netdev); +- struct e1000_hw *hw = &adapter->hw; +- u32 reg_val, reg_offset; +- +- if (!adapter->vfs_allocated_count) +- return -EOPNOTSUPP; +- +- if (vf >= adapter->vfs_allocated_count) +- return -EINVAL; +- +- reg_offset = (hw->mac.type == e1000_82576) ? 
E1000_DTXSWC : E1000_TXSWC; +- reg_val = rd32(reg_offset); +- if (setting) +- reg_val |= ((1 << vf) | +- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); +- else +- reg_val &= ~((1 << vf) | +- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); +- wr32(reg_offset, reg_val); +- +- adapter->vf_data[vf].spoofchk_enabled = setting; +- return 0; +-} +- + static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) + { + struct igb_adapter *adapter = netdev_priv(netdev); ++ + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; ++#else ++ ivi->tx_rate = adapter->vf_data[vf].tx_rate; ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; ++#endif + return 0; + } +- ++#endif + static void igb_vmm_control(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; ++ int count; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: +- case e1000_i210: +- case e1000_i211: +- case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ +- reg = rd32(E1000_DTXCTL); +- reg |= E1000_DTXCTL_VLAN_ADDED; +- wr32(E1000_DTXCTL, reg); ++ reg = E1000_READ_REG(hw, E1000_DTXCTL); ++ reg |= (E1000_DTXCTL_VLAN_ADDED | ++ E1000_DTXCTL_SPOOF_INT); ++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); + /* Fall through */ + case e1000_82580: + /* enable replication vlan tag stripping */ +- reg = rd32(E1000_RPLOLR); ++ reg = E1000_READ_REG(hw, E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; +- wr32(E1000_RPLOLR, reg); ++ E1000_WRITE_REG(hw, E1000_RPLOLR, reg); + /* Fall through */ + case e1000_i350: ++ case e1000_i354: + /* none of the above registers are supported by i350 */ + break; + } + +- if (adapter->vfs_allocated_count) { +- igb_vmdq_set_loopback_pf(hw, true); +- igb_vmdq_set_replication_pf(hw, true); +- igb_vmdq_set_anti_spoofing_pf(hw, true, +- adapter->vfs_allocated_count); +- } else { +- igb_vmdq_set_loopback_pf(hw, false); +- igb_vmdq_set_replication_pf(hw, false); +- } ++ /* Enable Malicious Driver Detection */ ++ if ((adapter->vfs_allocated_count) && ++ (adapter->mdd)) { ++ if (hw->mac.type == e1000_i350) ++ igb_enable_mdd(adapter); ++ } ++ ++ /* enable replication and loopback support */ ++ count = adapter->vfs_allocated_count || adapter->vmdq_pools; ++ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count) ++ e1000_vmdq_set_loopback_pf(hw, 1); ++ e1000_vmdq_set_anti_spoofing_pf(hw, ++ adapter->vfs_allocated_count || adapter->vmdq_pools, ++ adapter->vfs_allocated_count); ++ e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count || ++ adapter->vmdq_pools); ++} ++ ++static void igb_init_fw(struct igb_adapter *adapter) ++{ ++ struct e1000_fw_drv_info fw_cmd; ++ struct e1000_hw *hw = &adapter->hw; ++ int i; ++ u16 mask; ++ ++ if (hw->mac.type == e1000_i210) ++ mask = E1000_SWFW_EEP_SM; ++ else ++ mask = E1000_SWFW_PHY0_SM; ++ /* i211 parts do not support this feature */ ++ if (hw->mac.type == e1000_i211) ++ hw->mac.arc_subsystem_valid = false; ++ ++ if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) { ++ for (i = 0; i <= FW_MAX_RETRIES; i++) { ++ E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI); ++ 
fw_cmd.hdr.cmd = FW_CMD_DRV_INFO; ++ fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN; ++ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED; ++ fw_cmd.port_num = hw->bus.func; ++ fw_cmd.drv_version = FW_FAMILY_DRV_VER; ++ fw_cmd.hdr.checksum = 0; ++ fw_cmd.hdr.checksum = ++ e1000_calculate_checksum((u8 *)&fw_cmd, ++ (FW_HDR_LEN + ++ fw_cmd.hdr.buf_len)); ++ e1000_host_interface_command(hw, (u8 *)&fw_cmd, ++ sizeof(fw_cmd)); ++ if (fw_cmd.hdr.cmd_or_resp.ret_status ++ == FW_STATUS_SUCCESS) ++ break; ++ } ++ } else ++ dev_warn(pci_dev_to_dev(adapter->pdev), ++ "Unable to get semaphore, firmware init failed.\n"); ++ hw->mac.ops.release_swfw_sync(hw, mask); + } + + static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +@@ -8032,34 +9716,40 @@ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; ++ u32 status; ++ ++ if (hw->mac.type == e1000_i211) ++ return; + + if (hw->mac.type > e1000_82580) { +- if (adapter->flags & IGB_FLAG_DMAC) { ++ if (adapter->dmac != IGB_DMAC_DISABLE) { + u32 reg; + +- /* force threshold to 0. */ +- wr32(E1000_DMCTXTH, 0); ++ /* force threshold to 0. */ ++ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); + +- /* DMA Coalescing high water mark needs to be greater ++ /* ++ * DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * pba - adapter->max_frame_size / 16; + if (hwm < 64 * (pba - 6)) + hwm = 64 * (pba - 6); +- reg = rd32(E1000_FCRTC); ++ reg = E1000_READ_REG(hw, E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); +- wr32(E1000_FCRTC, reg); ++ E1000_WRITE_REG(hw, E1000_FCRTC, reg); + +- /* Set the DMA Coalescing Rx threshold to PBA - 2 * max ++ /* ++ * Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
+ */ + dmac_thr = pba - adapter->max_frame_size / 512; + if (dmac_thr < pba - 10) + dmac_thr = pba - 10; +- reg = rd32(E1000_DMACR); ++ reg = E1000_READ_REG(hw, E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); +@@ -8067,47 +9757,84 @@ + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + +- /* watchdog timer= +-1000 usec in 32usec intervals */ +- reg |= (1000 >> 5); ++ /* Check if status is 2.5Gb backplane connection ++ * before configuration of watchdog timer, which is ++ * in msec values in 12.8usec intervals ++ * watchdog timer= msec values in 32usec intervals ++ * for non 2.5Gb connection ++ */ ++ if (hw->mac.type == e1000_i354) { ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if ((status & E1000_STATUS_2P5_SKU) && ++ (!(status & E1000_STATUS_2P5_SKU_OVER))) ++ reg |= ((adapter->dmac * 5) >> 6); ++ else ++ reg |= ((adapter->dmac) >> 5); ++ } else { ++ reg |= ((adapter->dmac) >> 5); ++ } + +- /* Disable BMC-to-OS Watchdog Enable */ ++ /* ++ * Disable BMC-to-OS Watchdog enable ++ * on devices that support OS-to-BMC ++ */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; ++ E1000_WRITE_REG(hw, E1000_DMACR, reg); + +- wr32(E1000_DMACR, reg); ++ /* no lower threshold to disable coalescing ++ * (smart fifb)-UTRESH=0 ++ */ ++ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); + +- /* no lower threshold to disable +- * coalescing(smart fifb)-UTRESH=0 ++ /* This sets the time to wait before requesting ++ * transition to low power state to number of usecs ++ * needed to receive 1 512 byte frame at gigabit ++ * line rate. On i350 device, time to make transition ++ * to Lx state is delayed by 4 usec with flush disable ++ * bit set to avoid losing mailbox interrupts + */ +- wr32(E1000_DMCRTRH, 0); ++ reg = E1000_READ_REG(hw, E1000_DMCTLX); ++ if (hw->mac.type == e1000_i350) ++ reg |= IGB_DMCTLX_DCFLUSH_DIS; + +- reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); ++ /* in 2.5Gb connection, TTLX unit is 0.4 usec ++ * which is 0x4*2 = 0xA. 
But delay is still 4 usec ++ */ ++ if (hw->mac.type == e1000_i354) { ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ if ((status & E1000_STATUS_2P5_SKU) && ++ (!(status & E1000_STATUS_2P5_SKU_OVER))) ++ reg |= 0xA; ++ else ++ reg |= 0x4; ++ } else { ++ reg |= 0x4; ++ } + +- wr32(E1000_DMCTLX, reg); ++ E1000_WRITE_REG(hw, E1000_DMCTLX, reg); + +- /* free space in tx packet buffer to wake from +- * DMA coal +- */ +- wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - +- (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); ++ /* free space in tx pkt buffer to wake from DMA coal */ ++ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - ++ (IGB_TX_BUF_4096 + adapter->max_frame_size)) ++ >> 6); + +- /* make low power state decision controlled +- * by DMA coal +- */ +- reg = rd32(E1000_PCIEMISC); ++ /* low power state decision controlled by DMA coal */ ++ reg = E1000_READ_REG(hw, E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; +- wr32(E1000_PCIEMISC, reg); ++ E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { +- u32 reg = rd32(E1000_PCIEMISC); ++ u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); + +- wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); +- wr32(E1000_DMACR, 0); ++ E1000_WRITE_REG(hw, E1000_PCIEMISC, ++ reg & ~E1000_PCIEMISC_LX_DECISION); ++ E1000_WRITE_REG(hw, E1000_DMACR, 0); + } + } + +-/** +- * igb_read_i2c_byte - Reads 8 bit word over I2C ++#ifdef HAVE_I2C_SUPPORT ++/* igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address +@@ -8115,9 +9842,9 @@ + * + * Performs byte read operation over I2C interface at + * a specified device address. +- **/ ++ */ + s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 *data) ++ u8 dev_addr, u8 *data) + { + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; +@@ -8129,7 +9856,8 @@ + + swfw_mask = E1000_SWFW_PHY0_SM; + +- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) ++ != E1000_SUCCESS) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); +@@ -8139,12 +9867,11 @@ + return E1000_ERR_I2C; + else { + *data = status; +- return 0; ++ return E1000_SUCCESS; + } + } + +-/** +- * igb_write_i2c_byte - Writes 8 bit word over I2C ++/* igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address +@@ -8152,9 +9879,9 @@ + * + * Performs byte write operation over I2C interface at + * a specified device address. 
+- **/ ++ */ + s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 data) ++ u8 dev_addr, u8 data) + { + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; +@@ -8164,7 +9891,7 @@ + if (!this_client) + return E1000_ERR_I2C; + +- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) + return E1000_ERR_SWFW_SYNC; + status = i2c_smbus_write_byte_data(this_client, byte_offset, data); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); +@@ -8172,9 +9899,9 @@ + if (status) + return E1000_ERR_I2C; + else +- return 0; +- ++ return E1000_SUCCESS; + } ++#endif /* HAVE_I2C_SUPPORT */ + + int igb_reinit_queues(struct igb_adapter *adapter) + { +@@ -8197,4 +9924,5 @@ + + return err; + } ++ + /* igb_main.c */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_param.c b/drivers/net/ethernet/intel/igb/igb_param.c +--- a/drivers/net/ethernet/intel/igb/igb_param.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_param.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,872 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include ++ ++#include "igb.h" ++ ++/* This is the only thing that needs to be changed to adjust the ++ * maximum number of ports that the driver can manage. ++ */ ++ ++#define IGB_MAX_NIC 32 ++ ++#define OPTION_UNSET -1 ++#define OPTION_DISABLED 0 ++#define OPTION_ENABLED 1 ++#define MAX_NUM_LIST_OPTS 15 ++ ++/* All parameters are treated the same, as an integer array of values. ++ * This macro just reduces the need to repeat the same declaration code ++ * over and over (plus this helps to avoid typo bugs). ++ */ ++ ++#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET } ++#ifndef module_param_array ++/* Module Parameters are always initialized to -1, so that the driver ++ * can tell the difference between no user specified value or the ++ * user asking for the default value. ++ * The true default values are loaded in when igb_check_options is called. ++ * ++ * This is a GCC extension to ANSI C. ++ * See the item "Labeled Elements in Initializers" in the section ++ * "Extensions to the C Language Family" of the GCC documentation. 
++ */ ++ ++#define IGB_PARAM(X, desc) \ ++ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ ++ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \ ++ MODULE_PARM_DESC(X, desc); ++#else ++#define IGB_PARAM(X, desc) \ ++ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ ++ static unsigned int num_##X; \ ++ module_param_array_named(X, X, int, &num_##X, 0); \ ++ MODULE_PARM_DESC(X, desc); ++#endif ++ ++/* Interrupt Throttle Rate (interrupts/sec) ++ * ++ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) ++ */ ++IGB_PARAM(InterruptThrottleRate, ++ "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive"); ++#define DEFAULT_ITR 3 ++#define MAX_ITR 100000 ++/* #define MIN_ITR 120 */ ++#define MIN_ITR 0 ++/* IntMode (Interrupt Mode) ++ * ++ * Valid Range: 0 - 2 ++ * ++ * Default Value: 2 (MSI-X) ++ */ ++IGB_PARAM(IntMode, ++ "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2"); ++#define MAX_INTMODE IGB_INT_MODE_MSIX ++#define MIN_INTMODE IGB_INT_MODE_LEGACY ++ ++IGB_PARAM(Node, "set the starting node to allocate memory on, default -1"); ++ ++/* LLIPort (Low Latency Interrupt TCP Port) ++ * ++ * Valid Range: 0 - 65535 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IGB_PARAM(LLIPort, ++ "Low Latency Interrupt TCP Port (0-65535), default 0=off"); ++ ++#define DEFAULT_LLIPORT 0 ++#define MAX_LLIPORT 0xFFFF ++#define MIN_LLIPORT 0 ++ ++/* LLIPush (Low Latency Interrupt on TCP Push flag) ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off"); ++ ++#define DEFAULT_LLIPUSH 0 ++#define MAX_LLIPUSH 1 ++#define MIN_LLIPUSH 0 ++ ++/* LLISize (Low Latency Interrupt on Packet Size) ++ * ++ * Valid Range: 0 - 1500 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IGB_PARAM(LLISize, ++ "Low Latency Interrupt on Packet Size (0-1500), default 0=off"); ++ ++#define DEFAULT_LLISIZE 0 ++#define MAX_LLISIZE 1500 ++#define MIN_LLISIZE 0 ++ ++/* RSS (Enable RSS multiqueue receive) ++ * ++ * Valid Range: 0 - 8 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(RSS, ++ "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus"); ++ ++#define DEFAULT_RSS 1 ++#define MAX_RSS 8 ++#define MIN_RSS 0 ++ ++/* VMDQ (Enable VMDq multiqueue receive) ++ * ++ * Valid Range: 0 - 8 ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(VMDQ, ++ "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0"); ++ ++#define DEFAULT_VMDQ 0 ++#define MAX_VMDQ MAX_RSS ++#define MIN_VMDQ 0 ++ ++/* max_vfs (Enable SR-IOV VF devices) ++ * ++ * Valid Range: 0 - 7 ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(max_vfs, ++ "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0"); ++ ++#define DEFAULT_SRIOV 0 ++#define MAX_SRIOV 7 ++#define MIN_SRIOV 0 ++ ++/* MDD (Enable Malicious Driver Detection) ++ * ++ * Only available when SR-IOV is enabled - max_vfs is greater than 0 ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(MDD, ++ "Malicious Driver Detection (0/1), default 1 = enabled. Only available when max_vfs is greater than 0"); ++ ++#ifdef DEBUG ++ ++/* Disable Hardware Reset on Tx Hang ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 0 (disabled, i.e. 
h/w will reset) ++ */ ++IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang"); ++ ++/* Dump Transmit and Receive buffers ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request"); ++ ++#endif /* DEBUG */ ++ ++/* QueuePairs (Enable TX/RX queue pairs for interrupt handling) ++ * ++ * Valid Range: 0 - 1 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(QueuePairs, ++ "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on"); ++ ++#define DEFAULT_QUEUE_PAIRS 1 ++#define MAX_QUEUE_PAIRS 1 ++#define MIN_QUEUE_PAIRS 0 ++ ++/* Enable/disable EEE (a.k.a. IEEE802.3az) ++ * ++ * Valid Range: 0, 1 ++ * ++ * Default Value: 1 ++ */ ++IGB_PARAM(EEE, ++ "Enable/disable on parts that support the feature"); ++ ++/* Enable/disable DMA Coalescing ++ * ++ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, ++ * 9000, 10000(msec), 250(usec), 500(usec) ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(DMAC, ++ "Disable or set latency for DMA Coalescing ((0=off, 1000-10000(msec), 250, 500 (usec))"); ++ ++#ifndef IGB_NO_LRO ++/* Enable/disable Large Receive Offload ++ * ++ * Valid Values: 0(off), 1(on) ++ * ++ * Default Value: 0 ++ */ ++IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off"); ++ ++#endif ++struct igb_opt_list { ++ int i; ++ char *str; ++}; ++struct igb_option { ++ enum { enable_option, range_option, list_option } type; ++ const char *name; ++ const char *err; ++ int def; ++ union { ++ struct { /* range_option info */ ++ int min; ++ int max; ++ } r; ++ struct { /* list_option info */ ++ int nr; ++ struct igb_opt_list *p; ++ } l; ++ } arg; ++}; ++ ++static int igb_validate_option(unsigned int *value, ++ struct igb_option *opt, ++ struct igb_adapter *adapter) ++{ ++ if (*value == OPTION_UNSET) { ++ *value = opt->def; ++ return 0; ++ } ++ ++ switch (opt->type) { ++ case enable_option: ++ switch (*value) { ++ case OPTION_ENABLED: ++ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); ++ return 0; ++ case OPTION_DISABLED: ++ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); ++ return 0; ++ } ++ break; ++ case range_option: ++ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { ++ DPRINTK(PROBE, INFO, ++ "%s set to %d\n", opt->name, *value); ++ return 0; ++ } ++ break; ++ case list_option: { ++ int i; ++ struct igb_opt_list *ent; ++ ++ for (i = 0; i < opt->arg.l.nr; i++) { ++ ent = &opt->arg.l.p[i]; ++ if (*value == ent->i) { ++ if (ent->str[0] != '\0') ++ DPRINTK(PROBE, INFO, "%s\n", ent->str); ++ return 0; ++ } ++ } ++ } ++ break; ++ default: ++ BUG(); ++ } ++ ++ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n", ++ opt->name, *value, opt->err); ++ *value = opt->def; ++ return -1; ++} ++ ++/** ++ * igb_check_options - Range Checking for Command Line Parameters ++ * @adapter: board private structure ++ * ++ * This routine checks all command line parameters for valid user ++ * input. If an invalid value is given, or if no user specified ++ * value exists, a default value is used. The final value is stored ++ * in a variable in the adapter structure. 
++ **/ ++ ++void igb_check_options(struct igb_adapter *adapter) ++{ ++ int bd = adapter->bd_number; ++ struct e1000_hw *hw = &adapter->hw; ++ ++ if (bd >= IGB_MAX_NIC) { ++ DPRINTK(PROBE, NOTICE, ++ "Warning: no configuration for board #%d\n", bd); ++ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); ++#ifndef module_param_array ++ bd = IGB_MAX_NIC; ++#endif ++ } ++ ++ { /* Interrupt Throttling Rate */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Interrupt Throttling Rate (ints/sec)", ++ .err = "using default of "__MODULE_STRING(DEFAULT_ITR), ++ .def = DEFAULT_ITR, ++ .arg = { .r = { .min = MIN_ITR, ++ .max = MAX_ITR } } ++ }; ++ ++#ifdef module_param_array ++ if (num_InterruptThrottleRate > bd) { ++#endif ++ unsigned int itr = InterruptThrottleRate[bd]; ++ ++ switch (itr) { ++ case 0: ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ if (hw->mac.type >= e1000_i350) ++ adapter->dmac = IGB_DMAC_DISABLE; ++ adapter->rx_itr_setting = itr; ++ break; ++ case 1: ++ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", ++ opt.name); ++ adapter->rx_itr_setting = itr; ++ break; ++ case 3: ++ DPRINTK(PROBE, INFO, ++ "%s set to dynamic conservative mode\n", ++ opt.name); ++ adapter->rx_itr_setting = itr; ++ break; ++ default: ++ igb_validate_option(&itr, &opt, adapter); ++ /* Save the setting, because the dynamic bits ++ * change itr. In case of invalid user value, ++ * default to conservative mode, else need to ++ * clear the lower two bits because they are ++ * used as control */ ++ if (itr == 3) { ++ adapter->rx_itr_setting = itr; ++ } else { ++ adapter->rx_itr_setting = 1000000000 ++ / (itr * 256); ++ adapter->rx_itr_setting &= ~3; ++ } ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ adapter->rx_itr_setting = opt.def; ++ } ++#endif ++ adapter->tx_itr_setting = adapter->rx_itr_setting; ++ } ++ { /* Interrupt Mode */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Interrupt Mode", ++ .err = "defaulting to 2 (MSI-X)", ++ .def = IGB_INT_MODE_MSIX, ++ .arg = { .r = { .min = MIN_INTMODE, ++ .max = MAX_INTMODE } } ++ }; ++ ++#ifdef module_param_array ++ if (num_IntMode > bd) { ++#endif ++ unsigned int int_mode = IntMode[bd]; ++ igb_validate_option(&int_mode, &opt, adapter); ++ adapter->int_mode = int_mode; ++#ifdef module_param_array ++ } else { ++ adapter->int_mode = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt TCP Port */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt TCP Port", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIPORT), ++ .def = DEFAULT_LLIPORT, ++ .arg = { .r = { .min = MIN_LLIPORT, ++ .max = MAX_LLIPORT } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPort > bd) { ++#endif ++ adapter->lli_port = LLIPort[bd]; ++ if (adapter->lli_port) { ++ igb_validate_option(&adapter->lli_port, &opt, ++ adapter); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_port = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt on Packet Size */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on Packet Size", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLISIZE), ++ .def = DEFAULT_LLISIZE, ++ .arg = { .r = { .min = MIN_LLISIZE, ++ .max = MAX_LLISIZE } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLISize > bd) { ++#endif ++ adapter->lli_size = LLISize[bd]; ++ if (adapter->lli_size) { ++ igb_validate_option(&adapter->lli_size, &opt, ++ 
adapter); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_size = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt on TCP Push flag */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "Low Latency Interrupt on TCP Push flag", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPush > bd) { ++#endif ++ unsigned int lli_push = LLIPush[bd]; ++ igb_validate_option(&lli_push, &opt, adapter); ++ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0; ++#ifdef module_param_array ++ } else { ++ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0; ++ } ++#endif ++ } ++ { /* SRIOV - Enable SR-IOV VF devices */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "max_vfs - SR-IOV VF devices", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_SRIOV), ++ .def = DEFAULT_SRIOV, ++ .arg = { .r = { .min = MIN_SRIOV, ++ .max = MAX_SRIOV } } ++ }; ++ ++#ifdef module_param_array ++ if (num_max_vfs > bd) { ++#endif ++ adapter->vfs_allocated_count = max_vfs[bd]; ++ igb_validate_option(&adapter->vfs_allocated_count, ++ &opt, adapter); ++ ++#ifdef module_param_array ++ } else { ++ adapter->vfs_allocated_count = opt.def; ++ } ++#endif ++ if (adapter->vfs_allocated_count) { ++ switch (hw->mac.type) { ++ case e1000_82575: ++ case e1000_82580: ++ case e1000_i210: ++ case e1000_i211: ++ case e1000_i354: ++ adapter->vfs_allocated_count = 0; ++ DPRINTK(PROBE, INFO, ++ "SR-IOV option max_vfs not supported.\n"); ++ /* Fall through */ ++ default: ++ break; ++ } ++ } ++ } ++ { /* VMDQ - Enable VMDq multiqueue receive */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "VMDQ - VMDq multiqueue queue count", ++ .err = "using default of "__MODULE_STRING(DEFAULT_VMDQ), ++ .def = DEFAULT_VMDQ, ++ .arg = { .r = { .min = MIN_VMDQ, ++ .max = (MAX_VMDQ ++ - adapter->vfs_allocated_count)} } ++ }; ++ if ((hw->mac.type != e1000_i210) || ++ (hw->mac.type != e1000_i211)) { ++#ifdef module_param_array ++ if (num_VMDQ > bd) { ++#endif ++ adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]); ++ if (adapter->vfs_allocated_count && ++ !adapter->vmdq_pools) { ++ DPRINTK(PROBE, INFO, ++ "Enabling SR-IOV requires VMDq be set to at least 1\n"); ++ adapter->vmdq_pools = 1; ++ } ++ igb_validate_option(&adapter->vmdq_pools, &opt, ++ adapter); ++ ++#ifdef module_param_array ++ } else { ++ if (!adapter->vfs_allocated_count) ++ adapter->vmdq_pools = (opt.def == 1 ? 
0 ++ : opt.def); ++ else ++ adapter->vmdq_pools = 1; ++ } ++#endif ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) { ++ DPRINTK(PROBE, INFO, ++ "VMDq not supported on this part.\n"); ++ adapter->vmdq_pools = 0; ++ } ++#endif ++ ++ } else { ++ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n"); ++ adapter->vmdq_pools = opt.def; ++ } ++ } ++ { /* RSS - Enable RSS multiqueue receives */ ++ struct igb_option opt = { ++ .type = range_option, ++ .name = "RSS - RSS multiqueue receive count", ++ .err = "using default of "__MODULE_STRING(DEFAULT_RSS), ++ .def = DEFAULT_RSS, ++ .arg = { .r = { .min = MIN_RSS, ++ .max = MAX_RSS } } ++ }; ++ ++ switch (hw->mac.type) { ++ case e1000_82575: ++#ifndef CONFIG_IGB_VMDQ_NETDEV ++ if (!!adapter->vmdq_pools) { ++ if (adapter->vmdq_pools <= 2) { ++ if (adapter->vmdq_pools == 2) ++ opt.arg.r.max = 3; ++ } else { ++ opt.arg.r.max = 1; ++ } ++ } else { ++ opt.arg.r.max = 4; ++ } ++#else ++ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4; ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ break; ++ case e1000_i210: ++ opt.arg.r.max = 4; ++ break; ++ case e1000_i211: ++ opt.arg.r.max = 2; ++ break; ++ case e1000_82576: ++#ifndef CONFIG_IGB_VMDQ_NETDEV ++ if (!!adapter->vmdq_pools) ++ opt.arg.r.max = 2; ++ break; ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ case e1000_82580: ++ case e1000_i350: ++ case e1000_i354: ++ default: ++ if (!!adapter->vmdq_pools) ++ opt.arg.r.max = 1; ++ break; ++ } ++ ++ if (adapter->int_mode != IGB_INT_MODE_MSIX) { ++ DPRINTK(PROBE, INFO, ++ "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n", ++ opt.err); ++ opt.arg.r.max = 1; ++ } ++ ++#ifdef module_param_array ++ if (num_RSS > bd) { ++#endif ++ adapter->rss_queues = RSS[bd]; ++ switch (adapter->rss_queues) { ++ case 1: ++ break; ++ default: ++ igb_validate_option(&adapter->rss_queues, &opt, ++ adapter); ++ if (adapter->rss_queues) ++ break; ++ case 0: ++ adapter->rss_queues = min_t(u32, opt.arg.r.max, ++ num_online_cpus()); ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ adapter->rss_queues = opt.def; ++ } ++#endif ++ } ++ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = ++ "QueuePairs - Tx/Rx queue pairs for interrupt handling", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++#ifdef module_param_array ++ if (num_QueuePairs > bd) { ++#endif ++ unsigned int qp = QueuePairs[bd]; ++ /* ++ * We must enable queue pairs if the number of queues ++ * exceeds the number of available interrupts. We are ++ * limited to 10, or 3 per unallocated vf. On I210 and ++ * I211 devices, we are limited to 5 interrupts. ++ * However, since I211 only supports 2 queues, we do not ++ * need to check and override the user option. ++ */ ++ if (qp == OPTION_DISABLED) { ++ if (adapter->rss_queues > 4) ++ qp = OPTION_ENABLED; ++ ++ if (adapter->vmdq_pools > 4) ++ qp = OPTION_ENABLED; ++ ++ if (adapter->rss_queues > 1 && ++ (adapter->vmdq_pools > 3 || ++ adapter->vfs_allocated_count > 6)) ++ qp = OPTION_ENABLED; ++ ++ if (hw->mac.type == e1000_i210 && ++ adapter->rss_queues > 2) ++ qp = OPTION_ENABLED; ++ ++ if (qp == OPTION_ENABLED) ++ DPRINTK(PROBE, INFO, ++ "Number of queues exceeds available interrupts, %s\n", ++ opt.err); ++ } ++ igb_validate_option(&qp, &opt, adapter); ++ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0; ++#ifdef module_param_array ++ } else { ++ adapter->flags |= opt.def ? 
IGB_FLAG_QUEUE_PAIRS : 0; ++ } ++#endif ++ } ++ { /* EEE - Enable EEE for capable adapters */ ++ ++ if (hw->mac.type >= e1000_i350) { ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "EEE Support", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++#ifdef module_param_array ++ if (num_EEE > bd) { ++#endif ++ unsigned int eee = EEE[bd]; ++ igb_validate_option(&eee, &opt, adapter); ++ adapter->flags |= eee ? IGB_FLAG_EEE : 0; ++ if (eee) ++ hw->dev_spec._82575.eee_disable = false; ++ else ++ hw->dev_spec._82575.eee_disable = true; ++ ++#ifdef module_param_array ++ } else { ++ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0; ++ if (adapter->flags & IGB_FLAG_EEE) ++ hw->dev_spec._82575.eee_disable = false; ++ else ++ hw->dev_spec._82575.eee_disable = true; ++ } ++#endif ++ } ++ } ++ { /* DMAC - Enable DMA Coalescing for capable adapters */ ++ ++ if (hw->mac.type >= e1000_i350) { ++ struct igb_opt_list list[] = { ++ { IGB_DMAC_DISABLE, "DMAC Disable"}, ++ { IGB_DMAC_MIN, "DMAC 250 usec"}, ++ { IGB_DMAC_500, "DMAC 500 usec"}, ++ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"}, ++ { IGB_DMAC_2000, "DMAC 2000 usec"}, ++ { IGB_DMAC_3000, "DMAC 3000 usec"}, ++ { IGB_DMAC_4000, "DMAC 4000 usec"}, ++ { IGB_DMAC_5000, "DMAC 5000 usec"}, ++ { IGB_DMAC_6000, "DMAC 6000 usec"}, ++ { IGB_DMAC_7000, "DMAC 7000 usec"}, ++ { IGB_DMAC_8000, "DMAC 8000 usec"}, ++ { IGB_DMAC_9000, "DMAC 9000 usec"}, ++ { IGB_DMAC_MAX, "DMAC 10000 usec"} ++ }; ++ struct igb_option opt = { ++ .type = list_option, ++ .name = "DMA Coalescing", ++ .err = "using default of " ++ __MODULE_STRING(IGB_DMAC_DISABLE), ++ .def = IGB_DMAC_DISABLE, ++ .arg = { .l = { .nr = 13, ++ .p = list ++ } ++ } ++ }; ++#ifdef module_param_array ++ if (num_DMAC > bd) { ++#endif ++ unsigned int dmac = DMAC[bd]; ++ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE) ++ dmac = IGB_DMAC_DISABLE; ++ igb_validate_option(&dmac, &opt, adapter); ++ switch (dmac) { ++ case IGB_DMAC_DISABLE: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_MIN: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_500: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_EN_DEFAULT: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_2000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_3000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_4000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_5000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_6000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_7000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_8000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_9000: ++ adapter->dmac = dmac; ++ break; ++ case IGB_DMAC_MAX: ++ adapter->dmac = dmac; ++ break; ++ default: ++ adapter->dmac = opt.def; ++ DPRINTK(PROBE, INFO, ++ "Invalid DMAC setting, resetting DMAC to %d\n", ++ opt.def); ++ } ++#ifdef module_param_array ++ } else ++ adapter->dmac = opt.def; ++#endif ++ } ++ } ++#ifndef IGB_NO_LRO ++ { /* LRO - Enable Large Receive Offload */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "LRO - Large Receive Offload", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ struct net_device *netdev = adapter->netdev; ++#ifdef module_param_array ++ if (num_LRO > bd) { ++#endif ++ unsigned int lro = LRO[bd]; ++ igb_validate_option(&lro, &opt, adapter); ++ netdev->features |= lro ? 
NETIF_F_LRO : 0; ++#ifdef module_param_array ++ } else if (opt.def == OPTION_ENABLED) { ++ netdev->features |= NETIF_F_LRO; ++ } ++#endif ++ } ++#endif /* IGB_NO_LRO */ ++ { /* MDD - Enable Malicious Driver Detection. Only available when ++ SR-IOV is enabled. */ ++ struct igb_option opt = { ++ .type = enable_option, ++ .name = "Malicious Driver Detection", ++ .err = "defaulting to 1", ++ .def = OPTION_ENABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = OPTION_ENABLED } } ++ }; ++ ++#ifdef module_param_array ++ if (num_MDD > bd) { ++#endif ++ adapter->mdd = MDD[bd]; ++ igb_validate_option((uint *)&adapter->mdd, &opt, ++ adapter); ++#ifdef module_param_array ++ } else { ++ adapter->mdd = opt.def; ++ } ++#endif ++ } ++} ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_procfs.c b/drivers/net/ethernet/intel/igb/igb_procfs.c +--- a/drivers/net/ethernet/intel/igb/igb_procfs.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_procfs.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,356 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "igb.h" ++#include "e1000_82575.h" ++#include "e1000_hw.h" ++ ++#ifdef IGB_PROCFS ++#ifndef IGB_HWMON ++ ++#include ++#include ++#include ++#include ++#include ++ ++static struct proc_dir_entry *igb_top_dir; ++ ++bool igb_thermal_present(struct igb_adapter *adapter) ++{ ++ s32 status; ++ struct e1000_hw *hw; ++ ++ if (adapter == NULL) ++ return false; ++ hw = &adapter->hw; ++ ++ /* ++ * Only set I2C bit-bang mode if an external thermal sensor is ++ * supported on this device. 
++ */ ++ if (adapter->ets) { ++ status = e1000_set_i2c_bb(hw); ++ if (status != E1000_SUCCESS) ++ return false; ++ } ++ ++ status = hw->mac.ops.init_thermal_sensor_thresh(hw); ++ if (status != E1000_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++static int igb_macburn(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct e1000_hw *hw; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ if (hw == NULL) ++ return snprintf(page, count, "error: no hw data\n"); ++ ++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", ++ (unsigned int)hw->mac.perm_addr[0], ++ (unsigned int)hw->mac.perm_addr[1], ++ (unsigned int)hw->mac.perm_addr[2], ++ (unsigned int)hw->mac.perm_addr[3], ++ (unsigned int)hw->mac.perm_addr[4], ++ (unsigned int)hw->mac.perm_addr[5]); ++} ++ ++static int igb_macadmn(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct e1000_hw *hw; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ if (hw == NULL) ++ return snprintf(page, count, "error: no hw data\n"); ++ ++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", ++ (unsigned int)hw->mac.addr[0], ++ (unsigned int)hw->mac.addr[1], ++ (unsigned int)hw->mac.addr[2], ++ (unsigned int)hw->mac.addr[3], ++ (unsigned int)hw->mac.addr[4], ++ (unsigned int)hw->mac.addr[5]); ++} ++ ++static int igb_numeports(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct e1000_hw *hw; ++ int ports; ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ if (hw == NULL) ++ return snprintf(page, count, "error: no hw data\n"); ++ ++ ports = 4; ++ ++ return snprintf(page, count, "%d\n", ports); ++} ++ ++static int igb_porttype(char *page, char **start, off_t off, int count, ++ int *eof, void *data) ++{ ++ struct igb_adapter *adapter = (struct igb_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", ++ test_bit(__IGB_DOWN, &adapter->state)); ++} ++ ++static int igb_therm_location(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", therm_data->sensor_data->location); ++} ++ ++static int igb_therm_maxopthresh(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", ++ therm_data->sensor_data->max_op_thresh); ++} ++ ++static int igb_therm_cautionthresh(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", ++ therm_data->sensor_data->caution_thresh); ++} ++ ++static int igb_therm_temp(char *page, char **start, off_t 
off, ++ int count, int *eof, void *data) ++{ ++ s32 status; ++ struct igb_therm_proc_data *therm_data = ++ (struct igb_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ status = e1000_get_thermal_sensor_data(therm_data->hw); ++ if (status != E1000_SUCCESS) ++ snprintf(page, count, "error: status %d returned\n", status); ++ ++ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); ++} ++ ++struct igb_proc_type { ++ char name[32]; ++ int (*read)(char*, char**, off_t, int, int*, void*); ++}; ++ ++struct igb_proc_type igb_proc_entries[] = { ++ {"numeports", &igb_numeports}, ++ {"porttype", &igb_porttype}, ++ {"macburn", &igb_macburn}, ++ {"macadmn", &igb_macadmn}, ++ {"", NULL} ++}; ++ ++struct igb_proc_type igb_internal_entries[] = { ++ {"location", &igb_therm_location}, ++ {"temp", &igb_therm_temp}, ++ {"cautionthresh", &igb_therm_cautionthresh}, ++ {"maxopthresh", &igb_therm_maxopthresh}, ++ {"", NULL} ++}; ++ ++void igb_del_proc_entries(struct igb_adapter *adapter) ++{ ++ int index, i; ++ char buf[16]; /* much larger than the sensor number will ever be */ ++ ++ if (igb_top_dir == NULL) ++ return; ++ ++ for (i = 0; i < E1000_MAX_SENSORS; i++) { ++ if (adapter->therm_dir[i] == NULL) ++ continue; ++ ++ for (index = 0; ; index++) { ++ if (igb_internal_entries[index].read == NULL) ++ break; ++ ++ remove_proc_entry(igb_internal_entries[index].name, ++ adapter->therm_dir[i]); ++ } ++ snprintf(buf, sizeof(buf), "sensor_%d", i); ++ remove_proc_entry(buf, adapter->info_dir); ++ } ++ ++ if (adapter->info_dir != NULL) { ++ for (index = 0; ; index++) { ++ if (igb_proc_entries[index].read == NULL) ++ break; ++ remove_proc_entry(igb_proc_entries[index].name, ++ adapter->info_dir); ++ } ++ remove_proc_entry("info", adapter->eth_dir); ++ } ++ ++ if (adapter->eth_dir != NULL) ++ remove_proc_entry(pci_name(adapter->pdev), igb_top_dir); ++} ++ ++/* called from igb_main.c */ ++void igb_procfs_exit(struct igb_adapter *adapter) ++{ ++ igb_del_proc_entries(adapter); ++} ++ ++int igb_procfs_topdir_init(void) ++{ ++ igb_top_dir = proc_mkdir("driver/igb", NULL); ++ if (igb_top_dir == NULL) ++ return (-ENOMEM); ++ ++ return 0; ++} ++ ++void igb_procfs_topdir_exit(void) ++{ ++ remove_proc_entry("driver/igb", NULL); ++} ++ ++/* called from igb_main.c */ ++int igb_procfs_init(struct igb_adapter *adapter) ++{ ++ int rc = 0; ++ int i; ++ int index; ++ char buf[16]; /* much larger than the sensor number will ever be */ ++ ++ adapter->eth_dir = NULL; ++ adapter->info_dir = NULL; ++ for (i = 0; i < E1000_MAX_SENSORS; i++) ++ adapter->therm_dir[i] = NULL; ++ ++ if (igb_top_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir); ++ if (adapter->eth_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ adapter->info_dir = proc_mkdir("info", adapter->eth_dir); ++ if (adapter->info_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ for (index = 0; ; index++) { ++ if (igb_proc_entries[index].read == NULL) ++ break; ++ if (!(create_proc_read_entry(igb_proc_entries[index].name, ++ 0444, ++ adapter->info_dir, ++ igb_proc_entries[index].read, ++ adapter))) { ++ ++ rc = -ENOMEM; ++ goto fail; ++ } ++ } ++ if (igb_thermal_present(adapter) == false) ++ goto exit; ++ ++ for (i = 0; i < E1000_MAX_SENSORS; i++) { ++ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) ++ continue; ++ ++ snprintf(buf, sizeof(buf), "sensor_%d", i); ++ adapter->therm_dir[i] = proc_mkdir(buf, 
adapter->info_dir); ++ if (adapter->therm_dir[i] == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ for (index = 0; ; index++) { ++ if (igb_internal_entries[index].read == NULL) ++ break; ++ /* ++ * therm_data struct contains pointer the read func ++ * will be needing ++ */ ++ adapter->therm_data[i].hw = &adapter->hw; ++ adapter->therm_data[i].sensor_data = ++ &adapter->hw.mac.thermal_sensor_data.sensor[i]; ++ ++ if (!(create_proc_read_entry( ++ igb_internal_entries[index].name, ++ 0444, ++ adapter->therm_dir[i], ++ igb_internal_entries[index].read, ++ &adapter->therm_data[i]))) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ } ++ } ++ goto exit; ++ ++fail: ++ igb_del_proc_entries(adapter); ++exit: ++ return rc; ++} ++ ++#endif /* !IGB_HWMON */ ++#endif /* IGB_PROCFS */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c +--- a/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-13 09:20:24.790171605 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-14 14:32:08.579567168 +0000 +@@ -1,31 +1,46 @@ +-/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 +- * +- * Copyright (C) 2011 Richard Cochran +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, see . +- */ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/****************************************************************************** ++ Copyright(c) 2011 Richard Cochran for some of the ++ 82576 and 82580 code ++******************************************************************************/ ++ ++#include "igb.h" ++ ++#ifdef HAVE_PTP_1588_CLOCK + #include + #include + #include + #include +- +-#include "igb.h" ++#include + + #define INCVALUE_MASK 0x7fffffff + #define ISGN 0x80000000 + +-/* The 82580 timesync updates the system timer every 8ns by 8ns, ++/* ++ * The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. 
+ * + * Neither the 82576 nor the 82580 offer registers wide enough to hold +@@ -74,9 +89,10 @@ + #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) + #define IGB_NBITS_82580 40 + +-static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); ++/* ++ * SYSTIM read access for the 82576 ++ */ + +-/* SYSTIM read access for the 82576 */ + static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) + { + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); +@@ -84,8 +100,8 @@ + u64 val; + u32 lo, hi; + +- lo = rd32(E1000_SYSTIML); +- hi = rd32(E1000_SYSTIMH); ++ lo = E1000_READ_REG(hw, E1000_SYSTIML); ++ hi = E1000_READ_REG(hw, E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; +@@ -93,21 +109,24 @@ + return val; + } + +-/* SYSTIM read access for the 82580 */ ++/* ++ * SYSTIM read access for the 82580 ++ */ ++ + static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) + { + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; +- u32 lo, hi; + u64 val; ++ u32 lo, hi; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ +- rd32(E1000_SYSTIMR); +- lo = rd32(E1000_SYSTIML); +- hi = rd32(E1000_SYSTIMH); ++ E1000_READ_REG(hw, E1000_SYSTIMR); ++ lo = E1000_READ_REG(hw, E1000_SYSTIML); ++ hi = E1000_READ_REG(hw, E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; +@@ -115,7 +134,10 @@ + return val; + } + +-/* SYSTIM read access for I210/I211 */ ++/* ++ * SYSTIM read access for I210/I211 ++ */ ++ + static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) + { + struct e1000_hw *hw = &adapter->hw; +@@ -125,9 +147,9 @@ + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ +- rd32(E1000_SYSTIMR); +- nsec = rd32(E1000_SYSTIML); +- sec = rd32(E1000_SYSTIMH); ++ E1000_READ_REG(hw, E1000_SYSTIMR); ++ nsec = E1000_READ_REG(hw, E1000_SYSTIML); ++ sec = E1000_READ_REG(hw, E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +@@ -138,11 +160,12 @@ + { + struct e1000_hw *hw = &adapter->hw; + +- /* Writing the SYSTIMR register is not necessary as it only provides ++ /* ++ * Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ +- wr32(E1000_SYSTIML, ts->tv_nsec); +- wr32(E1000_SYSTIMH, ts->tv_sec); ++ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec); ++ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec); + } + + /** +@@ -172,8 +195,8 @@ + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); +@@ -195,7 +218,10 @@ + } + } + +-/* PTP clock operations */ ++/* ++ * PTP clock operations ++ */ ++ + static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) + { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, +@@ -220,7 +246,8 @@ + else + incvalue += rate; + +- wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 ++ | (incvalue & INCVALUE_82576_MASK)); + + return 0; + } +@@ -242,11 +269,24 @@ + rate <<= 26; + rate = div_u64(rate, 1953125); + ++ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x ++ * as quickly. 
Account for this by dividing the adjustment by 2.5. ++ */ ++ if (hw->mac.type == e1000_i354) { ++ u32 status = E1000_READ_REG(hw, E1000_STATUS); ++ ++ if ((status & E1000_STATUS_2P5_SKU) && ++ !(status & E1000_STATUS_2P5_SKU_OVER)) { ++ rate <<= 1; ++ rate = div_u64(rate, 5); ++ } ++ } ++ + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + +- wr32(E1000_TIMINCA, inca); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, inca); + + return 0; + } +@@ -287,14 +327,13 @@ + return 0; + } + +-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, +- struct timespec *ts) ++static int igb_ptp_gettime64_82576(struct ptp_clock_info *ptp, ++ struct timespec64 *ts64) + { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; +- u32 remainder; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + +@@ -302,28 +341,99 @@ + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + +- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); +- ts->tv_nsec = remainder; ++ *ts64 = ns_to_timespec64(ns); + + return 0; + } + +-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, +- struct timespec *ts) ++static int igb_ptp_gettime64_i210(struct ptp_clock_info *ptp, ++ struct timespec64 *ts64) ++{ ++ struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ++ ptp_caps); ++ struct timespec ts; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&igb->tmreg_lock, flags); ++ ++ igb_ptp_read_i210(igb, &ts); ++ *ts64 = timespec_to_timespec64(ts); ++ ++ spin_unlock_irqrestore(&igb->tmreg_lock, flags); ++ ++ return 0; ++} ++ ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++static int igb_ptp_settime64_82576(struct ptp_clock_info *ptp, ++ const struct timespec64 *ts64) ++{ ++ struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ++ ptp_caps); ++ unsigned long flags; ++ u64 ns; ++ ++ ns = timespec64_to_ns(ts64); ++ ++ spin_lock_irqsave(&igb->tmreg_lock, flags); ++ ++ timecounter_init(&igb->tc, &igb->cc, ns); ++ ++ spin_unlock_irqrestore(&igb->tmreg_lock, flags); ++ ++ return 0; ++} ++ ++#endif ++static int igb_ptp_settime64_i210(struct ptp_clock_info *ptp, ++ const struct timespec64 *ts64) + { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); ++ struct timespec ts; + unsigned long flags; + ++ ts = timespec64_to_timespec(*ts64); + spin_lock_irqsave(&igb->tmreg_lock, flags); + +- igb_ptp_read_i210(igb, ts); ++ igb_ptp_write_i210(igb, &ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; + } + ++#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 ++static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, ++ struct timespec *ts) ++{ ++ struct timespec64 ts64; ++ int err; ++ ++ err = igb_ptp_gettime64_82576(ptp, &ts64); ++ if (err) ++ return err; ++ ++ *ts = timespec64_to_timespec(ts64); ++ ++ return 0; ++} ++ ++static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, ++ struct timespec *ts) ++{ ++ struct timespec64 ts64; ++ int err; ++ ++ err = igb_ptp_gettime64_i210(ptp, &ts64); ++ if (err) ++ return err; ++ ++ *ts = timespec64_to_timespec(ts64); ++ ++ return 0; ++} ++ + static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec *ts) + { +@@ -360,8 +470,9 @@ + return 0; + } + +-static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, +- struct ptp_clock_request *rq, int on) ++#endif ++static int igb_ptp_enable(struct ptp_clock_info *ptp, ++ struct ptp_clock_request *rq, int on) + { + return -EOPNOTSUPP; + } +@@ -372,8 +483,8 @@ + * + * This work function polls the TSYNCTXCTL valid bit to 
determine when a + * timestamp has been taken for the current stored skb. +- **/ +-static void igb_ptp_tx_work(struct work_struct *work) ++ */ ++void igb_ptp_tx_work(struct work_struct *work) + { + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); +@@ -393,7 +504,7 @@ + return; + } + +- tsynctxctl = rd32(E1000_TSYNCTXCTL); ++ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else +@@ -401,15 +512,16 @@ + schedule_work(&adapter->ptp_tx_work); + } + +-static void igb_ptp_overflow_check(struct work_struct *work) ++static void igb_ptp_overflow_check_82576(struct work_struct *work) + { + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); +- struct timespec ts; ++ struct timespec64 ts64; + +- igb->ptp_caps.gettime(&igb->ptp_caps, &ts); ++ igb_ptp_gettime64_82576(&igb->ptp_caps, &ts64); + +- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); ++ pr_debug("igb overflow check at %lld.%09lu\n", ++ (long long)ts64.tv_sec, ts64.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +@@ -423,11 +535,11 @@ + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. +- **/ ++ */ + void igb_ptp_rx_hang(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; +- u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); ++ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + unsigned long rx_event; + + if (hw->mac.type != e1000_82576) +@@ -448,7 +560,7 @@ + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { +- rd32(E1000_RXSTMPH); ++ E1000_READ_REG(hw, E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); +@@ -462,15 +574,15 @@ + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. +- **/ +-static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) ++ */ ++void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) + { + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + +- regval = rd32(E1000_TXSTMPL); +- regval |= (u64)rd32(E1000_TXSTMPH) << 32; ++ regval = E1000_READ_REG(hw, E1000_TXSTMPL); ++ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); +@@ -488,14 +600,15 @@ + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8. +- **/ ++ */ + void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, + unsigned char *va, + struct sk_buff *skb) + { + __le64 *regval = (__le64 *)va; + +- /* The timestamp is recorded in little endian format. ++ /* ++ * The timestamp is recorded in little endian format. + * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ +@@ -510,7 +623,7 @@ + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. 
+- **/ ++ */ + void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, + struct sk_buff *skb) + { +@@ -518,7 +631,8 @@ + struct e1000_hw *hw = &adapter->hw; + u64 regval; + +- /* If this bit is set, then the RX registers contain the time stamp. No ++ /* ++ * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register +@@ -528,11 +642,11 @@ + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ +- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) ++ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + +- regval = rd32(E1000_RXSTMPL); +- regval |= (u64)rd32(E1000_RXSTMPH) << 32; ++ regval = E1000_READ_REG(hw, E1000_RXSTMPL); ++ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + +@@ -576,6 +690,7 @@ + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". ++ * + */ + static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +@@ -631,7 +746,8 @@ + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: +- /* 82576 cannot timestamp all packets, which it needs to do to ++ /* ++ * 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { +@@ -651,9 +767,10 @@ + return 0; + } + +- /* Per-packet timestamping only works if all packets are ++ /* ++ * Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as +- * long as one Rx filter was configured. ++ * long as one rx filter was configured. 
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; +@@ -664,63 +781,63 @@ + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { +- regval = rd32(E1000_RXPBS); ++ regval = E1000_READ_REG(hw, E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; +- wr32(E1000_RXPBS, regval); ++ E1000_WRITE_REG(hw, E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ +- regval = rd32(E1000_TSYNCTXCTL); ++ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; +- wr32(E1000_TSYNCTXCTL, regval); ++ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ +- regval = rd32(E1000_TSYNCRXCTL); ++ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; +- wr32(E1000_TSYNCRXCTL, regval); ++ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ +- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); ++ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) +- wr32(E1000_ETQF(3), ++ E1000_WRITE_REG(hw, E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else +- wr32(E1000_ETQF(3), 0); ++ E1000_WRITE_REG(hw, E1000_ETQF(3), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ +- | E1000_FTQF_VF_BP /* VF not compared */ +- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ +- | E1000_FTQF_MASK); /* mask all inputs */ ++ | E1000_FTQF_VF_BP /* VF not compared */ ++ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamp */ ++ | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + +- wr32(E1000_IMIR(3), htons(PTP_EV_PORT)); +- wr32(E1000_IMIREXT(3), ++ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT)); ++ E1000_WRITE_REG(hw, E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ +- wr32(E1000_SPQF(3), htons(PTP_EV_PORT)); ++ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } +- wr32(E1000_FTQF(3), ftqf); ++ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf); + } else { +- wr32(E1000_FTQF(3), E1000_FTQF_MASK); ++ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK); + } +- wrfl(); ++ E1000_WRITE_FLUSH(hw); + + /* clear TX/RX time stamp registers, just to be sure */ +- regval = rd32(E1000_TXSTMPL); +- regval = rd32(E1000_TXSTMPH); +- regval = rd32(E1000_RXSTMPL); +- regval = rd32(E1000_RXSTMPH); ++ regval = E1000_READ_REG(hw, E1000_TXSTMPL); ++ regval = E1000_READ_REG(hw, E1000_TXSTMPH); ++ regval = E1000_READ_REG(hw, E1000_RXSTMPL); ++ regval = E1000_READ_REG(hw, E1000_RXSTMPH); + + return 0; + } +@@ -766,19 +883,25 @@ + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576; ++ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576; ++#else + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; +- adapter->ptp_caps.enable = igb_ptp_feature_enable; ++#endif ++ adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = 
igb_ptp_read_82576; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + /* Dial the nominal frequency. */ +- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, ++ INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; +@@ -786,15 +909,20 @@ + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576; ++ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576; ++#else + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; +- adapter->ptp_caps.enable = igb_ptp_feature_enable; ++#endif ++ adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + /* Enable the timer functions by clearing bit 31. */ +- wr32(E1000_TSAUXC, 0x0); ++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + break; + case e1000_i210: + case e1000_i211: +@@ -805,33 +933,38 @@ + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_i210; ++ adapter->ptp_caps.settime64 = igb_ptp_settime64_i210; ++#else + adapter->ptp_caps.gettime = igb_ptp_gettime_i210; + adapter->ptp_caps.settime = igb_ptp_settime_i210; +- adapter->ptp_caps.enable = igb_ptp_feature_enable; ++#endif ++ adapter->ptp_caps.enable = igb_ptp_enable; + /* Enable the timer functions by clearing bit 31. */ +- wr32(E1000_TSAUXC, 0x0); ++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); + break; + default: + adapter->ptp_clock = NULL; + return; + } + +- wrfl(); ++ E1000_WRITE_FLUSH(hw); + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + /* Initialize the clock and overflow work for devices that need it. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { +- struct timespec ts = ktime_to_timespec(ktime_get_real()); ++ struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + +- igb_ptp_settime_i210(&adapter->ptp_caps, &ts); ++ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, +- igb_ptp_overflow_check); ++ igb_ptp_overflow_check_82576); + + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +@@ -839,8 +972,8 @@ + + /* Initialize the time sync interrupts for devices that support it. 
*/ + if (hw->mac.type >= e1000_82580) { +- wr32(E1000_TSIM, TSYNC_INTERRUPTS); +- wr32(E1000_IMS, E1000_IMS_TS); ++ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); ++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); + } + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +@@ -869,8 +1002,8 @@ + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + break; + case e1000_i210: +@@ -915,17 +1048,18 @@ + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ +- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); ++ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | ++ INCVALUE_82576); + break; + case e1000_82580: +- case e1000_i354: + case e1000_i350: ++ case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Enable the timer functions and interrupts. */ +- wr32(E1000_TSAUXC, 0x0); +- wr32(E1000_TSIM, TSYNC_INTERRUPTS); +- wr32(E1000_IMS, E1000_IMS_TS); ++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); ++ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); ++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ +@@ -934,11 +1068,12 @@ + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { +- struct timespec ts = ktime_to_timespec(ktime_get_real()); ++ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real()); + +- igb_ptp_settime_i210(&adapter->ptp_caps, &ts); ++ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts64); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } + } ++#endif /* HAVE_PTP_1588_CLOCK */ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_regtest.h b/drivers/net/ethernet/intel/igb/igb_regtest.h +--- a/drivers/net/ethernet/intel/igb/igb_regtest.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_regtest.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,256 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ethtool register test data */ ++struct igb_reg_test { ++ u16 reg; ++ u16 reg_offset; ++ u16 array_len; ++ u16 test_type; ++ u32 mask; ++ u32 write; ++}; ++ ++/* In the hardware, registers are laid out either singly, in arrays ++ * spaced 0x100 bytes apart, or in contiguous tables. We assume ++ * most tests take place on arrays or single registers (handled ++ * as a single-element array) and special-case the tables. ++ * Table tests are always pattern tests. 
++ * ++ * We also make provision for some required setup steps by specifying ++ * registers to be written without any read-back testing. ++ */ ++ ++#define PATTERN_TEST 1 ++#define SET_READ_TEST 2 ++#define WRITE_NO_TEST 3 ++#define TABLE32_TEST 4 ++#define TABLE64_TEST_LO 5 ++#define TABLE64_TEST_HI 6 ++ ++/* i210 reg test */ ++static struct igb_reg_test reg_test_i210[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* RDH is read-only for i210, only test RDT. */ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x900FFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* i350 reg test */ ++static struct igb_reg_test reg_test_i350[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ /* VET is readonly on i350 */ ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* RDH is read-only for i350, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0xC3FFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 16, TABLE64_TEST_HI, ++ 0xC3FFFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* 82580 reg test */ ++static struct igb_reg_test reg_test_82580[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ /* RDH is read-only for 82580, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* 82576 reg test */ ++static struct igb_reg_test reg_test_82576[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ /* Enable all queues before testing. */ ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, ++ E1000_RXDCTL_QUEUE_ENABLE }, ++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, ++ E1000_RXDCTL_QUEUE_ENABLE }, ++ /* RDH is read-only for 82576, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, ++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, ++ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA2, 0, 8, TABLE64_TEST_HI, ++ 0x83FFFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++/* 82575 register test */ ++static struct igb_reg_test reg_test_82575[] = { ++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, ++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, ++ 0xFFFFFFFF }, ++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, ++ 0xFFFFFFFF }, ++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, ++ /* Enable all four RX queues before testing. */ ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, ++ E1000_RXDCTL_QUEUE_ENABLE }, ++ /* RDH is read-only for 82575, only test RDT. 
*/ ++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, ++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, ++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, ++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, ++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, ++ 0xFFFFFFFF }, ++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, ++ 0xFFFFFFFF }, ++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, ++ 0x000FFFFF }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, ++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, ++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, ++ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_LO, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { E1000_RA, 0, 16, TABLE64_TEST_HI, ++ 0x800FFFFF, 0xFFFFFFFF }, ++ { E1000_MTA, 0, 128, TABLE32_TEST, ++ 0xFFFFFFFF, 0xFFFFFFFF }, ++ { 0, 0, 0, 0 } ++}; ++ ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.c b/drivers/net/ethernet/intel/igb/igb_vmdq.c +--- a/drivers/net/ethernet/intel/igb/igb_vmdq.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_vmdq.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,433 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include ++ ++#include "igb.h" ++#include "igb_vmdq.h" ++#include ++ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++int igb_vmdq_open(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct net_device *main_netdev = adapter->netdev; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ if (test_bit(__IGB_DOWN, &adapter->state)) { ++ DPRINTK(DRV, WARNING, ++ "Open %s before opening this device.\n", ++ main_netdev->name); ++ return -EAGAIN; ++ } ++ netif_carrier_off(dev); ++ vadapter->tx_ring->vmdq_netdev = dev; ++ vadapter->rx_ring->vmdq_netdev = dev; ++ if (is_valid_ether_addr(dev->dev_addr)) { ++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); ++ igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); ++ } ++ netif_carrier_on(dev); ++ return 0; ++} ++ ++int igb_vmdq_close(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ netif_carrier_off(dev); ++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); ++ ++ vadapter->tx_ring->vmdq_netdev = NULL; ++ vadapter->rx_ring->vmdq_netdev = NULL; ++ return 0; ++} ++ ++netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ ++ return igb_xmit_frame_ring(skb, vadapter->tx_ring); ++} ++ ++struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ vadapter->net_stats.rx_packets += ++ E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0); ++ vadapter->net_stats.tx_packets += ++ E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0); ++ vadapter->net_stats.rx_bytes += ++ E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0); ++ vadapter->net_stats.tx_bytes += ++ E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0); ++ vadapter->net_stats.multicast += ++ E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue)); ++ E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0); ++ /* only return the current stats */ ++ return &vadapter->net_stats; ++} ++ ++/** ++ * igb_write_vm_addr_list - write unicast addresses to RAR table ++ * @netdev: network interface device structure ++ * ++ * Writes unicast address list to the RAR table. 
++ * Returns: -ENOMEM on failure/insufficient address space ++ * 0 on no addresses written ++ * X on writing X addresses to the RAR table ++ **/ ++static int igb_write_vm_addr_list(struct net_device *netdev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int count = 0; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ /* return ENOMEM indicating insufficient memory for addresses */ ++ if (netdev_uc_count(netdev) > igb_available_rars(adapter)) ++ return -ENOMEM; ++ ++ if (!netdev_uc_empty(netdev)) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *ha; ++#endif ++ netdev_for_each_uc_addr(ha, netdev) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ igb_del_mac_filter(adapter, ha->addr, hw_queue); ++ igb_add_mac_filter(adapter, ha->addr, hw_queue); ++#else ++ igb_del_mac_filter(adapter, ha->da_addr, hw_queue); ++ igb_add_mac_filter(adapter, ha->da_addr, hw_queue); ++#endif ++ count++; ++ } ++ } ++ return count; ++} ++ ++ ++#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */ ++void igb_vmdq_set_rx_mode(struct net_device *dev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 vmolr, rctl; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ /* Check for Promiscuous and All Multicast modes */ ++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue)); ++ ++ /* clear the affected bits */ ++ vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME | ++ E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE); ++ ++ if (dev->flags & IFF_PROMISC) { ++ vmolr |= E1000_VMOLR_UPE; ++ rctl = E1000_READ_REG(hw, E1000_RCTL); ++ rctl |= E1000_RCTL_UPE; ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ } else { ++ rctl = E1000_READ_REG(hw, E1000_RCTL); ++ rctl &= ~E1000_RCTL_UPE; ++ E1000_WRITE_REG(hw, E1000_RCTL, rctl); ++ if (dev->flags & IFF_ALLMULTI) { ++ vmolr |= E1000_VMOLR_MPME; ++ } else { ++ /* ++ * Write addresses to the MTA, if the attempt fails ++ * then we should just turn on promiscous mode so ++ * that we can at least receive multicast traffic ++ */ ++ if (igb_write_mc_addr_list(adapter->netdev) != 0) ++ vmolr |= E1000_VMOLR_ROMPE; ++ } ++#ifdef HAVE_SET_RX_MODE ++ /* ++ * Write addresses to available RAR registers, if there is not ++ * sufficient space to store all the addresses then enable ++ * unicast promiscous mode ++ */ ++ if (igb_write_vm_addr_list(dev) < 0) ++ vmolr |= E1000_VMOLR_UPE; ++#endif ++ } ++ E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr); ++ ++ return; ++} ++ ++int igb_vmdq_set_mac(struct net_device *dev, void *p) ++{ ++ struct sockaddr *addr = p; ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); ++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); ++ return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); ++} ++ ++int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ ++ if (adapter->netdev->mtu < new_mtu) { ++ DPRINTK(PROBE, INFO, ++ "Set MTU on %s to >= %d before changing MTU on %s\n", ++ adapter->netdev->name, new_mtu, dev->name); ++ return -EINVAL; ++ } ++ 
dev->mtu = new_mtu; ++ return 0; ++} ++ ++void igb_vmdq_tx_timeout(struct net_device *dev) ++{ ++ return; ++} ++ ++void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ vadapter->vlgrp = grp; ++ ++ igb_enable_vlan_tags(adapter); ++ E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0); ++ ++ return; ++} ++void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ struct net_device *v_netdev; ++#endif ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ /* attempt to add filter to vlvf array */ ++ igb_vlvf_set(adapter, vid, TRUE, hw_queue); ++ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ ++ /* Copy feature flags from netdev to the vlan netdev for this vid. ++ * This allows things like TSO to bubble down to our vlan device. ++ */ ++ v_netdev = vlan_group_get_device(vadapter->vlgrp, vid); ++ v_netdev->features |= adapter->netdev->features; ++ vlan_group_set_device(vadapter->vlgrp, vid, v_netdev); ++#endif ++ ++ return; ++} ++void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ int hw_queue = vadapter->rx_ring->queue_index + ++ adapter->vfs_allocated_count; ++ ++ vlan_group_set_device(vadapter->vlgrp, vid, NULL); ++ /* remove vlan from VLVF table array */ ++ igb_vlvf_set(adapter, vid, FALSE, hw_queue); ++ ++ ++ return; ++} ++ ++static int igb_vmdq_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *ecmd) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct e1000_hw *hw = &adapter->hw; ++ u32 status; ++ ++ if (hw->phy.media_type == e1000_media_type_copper) { ++ ++ ecmd->supported = (SUPPORTED_10baseT_Half | ++ SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | ++ SUPPORTED_100baseT_Full | ++ SUPPORTED_1000baseT_Full| ++ SUPPORTED_Autoneg | ++ SUPPORTED_TP); ++ ecmd->advertising = ADVERTISED_TP; ++ ++ if (hw->mac.autoneg == 1) { ++ ecmd->advertising |= ADVERTISED_Autoneg; ++ /* the e1000 autoneg seems to match ethtool nicely */ ++ ecmd->advertising |= hw->phy.autoneg_advertised; ++ } ++ ++ ecmd->port = PORT_TP; ++ ecmd->phy_address = hw->phy.addr; ++ } else { ++ ecmd->supported = (SUPPORTED_1000baseT_Full | ++ SUPPORTED_FIBRE | ++ SUPPORTED_Autoneg); ++ ++ ecmd->advertising = (ADVERTISED_1000baseT_Full | ++ ADVERTISED_FIBRE | ++ ADVERTISED_Autoneg); ++ ++ ecmd->port = PORT_FIBRE; ++ } ++ ++ ecmd->transceiver = XCVR_INTERNAL; ++ ++ status = E1000_READ_REG(hw, E1000_STATUS); ++ ++ if (status & E1000_STATUS_LU) { ++ ++ if ((status & E1000_STATUS_SPEED_1000) || ++ hw->phy.media_type != e1000_media_type_copper) ++ ethtool_cmd_speed_set(ecmd, SPEED_1000); ++ else if (status & E1000_STATUS_SPEED_100) ++ ethtool_cmd_speed_set(ecmd, SPEED_100); ++ else ++ ethtool_cmd_speed_set(ecmd, SPEED_10); ++ ++ if ((status & E1000_STATUS_FD) || ++ hw->phy.media_type != e1000_media_type_copper) ++ ecmd->duplex = DUPLEX_FULL; ++ else ++ ecmd->duplex = DUPLEX_HALF; ++ } else { ++ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); ++ 
ecmd->duplex = -1; ++ } ++ ++ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; ++ return 0; ++} ++ ++ ++static u32 igb_vmdq_get_msglevel(struct net_device *netdev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ return adapter->msg_enable; ++} ++ ++static void igb_vmdq_get_drvinfo(struct net_device *netdev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ struct net_device *main_netdev = adapter->netdev; ++ ++ strncpy(drvinfo->driver, igb_driver_name, 32); ++ strncpy(drvinfo->version, igb_driver_version, 32); ++ ++ strncpy(drvinfo->fw_version, "N/A", 4); ++ snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name, ++ vadapter->rx_ring->queue_index); ++ drvinfo->n_stats = 0; ++ drvinfo->testinfo_len = 0; ++ drvinfo->regdump_len = 0; ++} ++ ++static void igb_vmdq_get_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ ++ struct igb_ring *tx_ring = vadapter->tx_ring; ++ struct igb_ring *rx_ring = vadapter->rx_ring; ++ ++ ring->rx_max_pending = IGB_MAX_RXD; ++ ring->tx_max_pending = IGB_MAX_TXD; ++ ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; ++ ring->rx_pending = rx_ring->count; ++ ring->tx_pending = tx_ring->count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; ++} ++static u32 igb_vmdq_get_rx_csum(struct net_device *netdev) ++{ ++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); ++ struct igb_adapter *adapter = vadapter->real_adapter; ++ ++ return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags); ++} ++ ++ ++static struct ethtool_ops igb_vmdq_ethtool_ops = { ++ .get_settings = igb_vmdq_get_settings, ++ .get_drvinfo = igb_vmdq_get_drvinfo, ++ .get_link = ethtool_op_get_link, ++ .get_ringparam = igb_vmdq_get_ringparam, ++ .get_rx_csum = igb_vmdq_get_rx_csum, ++ .get_tx_csum = ethtool_op_get_tx_csum, ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++ .get_msglevel = igb_vmdq_get_msglevel, ++#ifdef NETIF_F_TSO ++ .get_tso = ethtool_op_get_tso, ++#endif ++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR ++ .get_perm_addr = ethtool_op_get_perm_addr, ++#endif ++}; ++ ++void igb_vmdq_set_ethtool_ops(struct net_device *netdev) ++{ ++ SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops); ++} ++ ++ ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++ +diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.h b/drivers/net/ethernet/intel/igb/igb_vmdq.h +--- a/drivers/net/ethernet/intel/igb/igb_vmdq.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_vmdq.h 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,43 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". 
++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IGB_VMDQ_H_ ++#define _IGB_VMDQ_H_ ++ ++#ifdef CONFIG_IGB_VMDQ_NETDEV ++int igb_vmdq_open(struct net_device *dev); ++int igb_vmdq_close(struct net_device *dev); ++netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev); ++struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev); ++void igb_vmdq_set_rx_mode(struct net_device *dev); ++int igb_vmdq_set_mac(struct net_device *dev, void *addr); ++int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu); ++void igb_vmdq_tx_timeout(struct net_device *dev); ++void igb_vmdq_vlan_rx_register(struct net_device *dev, ++ struct vlan_group *grp); ++void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); ++void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); ++void igb_vmdq_set_ethtool_ops(struct net_device *netdev); ++#endif /* CONFIG_IGB_VMDQ_NETDEV */ ++#endif /* _IGB_VMDQ_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.c b/drivers/net/ethernet/intel/igb/kcompat.c +--- a/drivers/net/ethernet/intel/igb/kcompat.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/kcompat.c 2016-11-14 14:32:08.579567168 +0000 +@@ -0,0 +1,2082 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "igb.h" ++#include "kcompat.h" ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++/* From lib/vsprintf.c */ ++#include ++ ++static int skip_atoi(const char **s) ++{ ++ int i=0; ++ ++ while (isdigit(**s)) ++ i = i*10 + *((*s)++) - '0'; ++ return i; ++} ++ ++#define _kc_ZEROPAD 1 /* pad with zero */ ++#define _kc_SIGN 2 /* unsigned/signed long */ ++#define _kc_PLUS 4 /* show plus */ ++#define _kc_SPACE 8 /* space if plus */ ++#define _kc_LEFT 16 /* left justified */ ++#define _kc_SPECIAL 32 /* 0x */ ++#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ ++ ++static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) ++{ ++ char c,sign,tmp[66]; ++ const char *digits; ++ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; ++ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; ++ int i; ++ ++ digits = (type & _kc_LARGE) ? 
large_digits : small_digits; ++ if (type & _kc_LEFT) ++ type &= ~_kc_ZEROPAD; ++ if (base < 2 || base > 36) ++ return 0; ++ c = (type & _kc_ZEROPAD) ? '0' : ' '; ++ sign = 0; ++ if (type & _kc_SIGN) { ++ if (num < 0) { ++ sign = '-'; ++ num = -num; ++ size--; ++ } else if (type & _kc_PLUS) { ++ sign = '+'; ++ size--; ++ } else if (type & _kc_SPACE) { ++ sign = ' '; ++ size--; ++ } ++ } ++ if (type & _kc_SPECIAL) { ++ if (base == 16) ++ size -= 2; ++ else if (base == 8) ++ size--; ++ } ++ i = 0; ++ if (num == 0) ++ tmp[i++]='0'; ++ else while (num != 0) ++ tmp[i++] = digits[do_div(num,base)]; ++ if (i > precision) ++ precision = i; ++ size -= precision; ++ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { ++ while(size-->0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ } ++ if (sign) { ++ if (buf <= end) ++ *buf = sign; ++ ++buf; ++ } ++ if (type & _kc_SPECIAL) { ++ if (base==8) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } else if (base==16) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ if (buf <= end) ++ *buf = digits[33]; ++ ++buf; ++ } ++ } ++ if (!(type & _kc_LEFT)) { ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = c; ++ ++buf; ++ } ++ } ++ while (i < precision--) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } ++ while (i-- > 0) { ++ if (buf <= end) ++ *buf = tmp[i]; ++ ++buf; ++ } ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ return buf; ++} ++ ++int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) ++{ ++ int len; ++ unsigned long long num; ++ int i, base; ++ char *str, *end, c; ++ const char *s; ++ ++ int flags; /* flags to number() */ ++ ++ int field_width; /* width of output field */ ++ int precision; /* min. # of digits for integers; max ++ number of chars for from string */ ++ int qualifier; /* 'h', 'l', or 'L' for integer fields */ ++ /* 'z' support added 23/7/1999 S.H. 
*/ ++ /* 'z' changed to 'Z' --davidm 1/25/99 */ ++ ++ str = buf; ++ end = buf + size - 1; ++ ++ if (end < buf - 1) { ++ end = ((void *) -1); ++ size = end - buf + 1; ++ } ++ ++ for (; *fmt ; ++fmt) { ++ if (*fmt != '%') { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ continue; ++ } ++ ++ /* process flags */ ++ flags = 0; ++ repeat: ++ ++fmt; /* this also skips first '%' */ ++ switch (*fmt) { ++ case '-': flags |= _kc_LEFT; goto repeat; ++ case '+': flags |= _kc_PLUS; goto repeat; ++ case ' ': flags |= _kc_SPACE; goto repeat; ++ case '#': flags |= _kc_SPECIAL; goto repeat; ++ case '0': flags |= _kc_ZEROPAD; goto repeat; ++ } ++ ++ /* get field width */ ++ field_width = -1; ++ if (isdigit(*fmt)) ++ field_width = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ field_width = va_arg(args, int); ++ if (field_width < 0) { ++ field_width = -field_width; ++ flags |= _kc_LEFT; ++ } ++ } ++ ++ /* get the precision */ ++ precision = -1; ++ if (*fmt == '.') { ++ ++fmt; ++ if (isdigit(*fmt)) ++ precision = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ precision = va_arg(args, int); ++ } ++ if (precision < 0) ++ precision = 0; ++ } ++ ++ /* get the conversion qualifier */ ++ qualifier = -1; ++ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { ++ qualifier = *fmt; ++ ++fmt; ++ } ++ ++ /* default base */ ++ base = 10; ++ ++ switch (*fmt) { ++ case 'c': ++ if (!(flags & _kc_LEFT)) { ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ c = (unsigned char) va_arg(args, int); ++ if (str <= end) ++ *str = c; ++ ++str; ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 's': ++ s = va_arg(args, char *); ++ if (!s) ++ s = ""; ++ ++ len = strnlen(s, precision); ++ ++ if (!(flags & _kc_LEFT)) { ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ for (i = 0; i < len; ++i) { ++ if (str <= end) ++ *str = *s; ++ ++str; ++s; ++ } ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 'p': ++ if (field_width == -1) { ++ field_width = 2*sizeof(void *); ++ flags |= _kc_ZEROPAD; ++ } ++ str = number(str, end, ++ (unsigned long) va_arg(args, void *), ++ 16, field_width, precision, flags); ++ continue; ++ ++ ++ case 'n': ++ /* FIXME: ++ * What does C99 say about the overflow case here? 
*/ ++ if (qualifier == 'l') { ++ long * ip = va_arg(args, long *); ++ *ip = (str - buf); ++ } else if (qualifier == 'Z') { ++ size_t * ip = va_arg(args, size_t *); ++ *ip = (str - buf); ++ } else { ++ int * ip = va_arg(args, int *); ++ *ip = (str - buf); ++ } ++ continue; ++ ++ case '%': ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ continue; ++ ++ /* integer number formats - set up the flags and "break" */ ++ case 'o': ++ base = 8; ++ break; ++ ++ case 'X': ++ flags |= _kc_LARGE; ++ case 'x': ++ base = 16; ++ break; ++ ++ case 'd': ++ case 'i': ++ flags |= _kc_SIGN; ++ case 'u': ++ break; ++ ++ default: ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ if (*fmt) { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ } else { ++ --fmt; ++ } ++ continue; ++ } ++ if (qualifier == 'L') ++ num = va_arg(args, long long); ++ else if (qualifier == 'l') { ++ num = va_arg(args, unsigned long); ++ if (flags & _kc_SIGN) ++ num = (signed long) num; ++ } else if (qualifier == 'Z') { ++ num = va_arg(args, size_t); ++ } else if (qualifier == 'h') { ++ num = (unsigned short) va_arg(args, int); ++ if (flags & _kc_SIGN) ++ num = (signed short) num; ++ } else { ++ num = va_arg(args, unsigned int); ++ if (flags & _kc_SIGN) ++ num = (signed int) num; ++ } ++ str = number(str, end, num, base, ++ field_width, precision, flags); ++ } ++ if (str <= end) ++ *str = '\0'; ++ else if (size > 0) ++ /* don't write out a null byte if the buf size is zero */ ++ *end = '\0'; ++ /* the trailing null byte doesn't count towards the total ++ * ++str; ++ */ ++ return str-buf; ++} ++ ++int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = _kc_vsnprintf(buf,size,fmt,args); ++ va_end(args); ++ return i; ++} ++#endif /* < 2.4.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#if defined(CONFIG_HIGHMEM) ++ ++#ifndef PCI_DRAM_OFFSET ++#define PCI_DRAM_OFFSET 0 ++#endif ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + ++ PCI_DRAM_OFFSET); ++} ++ ++#else /* CONFIG_HIGHMEM */ ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return pci_map_single(dev, (void *)page_address(page) + offset, size, ++ direction); ++} ++ ++#endif /* CONFIG_HIGHMEM */ ++ ++void ++_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, ++ int direction) ++{ ++ return pci_unmap_single(dev, dma_addr, size, direction); ++} ++ ++#endif /* 2.4.13 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++int ++_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) ++{ ++ if (!pci_dma_supported(dev, mask)) ++ return -EIO; ++ dev->dma_mask = mask; ++ return 0; ++} ++ ++int ++_kc_pci_request_regions(struct pci_dev *dev, char *res_name) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) { ++ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } else if 
(pci_resource_flags(dev, i) & IORESOURCE_MEM) { ++ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } ++ } ++ return 0; ++} ++ ++void ++_kc_pci_release_regions(struct pci_dev *dev) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) ++ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ ++ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) ++ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ } ++} ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++struct net_device * ++_kc_alloc_etherdev(int sizeof_priv) ++{ ++ struct net_device *dev; ++ int alloc_size; ++ ++ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; ++ dev = kzalloc(alloc_size, GFP_KERNEL); ++ if (!dev) ++ return NULL; ++ ++ if (sizeof_priv) ++ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); ++ dev->name[0] = '\0'; ++ ether_setup(dev); ++ ++ return dev; ++} ++ ++int ++_kc_is_valid_ether_addr(u8 *addr) ++{ ++ const char zaddr[6] = { 0, }; ++ ++ return !(addr[0] & 1) && memcmp(addr, zaddr, 6); ++} ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++int ++_kc_pci_set_power_state(struct pci_dev *dev, int state) ++{ ++ return 0; ++} ++ ++int ++_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) ++{ ++ return 0; ++} ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, ++ int off, int size) ++{ ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ frag->page = page; ++ frag->page_offset = off; ++ frag->size = size; ++ skb_shinfo(skb)->nr_frags = i + 1; ++} ++ ++/* ++ * Original Copyright: ++ * find_next_bit.c: fallback find next bit implementation ++ * ++ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. ++ * Written by David Howells (dhowells@redhat.com) ++ */ ++ ++/** ++ * find_next_bit - find the next set bit in a memory region ++ * @addr: The address to base the search on ++ * @offset: The bitnumber to start searching at ++ * @size: The maximum size to search ++ */ ++unsigned long find_next_bit(const unsigned long *addr, unsigned long size, ++ unsigned long offset) ++{ ++ const unsigned long *p = addr + BITOP_WORD(offset); ++ unsigned long result = offset & ~(BITS_PER_LONG-1); ++ unsigned long tmp; ++ ++ if (offset >= size) ++ return size; ++ size -= result; ++ offset %= BITS_PER_LONG; ++ if (offset) { ++ tmp = *(p++); ++ tmp &= (~0UL << offset); ++ if (size < BITS_PER_LONG) ++ goto found_first; ++ if (tmp) ++ goto found_middle; ++ size -= BITS_PER_LONG; ++ result += BITS_PER_LONG; ++ } ++ while (size & ~(BITS_PER_LONG-1)) { ++ if ((tmp = *(p++))) ++ goto found_middle; ++ result += BITS_PER_LONG; ++ size -= BITS_PER_LONG; ++ } ++ if (!size) ++ return result; ++ tmp = *p; ++ ++found_first: ++ tmp &= (~0UL >> (BITS_PER_LONG - size)); ++ if (tmp == 0UL) /* Are any bits set? */ ++ return result + size; /* Nope. */ ++found_middle: ++ return result + ffs(tmp); ++} ++ ++size_t _kc_strlcpy(char *dest, const char *src, size_t size) ++{ ++ size_t ret = strlen(src); ++ ++ if (size) { ++ size_t len = (ret >= size) ? 
size - 1 : ret; ++ memcpy(dest, src, len); ++ dest[len] = '\0'; ++ } ++ return ret; ++} ++ ++#ifndef do_div ++#if BITS_PER_LONG == 32 ++uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) ++{ ++ uint64_t rem = *n; ++ uint64_t b = base; ++ uint64_t res, d = 1; ++ uint32_t high = rem >> 32; ++ ++ /* Reduce the thing a bit first */ ++ res = 0; ++ if (high >= base) { ++ high /= base; ++ res = (uint64_t) high << 32; ++ rem -= (uint64_t) (high*base) << 32; ++ } ++ ++ while ((int64_t)b > 0 && b < rem) { ++ b = b+b; ++ d = d+d; ++ } ++ ++ do { ++ if (rem >= b) { ++ rem -= b; ++ res += d; ++ } ++ b >>= 1; ++ d >>= 1; ++ } while (d); ++ ++ *n = res; ++ return rem; ++} ++#endif /* BITS_PER_LONG == 32 */ ++#endif /* do_div */ ++#endif /* 2.6.0 => 2.4.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = vsnprintf(buf, size, fmt, args); ++ va_end(args); ++ return (i >= size) ? (size - 1) : i; ++} ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) ++char *_kc_kstrdup(const char *s, unsigned int gfp) ++{ ++ size_t len; ++ char *buf; ++ ++ if (!s) ++ return NULL; ++ ++ len = strlen(s) + 1; ++ buf = kmalloc(len, gfp); ++ if (buf) ++ memcpy(buf, s, len); ++ return buf; ++} ++#endif /* < 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++void *_kc_kzalloc(size_t size, int flags) ++{ ++ void *ret = kmalloc(size, flags); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif /* <= 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++int _kc_skb_pad(struct sk_buff *skb, int pad) ++{ ++ int ntail; ++ ++ /* If the skbuff is non linear tailroom is always zero.. 
*/ ++ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { ++ memset(skb->data+skb->len, 0, pad); ++ return 0; ++ } ++ ++ ntail = skb->data_len + pad - (skb->end - skb->tail); ++ if (likely(skb_cloned(skb) || ntail > 0)) { ++ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) ++ goto free_skb; ++ } ++ ++#ifdef MAX_SKB_FRAGS ++ if (skb_is_nonlinear(skb) && ++ !__pskb_pull_tail(skb, skb->data_len)) ++ goto free_skb; ++ ++#endif ++ memset(skb->data + skb->len, 0, pad); ++ return 0; ++ ++free_skb: ++ kfree_skb(skb); ++ return -ENOMEM; ++} ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) ++int _kc_pci_save_state(struct pci_dev *pdev) ++{ ++ struct net_device *netdev = pci_get_drvdata(pdev); ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset, pcie_link_status; ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++ /* no ->dev for 2.4 kernels */ ++ WARN_ON(pdev->dev.driver_data == NULL); ++#endif ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset) { ++ if (!pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ } ++ pci_config_space_ich8lan(); ++#ifdef HAVE_PCI_ERS ++ if (adapter->config_space == NULL) ++#else ++ WARN_ON(adapter->config_space != NULL); ++#endif ++ adapter->config_space = kmalloc(size, GFP_KERNEL); ++ if (!adapter->config_space) { ++ printk(KERN_ERR "Out of memory in pci_save_state\n"); ++ return -ENOMEM; ++ } ++ for (i = 0; i < (size / 4); i++) ++ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); ++ return 0; ++} ++ ++void _kc_pci_restore_state(struct pci_dev *pdev) ++{ ++ struct net_device *netdev = pci_get_drvdata(pdev); ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset; ++ u16 pcie_link_status; ++ ++ if (adapter->config_space != NULL) { ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset && ++ !pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ ++ pci_config_space_ich8lan(); ++ for (i = 0; i < (size / 4); i++) ++ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); ++#ifndef HAVE_PCI_ERS ++ kfree(adapter->config_space); ++ adapter->config_space = NULL; ++#endif ++ } ++} ++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ ++ ++#ifdef HAVE_PCI_ERS ++void _kc_free_netdev(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ ++ kfree(adapter->config_space); ++#ifdef CONFIG_SYSFS ++ if (netdev->reg_state == NETREG_UNINITIALIZED) { ++ kfree((char *)netdev - netdev->padded); ++ } else { ++ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); ++ netdev->reg_state = NETREG_RELEASED; ++ class_device_put(&netdev->class_dev); ++ } ++#else ++ kfree((char *)netdev - netdev->padded); ++#endif ++} ++#endif ++ ++void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) ++{ ++ void *p; ++ ++ p = kzalloc(len, gfp); ++ if (p) ++ memcpy(p, src, len); ++ return p; ++} ++#endif /* <= 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) ++{ ++ return ((struct adapter_struct *)netdev_priv(netdev))->pdev; ++} ++#endif /* < 2.6.21 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++/* hexdump code taken from lib/hexdump.c */ ++static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, ++ int groupsize, unsigned char *linebuf, ++ size_t linebuflen, bool ascii) ++{ ++ const u8 *ptr = buf; ++ u8 ch; ++ int j, lx = 0; ++ int ascii_column; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ if (!len) ++ goto nil; ++ if (len > rowsize) /* limit to one line at a time */ ++ len = rowsize; ++ if ((len % groupsize) != 0) /* no mixed size output */ ++ groupsize = 1; ++ ++ switch (groupsize) { ++ case 8: { ++ const u64 *ptr8 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%16.16llx", j ? " " : "", ++ (unsigned long long)*(ptr8 + j)); ++ ascii_column = 17 * ngroups + 2; ++ break; ++ } ++ ++ case 4: { ++ const u32 *ptr4 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%8.8x", j ? " " : "", *(ptr4 + j)); ++ ascii_column = 9 * ngroups + 2; ++ break; ++ } ++ ++ case 2: { ++ const u16 *ptr2 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%4.4x", j ? " " : "", *(ptr2 + j)); ++ ascii_column = 5 * ngroups + 2; ++ break; ++ } ++ ++ default: ++ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ++ ch = ptr[j]; ++ linebuf[lx++] = hex_asc(ch >> 4); ++ linebuf[lx++] = hex_asc(ch & 0x0f); ++ linebuf[lx++] = ' '; ++ } ++ if (j) ++ lx--; ++ ++ ascii_column = 3 * rowsize + 2; ++ break; ++ } ++ if (!ascii) ++ goto nil; ++ ++ while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) ++ linebuf[lx++] = ' '; ++ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) ++ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] ++ : '.'; ++nil: ++ linebuf[lx++] = '\0'; ++} ++ ++void _kc_print_hex_dump(const char *level, ++ const char *prefix_str, int prefix_type, ++ int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii) ++{ ++ const u8 *ptr = buf; ++ int i, linelen, remaining = len; ++ unsigned char linebuf[200]; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ for (i = 0; i < len; i += rowsize) { ++ linelen = min(remaining, rowsize); ++ remaining -= rowsize; ++ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, ++ linebuf, sizeof(linebuf), ascii); ++ ++ switch (prefix_type) { ++ case DUMP_PREFIX_ADDRESS: ++ printk("%s%s%*p: %s\n", level, prefix_str, ++ (int)(2 * sizeof(void *)), ptr + i, linebuf); ++ break; ++ case DUMP_PREFIX_OFFSET: ++ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); ++ break; ++ default: ++ printk("%s%s%s\n", level, prefix_str, linebuf); ++ break; ++ } ++ } ++} ++ ++#ifdef HAVE_I2C_SUPPORT ++struct i2c_client * ++_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) ++{ ++ struct i2c_client *client; ++ int status; ++ ++ client = kzalloc(sizeof *client, GFP_KERNEL); ++ if (!client) ++ return NULL; ++ ++ client->adapter = adap; ++ ++ client->dev.platform_data = info->platform_data; ++ ++ client->flags = info->flags; ++ client->addr = info->addr; ++ ++ strlcpy(client->name, info->type, sizeof(client->name)); ++ ++ /* Check for address business */ ++ status = i2c_check_addr(adap, client->addr); ++ if (status) ++ goto out_err; ++ ++ client->dev.parent = &client->adapter->dev; ++ client->dev.bus = &i2c_bus_type; ++ ++ status = i2c_attach_client(client); ++ if (status) ++ goto out_err; ++ ++ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", ++ client->name, dev_name(&client->dev)); ++ ++ return client; ++ ++out_err: ++ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x " ++ "(%d)\n", client->name, client->addr, status); ++ kfree(client); ++ return NULL; ++} ++#endif /* HAVE_I2C_SUPPORT */ ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifdef NAPI ++struct net_device *napi_to_poll_dev(const struct napi_struct *napi) ++{ ++ struct adapter_q_vector *q_vector = container_of(napi, ++ struct adapter_q_vector, ++ napi); ++ return &q_vector->poll_dev; ++} ++ ++int __kc_adapter_clean(struct net_device *netdev, int *budget) ++{ ++ int work_done; ++ int work_to_do = min(*budget, netdev->quota); ++ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ ++ struct napi_struct *napi = netdev->priv; ++ work_done = napi->poll(napi, work_to_do); ++ *budget -= work_done; ++ netdev->quota -= work_done; ++ return (work_done >= work_to_do) ? 
1 : 0; ++} ++#endif /* NAPI */ ++#endif /* <= 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) ++{ ++ struct pci_dev *parent = pdev->bus->self; ++ u16 link_state; ++ int pos; ++ ++ if (!parent) ++ return; ++ ++ pos = pci_find_capability(parent, PCI_CAP_ID_EXP); ++ if (pos) { ++ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); ++ link_state &= ~state; ++ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); ++ } ++} ++#endif /* < 2.6.26 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++#ifdef HAVE_TX_MQ ++void _kc_netif_tx_stop_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_stop_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_stop_subqueue(netdev, i); ++} ++void _kc_netif_tx_wake_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_wake_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_wake_subqueue(netdev, i); ++} ++void _kc_netif_tx_start_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_start_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_start_subqueue(netdev, i); ++} ++#endif /* HAVE_TX_MQ */ ++ ++void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) ++{ ++ va_list args; ++ ++ printk(KERN_WARNING "------------[ cut here ]------------\n"); ++ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); ++ va_start(args, fmt); ++ vprintk(fmt, args); ++ va_end(args); ++ ++ dump_stack(); ++} ++#endif /* __VMKLNX__ */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++ ++int ++_kc_pci_prepare_to_sleep(struct pci_dev *dev) ++{ ++ pci_power_t target_state; ++ int error; ++ ++ target_state = pci_choose_state(dev, PMSG_SUSPEND); ++ ++ pci_enable_wake(dev, target_state, true); ++ ++ error = pci_set_power_state(dev, target_state); ++ ++ if (error) ++ pci_enable_wake(dev, target_state, false); ++ ++ return error; ++} ++ ++int ++_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) ++{ ++ int err; ++ ++ err = pci_enable_wake(dev, PCI_D3cold, enable); ++ if (err) ++ goto out; ++ ++ err = pci_enable_wake(dev, PCI_D3hot, enable); ++ ++out: ++ return err; ++} ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) ++{ ++ u16 old_cmd, cmd; ++ ++ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); ++ if (enable) ++ cmd = old_cmd | PCI_COMMAND_MASTER; ++ else ++ cmd = old_cmd & ~PCI_COMMAND_MASTER; ++ if (cmd != old_cmd) { ++ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", ++ enable ? 
"enabling" : "disabling"); ++ pci_write_config_word(pdev, PCI_COMMAND, cmd); ++ } ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) ++ pdev->is_busmaster = enable; ++#endif ++} ++ ++void _kc_pci_clear_master(struct pci_dev *dev) ++{ ++ __kc_pci_set_master(dev, false); ++} ++#endif /* < 2.6.29 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) ++{ ++ int num_vf = 0; ++#ifdef CONFIG_PCI_IOV ++ struct pci_dev *vfdev; ++ ++ /* loop through all ethernet devices starting at PF dev */ ++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); ++ while (vfdev) { ++ if (vfdev->is_virtfn && vfdev->physfn == dev) ++ num_vf++; ++ ++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); ++ } ++ ++#endif ++ return num_vf; ++} ++#endif /* RHEL_RELEASE_CODE */ ++#endif /* < 2.6.34 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++#ifdef HAVE_TX_MQ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) ++#ifndef CONFIG_NETDEVICES_MULTIQUEUE ++int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) ++{ ++ unsigned int real_num = dev->real_num_tx_queues; ++ struct Qdisc *qdisc; ++ int i; ++ ++ if (txq < 1 || txq > dev->num_tx_queues) ++ return -EINVAL; ++ ++ else if (txq > real_num) ++ dev->real_num_tx_queues = txq; ++ else if (txq < real_num) { ++ dev->real_num_tx_queues = txq; ++ for (i = txq; i < dev->num_tx_queues; i++) { ++ qdisc = netdev_get_tx_queue(dev, i)->qdisc; ++ if (qdisc) { ++ spin_lock_bh(qdisc_lock(qdisc)); ++ qdisc_reset(qdisc); ++ spin_unlock_bh(qdisc_lock(qdisc)); ++ } ++ } ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ ++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ ++#endif /* HAVE_TX_MQ */ ++ ++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, ++ const void __user *from, size_t count) ++{ ++ loff_t pos = *ppos; ++ size_t res; ++ ++ if (pos < 0) ++ return -EINVAL; ++ if (pos >= available || !count) ++ return 0; ++ if (count > available - pos) ++ count = available - pos; ++ res = copy_from_user(to + pos, from, count); ++ if (res == count) ++ return -EFAULT; ++ count -= res; ++ *ppos = pos + count; ++ return count; ++} ++ ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++static const u32 _kc_flags_dup_features = ++ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); ++ ++u32 _kc_ethtool_op_get_flags(struct net_device *dev) ++{ ++ return dev->features & _kc_flags_dup_features; ++} ++ ++int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) ++{ ++ if (data & ~supported) ++ return -EINVAL; ++ ++ dev->features = ((dev->features & ~_kc_flags_dup_features) | ++ (data & _kc_flags_dup_features)); ++ return 0; ++} ++#endif /* < 2.6.36 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) ++ ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ ++#endif /* < 2.6.39 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) ++void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, ++ int off, int size, unsigned int truesize) ++{ ++ 
skb_fill_page_desc(skb, i, page, off, size); ++ skb->len += size; ++ skb->data_len += size; ++ skb->truesize += truesize; ++} ++ ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++int _kc_simple_open(struct inode *inode, struct file *file) ++{ ++ if (inode->i_private) ++ file->private_data = inode->i_private; ++ ++ return 0; ++} ++#endif /* SLE_VERSION < 11,3,0 */ ++ ++#endif /* < 3.4.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) ++static inline int __kc_pcie_cap_version(struct pci_dev *dev) ++{ ++ int pos; ++ u16 reg16; ++ ++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return 0; ++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); ++ return reg16 & PCI_EXP_FLAGS_VERS; ++} ++ ++static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) ++{ ++ return true; ++} ++ ++static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ type == PCI_EXP_TYPE_ENDPOINT || ++ type == PCI_EXP_TYPE_LEG_END; ++} ++ ++static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ int pos; ++ u16 pcie_flags_reg; ++ ++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return false; ++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ (type == PCI_EXP_TYPE_DOWNSTREAM && ++ pcie_flags_reg & PCI_EXP_FLAGS_SLOT); ++} ++ ++static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ type == PCI_EXP_TYPE_RC_EC; ++} ++ ++static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) ++{ ++ if (!pci_is_pcie(dev)) ++ return false; ++ ++ switch (pos) { ++ case PCI_EXP_FLAGS_TYPE: ++ return true; ++ case PCI_EXP_DEVCAP: ++ case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVSTA: ++ return __kc_pcie_cap_has_devctl(dev); ++ case PCI_EXP_LNKCAP: ++ case PCI_EXP_LNKCTL: ++ case PCI_EXP_LNKSTA: ++ return __kc_pcie_cap_has_lnkctl(dev); ++ case PCI_EXP_SLTCAP: ++ case PCI_EXP_SLTCTL: ++ case PCI_EXP_SLTSTA: ++ return __kc_pcie_cap_has_sltctl(dev); ++ case PCI_EXP_RTCTL: ++ case PCI_EXP_RTCAP: ++ case PCI_EXP_RTSTA: ++ return __kc_pcie_cap_has_rtctl(dev); ++ case PCI_EXP_DEVCAP2: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCAP2: ++ case PCI_EXP_LNKCTL2: ++ case PCI_EXP_LNKSTA2: ++ return __kc_pcie_cap_version(dev) > 1; ++ default: ++ return false; ++ } ++} ++ ++/* ++ * Note that these accessor functions are only for the "PCI Express ++ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the ++ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) ++ */ ++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) ++{ ++ int ret; ++ ++ *val = 0; ++ if (pos & 1) ++ return -EINVAL; ++ ++ if (__kc_pcie_capability_reg_implemented(dev, pos)) { ++ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); ++ /* ++ * Reset *val to 0 if pci_read_config_word() fails, it may ++ * have been written as 0xFFFF if hardware error happens ++ * during pci_read_config_word().
++ */ ++ if (ret) ++ *val = 0; ++ return ret; ++ } ++ ++ /* ++ * For Functions that do not implement the Slot Capabilities, ++ * Slot Status, and Slot Control registers, these spaces must ++ * be hardwired to 0b, with the exception of the Presence Detect ++ * State bit in the Slot Status register of Downstream Ports, ++ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) ++ */ ++ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && ++ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { ++ *val = PCI_EXP_SLTSTA_PDS; ++ } ++ ++ return 0; ++} ++ ++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) ++{ ++ if (pos & 1) ++ return -EINVAL; ++ ++ if (!__kc_pcie_capability_reg_implemented(dev, pos)) ++ return 0; ++ ++ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); ++} ++ ++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, ++ u16 clear, u16 set) ++{ ++ int ret; ++ u16 val; ++ ++ ret = __kc_pcie_capability_read_word(dev, pos, &val); ++ if (!ret) { ++ val &= ~clear; ++ val |= set; ++ ret = __kc_pcie_capability_write_word(dev, pos, val); ++ } ++ ++ return ret; ++} ++ ++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, ++ u16 clear) ++{ ++ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); ++} ++#endif /* < 3.7.0 */ ++ ++/****************************************************************************** ++ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, ++ * inferred copyright from kernel ++ */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) ++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, ++ int target, unsigned short *fragoff, int *flags) ++{ ++ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); ++ u8 nexthdr = ipv6_hdr(skb)->nexthdr; ++ unsigned int len; ++ bool found; ++ ++#define __KC_IP6_FH_F_FRAG BIT(0) ++#define __KC_IP6_FH_F_AUTH BIT(1) ++#define __KC_IP6_FH_F_SKIP_RH BIT(2) ++ ++ if (fragoff) ++ *fragoff = 0; ++ ++ if (*offset) { ++ struct ipv6hdr _ip6, *ip6; ++ ++ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); ++ if (!ip6 || (ip6->version != 6)) { ++ printk(KERN_ERR "IPv6 header not found\n"); ++ return -EBADMSG; ++ } ++ start = *offset + sizeof(struct ipv6hdr); ++ nexthdr = ip6->nexthdr; ++ } ++ len = skb->len - start; ++ ++ do { ++ struct ipv6_opt_hdr _hdr, *hp; ++ unsigned int hdrlen; ++ found = (nexthdr == target); ++ ++ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { ++ if (target < 0 || found) ++ break; ++ return -ENOENT; ++ } ++ ++ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); ++ if (!hp) ++ return -EBADMSG; ++ ++ if (nexthdr == NEXTHDR_ROUTING) { ++ struct ipv6_rt_hdr _rh, *rh; ++ ++ rh = skb_header_pointer(skb, start, sizeof(_rh), ++ &_rh); ++ if (!rh) ++ return -EBADMSG; ++ ++ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && ++ rh->segments_left == 0) ++ found = false; ++ } ++ ++ if (nexthdr == NEXTHDR_FRAGMENT) { ++ unsigned short _frag_off; ++ __be16 *fp; ++ ++ if (flags) /* Indicate that this is a fragment */ ++ *flags |= __KC_IP6_FH_F_FRAG; ++ fp = skb_header_pointer(skb, ++ start+offsetof(struct frag_hdr, ++ frag_off), ++ sizeof(_frag_off), ++ &_frag_off); ++ if (!fp) ++ return -EBADMSG; ++ ++ _frag_off = ntohs(*fp) & ~0x7; ++ if (_frag_off) { ++ if (target < 0 && ++ ((!ipv6_ext_hdr(hp->nexthdr)) || ++ hp->nexthdr == NEXTHDR_NONE)) { ++ if (fragoff) ++ *fragoff = _frag_off; ++ return hp->nexthdr; ++ } ++ return -ENOENT; ++ } ++ hdrlen = 8; ++ } else if (nexthdr == 
NEXTHDR_AUTH) { ++ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) ++ break; ++ hdrlen = (hp->hdrlen + 2) << 2; ++ } else ++ hdrlen = ipv6_optlen(hp); ++ ++ if (!found) { ++ nexthdr = hp->nexthdr; ++ len -= hdrlen; ++ start += hdrlen; ++ } ++ } while (!found); ++ ++ *offset = start; ++ return nexthdr; ++} ++#endif /* < 3.8.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) ++#endif /* 3.9.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, const unsigned char *addr, ++ u16 flags) ++#else ++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr, u16 flags) ++#endif ++{ ++ int err = -EINVAL; ++ ++ /* If aging addresses are supported device will need to ++ * implement its own handler for this. ++ */ ++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", dev->name); ++ return err; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) ++ err = dev_uc_add_excl(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_add_excl(dev, addr); ++ ++ /* Only return duplicate errors if NLM_F_EXCL is set */ ++ if (err == -EEXIST && !(flags & NLM_F_EXCL)) ++ err = 0; ++ ++ return err; ++} ++ ++#ifdef USE_CONST_DEV_UC_CHAR ++#ifdef HAVE_FDB_DEL_NLATTR ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, const unsigned char *addr) ++#else ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ const unsigned char *addr) ++#endif ++#else ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr) ++#endif ++{ ++ int err = -EINVAL; ++ ++ /* If aging addresses are supported device will need to ++ * implement its own handler for this. 
++ */ ++ if (!(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", dev->name); ++ return err; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) ++ err = dev_uc_del(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_del(dev, addr); ++ ++ return err; ++} ++ ++#endif /* HAVE_FDB_OPS */ ++#ifdef CONFIG_PCI_IOV ++int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) ++{ ++ unsigned int vfs_assigned = 0; ++#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED ++ int pos; ++ struct pci_dev *vfdev; ++ unsigned short dev_id; ++ ++ /* only search if we are a PF */ ++ if (!dev->is_physfn) ++ return 0; ++ ++ /* find SR-IOV capability */ ++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); ++ if (!pos) ++ return 0; ++ ++ /* ++ * determine the device ID for the VFs, the vendor ID will be the ++ * same as the PF so there is no need to check for that one ++ */ ++ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); ++ ++ /* loop through all the VFs to see if we own any that are assigned */ ++ vfdev = pci_get_device(dev->vendor, dev_id, NULL); ++ while (vfdev) { ++ /* ++ * It is considered assigned if it is a virtual function with ++ * our dev as the physical function and the assigned bit is set ++ */ ++ if (vfdev->is_virtfn && (vfdev->physfn == dev) && ++ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) ++ vfs_assigned++; ++ ++ vfdev = pci_get_device(dev->vendor, dev_id, vfdev); ++ } ++ ++#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ ++ return vfs_assigned; ++} ++ ++#endif /* CONFIG_PCI_IOV */ ++#endif /* 3.10.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) ++const unsigned char pcie_link_speed[] = { ++ PCI_SPEED_UNKNOWN, /* 0 */ ++ PCIE_SPEED_2_5GT, /* 1 */ ++ PCIE_SPEED_5_0GT, /* 2 */ ++ PCIE_SPEED_8_0GT, /* 3 */ ++ PCI_SPEED_UNKNOWN, /* 4 */ ++ PCI_SPEED_UNKNOWN, /* 5 */ ++ PCI_SPEED_UNKNOWN, /* 6 */ ++ PCI_SPEED_UNKNOWN, /* 7 */ ++ PCI_SPEED_UNKNOWN, /* 8 */ ++ PCI_SPEED_UNKNOWN, /* 9 */ ++ PCI_SPEED_UNKNOWN, /* A */ ++ PCI_SPEED_UNKNOWN, /* B */ ++ PCI_SPEED_UNKNOWN, /* C */ ++ PCI_SPEED_UNKNOWN, /* D */ ++ PCI_SPEED_UNKNOWN, /* E */ ++ PCI_SPEED_UNKNOWN /* F */ ++}; ++ ++int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, ++ enum pcie_link_width *width) ++{ ++ int ret; ++ ++ *speed = PCI_SPEED_UNKNOWN; ++ *width = PCIE_LNK_WIDTH_UNKNOWN; ++ ++ while (dev) { ++ u16 lnksta; ++ enum pci_bus_speed next_speed; ++ enum pcie_link_width next_width; ++ ++ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); ++ if (ret) ++ return ret; ++ ++ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; ++ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> ++ PCI_EXP_LNKSTA_NLW_SHIFT; ++ ++ if (next_speed < *speed) ++ *speed = next_speed; ++ ++ if (next_width < *width) ++ *width = next_width; ++ ++ dev = dev->bus->self; ++ } ++ ++ return 0; ++} ++ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) ++int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) ++{ ++ int err = dma_set_mask(dev, mask); ++ ++ if (!err) ++ /* coherent mask for the same size will always succeed if ++ * dma_set_mask does. However we store the error anyways, due ++ * to some kernels which use gcc's warn_unused_result on their ++ * definition of dma_set_coherent_mask. 
++ */ ++ err = dma_set_coherent_mask(dev, mask); ++ return err; ++} ++ ++void __kc_netdev_rss_key_fill(void *buffer, size_t len) ++{ ++ /* Set of random keys generated using kernel random number generator */ ++ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, ++ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, ++ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, ++ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, ++ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, ++ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, ++ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; ++ ++ BUG_ON(len > NETDEV_RSS_KEY_LEN); ++ memcpy(buffer, seed, len); ++} ++#endif /* 3.13.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) ++int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, ++ int minvec, int maxvec) ++{ ++ int nvec = maxvec; ++ int rc; ++ ++ if (maxvec < minvec) ++ return -ERANGE; ++ ++ do { ++ rc = pci_enable_msix(dev, entries, nvec); ++ if (rc < 0) { ++ return rc; ++ } else if (rc > 0) { ++ if (rc < minvec) ++ return -ENOSPC; ++ nvec = rc; ++ } ++ } while (rc); ++ ++ return nvec; ++} ++#endif /* 3.14.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct netdev_hw_addr *ha, *tmp; ++ int err; ++ ++ /* first go through and flush out any stale entries */ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (!ha->synced || ha->refcount != 1) ++#else ++ if (!ha->sync_cnt || ha->refcount != 1) ++#endif ++ continue; ++ ++ if (unsync && unsync(dev, ha->addr)) ++ continue; ++ ++ list_del_rcu(&ha->list); ++ kfree_rcu(ha, rcu_head); ++ list->count--; ++ } ++ ++ /* go through and sync new entries to the list */ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (ha->synced) ++#else ++ if (ha->sync_cnt) ++#endif ++ continue; ++ ++ err = sync(dev, ha->addr); ++ if (err) ++ return err; ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ ha->synced = true; ++#else ++ ha->sync_cnt++; ++#endif ++ ha->refcount++; ++ } ++ ++ return 0; ++} ++ ++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct netdev_hw_addr *ha, *tmp; ++ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (!ha->synced) ++#else ++ if (!ha->sync_cnt) ++#endif ++ continue; ++ ++ if (unsync && unsync(dev, ha->addr)) ++ continue; ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ ha->synced = false; ++#else ++ ha->sync_cnt--; ++#endif ++ if (--ha->refcount) ++ continue; ++ ++ list_del_rcu(&ha->list); ++ kfree_rcu(ha, rcu_head); ++ list->count--; ++ } ++} ++ ++#endif /* NETDEV_HW_ADDR_T_UNICAST */ ++#ifndef NETDEV_HW_ADDR_T_MULTICAST ++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct dev_addr_list *da, **next = list; ++ int err; ++ ++ /* first go through and flush out any stale entries */ ++ while ((da = *next) != NULL) { 
++ if (da->da_synced && da->da_users == 1) { ++ if (!unsync || !unsync(dev, da->da_addr)) { ++ *next = da->next; ++ kfree(da); ++ (*count)--; ++ continue; ++ } ++ } ++ next = &da->next; ++ } ++ ++ /* go through and sync new entries to the list */ ++ for (da = *list; da != NULL; da = da->next) { ++ if (da->da_synced) ++ continue; ++ ++ err = sync(dev, da->da_addr); ++ if (err) ++ return err; ++ ++ da->da_synced++; ++ da->da_users++; ++ } ++ ++ return 0; ++} ++ ++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct dev_addr_list *da; ++ ++ while ((da = *list) != NULL) { ++ if (da->da_synced) { ++ if (!unsync || !unsync(dev, da->da_addr)) { ++ da->da_synced--; ++ if (--da->da_users == 0) { ++ *list = da->next; ++ kfree(da); ++ (*count)--; ++ continue; ++ } ++ } ++ } ++ list = &da->next; ++ } ++} ++#endif /* NETDEV_HW_ADDR_T_MULTICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++#endif /* 3.16.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) ++#ifndef NO_PTP_SUPPORT ++static void __kc_sock_efree(struct sk_buff *skb) ++{ ++ sock_put(skb->sk); ++} ++ ++struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) ++{ ++ struct sock *sk = skb->sk; ++ struct sk_buff *clone; ++ ++ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) ++ return NULL; ++ ++ clone = skb_clone(skb, GFP_ATOMIC); ++ if (!clone) { ++ sock_put(sk); ++ return NULL; ++ } ++ ++ clone->sk = sk; ++ clone->destructor = __kc_sock_efree; ++ ++ return clone; ++} ++ ++void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, ++ struct skb_shared_hwtstamps *hwtstamps) ++{ ++ struct sock_exterr_skb *serr; ++ struct sock *sk = skb->sk; ++ int err; ++ ++ sock_hold(sk); ++ ++ *skb_hwtstamps(skb) = *hwtstamps; ++ ++ serr = SKB_EXT_ERR(skb); ++ memset(serr, 0, sizeof(*serr)); ++ serr->ee.ee_errno = ENOMSG; ++ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; ++ ++ err = sock_queue_err_skb(sk, skb); ++ if (err) ++ kfree_skb(skb); ++ ++ sock_put(sk); ++} ++#endif ++ ++/* include headers needed for get_headlen function */ ++#ifdef HAVE_SCTP ++#include ++#endif ++ ++unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len) ++{ ++ union { ++ unsigned char *network; ++ /* l2 headers */ ++ struct ethhdr *eth; ++ struct vlan_hdr *vlan; ++ /* l3 headers */ ++ struct iphdr *ipv4; ++ struct ipv6hdr *ipv6; ++ } hdr; ++ __be16 proto; ++ u8 nexthdr = 0; /* default to not TCP */ ++ u8 hlen; ++ ++ /* this should never happen, but better safe than sorry */ ++ if (max_len < ETH_HLEN) ++ return max_len; ++ ++ /* initialize network frame pointer */ ++ hdr.network = data; ++ ++ /* set first protocol and move network header forward */ ++ proto = hdr.eth->h_proto; ++ hdr.network += ETH_HLEN; ++ ++again: ++ switch (proto) { ++ /* handle any vlan tag if present */ ++ case __constant_htons(ETH_P_8021AD): ++ case __constant_htons(ETH_P_8021Q): ++ if ((hdr.network - data) > (max_len - VLAN_HLEN)) ++ return max_len; ++ ++ proto = hdr.vlan->h_vlan_encapsulated_proto; ++ hdr.network += VLAN_HLEN; ++ goto again; ++ /* handle L3 protocols */ ++ case __constant_htons(ETH_P_IP): ++ if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) ++ return max_len; ++ ++ /* access ihl as a u8 to avoid unaligned access on ia64 */ ++ hlen = (hdr.network[0] & 0x0F) << 2; ++ ++ /* verify hlen meets minimum size requirements */ ++ if (hlen < sizeof(struct iphdr)) ++ return 
hdr.network - data; ++ ++ /* record next protocol if header is present */ ++ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) ++ nexthdr = hdr.ipv4->protocol; ++ ++ hdr.network += hlen; ++ break; ++#ifdef NETIF_F_TSO6 ++ case __constant_htons(ETH_P_IPV6): ++ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) ++ return max_len; ++ ++ /* record next protocol */ ++ nexthdr = hdr.ipv6->nexthdr; ++ hdr.network += sizeof(struct ipv6hdr); ++ break; ++#endif /* NETIF_F_TSO6 */ ++ default: ++ return hdr.network - data; ++ } ++ ++ /* finally sort out L4 */ ++ switch (nexthdr) { ++ case IPPROTO_TCP: ++ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) ++ return max_len; ++ ++ /* access doff as a u8 to avoid unaligned access on ia64 */ ++ hdr.network += max_t(u8, sizeof(struct tcphdr), ++ (hdr.network[12] & 0xF0) >> 2); ++ ++ break; ++ case IPPROTO_UDP: ++ case IPPROTO_UDPLITE: ++ hdr.network += sizeof(struct udphdr); ++ break; ++#ifdef HAVE_SCTP ++ case IPPROTO_SCTP: ++ hdr.network += sizeof(struct sctphdr); ++ break; ++#endif ++ } ++ ++ /* ++ * If everything has gone correctly hdr.network should be the ++ * data section of the packet and will be the end of the header. ++ * If not then it probably represents the end of the last recognized ++ * header. ++ */ ++ return min_t(unsigned int, hdr.network - data, max_len); ++} ++ ++#endif /* < 3.18.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) ++#ifdef HAVE_NET_GET_RANDOM_ONCE ++static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; ++ ++void __kc_netdev_rss_key_fill(void *buffer, size_t len) ++{ ++ BUG_ON(len > sizeof(__kc_netdev_rss_key)); ++ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); ++ memcpy(buffer, __kc_netdev_rss_key, len); ++} ++#endif ++#endif +diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.h b/drivers/net/ethernet/intel/igb/kcompat.h +--- a/drivers/net/ethernet/intel/igb/kcompat.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/kcompat.h 2016-11-14 14:32:08.583567168 +0000 +@@ -0,0 +1,5071 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _KCOMPAT_H_ ++#define _KCOMPAT_H_ ++ ++#ifndef LINUX_VERSION_CODE ++#include ++#else ++#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ ++#ifndef UTS_RELEASE ++/* utsrelease.h changed locations in 2.6.33 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#include ++#else ++#include ++#endif ++#endif ++ ++/* NAPI enable/disable flags here */ ++#define NAPI ++ ++#define adapter_struct igb_adapter ++#define adapter_q_vector igb_q_vector ++#define NAPI ++ ++/* and finally set defines so that the code sees the changes */ ++#ifdef NAPI ++#else ++#endif /* NAPI */ ++ ++/* Dynamic LTR and deeper C-State support disable/enable */ ++ ++/* packet split disable/enable */ ++#ifdef DISABLE_PACKET_SPLIT ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++#define CONFIG_IGB_DISABLE_PACKET_SPLIT ++#endif ++#endif /* DISABLE_PACKET_SPLIT */ ++ ++/* MSI compatibility code for all kernels and drivers */ ++#ifdef DISABLE_PCI_MSI ++#undef CONFIG_PCI_MSI ++#endif ++#ifndef CONFIG_PCI_MSI ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++struct msix_entry { ++ u16 vector; /* kernel uses to write allocated vector */ ++ u16 entry; /* driver uses to specify entry, OS writes */ ++}; ++#endif ++#undef pci_enable_msi ++#define pci_enable_msi(a) -ENOTSUPP ++#undef pci_disable_msi ++#define pci_disable_msi(a) do {} while (0) ++#undef pci_enable_msix ++#define pci_enable_msix(a, b, c) -ENOTSUPP ++#undef pci_disable_msix ++#define pci_disable_msix(a) do {} while (0) ++#define msi_remove_pci_irq_vectors(a) do {} while (0) ++#endif /* CONFIG_PCI_MSI */ ++#ifdef DISABLE_PM ++#undef CONFIG_PM ++#endif ++ ++#ifdef DISABLE_NET_POLL_CONTROLLER ++#undef CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef PMSG_SUSPEND ++#define PMSG_SUSPEND 3 ++#endif ++ ++/* generic boolean compatibility */ ++#undef TRUE ++#undef FALSE ++#define TRUE true ++#define FALSE false ++#ifdef GCC_VERSION ++#if ( GCC_VERSION < 3000 ) ++#define _Bool char ++#endif ++#else ++#define _Bool char ++#endif ++ ++#undef __always_unused ++#define __always_unused __attribute__((__unused__)) ++ ++#undef __maybe_unused ++#define __maybe_unused __attribute__((__unused__)) ++ ++/* kernels less than 2.4.14 don't have this */ ++#ifndef ETH_P_8021Q ++#define ETH_P_8021Q 0x8100 ++#endif ++ ++#ifndef module_param ++#define module_param(v,t,p) MODULE_PARM(v, "i"); ++#endif ++ ++#ifndef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffffffffffffULL ++#endif ++ ++#ifndef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0x00000000ffffffffULL ++#endif ++ ++#ifndef PCI_CAP_ID_EXP ++#define PCI_CAP_ID_EXP 0x10 ++#endif ++ ++#ifndef uninitialized_var ++#define uninitialized_var(x) x = x ++#endif ++ ++#ifndef PCIE_LINK_STATE_L0S ++#define PCIE_LINK_STATE_L0S 1 ++#endif ++#ifndef PCIE_LINK_STATE_L1 ++#define PCIE_LINK_STATE_L1 2 ++#endif ++ ++#ifndef mmiowb ++#ifdef CONFIG_IA64 ++#define mmiowb() asm volatile ("mf.a" ::: "memory") ++#else ++#define mmiowb() ++#endif ++#endif ++ ++#ifndef SET_NETDEV_DEV ++#define SET_NETDEV_DEV(net, pdev) ++#endif ++ ++#if !defined(HAVE_FREE_NETDEV) && ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) ++#define free_netdev(x) kfree(x) ++#endif ++ ++#ifdef HAVE_POLL_CONTROLLER ++#define CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef SKB_DATAREF_SHIFT ++/* if we do not have the infrastructure to detect if skb_header is cloned ++ just return false in all cases */ ++#define skb_header_cloned(x) 0 ++#endif ++ ++#ifndef NETIF_F_GSO ++#define gso_size tso_size ++#define gso_segs tso_segs ++#endif ++ ++#ifndef NETIF_F_GRO ++#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ ++ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) ++#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) ++#endif ++ ++#ifndef NETIF_F_SCTP_CSUM ++#define NETIF_F_SCTP_CSUM 0 ++#endif ++ ++#ifndef NETIF_F_LRO ++#define NETIF_F_LRO (1 << 15) ++#endif ++ ++#ifndef NETIF_F_NTUPLE ++#define NETIF_F_NTUPLE (1 << 27) ++#endif ++ ++#ifndef NETIF_F_ALL_FCOE ++#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ ++ NETIF_F_FSO) ++#endif ++ ++#ifndef IPPROTO_SCTP ++#define IPPROTO_SCTP 132 ++#endif ++ ++#ifndef IPPROTO_UDPLITE ++#define IPPROTO_UDPLITE 136 ++#endif ++ ++#ifndef CHECKSUM_PARTIAL ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#define CHECKSUM_COMPLETE CHECKSUM_HW ++#endif ++ ++#ifndef __read_mostly ++#define __read_mostly ++#endif ++ ++#ifndef MII_RESV1 ++#define MII_RESV1 0x17 /* Reserved... */ ++#endif ++ ++#ifndef unlikely ++#define unlikely(_x) _x ++#define likely(_x) _x ++#endif ++ ++#ifndef WARN_ON ++#define WARN_ON(x) ++#endif ++ ++#ifndef PCI_DEVICE ++#define PCI_DEVICE(vend,dev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID ++#endif ++ ++#ifndef node_online ++#define node_online(node) ((node) == 0) ++#endif ++ ++#ifndef num_online_cpus ++#define num_online_cpus() smp_num_cpus ++#endif ++ ++#ifndef cpu_online ++#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) ++#endif ++ ++#ifndef _LINUX_RANDOM_H ++#include ++#endif ++ ++#ifndef DECLARE_BITMAP ++#ifndef BITS_TO_LONGS ++#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) ++#endif ++#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] ++#endif ++ ++#ifndef VLAN_HLEN ++#define VLAN_HLEN 4 ++#endif ++ ++#ifndef VLAN_ETH_HLEN ++#define VLAN_ETH_HLEN 18 ++#endif ++ ++#ifndef VLAN_ETH_FRAME_LEN ++#define VLAN_ETH_FRAME_LEN 1518 ++#endif ++ ++#ifndef DCA_GET_TAG_TWO_ARGS ++#define dca3_get_tag(a,b) dca_get_tag(b) ++#endif ++ ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#if defined(__i386__) || defined(__x86_64__) ++#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#endif ++#endif ++ ++/* taken from 2.6.24 definition in linux/kernel.h */ ++#ifndef IS_ALIGNED ++#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) ++#endif ++ ++#ifdef IS_ENABLED ++#undef IS_ENABLED ++#undef __ARG_PLACEHOLDER_1 ++#undef config_enabled ++#undef _config_enabled ++#undef __config_enabled ++#undef ___config_enabled ++#endif ++ ++#define __ARG_PLACEHOLDER_1 0, ++#define config_enabled(cfg) _config_enabled(cfg) ++#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) ++#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) ++#define ___config_enabled(__ignored, val, ...) 
val ++ ++#define IS_ENABLED(option) \ ++ (config_enabled(option) || config_enabled(option##_MODULE)) ++ ++#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) ++struct _kc_vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ __be16 h_vlan_proto; ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++#define vlan_ethhdr _kc_vlan_ethhdr ++struct _kc_vlan_hdr { ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++#define vlan_hdr _kc_vlan_hdr ++#define vlan_tx_tag_present(_skb) 0 ++#define vlan_tx_tag_get(_skb) 0 ++#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ ++ ++#ifndef VLAN_PRIO_SHIFT ++#define VLAN_PRIO_SHIFT 13 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_2_5GB ++#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_5_0GB ++#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_8_0GB ++#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X1 ++#define PCI_EXP_LNKSTA_NLW_X1 0x0010 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X2 ++#define PCI_EXP_LNKSTA_NLW_X2 0x0020 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X4 ++#define PCI_EXP_LNKSTA_NLW_X4 0x0040 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X8 ++#define PCI_EXP_LNKSTA_NLW_X8 0x0080 ++#endif ++ ++#ifndef __GFP_COLD ++#define __GFP_COLD 0 ++#endif ++ ++#ifndef __GFP_COMP ++#define __GFP_COMP 0 ++#endif ++ ++#ifndef IP_OFFSET ++#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ ++#endif ++ ++/*****************************************************************************/ ++/* Installations with ethtool version without eeprom, adapter id, or statistics ++ * support */ ++ ++#ifndef ETH_GSTRING_LEN ++#define ETH_GSTRING_LEN 32 ++#endif ++ ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x1d ++#undef ethtool_drvinfo ++#define ethtool_drvinfo k_ethtool_drvinfo ++struct k_ethtool_drvinfo { ++ u32 cmd; ++ char driver[32]; ++ char version[32]; ++ char fw_version[32]; ++ char bus_info[32]; ++ char reserved1[32]; ++ char reserved2[16]; ++ u32 n_stats; ++ u32 testinfo_len; ++ u32 eedump_len; ++ u32 regdump_len; ++}; ++ ++struct ethtool_stats { ++ u32 cmd; ++ u32 n_stats; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_GSTATS */ ++ ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x1c ++#endif /* ETHTOOL_PHYS_ID */ ++ ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x1b ++enum ethtool_stringset { ++ ETH_SS_TEST = 0, ++ ETH_SS_STATS, ++}; ++struct ethtool_gstrings { ++ u32 cmd; /* ETHTOOL_GSTRINGS */ ++ u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ ++ u32 len; /* number of strings in the string set */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GSTRINGS */ ++ ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x1a ++enum ethtool_test_flags { ++ ETH_TEST_FL_OFFLINE = (1 << 0), ++ ETH_TEST_FL_FAILED = (1 << 1), ++}; ++struct ethtool_test { ++ u32 cmd; ++ u32 flags; ++ u32 reserved; ++ u32 len; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_TEST */ ++ ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0xb ++#undef ETHTOOL_GREGS ++struct ethtool_eeprom { ++ u32 cmd; ++ u32 magic; ++ u32 offset; ++ u32 len; ++ u8 data[0]; ++}; ++ ++struct ethtool_value { ++ u32 cmd; ++ u32 data; ++}; ++#endif /* ETHTOOL_GEEPROM */ ++ ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0xa ++#endif /* ETHTOOL_GLINK */ ++ ++#ifndef ETHTOOL_GWOL ++#define ETHTOOL_GWOL 0x5 ++#define ETHTOOL_SWOL 0x6 ++#define SOPASS_MAX 6 ++struct ethtool_wolinfo { ++ u32 cmd; ++ u32 supported; ++ u32 wolopts; ++ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ ++}; ++#endif /* ETHTOOL_GWOL */ ++ ++#ifndef ETHTOOL_GREGS ++#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ ++#define ethtool_regs _kc_ethtool_regs ++/* for passing big chunks of data */ ++struct _kc_ethtool_regs { ++ u32 cmd; ++ u32 version; /* driver-specific, indicates different chips/revs */ ++ u32 len; /* bytes */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GREGS */ ++ ++#ifndef ETHTOOL_GMSGLVL ++#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ ++#endif ++#ifndef ETHTOOL_SMSGLVL ++#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ ++#endif ++#ifndef ETHTOOL_NWAY_RST ++#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ ++#endif ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0x0000000a /* Get link status */ ++#endif ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ ++#endif ++#ifndef ETHTOOL_SEEPROM ++#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ ++#endif ++#ifndef ETHTOOL_GCOALESCE ++#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ ++/* for configuring coalescing parameters of chip */ ++#define ethtool_coalesce _kc_ethtool_coalesce ++struct _kc_ethtool_coalesce { ++ u32 cmd; /* ETHTOOL_{G,S}COALESCE */ ++ ++ /* How many usecs to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_max_coalesced_frames ++ * is used. ++ */ ++ u32 rx_coalesce_usecs; ++ ++ /* How many packets to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause RX interrupts to never be ++ * generated. ++ */ ++ u32 rx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 rx_coalesce_usecs_irq; ++ u32 rx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_max_coalesced_frames ++ * is used. ++ */ ++ u32 tx_coalesce_usecs; ++ ++ /* How many packets to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause TX interrupts to never be ++ * generated. ++ */ ++ u32 tx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. 
Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 tx_coalesce_usecs_irq; ++ u32 tx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay in-memory statistics ++ * block updates. Some drivers do not have an in-memory ++ * statistic block, and in such cases this value is ignored. ++ * This value must not be zero. ++ */ ++ u32 stats_block_coalesce_usecs; ++ ++ /* Adaptive RX/TX coalescing is an algorithm implemented by ++ * some drivers to improve latency under low packet rates and ++ * improve throughput under high packet rates. Some drivers ++ * only implement one of RX or TX adaptive coalescing. Anything ++ * not implemented by the driver causes these values to be ++ * silently ignored. ++ */ ++ u32 use_adaptive_rx_coalesce; ++ u32 use_adaptive_tx_coalesce; ++ ++ /* When the packet rate (measured in packets per second) ++ * is below pkt_rate_low, the {rx,tx}_*_low parameters are ++ * used. ++ */ ++ u32 pkt_rate_low; ++ u32 rx_coalesce_usecs_low; ++ u32 rx_max_coalesced_frames_low; ++ u32 tx_coalesce_usecs_low; ++ u32 tx_max_coalesced_frames_low; ++ ++ /* When the packet rate is below pkt_rate_high but above ++ * pkt_rate_low (both measured in packets per second) the ++ * normal {rx,tx}_* coalescing parameters are used. ++ */ ++ ++ /* When the packet rate is (measured in packets per second) ++ * is above pkt_rate_high, the {rx,tx}_*_high parameters are ++ * used. ++ */ ++ u32 pkt_rate_high; ++ u32 rx_coalesce_usecs_high; ++ u32 rx_max_coalesced_frames_high; ++ u32 tx_coalesce_usecs_high; ++ u32 tx_max_coalesced_frames_high; ++ ++ /* How often to do adaptive coalescing packet rate sampling, ++ * measured in seconds. Must not be zero. ++ */ ++ u32 rate_sample_interval; ++}; ++#endif /* ETHTOOL_GCOALESCE */ ++ ++#ifndef ETHTOOL_SCOALESCE ++#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ ++#endif ++#ifndef ETHTOOL_GRINGPARAM ++#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ ++/* for configuring RX/TX ring parameters */ ++#define ethtool_ringparam _kc_ethtool_ringparam ++struct _kc_ethtool_ringparam { ++ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ ++ ++ /* Read only attributes. These indicate the maximum number ++ * of pending RX/TX ring entries the driver will allow the ++ * user to set. ++ */ ++ u32 rx_max_pending; ++ u32 rx_mini_max_pending; ++ u32 rx_jumbo_max_pending; ++ u32 tx_max_pending; ++ ++ /* Values changeable by the user. The valid values are ++ * in the range 1 to the "*_max_pending" counterpart above. ++ */ ++ u32 rx_pending; ++ u32 rx_mini_pending; ++ u32 rx_jumbo_pending; ++ u32 tx_pending; ++}; ++#endif /* ETHTOOL_GRINGPARAM */ ++ ++#ifndef ETHTOOL_SRINGPARAM ++#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ ++#endif ++#ifndef ETHTOOL_GPAUSEPARAM ++#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ ++/* for configuring link flow control parameters */ ++#define ethtool_pauseparam _kc_ethtool_pauseparam ++struct _kc_ethtool_pauseparam { ++ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ ++ ++ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg ++ * being true) the user may set 'autoneg' here non-zero to have the ++ * pause parameters be auto-negotiated too. In such a case, the ++ * {rx,tx}_pause values below determine what capabilities are ++ * advertised. ++ * ++ * If 'autoneg' is zero or the link is not being auto-negotiated, ++ * then {rx,tx}_pause force the driver to use/not-use pause ++ * flow control. 
++ */ ++ u32 autoneg; ++ u32 rx_pause; ++ u32 tx_pause; ++}; ++#endif /* ETHTOOL_GPAUSEPARAM */ ++ ++#ifndef ETHTOOL_SPAUSEPARAM ++#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ ++#endif ++#ifndef ETHTOOL_GRXCSUM ++#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SRXCSUM ++#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GTXCSUM ++#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STXCSUM ++#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GSG ++#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable ++ * (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SSG ++#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable ++ * (ethtool_value). */ ++#endif ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ ++#endif ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ ++#endif ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ ++#endif ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ ++#endif ++#ifndef ETHTOOL_GTSO ++#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STSO ++#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ ++#endif ++ ++#ifndef ETHTOOL_BUSINFO_LEN ++#define ETHTOOL_BUSINFO_LEN 32 ++#endif ++ ++#ifndef SPEED_2500 ++#define SPEED_2500 2500 ++#endif ++#ifndef SPEED_5000 ++#define SPEED_5000 5000 ++#endif ++ ++#ifndef RHEL_RELEASE_VERSION ++#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) ++#endif ++#ifndef AX_RELEASE_VERSION ++#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) ++#endif ++ ++#ifndef AX_RELEASE_CODE ++#define AX_RELEASE_CODE 0 ++#endif ++ ++#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) ++#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) ++#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) ++#endif ++ ++#ifndef RHEL_RELEASE_CODE ++/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ ++#define RHEL_RELEASE_CODE 0 ++#endif ++ ++/* RHEL 7 didn't backport the parameter change in ++ * create_singlethread_workqueue. ++ * If/when RH corrects this we will want to tighten up the version check. ++ */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) ++#undef create_singlethread_workqueue ++#define create_singlethread_workqueue(name) \ ++ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) ++#endif ++ ++/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find ++ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new ++ * enough versions of Ubuntu. Otherwise you can simply see it in the output of ++ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in ++ * the linux-source package, but in the linux-headers package. It begins to ++ * appear in later releases of 14.04 and 14.10. 
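The distribution version macros above pack major and minor numbers into one integer so ordinary comparisons order correctly. A couple of worked values, purely illustrative:

/* RHEL_RELEASE_VERSION(a,b) == (a << 8) + b, so:
 *   RHEL_RELEASE_VERSION(5,3) == 0x0503 == 1283
 *   RHEL_RELEASE_VERSION(7,0) == 0x0700 == 1792
 * which keeps checks such as the workqueue override above well ordered:
 */
#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))
/* RHEL 7.x or newer code path */
#endif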
++ * ++ * Ex: ++ * ++ * $uname -r ++ * 3.13.0-45-generic ++ * ABI is 45 ++ * ++ * ++ * $uname -r ++ * 3.16.0-23-generic ++ * ABI is 23 ++ */ ++#ifndef UTS_UBUNTU_RELEASE_ABI ++#define UTS_UBUNTU_RELEASE_ABI 0 ++#define UBUNTU_VERSION_CODE 0 ++#else ++/* Ubuntu does not provide actual release version macro, so we use the kernel ++ * version plus the ABI to generate a unique version code specific to Ubuntu. ++ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to ++ * ignore differences in sublevel which are not important since we have the ++ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for ++ * ordering checks. ++ */ ++#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ ++ UTS_UBUNTU_RELEASE_ABI) ++ ++#if UTS_UBUNTU_RELEASE_ABI > 255 ++#error UTS_UBUNTU_RELEASE_ABI is too large... ++#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ ++ ++#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) ++/* Our version code scheme does not make sense for non 3.x or newer kernels, ++ * and we have no support in kcompat for this scenario. Thus, treat this as a ++ * non-Ubuntu kernel. Possibly might be better to error here. ++ */ ++#define UTS_UBUNTU_RELEASE_ABI 0 ++#define UBUNTU_VERSION_CODE 0 ++#endif ++ ++#endif ++ ++/* Note that the 3rd digit is always zero, and will be ignored. This is ++ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux ++ * version codes are 3 digit, this 3rd digit is superseded by the ABI value. ++ */ ++#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) ++ ++/* SuSE version macro is the same as Linux kernel version */ ++#ifndef SLE_VERSION ++#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) ++#endif ++#ifdef CONFIG_SUSE_KERNEL ++#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) ++/* SLES11 GA is 2.6.27 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,0,0) ++#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) ++/* SLES11 SP1 is 2.6.32 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,1,0) ++#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) ++/* SLES11 SP2 is 3.0.13 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,2,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) ++/* SLES11 SP3 is 3.0.76 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,3,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))) ++/* SLES11 SP4 is 3.0.101 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,4,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))) ++/* SLES12 GA is 3.12.28 based */ ++#define SLE_VERSION_CODE SLE_VERSION(12,0,0) ++/* new SLES kernels must be added here with >= based on kernel ++ * the idea is to order from newest to oldest and just catch all ++ * of them using the >= ++ */ ++#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47))) ++/* SLES12 SP1 is 3.12.47-based */ ++#define SLE_VERSION_CODE SLE_VERSION(12,1,0) ++#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ ++#endif /* CONFIG_SUSE_KERNEL */ ++#ifndef SLE_VERSION_CODE ++#define SLE_VERSION_CODE 0 ++#endif /* SLE_VERSION_CODE */ ++ ++#ifdef __KLOCWORK__ ++#ifdef ARRAY_SIZE ++#undef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++#endif /* __KLOCWORK__ */ ++ ++/*****************************************************************************/ ++/* 2.4.3 => 2.4.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++#ifndef pci_set_dma_mask ++#define pci_set_dma_mask _kc_pci_set_dma_mask 
++extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); ++#endif ++ ++#ifndef pci_request_regions ++#define pci_request_regions _kc_pci_request_regions ++extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); ++#endif ++ ++#ifndef pci_release_regions ++#define pci_release_regions _kc_pci_release_regions ++extern void _kc_pci_release_regions(struct pci_dev *pdev); ++#endif ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++#ifndef alloc_etherdev ++#define alloc_etherdev _kc_alloc_etherdev ++extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); ++#endif ++ ++#ifndef is_valid_ether_addr ++#define is_valid_ether_addr _kc_is_valid_ether_addr ++extern int _kc_is_valid_ether_addr(u8 *addr); ++#endif ++ ++/**************************************/ ++/* MISCELLANEOUS */ ++ ++#ifndef INIT_TQUEUE ++#define INIT_TQUEUE(_tq, _routine, _data) \ ++ do { \ ++ INIT_LIST_HEAD(&(_tq)->list); \ ++ (_tq)->sync = 0; \ ++ (_tq)->routine = _routine; \ ++ (_tq)->data = _data; \ ++ } while (0) ++#endif ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) ++/* Generic MII registers. */ ++#define MII_BMCR 0x00 /* Basic mode control register */ ++#define MII_BMSR 0x01 /* Basic mode status register */ ++#define MII_PHYSID1 0x02 /* PHYS ID 1 */ ++#define MII_PHYSID2 0x03 /* PHYS ID 2 */ ++#define MII_ADVERTISE 0x04 /* Advertisement control reg */ ++#define MII_LPA 0x05 /* Link partner ability reg */ ++#define MII_EXPANSION 0x06 /* Expansion register */ ++/* Basic mode control register. */ ++#define BMCR_FULLDPLX 0x0100 /* Full duplex */ ++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ ++/* Basic mode status register. */ ++#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ ++#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ ++#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ ++#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ ++#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ ++#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ ++/* Advertisement control register. */ ++#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ ++#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ ++#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ ++#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ ++#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ ++#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ ++ ADVERTISE_100HALF | ADVERTISE_100FULL) ++/* Expansion register for auto-negotiation. 
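The MII advertisement constants above are plain bit masks and combine with bitwise OR. A small sketch (the function name is invented) showing a full-duplex-only advertisement built from them:

static u16 demo_advertise_full_duplex_only(void)
{
	/* ADVERTISE_ALL == 0x0020 | 0x0040 | 0x0080 | 0x0100 == 0x01e0.
	 * Keep only the full-duplex bits plus the mandatory CSMA selector. */
	return ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL;	/* 0x0141 */
}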
*/ ++#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.6 => 2.4.3 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++#ifndef pci_set_power_state ++#define pci_set_power_state _kc_pci_set_power_state ++extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); ++#endif ++ ++#ifndef pci_enable_wake ++#define pci_enable_wake _kc_pci_enable_wake ++extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); ++#endif ++ ++#ifndef pci_disable_device ++#define pci_disable_device _kc_pci_disable_device ++extern void _kc_pci_disable_device(struct pci_dev *pdev); ++#endif ++ ++/* PCI PM entry point syntax changed, so don't support suspend/resume */ ++#undef CONFIG_PM ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++#ifndef HAVE_PCI_SET_MWI ++#define pci_set_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ ++ PCI_COMMAND_INVALIDATE); ++#define pci_clear_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ ++ ~PCI_COMMAND_INVALIDATE); ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.10 => 2.4.9 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) ++ ++/**************************************/ ++/* MODULE API */ ++ ++#ifndef MODULE_LICENSE ++ #define MODULE_LICENSE(X) ++#endif ++ ++/**************************************/ ++/* OTHER */ ++ ++#undef min ++#define min(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x < _y ? _x : _y; }) ++ ++#undef max ++#define max(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x > _y ? _x : _y; }) ++ ++#define min_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x < _y ? _x : _y; }) ++ ++#define max_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x > _y ? _x : _y; }) ++ ++#ifndef list_for_each_safe ++#define list_for_each_safe(pos, n, head) \ ++ for (pos = (head)->next, n = pos->next; pos != (head); \ ++ pos = n, n = pos->next) ++#endif ++ ++#ifndef ____cacheline_aligned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_aligned_in_smp ____cacheline_aligned ++#else ++#define ____cacheline_aligned_in_smp ++#endif /* CONFIG_SMP */ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); ++#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) ++extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) ++#else /* 2.4.8 => 2.4.9 */ ++extern int snprintf(char * buf, size_t size, const char *fmt, ...); ++extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#endif ++#endif /* 2.4.10 -> 2.4.6 */ ++ ++ ++/*****************************************************************************/ ++/* 2.4.12 => 2.4.10 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) ++#ifndef HAVE_NETIF_MSG ++#define HAVE_NETIF_MSG 1 ++enum { ++ NETIF_MSG_DRV = 0x0001, ++ NETIF_MSG_PROBE = 0x0002, ++ NETIF_MSG_LINK = 0x0004, ++ NETIF_MSG_TIMER = 0x0008, ++ NETIF_MSG_IFDOWN = 0x0010, ++ NETIF_MSG_IFUP = 0x0020, ++ NETIF_MSG_RX_ERR = 0x0040, ++ NETIF_MSG_TX_ERR = 0x0080, ++ NETIF_MSG_TX_QUEUED = 0x0100, ++ NETIF_MSG_INTR = 0x0200, ++ NETIF_MSG_TX_DONE = 0x0400, ++ NETIF_MSG_RX_STATUS = 0x0800, ++ NETIF_MSG_PKTDATA = 0x1000, ++ NETIF_MSG_HW = 0x2000, ++ NETIF_MSG_WOL = 0x4000, ++}; ++ ++#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) ++#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) ++#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) ++#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) ++#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) ++#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) ++#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) ++#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) ++#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) ++#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) ++#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) ++#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) ++#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) ++#endif /* !HAVE_NETIF_MSG */ ++#endif /* 2.4.12 => 2.4.10 */ ++ ++/*****************************************************************************/ ++/* 2.4.13 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#ifndef virt_to_page ++ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) ++#endif ++ ++#ifndef pci_map_page ++#define pci_map_page _kc_pci_map_page ++extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); ++#endif ++ ++#ifndef pci_unmap_page ++#define pci_unmap_page _kc_pci_unmap_page ++extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); ++#endif ++ ++/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ ++ ++#undef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0xffffffff ++#undef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffff ++ ++/**************************************/ ++/* OTHER */ ++ ++#ifndef cpu_relax ++#define cpu_relax() rep_nop() ++#endif ++ ++struct vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ unsigned short h_vlan_proto; ++ unsigned short h_vlan_TCI; ++ unsigned short h_vlan_encapsulated_proto; ++}; ++#endif /* 2.4.13 => 2.4.12 */ ++ ++/*****************************************************************************/ ++/* 2.4.17 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) ++ ++#ifndef __devexit_p ++ #define __devexit_p(x) &(x) ++#endif ++ ++#endif /* 
2.4.17 => 2.4.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) ++#define NETIF_MSG_HW 0x2000 ++#define NETIF_MSG_WOL 0x4000 ++ ++#ifndef netif_msg_hw ++#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) ++#endif ++#ifndef netif_msg_wol ++#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) ++#endif ++#endif /* 2.4.18 */ ++ ++/*****************************************************************************/ ++ ++/*****************************************************************************/ ++/* 2.4.20 => 2.4.19 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) ++ ++/* we won't support NAPI on less than 2.4.20 */ ++#ifdef NAPI ++#undef NAPI ++#endif ++ ++#endif /* 2.4.20 => 2.4.19 */ ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#define pci_name(x) ((x)->slot_name) ++ ++#ifndef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#endif ++#ifndef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#endif ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#ifndef IGB_NO_LRO ++#define IGB_NO_LRO ++#endif ++#endif ++ ++/*****************************************************************************/ ++/*****************************************************************************/ ++/* 2.4.23 => 2.4.22 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) ++/*****************************************************************************/ ++#ifdef NAPI ++#ifndef netif_poll_disable ++#define netif_poll_disable(x) _kc_netif_poll_disable(x) ++static inline void _kc_netif_poll_disable(struct net_device *netdev) ++{ ++ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { ++ /* No hurry */ ++ current->state = TASK_INTERRUPTIBLE; ++ schedule_timeout(1); ++ } ++} ++#endif ++#ifndef netif_poll_enable ++#define netif_poll_enable(x) _kc_netif_poll_enable(x) ++static inline void _kc_netif_poll_enable(struct net_device *netdev) ++{ ++ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); ++} ++#endif ++#endif /* NAPI */ ++#ifndef netif_tx_disable ++#define netif_tx_disable(x) _kc_netif_tx_disable(x) ++static inline void _kc_netif_tx_disable(struct net_device *dev) ++{ ++ spin_lock_bh(&dev->xmit_lock); ++ netif_stop_queue(dev); ++ spin_unlock_bh(&dev->xmit_lock); ++} ++#endif ++#else /* 2.4.23 => 2.4.22 */ ++#define HAVE_SCTP ++#endif /* 2.4.23 => 2.4.22 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) ++#define ETHTOOL_OPS_COMPAT ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) ++#define __user ++#endif /* < 2.4.27 */ ++ ++/*****************************************************************************/ ++/* 2.5.71 => 2.4.x */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) ++#define sk_protocol protocol ++#define pci_get_device pci_find_device ++#endif /* 2.5.70 => 2.4.x */ ++ ++/*****************************************************************************/ ++/* 
< 2.4.27 or 2.6.0 <= 2.6.5 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) ++ ++#ifndef netif_msg_init ++#define netif_msg_init _kc_netif_msg_init ++static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) ++{ ++ /* use default */ ++ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) ++ return default_msg_enable_bits; ++ if (debug_value == 0) /* no output */ ++ return 0; ++ /* set low N bits */ ++ return (1 << debug_value) -1; ++} ++#endif ++ ++#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ ++/*****************************************************************************/ ++#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ ++ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ ++ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) ++#define netdev_priv(x) x->priv ++#endif ++ ++/*****************************************************************************/ ++/* <= 2.5.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) ++#include ++#undef pci_register_driver ++#define pci_register_driver pci_module_init ++ ++/* ++ * Most of the dma compat code is copied/modifed from the 2.4.37 ++ * /include/linux/libata-compat.h header file ++ */ ++/* These definitions mirror those in pci.h, so they can be used ++ * interchangeably with their PCI_ counterparts */ ++enum dma_data_direction { ++ DMA_BIDIRECTIONAL = 0, ++ DMA_TO_DEVICE = 1, ++ DMA_FROM_DEVICE = 2, ++ DMA_NONE = 3, ++}; ++ ++struct device { ++ struct pci_dev pdev; ++}; ++ ++static inline struct pci_dev *to_pci_dev (struct device *dev) ++{ ++ return (struct pci_dev *) dev; ++} ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return (struct device *) pdev; ++} ++ ++#define pdev_printk(lvl, pdev, fmt, args...) \ ++ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) ++#define dev_err(dev, fmt, args...) \ ++ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) ++#define dev_info(dev, fmt, args...) \ ++ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) ++#define dev_warn(dev, fmt, args...) \ ++ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) ++#define dev_notice(dev, fmt, args...) \ ++ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) ++#define dev_dbg(dev, fmt, args...) \ ++ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) ++ ++/* NOTE: dangerous! 
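To make the _kc_netif_msg_init() fallback above concrete, a module debug parameter of N enables the lowest N message classes; the values below are a worked example only:

/* netif_msg_init(3,  NETIF_MSG_DRV) == (1 << 3) - 1 == 0x7
 *   == NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
 * netif_msg_init(0,  NETIF_MSG_DRV) == 0             (silent)
 * netif_msg_init(-1, NETIF_MSG_DRV) == NETIF_MSG_DRV (default kept)
 */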
we ignore the 'gfp' argument */ ++#define dma_alloc_coherent(dev,sz,dma,gfp) \ ++ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) ++#define dma_free_coherent(dev,sz,addr,dma_addr) \ ++ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) ++ ++#define dma_map_page(dev,a,b,c,d) \ ++ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) ++#define dma_unmap_page(dev,a,b,c) \ ++ pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_single(dev,a,b,c) \ ++ pci_map_single(to_pci_dev(dev),(a),(b),(c)) ++#define dma_unmap_single(dev,a,b,c) \ ++ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_sg(dev, sg, nents, dir) \ ++ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) ++#define dma_unmap_sg(dev, sg, nents, dir) \ ++ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) ++ ++#define dma_sync_single(dev,a,b,c) \ ++ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) ++ ++/* for range just sync everything, that's all the pci API can do */ ++#define dma_sync_single_range(dev,addr,off,sz,dir) \ ++ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) ++ ++#define dma_set_mask(dev,mask) \ ++ pci_set_dma_mask(to_pci_dev(dev),(mask)) ++ ++/* hlist_* code - double linked lists */ ++struct hlist_head { ++ struct hlist_node *first; ++}; ++ ++struct hlist_node { ++ struct hlist_node *next, **pprev; ++}; ++ ++static inline void __hlist_del(struct hlist_node *n) ++{ ++ struct hlist_node *next = n->next; ++ struct hlist_node **pprev = n->pprev; ++ *pprev = next; ++ if (next) ++ next->pprev = pprev; ++} ++ ++static inline void hlist_del(struct hlist_node *n) ++{ ++ __hlist_del(n); ++ n->next = NULL; ++ n->pprev = NULL; ++} ++ ++static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) ++{ ++ struct hlist_node *first = h->first; ++ n->next = first; ++ if (first) ++ first->pprev = &n->next; ++ h->first = n; ++ n->pprev = &h->first; ++} ++ ++static inline int hlist_empty(const struct hlist_head *h) ++{ ++ return !h->first; ++} ++#define HLIST_HEAD_INIT { .first = NULL } ++#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } ++#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) ++static inline void INIT_HLIST_NODE(struct hlist_node *h) ++{ ++ h->next = NULL; ++ h->pprev = NULL; ++} ++ ++#ifndef might_sleep ++#define might_sleep() ++#endif ++#else ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return &pdev->dev; ++} ++#endif /* <= 2.5.0 */ ++ ++/*****************************************************************************/ ++/* 2.5.28 => 2.4.23 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ ++#include ++#define work_struct tq_struct ++#undef INIT_WORK ++#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) ++#undef container_of ++#define container_of list_entry ++#define schedule_work schedule_task ++#define flush_scheduled_work flush_scheduled_tasks ++#define cancel_work_sync(x) flush_scheduled_work() ++ ++#endif /* 2.5.28 => 2.4.17 */ ++ ++/*****************************************************************************/ ++/* 2.6.0 => 2.5.28 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#ifndef read_barrier_depends ++#define read_barrier_depends() rmb() ++#endif ++ ++#ifndef rcu_head ++struct __kc_callback_head { ++ struct __kc_callback_head *next; ++ void (*func)(struct callback_head *head); ++}; ++#define rcu_head __kc_callback_head ++#endif ++ ++#undef get_cpu ++#define get_cpu() smp_processor_id() ++#undef put_cpu ++#define put_cpu() do { } while(0) ++#define MODULE_INFO(version, _version) 
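A short usage sketch for the hlist backport above (struct demo_entry and the demo_* helpers are invented for illustration); the pprev back-pointer is what lets hlist_del() unlink a node without knowing its list head:

struct demo_entry {
	int id;
	struct hlist_node node;
};

static HLIST_HEAD(demo_list);

static void demo_add(struct demo_entry *e)
{
	INIT_HLIST_NODE(&e->node);
	hlist_add_head(&e->node, &demo_list);	/* O(1) push at the front */
}

static void demo_remove(struct demo_entry *e)
{
	hlist_del(&e->node);			/* needs only the node itself */
}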
++#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT ++#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 ++#endif ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 ++#endif ++ ++#define dma_set_coherent_mask(dev,mask) 1 ++ ++#undef dev_put ++#define dev_put(dev) __dev_put(dev) ++ ++#ifndef skb_fill_page_desc ++#define skb_fill_page_desc _kc_skb_fill_page_desc ++extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); ++#endif ++ ++#undef ALIGN ++#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) ++ ++#ifndef page_count ++#define page_count(p) atomic_read(&(p)->count) ++#endif ++ ++#ifdef MAX_NUMNODES ++#undef MAX_NUMNODES ++#endif ++#define MAX_NUMNODES 1 ++ ++/* find_first_bit and find_next bit are not defined for most ++ * 2.4 kernels (except for the redhat 2.4.21 kernels ++ */ ++#include ++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) ++#undef find_next_bit ++#define find_next_bit _kc_find_next_bit ++extern unsigned long _kc_find_next_bit(const unsigned long *addr, ++ unsigned long size, ++ unsigned long offset); ++#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) ++ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (strchr(dev->name, '%')) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#ifndef strlcpy ++#define strlcpy _kc_strlcpy ++extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); ++#endif /* strlcpy */ ++ ++#ifndef do_div ++#if BITS_PER_LONG == 64 ++# define do_div(n,base) ({ \ ++ uint32_t __base = (base); \ ++ uint32_t __rem; \ ++ __rem = ((uint64_t)(n)) % __base; \ ++ (n) = ((uint64_t)(n)) / __base; \ ++ __rem; \ ++ }) ++#elif BITS_PER_LONG == 32 ++extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); ++# define do_div(n,base) ({ \ ++ uint32_t __base = (base); \ ++ uint32_t __rem; \ ++ if (likely(((n) >> 32) == 0)) { \ ++ __rem = (uint32_t)(n) % __base; \ ++ (n) = (uint32_t)(n) / __base; \ ++ } else \ ++ __rem = _kc__div64_32(&(n), __base); \ ++ __rem; \ ++ }) ++#else /* BITS_PER_LONG == ?? 
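A worked example for the do_div() fallback above (demo_do_div() is a made-up helper): the macro divides its 64-bit first argument in place and returns the 32-bit remainder.

static u32 demo_do_div(void)
{
	u64 n = 1000000007ULL;		/* dividend, modified in place */
	u32 rem = do_div(n, 1000);	/* afterwards n == 1000000, rem == 7 */
	return rem;
}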
*/ ++# error do_div() does not yet support the C64 ++#endif /* BITS_PER_LONG */ ++#endif /* do_div */ ++ ++#ifndef NSEC_PER_SEC ++#define NSEC_PER_SEC 1000000000L ++#endif ++ ++#undef HAVE_I2C_SUPPORT ++#else /* 2.6.0 */ ++#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \ ++ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9))) ++#define HAVE_I2C_SUPPORT ++#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */ ++ ++#endif /* 2.6.0 => 2.5.28 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) ++#define dma_pool pci_pool ++#define dma_pool_destroy pci_pool_destroy ++#define dma_pool_alloc pci_pool_alloc ++#define dma_pool_free pci_pool_free ++ ++#define dma_pool_create(name,dev,size,align,allocation) \ ++ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) ++#endif /* < 2.6.3 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++#define MODULE_VERSION(_version) MODULE_INFO(version, _version) ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++/* 2.6.5 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ++#define dma_sync_single_for_cpu dma_sync_single ++#define dma_sync_single_for_device dma_sync_single ++#define dma_sync_single_range_for_cpu dma_sync_single_range ++#define dma_sync_single_range_for_device dma_sync_single_range ++#ifndef pci_dma_mapping_error ++#define pci_dma_mapping_error _kc_pci_dma_mapping_error ++static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) ++{ ++ return dma_addr == 0; ++} ++#endif ++#endif /* 2.6.5 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); ++#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) ++/* taken from 2.6 include/linux/bitmap.h */ ++#undef bitmap_zero ++#define bitmap_zero _kc_bitmap_zero ++static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) ++{ ++ if (nbits <= BITS_PER_LONG) ++ *dst = 0UL; ++ else { ++ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ++ memset(dst, 0, len); ++ } ++} ++#define page_to_nid(x) 0 ++ ++#endif /* < 2.6.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) ++#undef if_mii ++#define if_mii _kc_if_mii ++static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) ++{ ++ return (struct mii_ioctl_data *) &rq->ifr_ifru; ++} ++ ++#ifndef __force ++#define __force ++#endif ++#endif /* < 2.6.7 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++#ifndef PCI_EXP_DEVCTL ++#define PCI_EXP_DEVCTL 8 ++#endif ++#ifndef PCI_EXP_DEVCTL_CERE ++#define PCI_EXP_DEVCTL_CERE 0x0001 ++#endif ++#define PCI_EXP_FLAGS 2 /* Capabilities register */ ++#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ ++#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ ++#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ ++#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ ++#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ ++#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ ++#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ ++#define PCI_EXP_DEVCAP 4 /* Device capabilities */ ++#define PCI_EXP_DEVSTA 10 /* Device Status */ ++#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ ++ schedule_timeout((x * HZ)/1000 + 2); \ ++ } while (0) ++ ++#endif /* < 2.6.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) ++#include ++#define __iomem ++ ++#ifndef kcalloc ++#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++#define MSEC_PER_SEC 1000L ++static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) ++{ ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (MSEC_PER_SEC / HZ) * j; ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); ++#else ++ return (j * MSEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return m * (HZ / MSEC_PER_SEC); ++#else ++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; ++#endif ++} ++ ++#define msleep_interruptible _kc_msleep_interruptible ++static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) ++{ ++ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; ++ ++ while (timeout && !signal_pending(current)) { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ timeout = schedule_timeout(timeout); ++ } ++ return _kc_jiffies_to_msecs(timeout); ++} ++ ++/* Basic mode control register. 
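A worked example for the jiffies helpers above, assuming HZ == 100 (10 ms per jiffy); the numbers are illustrative only:

/* With HZ == 100:
 *   msecs_to_jiffies(25) == (25 + 10 - 1) / 10 == 3 jiffies (rounded up)
 *   jiffies_to_msecs(3)  == 3 * 10             == 30 ms
 * msleep_interruptible() converts with one extra jiffy of slack and
 * returns the remaining time in ms if a signal cuts the sleep short,
 * or 0 once the full interval has elapsed.
 */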
*/ ++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ ++ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++#endif ++#ifndef __be16 ++#define __be16 u16 ++#endif ++#ifndef __be32 ++#define __be32 u32 ++#endif ++#ifndef __be64 ++#define __be64 u64 ++#endif ++ ++static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) ++{ ++ return (struct vlan_ethhdr *)skb->mac.raw; ++} ++ ++/* Wake-On-Lan options. */ ++#define WAKE_PHY (1 << 0) ++#define WAKE_UCAST (1 << 1) ++#define WAKE_MCAST (1 << 2) ++#define WAKE_BCAST (1 << 3) ++#define WAKE_ARP (1 << 4) ++#define WAKE_MAGIC (1 << 5) ++#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ ++ ++#define skb_header_pointer _kc_skb_header_pointer ++static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, ++ int offset, int len, void *buffer) ++{ ++ int hlen = skb_headlen(skb); ++ ++ if (hlen - offset >= len) ++ return skb->data + offset; ++ ++#ifdef MAX_SKB_FRAGS ++ if (skb_copy_bits(skb, offset, buffer, len) < 0) ++ return NULL; ++ ++ return buffer; ++#else ++ return NULL; ++#endif ++ ++#ifndef NETDEV_TX_OK ++#define NETDEV_TX_OK 0 ++#endif ++#ifndef NETDEV_TX_BUSY ++#define NETDEV_TX_BUSY 1 ++#endif ++#ifndef NETDEV_TX_LOCKED ++#define NETDEV_TX_LOCKED -1 ++#endif ++} ++ ++#ifndef __bitwise ++#define __bitwise ++#endif ++#endif /* < 2.6.9 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++#ifdef module_param_array_named ++#undef module_param_array_named ++#define module_param_array_named(name, array, type, nump, perm) \ ++ static struct kparam_array __param_arr_##name \ ++ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ ++ sizeof(array[0]), array }; \ ++ module_param_call(name, param_array_set, param_array_get, \ ++ &__param_arr_##name, perm) ++#endif /* module_param_array_named */ ++/* ++ * num_online is broken for all < 2.6.10 kernels. This is needed to support ++ * Node module parameter of ixgbe. 
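A typical use of the skb_header_pointer() backport above: fetch a header that may or may not sit in the linear part of the skb. The demo_ wrapper and its caller-supplied buffer are hypothetical; struct udphdr and struct sk_buff come from the usual kernel headers.

static const struct udphdr *demo_udp_header(const struct sk_buff *skb,
					    int offset, struct udphdr *buf)
{
	/* Returns a pointer into skb->data when the bytes are linear,
	 * otherwise copies sizeof(*buf) bytes into buf and returns buf;
	 * NULL means the packet is shorter than offset + sizeof(*buf). */
	return skb_header_pointer(skb, offset, sizeof(*buf), buf);
}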
++ */ ++#undef num_online_nodes ++#define num_online_nodes(n) 1 ++extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); ++#undef node_online_map ++#define node_online_map _kcompat_node_online_map ++#define pci_get_class pci_find_class ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) ++#define PCI_D0 0 ++#define PCI_D1 1 ++#define PCI_D2 2 ++#define PCI_D3hot 3 ++#define PCI_D3cold 4 ++typedef int pci_power_t; ++#define pci_choose_state(pdev,state) state ++#define PMSG_SUSPEND 3 ++#define PCI_EXP_LNKCTL 16 ++ ++#undef NETIF_F_LLTX ++ ++#ifndef ARCH_HAS_PREFETCH ++#define prefetch(X) ++#endif ++ ++#ifndef NET_IP_ALIGN ++#define NET_IP_ALIGN 2 ++#endif ++ ++#define KC_USEC_PER_SEC 1000000L ++#define usecs_to_jiffies _kc_usecs_to_jiffies ++static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) ++{ ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (KC_USEC_PER_SEC / HZ) * j; ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); ++#else ++ return (j * KC_USEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return m * (HZ / KC_USEC_PER_SEC); ++#else ++ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; ++#endif ++} ++ ++#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ ++#define PCI_EXP_LNKSTA 18 /* Link Status */ ++#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ ++#define PCI_EXP_SLTCTL 24 /* Slot Control */ ++#define PCI_EXP_SLTSTA 26 /* Slot Status */ ++#define PCI_EXP_RTCTL 28 /* Root Control */ ++#define PCI_EXP_RTCAP 30 /* Root Capabilities */ ++#define PCI_EXP_RTSTA 32 /* Root Status */ ++#endif /* < 2.6.11 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) ++#include ++#define USE_REBOOT_NOTIFIER ++ ++/* Generic MII registers. */ ++#define MII_CTRL1000 0x09 /* 1000BASE-T control */ ++#define MII_STAT1000 0x0a /* 1000BASE-T status */ ++/* Advertisement control register. */ ++#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ ++#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ ++/* Link partner ability register. 
*/ ++#define LPA_PAUSE_CAP 0x0400 /* Can pause */ ++#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ ++/* 1000BASE-T Control register */ ++#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ ++#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ ++/* 1000BASE-T Status register */ ++#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ ++#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ ++ ++#ifndef is_zero_ether_addr ++#define is_zero_ether_addr _kc_is_zero_ether_addr ++static inline int _kc_is_zero_ether_addr(const u8 *addr) ++{ ++ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); ++} ++#endif /* is_zero_ether_addr */ ++#ifndef is_multicast_ether_addr ++#define is_multicast_ether_addr _kc_is_multicast_ether_addr ++static inline int _kc_is_multicast_ether_addr(const u8 *addr) ++{ ++ return addr[0] & 0x01; ++} ++#endif /* is_multicast_ether_addr */ ++#endif /* < 2.6.12 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) ++#ifndef kstrdup ++#define kstrdup _kc_kstrdup ++extern char *_kc_kstrdup(const char *s, unsigned int gfp); ++#endif ++#endif /* < 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++#define pm_message_t u32 ++#ifndef kzalloc ++#define kzalloc _kc_kzalloc ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++ ++/* Generic MII registers. */ ++#define MII_ESTATUS 0x0f /* Extended Status */ ++/* Basic mode status register. */ ++#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ ++/* Extended status register. */ ++#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ ++#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ ++ ++#define SUPPORTED_Pause (1 << 13) ++#define SUPPORTED_Asym_Pause (1 << 14) ++#define ADVERTISED_Pause (1 << 13) ++#define ADVERTISED_Asym_Pause (1 << 14) ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) ++#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) ++#define gfp_t unsigned ++#else ++typedef unsigned gfp_t; ++#endif ++#endif /* !RHEL4.3->RHEL5.0 */ ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) ++#ifdef CONFIG_X86_64 ++#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ ++ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) ++#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ ++ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) ++#endif ++#endif ++#endif /* < 2.6.14 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) ++#ifndef kfree_rcu ++/* this is placed here due to a lack of rcu_barrier in previous kernels */ ++#define kfree_rcu(_ptr, _offset) kfree(_ptr) ++#endif /* kfree_rcu */ ++#ifndef vmalloc_node ++#define vmalloc_node(a,b) vmalloc(a) ++#endif /* vmalloc_node*/ ++ ++#define setup_timer(_timer, _function, _data) \ ++do { \ ++ (_timer)->function = _function; \ ++ (_timer)->data = _data; \ ++ init_timer(_timer); \ ++} while (0) ++#ifndef device_can_wakeup ++#define device_can_wakeup(dev) (1) ++#endif ++#ifndef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) do{}while(0) ++#endif ++#ifndef device_init_wakeup ++#define 
device_init_wakeup(dev,val) do {} while (0) ++#endif ++static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) ++{ ++ const u16 *a = (const u16 *) addr1; ++ const u16 *b = (const u16 *) addr2; ++ ++ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; ++} ++#undef compare_ether_addr ++#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) ++#endif /* < 2.6.15 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) ++#undef DEFINE_MUTEX ++#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) ++#define mutex_lock(x) down_interruptible(x) ++#define mutex_unlock(x) up(x) ++ ++#ifndef ____cacheline_internodealigned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp ++#else ++#define ____cacheline_internodealigned_in_smp ++#endif /* CONFIG_SMP */ ++#endif /* ____cacheline_internodealigned_in_smp */ ++#undef HAVE_PCI_ERS ++#else /* 2.6.16 and above */ ++#undef HAVE_PCI_ERS ++#define HAVE_PCI_ERS ++#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) ++#ifdef device_can_wakeup ++#undef device_can_wakeup ++#endif /* device_can_wakeup */ ++#define device_can_wakeup(dev) 1 ++#endif /* SLE_VERSION(10,4,0) */ ++#endif /* < 2.6.16 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) ++#ifndef dev_notice ++#define dev_notice(dev, fmt, args...) \ ++ dev_printk(KERN_NOTICE, dev, fmt, ## args) ++#endif ++ ++#ifndef first_online_node ++#define first_online_node 0 ++#endif ++#ifndef NET_SKB_PAD ++#define NET_SKB_PAD 16 ++#endif ++#endif /* < 2.6.17 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) ++ ++#ifndef IRQ_HANDLED ++#define irqreturn_t void ++#define IRQ_HANDLED ++#define IRQ_NONE ++#endif ++ ++#ifndef IRQF_PROBE_SHARED ++#ifdef SA_PROBEIRQ ++#define IRQF_PROBE_SHARED SA_PROBEIRQ ++#else ++#define IRQF_PROBE_SHARED 0 ++#endif ++#endif ++ ++#ifndef IRQF_SHARED ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#ifndef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++ ++#ifndef FIELD_SIZEOF ++#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) ++#endif ++ ++#ifndef skb_is_gso ++#ifdef NETIF_F_TSO ++#define skb_is_gso _kc_skb_is_gso ++static inline int _kc_skb_is_gso(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_size; ++} ++#else ++#define skb_is_gso(a) 0 ++#endif ++#endif ++ ++#ifndef resource_size_t ++#define resource_size_t unsigned long ++#endif ++ ++#ifdef skb_pad ++#undef skb_pad ++#endif ++#define skb_pad(x,y) _kc_skb_pad(x, y) ++int _kc_skb_pad(struct sk_buff *skb, int pad); ++#ifdef skb_padto ++#undef skb_padto ++#endif ++#define skb_padto(x,y) _kc_skb_padto(x, y) ++static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int size = skb->len; ++ if(likely(size >= len)) ++ return 0; ++ return _kc_skb_pad(skb, len - size); ++} ++ ++#ifndef DECLARE_PCI_UNMAP_ADDR ++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ ++ dma_addr_t ADDR_NAME ++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ ++ u32 LEN_NAME ++#define pci_unmap_addr(PTR, ADDR_NAME) \ ++ ((PTR)->ADDR_NAME) ++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ ++ (((PTR)->ADDR_NAME) = (VAL)) ++#define pci_unmap_len(PTR, LEN_NAME) \ ++ ((PTR)->LEN_NAME) ++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ ++ (((PTR)->LEN_NAME) 
= (VAL)) ++#endif /* DECLARE_PCI_UNMAP_ADDR */ ++#endif /* < 2.6.18 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++enum pcie_link_width { ++ PCIE_LNK_WIDTH_RESRV = 0x00, ++ PCIE_LNK_X1 = 0x01, ++ PCIE_LNK_X2 = 0x02, ++ PCIE_LNK_X4 = 0x04, ++ PCIE_LNK_X8 = 0x08, ++ PCIE_LNK_X12 = 0x0C, ++ PCIE_LNK_X16 = 0x10, ++ PCIE_LNK_X32 = 0x20, ++ PCIE_LNK_WIDTH_UNKNOWN = 0xFF, ++}; ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) ++#define i_private u.generic_ip ++#endif /* >= RHEL 5.0 */ ++ ++#ifndef DIV_ROUND_UP ++#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) ++#endif ++#ifndef __ALIGN_MASK ++#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) ++#endif ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) ++#if (!((RHEL_RELEASE_CODE && \ ++ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) ++typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); ++#endif ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++#undef CONFIG_INET_LRO ++#undef CONFIG_INET_LRO_MODULE ++#endif ++typedef irqreturn_t (*new_handler_t)(int, void*); ++static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#else /* 2.4.x */ ++typedef void (*irq_handler_t)(int, void*, struct pt_regs *); ++typedef void (*new_handler_t)(int, void*); ++static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#endif /* >= 2.5.x */ ++{ ++ irq_handler_t new_handler = (irq_handler_t) handler; ++ return request_irq(irq, new_handler, flags, devname, dev_id); ++} ++ ++#undef request_irq ++#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) ++ ++#define irq_handler_t new_handler_t ++/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) ++#define PCIE_CONFIG_SPACE_LEN 256 ++#define PCI_CONFIG_SPACE_LEN 64 ++#define PCIE_LINK_STATUS 0x12 ++#define pci_config_space_ich8lan() do {} while(0) ++#undef pci_save_state ++extern int _kc_pci_save_state(struct pci_dev *); ++#define pci_save_state(pdev) _kc_pci_save_state(pdev) ++#undef pci_restore_state ++extern void _kc_pci_restore_state(struct pci_dev *); ++#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) ++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ ++ ++#ifdef HAVE_PCI_ERS ++#undef free_netdev ++extern void _kc_free_netdev(struct net_device *); ++#define free_netdev(netdev) _kc_free_netdev(netdev) ++#endif ++static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) ++{ ++ return 0; ++} ++#define pci_disable_pcie_error_reporting(dev) do {} while (0) ++#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) ++ ++extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); ++#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) ++#ifndef bool ++#define bool _Bool ++#define true 1 ++#define false 0 ++#endif ++#else /* 2.6.19 */ ++#include ++#include ++#include ++#endif /* < 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) ++#if ( LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,5,28) ) ++#undef INIT_WORK ++#define INIT_WORK(_work, _func) \ ++do { \ ++ INIT_LIST_HEAD(&(_work)->entry); \ ++ (_work)->pending = 0; \ ++ (_work)->func = (void (*)(void *))_func; \ ++ (_work)->data = _work; \ ++ init_timer(&(_work)->timer); \ ++} while (0) ++#endif ++ ++#ifndef PCI_VDEVICE ++#define PCI_VDEVICE(ven, dev) \ ++ PCI_VENDOR_ID_##ven, (dev), \ ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0 ++#endif ++ ++#ifndef PCI_VENDOR_ID_INTEL ++#define PCI_VENDOR_ID_INTEL 0x8086 ++#endif ++ ++#ifndef round_jiffies ++#define round_jiffies(x) x ++#endif ++ ++#define csum_offset csum ++ ++#define HAVE_EARLY_VMALLOC_NODE ++#define dev_to_node(dev) -1 ++#undef set_dev_node ++/* remove compiler warning with b=b, for unused variable */ ++#define set_dev_node(a, b) do { (b) = (b); } while(0) ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) ++typedef __u16 __bitwise __sum16; ++typedef __u32 __bitwise __wsum; ++#endif ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) ++static inline __wsum csum_unfold(__sum16 n) ++{ ++ return (__force __wsum)n; ++} ++#endif ++ ++#else /* < 2.6.20 */ ++#define HAVE_DEVICE_NUMA_NODE ++#endif /* < 2.6.20 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define to_net_dev(class) container_of(class, struct net_device, class_dev) ++#define NETDEV_CLASS_DEV ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) ++#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) ++#define vlan_group_set_device(vg, id, dev) \ ++ do { \ ++ if (vg) vg->vlan_devices[id] = dev; \ ++ } while (0) ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ ++#define pci_channel_offline(pdev) (pdev->error_state && \ ++ pdev->error_state != pci_channel_io_normal) ++#define pci_request_selected_regions(pdev, bars, name) \ ++ pci_request_regions(pdev, name) ++#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); ++ ++#ifndef __aligned ++#define __aligned(x) __attribute__((aligned(x))) ++#endif ++ ++extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); ++#define netdev_to_dev(netdev) \ ++ pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) ++#else ++static inline struct device *netdev_to_dev(struct net_device *netdev) ++{ ++ return &netdev->dev; ++} ++ ++#endif /* < 2.6.21 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define tcp_hdr(skb) (skb->h.th) ++#define tcp_hdrlen(skb) (skb->h.th->doff << 2) ++#define skb_transport_offset(skb) (skb->h.raw - skb->data) ++#define skb_transport_header(skb) (skb->h.raw) ++#define ipv6_hdr(skb) (skb->nh.ipv6h) ++#define ip_hdr(skb) (skb->nh.iph) ++#define skb_network_offset(skb) (skb->nh.raw - skb->data) ++#define skb_network_header(skb) (skb->nh.raw) ++#define skb_tail_pointer(skb) skb->tail ++#define skb_reset_tail_pointer(skb) \ ++ do { \ ++ skb->tail = skb->data; \ ++ } while (0) ++#define skb_set_tail_pointer(skb, offset) \ ++ do { \ ++ skb->tail = 
skb->data + offset; \ ++ } while (0) ++#define skb_copy_to_linear_data(skb, from, len) \ ++ memcpy(skb->data, from, len) ++#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ ++ memcpy(skb->data + offset, from, len) ++#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) ++#define pci_register_driver pci_module_init ++#define skb_mac_header(skb) skb->mac.raw ++ ++#ifdef NETIF_F_MULTI_QUEUE ++#ifndef alloc_etherdev_mq ++#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) ++#endif ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef ETH_FCS_LEN ++#define ETH_FCS_LEN 4 ++#endif ++#define cancel_work_sync(x) flush_scheduled_work() ++#ifndef udp_hdr ++#define udp_hdr _udp_hdr ++static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) ++{ ++ return (struct udphdr *)skb_transport_header(skb); ++} ++#endif ++ ++#ifdef cpu_to_be16 ++#undef cpu_to_be16 ++#endif ++#define cpu_to_be16(x) __constant_htons(x) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) ++enum { ++ DUMP_PREFIX_NONE, ++ DUMP_PREFIX_ADDRESS, ++ DUMP_PREFIX_OFFSET ++}; ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ ++#ifndef hex_asc ++#define hex_asc(x) "0123456789abcdef"[x] ++#endif ++#include ++extern void _kc_print_hex_dump(const char *level, const char *prefix_str, ++ int prefix_type, int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii); ++#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ ++ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) ++#ifndef ADVERTISED_2500baseX_Full ++#define ADVERTISED_2500baseX_Full (1 << 15) ++#endif ++#ifndef SUPPORTED_2500baseX_Full ++#define SUPPORTED_2500baseX_Full (1 << 15) ++#endif ++ ++#ifdef HAVE_I2C_SUPPORT ++#include ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) ++struct i2c_board_info { ++ char driver_name[KOBJ_NAME_LEN]; ++ char type[I2C_NAME_SIZE]; ++ unsigned short flags; ++ unsigned short addr; ++ void *platform_data; ++}; ++#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\ ++ .addr = (dev_addr) ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ ++#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info) ++extern struct i2c_client * ++_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); ++#endif /* HAVE_I2C_SUPPORT */ ++ ++#ifndef ETH_P_PAUSE ++#define ETH_P_PAUSE 0x8808 ++#endif ++ ++#else /* 2.6.22 */ ++#define ETH_TYPE_TRANS_SETS_DEV ++#define HAVE_NETDEV_STATS_IN_NETDEV ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) ++#undef SET_MODULE_OWNER ++#define SET_MODULE_OWNER(dev) do { } while (0) ++#endif /* > 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) ++#define netif_subqueue_stopped(_a, _b) 0 ++#ifndef PTR_ALIGN ++#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) ++#endif ++ ++#ifndef CONFIG_PM_SLEEP ++#define CONFIG_PM_SLEEP CONFIG_PM ++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) ++#define HAVE_ETHTOOL_GET_PERM_ADDR ++#endif /* 2.6.14 through 2.6.22 */ ++ ++static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) ++{ ++ int delta = 0; ++ ++ if (headroom > (skb->data - skb->head)) ++ delta = headroom - (skb->data - skb->head); ++ ++ if (delta || skb_header_cloned(skb)) ++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 
0, ++ GFP_ATOMIC); ++ return 0; ++} ++#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) ++#endif /* < 2.6.23 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifndef ETH_FLAG_LRO ++#define ETH_FLAG_LRO NETIF_F_LRO ++#endif ++ ++#ifndef ACCESS_ONCE ++#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#endif ++ ++/* if GRO is supported then the napi struct must already exist */ ++#ifndef NETIF_F_GRO ++/* NAPI API changes in 2.6.24 break everything */ ++struct napi_struct { ++ /* used to look up the real NAPI polling routine */ ++ int (*poll)(struct napi_struct *, int); ++ struct net_device *dev; ++ int weight; ++}; ++#endif ++ ++#ifdef NAPI ++extern int __kc_adapter_clean(struct net_device *, int *); ++/* The following definitions are multi-queue aware, and thus we have a driver ++ * define list which determines which drivers support multiple queues, and ++ * thus need these stronger defines. If a driver does not support multi-queue ++ * functionality, you don't need to add it to this list. ++ */ ++extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); ++ ++static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, ++ int (*poll)(struct napi_struct *, int), int weight) ++{ ++ struct net_device *poll_dev = napi_to_poll_dev(napi); ++ poll_dev->poll = __kc_adapter_clean; ++ poll_dev->priv = napi; ++ poll_dev->weight = weight; ++ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); ++ set_bit(__LINK_STATE_START, &poll_dev->state); ++ dev_hold(poll_dev); ++ napi->poll = poll; ++ napi->weight = weight; ++ napi->dev = dev; ++} ++#define netif_napi_add __kc_mq_netif_napi_add ++ ++static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) ++{ ++ struct net_device *poll_dev = napi_to_poll_dev(napi); ++ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); ++ dev_put(poll_dev); ++ memset(poll_dev, 0, sizeof(struct net_device)); ++} ++ ++#define netif_napi_del __kc_mq_netif_napi_del ++ ++static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) ++{ ++ return netif_running(napi->dev) && ++ netif_rx_schedule_prep(napi_to_poll_dev(napi)); ++} ++#define napi_schedule_prep __kc_mq_napi_schedule_prep ++ ++static inline void __kc_mq_napi_schedule(struct napi_struct *napi) ++{ ++ if (napi_schedule_prep(napi)) ++ __netif_rx_schedule(napi_to_poll_dev(napi)); ++} ++#define napi_schedule __kc_mq_napi_schedule ++ ++#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) ++#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) ++#ifdef CONFIG_SMP ++static inline void napi_synchronize(const struct napi_struct *n) ++{ ++ struct net_device *dev = napi_to_poll_dev(n); ++ ++ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { ++ /* No hurry. */ ++ msleep(1); ++ } ++} ++#else ++#define napi_synchronize(n) barrier() ++#endif /* CONFIG_SMP */ ++#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) ++static inline void _kc_napi_complete(struct napi_struct *napi) ++{ ++#ifdef NETIF_F_GRO ++ napi_gro_flush(napi); ++#endif ++ netif_rx_complete(napi_to_poll_dev(napi)); ++} ++#define napi_complete _kc_napi_complete ++#else /* NAPI */ ++ ++/* The following definitions are only used if we don't support NAPI at all. 
*/ ++ ++static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, ++ int (*poll)(struct napi_struct *, int), int weight) ++{ ++ dev->poll = poll; ++ dev->weight = weight; ++ napi->poll = poll; ++ napi->weight = weight; ++ napi->dev = dev; ++} ++#define netif_napi_del(_a) do {} while (0) ++#endif /* NAPI */ ++ ++#undef dev_get_by_name ++#define dev_get_by_name(_a, _b) dev_get_by_name(_b) ++#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) ++#endif ++ ++#ifdef NETIF_F_TSO6 ++#define skb_is_gso_v6 _kc_skb_is_gso_v6 ++static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; ++} ++#endif /* NETIF_F_TSO6 */ ++ ++#ifndef KERN_CONT ++#define KERN_CONT "" ++#endif ++#ifndef pr_err ++#define pr_err(fmt, arg...) \ ++ printk(KERN_ERR fmt, ##arg) ++#endif ++ ++#ifndef rounddown_pow_of_two ++#define rounddown_pow_of_two(n) \ ++ __builtin_constant_p(n) ? ( \ ++ (n == 1) ? 0 : \ ++ (1UL << ilog2(n))) : \ ++ (1UL << (fls_long(n) - 1)) ++#endif ++ ++#ifndef BIT ++#define BIT(nr) (1UL << (nr)) ++#endif ++ ++#else /* < 2.6.24 */ ++#define HAVE_ETHTOOL_GET_SSET_COUNT ++#define HAVE_NETDEV_NAPI_LIST ++#endif /* < 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) ++#define INCLUDE_PM_QOS_PARAMS_H ++#include ++#else /* >= 3.2.0 */ ++#include ++#endif /* else >= 3.2.0 */ ++#endif /* > 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) ++#define PM_QOS_CPU_DMA_LATENCY 1 ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) ++#include ++#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY ++#define pm_qos_add_requirement(pm_qos_class, name, value) \ ++ set_acceptable_latency(name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) \ ++ remove_acceptable_latency(name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) \ ++ modify_acceptable_latency(name, value) ++#else ++#define PM_QOS_DEFAULT_VALUE -1 ++#define pm_qos_add_requirement(pm_qos_class, name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) { \ ++ if (value != PM_QOS_DEFAULT_VALUE) { \ ++ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ ++ pci_name(adapter->pdev)); \ ++ } \ ++} ++ ++#endif /* > 2.6.18 */ ++ ++#define pci_enable_device_mem(pdev) pci_enable_device(pdev) ++ ++#ifndef DEFINE_PCI_DEVICE_TABLE ++#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] ++#endif /* DEFINE_PCI_DEVICE_TABLE */ ++ ++#ifndef strict_strtol ++#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) ++static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) ++{ ++ /* adapted from strict_strtoul() in 2.6.25 */ ++ char *tail; ++ long val; ++ size_t len; ++ ++ *res = 0; ++ len = strlen(buf); ++ if (!len) ++ return -EINVAL; ++ val = simple_strtol(buf, &tail, base); ++ if (tail == buf) ++ return -EINVAL; ++ if ((*tail == '\0') || ++ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { ++ *res = val; ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++#endif ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++#ifndef IGB_PROCFS ++#define IGB_PROCFS ++#endif /* IGB_PROCFS 
*/ ++#endif /* >= 2.6.0 */ ++ ++#else /* < 2.6.25 */ ++ ++#if IS_ENABLED(CONFIG_HWMON) ++#ifndef IGB_HWMON ++#define IGB_HWMON ++#endif /* IGB_HWMON */ ++#endif /* CONFIG_HWMON */ ++ ++#endif /* < 2.6.25 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++#ifndef clamp_t ++#define clamp_t(type, val, min, max) ({ \ ++ type __val = (val); \ ++ type __min = (min); \ ++ type __max = (max); \ ++ __val = __val < __min ? __min : __val; \ ++ __val > __max ? __max : __val; }) ++#endif /* clamp_t */ ++#undef kzalloc_node ++#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) ++ ++extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); ++#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) ++#else /* < 2.6.26 */ ++#define NETDEV_CAN_SET_GSO_MAX_SIZE ++#include ++#define HAVE_NETDEV_VLAN_FEATURES ++#ifndef PCI_EXP_LNKCAP_ASPMS ++#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ ++#endif /* PCI_EXP_LNKCAP_ASPMS */ ++#endif /* < 2.6.26 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, ++ __u32 speed) ++{ ++ ep->speed = (__u16)speed; ++ /* ep->speed_hi = (__u16)(speed >> 16); */ ++} ++#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set ++ ++static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) ++{ ++ /* no speed_hi before 2.6.27, and probably no need for it yet */ ++ return (__u32)ep->speed; ++} ++#define ethtool_cmd_speed _kc_ethtool_cmd_speed ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) ++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) ++#define ANCIENT_PM 1 ++#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ ++ defined(CONFIG_PM_SLEEP)) ++#define NEWER_PM 1 ++#endif ++#if defined(ANCIENT_PM) || defined(NEWER_PM) ++#undef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) \ ++ do { \ ++ u16 pmc = 0; \ ++ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ ++ if (pm) { \ ++ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ ++ &pmc); \ ++ } \ ++ (dev)->power.can_wakeup = !!(pmc >> 11); \ ++ (dev)->power.should_wakeup = (val && (pmc >> 11)); \ ++ } while (0) ++#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ ++#endif /* 2.6.15 through 2.6.27 */ ++#ifndef netif_napi_del ++#define netif_napi_del(_a) do {} while (0) ++#ifdef NAPI ++#ifdef CONFIG_NETPOLL ++#undef netif_napi_del ++#define netif_napi_del(_a) list_del(&(_a)->dev_list); ++#endif ++#endif ++#endif /* netif_napi_del */ ++#ifdef dma_mapping_error ++#undef dma_mapping_error ++#endif ++#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) ++ ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++#define HAVE_TX_MQ ++#endif ++ ++#ifdef HAVE_TX_MQ ++extern void _kc_netif_tx_stop_all_queues(struct net_device *); ++extern void _kc_netif_tx_wake_all_queues(struct net_device *); ++extern void _kc_netif_tx_start_all_queues(struct net_device *); ++#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) ++#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) ++#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) ++#undef netif_stop_subqueue ++#define netif_stop_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ 
netif_stop_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_stop_queue((_ndev)); \ ++ } while (0) ++#undef netif_start_subqueue ++#define netif_start_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ netif_start_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_start_queue((_ndev)); \ ++ } while (0) ++#else /* HAVE_TX_MQ */ ++#define netif_tx_stop_all_queues(a) netif_stop_queue(a) ++#define netif_tx_wake_all_queues(a) netif_wake_queue(a) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) ++#define netif_tx_start_all_queues(a) netif_start_queue(a) ++#else ++#define netif_tx_start_all_queues(a) do {} while (0) ++#endif ++#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) ++#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) ++#endif /* HAVE_TX_MQ */ ++#ifndef NETIF_F_MULTI_QUEUE ++#define NETIF_F_MULTI_QUEUE 0 ++#define netif_is_multiqueue(a) 0 ++#define netif_wake_subqueue(a, b) ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef __WARN_printf ++extern void __kc_warn_slowpath(const char *file, const int line, ++ const char *fmt, ...) __attribute__((format(printf, 3, 4))); ++#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) ++#endif /* __WARN_printf */ ++ ++#ifndef WARN ++#define WARN(condition, format...) ({ \ ++ int __ret_warn_on = !!(condition); \ ++ if (unlikely(__ret_warn_on)) \ ++ __WARN_printf(format); \ ++ unlikely(__ret_warn_on); \ ++}) ++#endif /* WARN */ ++#undef HAVE_IXGBE_DEBUG_FS ++#undef HAVE_IGB_DEBUG_FS ++#else /* < 2.6.27 */ ++#define HAVE_TX_MQ ++#define HAVE_NETDEV_SELECT_QUEUE ++#ifdef CONFIG_DEBUG_FS ++#define HAVE_IXGBE_DEBUG_FS ++#define HAVE_IGB_DEBUG_FS ++#endif /* CONFIG_DEBUG_FS */ ++#endif /* < 2.6.27 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ ++ pci_resource_len(pdev, bar)) ++#define pci_wake_from_d3 _kc_pci_wake_from_d3 ++#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep ++extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); ++extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); ++#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) ++#ifndef __skb_queue_head_init ++static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) ++{ ++ list->prev = list->next = (struct sk_buff *)list; ++ list->qlen = 0; ++} ++#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) ++#endif ++ ++#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ ++#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ ++ ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++#ifndef swap ++#define swap(a, b) \ ++ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) ++#endif ++#define pci_request_selected_regions_exclusive(pdev, bars, name) \ ++ pci_request_selected_regions(pdev, bars, name) ++#ifndef CONFIG_NR_CPUS ++#define CONFIG_NR_CPUS 1 ++#endif /* CONFIG_NR_CPUS */ ++#ifndef pcie_aspm_enabled ++#define pcie_aspm_enabled() (1) ++#endif /* pcie_aspm_enabled */ ++ ++#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ ++ ++#ifndef PCI_EXP_LNKSTA_CLS ++#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ ++#endif ++#ifndef PCI_EXP_LNKSTA_NLW ++#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ ++#endif ++ ++#ifndef pci_clear_master ++extern void _kc_pci_clear_master(struct 
pci_dev *dev); ++#define pci_clear_master(dev) _kc_pci_clear_master(dev) ++#endif ++ ++#ifndef PCI_EXP_LNKCTL_ASPMC ++#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ ++#endif ++#else /* < 2.6.29 */ ++#ifndef HAVE_NET_DEVICE_OPS ++#define HAVE_NET_DEVICE_OPS ++#endif ++#ifdef CONFIG_DCB ++#define HAVE_PFC_MODE_ENABLE ++#endif /* CONFIG_DCB */ ++#endif /* < 2.6.29 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) ++#define NO_PTP_SUPPORT ++#define skb_rx_queue_recorded(a) false ++#define skb_get_rx_queue(a) 0 ++#define skb_record_rx_queue(a, b) do {} while (0) ++#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) ++#ifndef CONFIG_PCI_IOV ++#undef pci_enable_sriov ++#define pci_enable_sriov(a, b) -ENOTSUPP ++#undef pci_disable_sriov ++#define pci_disable_sriov(a) do {} while (0) ++#endif /* CONFIG_PCI_IOV */ ++#ifndef pr_cont ++#define pr_cont(fmt, ...) \ ++ printk(KERN_CONT fmt, ##__VA_ARGS__) ++#endif /* pr_cont */ ++static inline void _kc_synchronize_irq(unsigned int a) ++{ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ synchronize_irq(); ++#else /* < 2.5.28 */ ++ synchronize_irq(a); ++#endif /* < 2.5.28 */ ++} ++#undef synchronize_irq ++#define synchronize_irq(a) _kc_synchronize_irq(a) ++ ++#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ ++ ++#ifdef nr_cpus_node ++#undef nr_cpus_node ++#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) ++#endif ++ ++#else /* < 2.6.30 */ ++#define HAVE_ASPM_QUIRKS ++#endif /* < 2.6.30 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) ++#define ETH_P_1588 0x88F7 ++#define ETH_P_FIP 0x8914 ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc_count) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(uclist, dev) \ ++ for (uclist = dev->uc_list; uclist; uclist = uclist->next) ++#endif ++#ifndef PORT_OTHER ++#define PORT_OTHER 0xff ++#endif ++#ifndef MDIO_PHY_ID_PRTAD ++#define MDIO_PHY_ID_PRTAD 0x03e0 ++#endif ++#ifndef MDIO_PHY_ID_DEVAD ++#define MDIO_PHY_ID_DEVAD 0x001f ++#endif ++#ifndef skb_dst ++#define skb_dst(s) ((s)->dst) ++#endif ++ ++#ifndef SUPPORTED_1000baseKX_Full ++#define SUPPORTED_1000baseKX_Full (1 << 17) ++#endif ++#ifndef SUPPORTED_10000baseKX4_Full ++#define SUPPORTED_10000baseKX4_Full (1 << 18) ++#endif ++#ifndef SUPPORTED_10000baseKR_Full ++#define SUPPORTED_10000baseKR_Full (1 << 19) ++#endif ++ ++#ifndef ADVERTISED_1000baseKX_Full ++#define ADVERTISED_1000baseKX_Full (1 << 17) ++#endif ++#ifndef ADVERTISED_10000baseKX4_Full ++#define ADVERTISED_10000baseKX4_Full (1 << 18) ++#endif ++#ifndef ADVERTISED_10000baseKR_Full ++#define ADVERTISED_10000baseKR_Full (1 << 19) ++#endif ++ ++#else /* < 2.6.31 */ ++#ifndef HAVE_NETDEV_STORAGE_ADDRESS ++#define HAVE_NETDEV_STORAGE_ADDRESS ++#endif ++#ifndef HAVE_NETDEV_HW_ADDR ++#define HAVE_NETDEV_HW_ADDR ++#endif ++#ifndef HAVE_TRANS_START_IN_QUEUE ++#define HAVE_TRANS_START_IN_QUEUE ++#endif ++#ifndef HAVE_INCLUDE_LINUX_MDIO_H ++#define HAVE_INCLUDE_LINUX_MDIO_H ++#endif ++#include ++#endif /* < 2.6.31 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) ++#undef netdev_tx_t ++#define netdev_tx_t int ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++static inline int _kc_pm_runtime_get_sync() ++{ ++ return 1; ++} ++#define 
pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() ++#else /* 2.6.0 => 2.6.32 */ ++static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) ++{ ++ return 1; ++} ++#ifndef pm_runtime_get_sync ++#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) ++#endif ++#endif /* 2.6.0 => 2.6.32 */ ++#ifndef pm_runtime_put ++#define pm_runtime_put(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_sync ++#define pm_runtime_put_sync(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_resume ++#define pm_runtime_resume(dev) do {} while (0) ++#endif ++#ifndef pm_schedule_suspend ++#define pm_schedule_suspend(dev, t) do {} while (0) ++#endif ++#ifndef pm_runtime_set_suspended ++#define pm_runtime_set_suspended(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_disable ++#define pm_runtime_disable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_noidle ++#define pm_runtime_put_noidle(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_set_active ++#define pm_runtime_set_active(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_enable ++#define pm_runtime_enable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_get_noresume ++#define pm_runtime_get_noresume(dev) do {} while (0) ++#endif ++#else /* < 2.6.32 */ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_NET_DEVICE_EXTENDED ++#endif /* RHEL >= 6.2 && RHEL < 7.0 */ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_NET_DEVICE_OPS_EXT ++#define HAVE_NDO_SET_FEATURES ++#endif /* RHEL >= 6.6 && RHEL < 7.0 */ ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_OPS_GETAPP ++#define HAVE_DCBNL_OPS_GETAPP ++#endif ++#endif /* CONFIG_DCB */ ++#include ++/* IOV bad DMA target work arounds require at least this kernel rev support */ ++#define HAVE_PCIE_TYPE ++#endif /* < 2.6.32 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#ifndef pci_pcie_cap ++#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) ++#endif ++#ifndef IPV4_FLOW ++#define IPV4_FLOW 0x10 ++#endif /* IPV4_FLOW */ ++#ifndef IPV6_FLOW ++#define IPV6_FLOW 0x11 ++#endif /* IPV6_FLOW */ ++/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ ++#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) ++#endif /* RHEL6 or SLES11 SP1 */ ++#ifndef __percpu ++#define __percpu ++#endif /* __percpu */ ++#ifndef PORT_DA ++#define PORT_DA PORT_OTHER ++#endif ++#ifndef PORT_NONE ++#define PORT_NONE PORT_OTHER ++#endif ++ ++#if ((RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ++#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) ++#undef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME ++#undef DEFINE_DMA_UNMAP_LEN ++#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME ++#undef dma_unmap_addr ++#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) ++#undef dma_unmap_addr_set ++#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) ++#undef dma_unmap_len ++#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) ++#undef dma_unmap_len_set ++#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = 
(VAL)) ++#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ ++ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) ++static inline bool pci_is_pcie(struct pci_dev *dev) ++{ ++ return !!pci_pcie_cap(dev); ++} ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) ++#define sk_tx_queue_get(_sk) (-1) ++#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) ++#endif /* !(RHEL >= 6.2) */ ++ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#define HAVE_ETHTOOL_SET_PHYS_ID ++#define HAVE_ETHTOOL_GET_TS_INFO ++#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) ++#define HAVE_ETHTOOL_GSRSSH ++#define HAVE_RHEL6_SRIOV_CONFIGURE ++#define HAVE_RXFH_NONCONST ++#endif /* RHEL > 6.5 */ ++#endif /* RHEL >= 6.4 && RHEL < 7.0 */ ++ ++#else /* < 2.6.33 */ ++#endif /* < 2.6.33 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++#ifndef pci_num_vf ++#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) ++extern int _kc_pci_num_vf(struct pci_dev *dev); ++#endif ++#endif /* RHEL_RELEASE_CODE */ ++ ++#ifndef ETH_FLAG_NTUPLE ++#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE ++#endif ++ ++#ifndef netdev_mc_count ++#define netdev_mc_count(dev) ((dev)->mc_count) ++#endif ++#ifndef netdev_mc_empty ++#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(mclist, dev) \ ++ for (mclist = dev->mc_list; mclist; mclist = mclist->next) ++#endif ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc.count) ++#endif ++#ifndef netdev_uc_empty ++#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(ha, dev) \ ++ list_for_each_entry(ha, &dev->uc.list, list) ++#endif ++#ifndef dma_set_coherent_mask ++#define dma_set_coherent_mask(dev,mask) \ ++ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) ++#endif ++#ifndef pci_dev_run_wake ++#define pci_dev_run_wake(pdev) (0) ++#endif ++ ++/* netdev logging taken from include/linux/netdevice.h */ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (dev->reg_state != NETREG_REGISTERED) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#undef netdev_printk ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#define netdev_printk(level, netdev, format, args...) \ ++do { \ ++ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ ++ printk(level "%s: " format, pci_name(pdev), ##args); \ ++} while(0) ++#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define netdev_printk(level, netdev, format, args...) 
\ ++do { \ ++ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ ++ struct device *dev = pci_dev_to_dev(pdev); \ ++ dev_printk(level, dev, "%s: " format, \ ++ netdev_name(netdev), ##args); \ ++} while(0) ++#else /* 2.6.21 => 2.6.34 */ ++#define netdev_printk(level, netdev, format, args...) \ ++ dev_printk(level, (netdev)->dev.parent, \ ++ "%s: " format, \ ++ netdev_name(netdev), ##args) ++#endif /* <2.6.0 <2.6.21 <2.6.34 */ ++#undef netdev_emerg ++#define netdev_emerg(dev, format, args...) \ ++ netdev_printk(KERN_EMERG, dev, format, ##args) ++#undef netdev_alert ++#define netdev_alert(dev, format, args...) \ ++ netdev_printk(KERN_ALERT, dev, format, ##args) ++#undef netdev_crit ++#define netdev_crit(dev, format, args...) \ ++ netdev_printk(KERN_CRIT, dev, format, ##args) ++#undef netdev_err ++#define netdev_err(dev, format, args...) \ ++ netdev_printk(KERN_ERR, dev, format, ##args) ++#undef netdev_warn ++#define netdev_warn(dev, format, args...) \ ++ netdev_printk(KERN_WARNING, dev, format, ##args) ++#undef netdev_notice ++#define netdev_notice(dev, format, args...) \ ++ netdev_printk(KERN_NOTICE, dev, format, ##args) ++#undef netdev_info ++#define netdev_info(dev, format, args...) \ ++ netdev_printk(KERN_INFO, dev, format, ##args) ++#undef netdev_dbg ++#if defined(DEBUG) ++#define netdev_dbg(__dev, format, args...) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args) ++#elif defined(CONFIG_DYNAMIC_DEBUG) ++#define netdev_dbg(__dev, format, args...) \ ++do { \ ++ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ ++ netdev_name(__dev), ##args); \ ++} while (0) ++#else /* DEBUG */ ++#define netdev_dbg(__dev, format, args...) \ ++({ \ ++ if (0) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ ++ 0; \ ++}) ++#endif /* DEBUG */ ++ ++#undef netif_printk ++#define netif_printk(priv, type, level, dev, fmt, args...) \ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_printk(level, (dev), fmt, ##args); \ ++} while (0) ++ ++#undef netif_emerg ++#define netif_emerg(priv, type, dev, fmt, args...) \ ++ netif_level(emerg, priv, type, dev, fmt, ##args) ++#undef netif_alert ++#define netif_alert(priv, type, dev, fmt, args...) \ ++ netif_level(alert, priv, type, dev, fmt, ##args) ++#undef netif_crit ++#define netif_crit(priv, type, dev, fmt, args...) \ ++ netif_level(crit, priv, type, dev, fmt, ##args) ++#undef netif_err ++#define netif_err(priv, type, dev, fmt, args...) \ ++ netif_level(err, priv, type, dev, fmt, ##args) ++#undef netif_warn ++#define netif_warn(priv, type, dev, fmt, args...) \ ++ netif_level(warn, priv, type, dev, fmt, ##args) ++#undef netif_notice ++#define netif_notice(priv, type, dev, fmt, args...) \ ++ netif_level(notice, priv, type, dev, fmt, ##args) ++#undef netif_info ++#define netif_info(priv, type, dev, fmt, args...) \ ++ netif_level(info, priv, type, dev, fmt, ##args) ++#undef netif_dbg ++#define netif_dbg(priv, type, dev, fmt, args...) 
\ ++ netif_level(dbg, priv, type, dev, fmt, ##args) ++ ++#ifdef SET_SYSTEM_SLEEP_PM_OPS ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#endif ++ ++#ifndef for_each_set_bit ++#define for_each_set_bit(bit, addr, size) \ ++ for ((bit) = find_first_bit((addr), (size)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++#endif /* for_each_set_bit */ ++ ++#ifndef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN ++#define dma_unmap_addr pci_unmap_addr ++#define dma_unmap_addr_set pci_unmap_addr_set ++#define dma_unmap_len pci_unmap_len ++#define dma_unmap_len_set pci_unmap_len_set ++#endif /* DEFINE_DMA_UNMAP_ADDR */ ++ ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) ++#ifdef IGB_HWMON ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++#define sysfs_attr_init(attr) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ (attr)->key = &__key; \ ++ } while (0) ++#else ++#define sysfs_attr_init(attr) do {} while (0) ++#endif /* CONFIG_DEBUG_LOCK_ALLOC */ ++#endif /* IGB_HWMON */ ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++static inline bool _kc_pm_runtime_suspended() ++{ ++ return false; ++} ++#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() ++#else /* 2.6.0 => 2.6.34 */ ++static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) ++{ ++ return false; ++} ++#ifndef pm_runtime_suspended ++#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) ++#endif ++#endif /* 2.6.0 => 2.6.34 */ ++ ++#ifndef pci_bus_speed ++/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ ++enum _kc_pci_bus_speed { ++ _KC_PCIE_SPEED_2_5GT = 0x14, ++ _KC_PCIE_SPEED_5_0GT = 0x15, ++ _KC_PCIE_SPEED_8_0GT = 0x16, ++ _KC_PCI_SPEED_UNKNOWN = 0xff, ++}; ++#define pci_bus_speed _kc_pci_bus_speed ++#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT ++#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT ++#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT ++#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN ++#endif /* pci_bus_speed */ ++ ++#else /* < 2.6.34 */ ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#ifndef HAVE_SET_RX_MODE ++#define HAVE_SET_RX_MODE ++#endif ++ ++#endif /* < 2.6.34 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, ++ const void __user *from, size_t count); ++#define simple_write_to_buffer _kc_simple_write_to_buffer ++ ++#ifndef PCI_EXP_LNKSTA_NLW_SHIFT ++#define PCI_EXP_LNKSTA_NLW_SHIFT 4 ++#endif ++ ++#ifndef numa_node_id ++#define numa_node_id() 0 ++#endif ++#ifndef numa_mem_id ++#define numa_mem_id numa_node_id ++#endif ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) ++#ifdef HAVE_TX_MQ ++#include ++#ifndef CONFIG_NETDEVICES_MULTIQUEUE ++int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); ++#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ dev->egress_subqueue_count = txq; ++ return 0; ++} ++#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ ++#else /* HAVE_TX_MQ */ ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, ++ unsigned int __always_unused txq) ++{ ++ return 0; ++} ++#endif /* HAVE_TX_MQ */ ++#define netif_set_real_num_tx_queues(dev, txq) \ ++ _kc_netif_set_real_num_tx_queues(dev, txq) 
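++/* Editor's note (illustrative, not part of the original kcompat source): on
++ * kernels older than 2.6.35 the _kc_netif_set_real_num_tx_queues() wrapper
++ * above either calls an out-of-line helper, records the count in
++ * egress_subqueue_count (when CONFIG_NETDEVICES_MULTIQUEUE is set), or
++ * degenerates to a no-op, so driver code can keep the modern calling
++ * convention everywhere, e.g.
++ *     netif_set_real_num_tx_queues(netdev, num_tx_queues);
++ * where netdev and num_tx_queues stand in for the driver's own variables.
++ */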
++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ ++#ifndef ETH_FLAG_RXHASH ++#define ETH_FLAG_RXHASH (1<<28) ++#endif /* ETH_FLAG_RXHASH */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) ++#define HAVE_IRQ_AFFINITY_HINT ++#endif ++#else /* < 2.6.35 */ ++#define HAVE_PM_QOS_REQUEST_LIST ++#define HAVE_IRQ_AFFINITY_HINT ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); ++#define ethtool_op_set_flags _kc_ethtool_op_set_flags ++extern u32 _kc_ethtool_op_get_flags(struct net_device *); ++#define ethtool_op_get_flags _kc_ethtool_op_get_flags ++ ++enum { ++ WQ_UNBOUND = 0, ++ WQ_RESCUER = 0, ++}; ++ ++#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#ifdef NET_IP_ALIGN ++#undef NET_IP_ALIGN ++#endif ++#define NET_IP_ALIGN 0 ++#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ ++ ++#ifdef NET_SKB_PAD ++#undef NET_SKB_PAD ++#endif ++ ++#if (L1_CACHE_BYTES > 32) ++#define NET_SKB_PAD L1_CACHE_BYTES ++#else ++#define NET_SKB_PAD 32 ++#endif ++ ++static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) ++{ ++ struct sk_buff *skb; ++ ++ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); ++ if (skb) { ++#if (NET_IP_ALIGN + NET_SKB_PAD) ++ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); ++#endif ++ skb->dev = dev; ++ } ++ return skb; ++} ++ ++#ifdef netdev_alloc_skb_ip_align ++#undef netdev_alloc_skb_ip_align ++#endif ++#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) ++ ++#undef netif_level ++#define netif_level(level, priv, type, dev, fmt, args...) \ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_##level(dev, fmt, ##args); \ ++} while (0) ++ ++#undef usleep_range ++#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) ++ ++#define u64_stats_update_begin(a) do { } while(0) ++#define u64_stats_update_end(a) do { } while(0) ++#define u64_stats_fetch_begin(a) do { } while(0) ++#define u64_stats_fetch_retry_bh(a,b) (0) ++#define u64_stats_fetch_begin_bh(a) (0) ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) ++#define HAVE_8021P_SUPPORT ++#endif ++ ++/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) ++static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) ++{ ++ return; ++} ++#endif ++ ++#else /* < 2.6.36 */ ++ ++#define HAVE_PM_QOS_REQUEST_ACTIVE ++#define HAVE_8021P_SUPPORT ++#define HAVE_NDO_GET_STATS64 ++#endif /* < 2.6.36 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) ++#define HAVE_NON_CONST_PCI_DRIVER_NAME ++#ifndef netif_set_real_num_tx_queues ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ netif_set_real_num_tx_queues(dev, txq); ++ return 0; ++} ++#define netif_set_real_num_tx_queues(dev, txq) \ ++ _kc_netif_set_real_num_tx_queues(dev, txq) ++#endif ++#ifndef netif_set_real_num_rx_queues ++static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, ++ unsigned int __always_unused rxq) ++{ ++ return 0; ++} ++#define netif_set_real_num_rx_queues(dev, rxq) \ ++ __kc_netif_set_real_num_rx_queues((dev), (rxq)) ++#endif ++#ifndef 
ETHTOOL_RXNTUPLE_ACTION_CLEAR ++#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) ++#endif ++#ifndef VLAN_N_VID ++#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN ++#endif /* VLAN_N_VID */ ++#ifndef ETH_FLAG_TXVLAN ++#define ETH_FLAG_TXVLAN (1 << 7) ++#endif /* ETH_FLAG_TXVLAN */ ++#ifndef ETH_FLAG_RXVLAN ++#define ETH_FLAG_RXVLAN (1 << 8) ++#endif /* ETH_FLAG_RXVLAN */ ++ ++#define WQ_MEM_RECLAIM WQ_RESCUER ++ ++static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) ++{ ++ WARN_ON(skb->ip_summed != CHECKSUM_NONE); ++} ++#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) ++ ++static inline void *_kc_vzalloc_node(unsigned long size, int node) ++{ ++ void *addr = vmalloc_node(size, node); ++ if (addr) ++ memset(addr, 0, size); ++ return addr; ++} ++#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) ++ ++static inline void *_kc_vzalloc(unsigned long size) ++{ ++ void *addr = vmalloc(size); ++ if (addr) ++ memset(addr, 0, size); ++ return addr; ++} ++#define vzalloc(_size) _kc_vzalloc(_size) ++ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ ++ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) ++static inline __be16 vlan_get_protocol(const struct sk_buff *skb) ++{ ++ if (vlan_tx_tag_present(skb) || ++ skb->protocol != cpu_to_be16(ETH_P_8021Q)) ++ return skb->protocol; ++ ++ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) ++ return 0; ++ ++ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; ++} ++#endif /* !RHEL5.7+ || RHEL6.0 */ ++ ++#ifdef HAVE_HW_TIME_STAMP ++#define SKBTX_HW_TSTAMP (1 << 0) ++#define SKBTX_IN_PROGRESS (1 << 2) ++#define SKB_SHARED_TX_IS_UNION ++#endif ++ ++#ifndef device_wakeup_enable ++#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) ++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) ++#ifndef HAVE_VLAN_RX_REGISTER ++#define HAVE_VLAN_RX_REGISTER ++#endif ++#endif /* > 2.4.18 */ ++#endif /* < 2.6.37 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define skb_checksum_start_offset(skb) skb_transport_offset(skb) ++#else /* 2.6.22 -> 2.6.37 */ ++static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) ++{ ++ return skb->csum_start - skb_headroom(skb); ++} ++#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) ++#endif /* 2.6.22 -> 2.6.37 */ ++#if IS_ENABLED(CONFIG_DCB) ++#ifndef IEEE_8021QAZ_MAX_TCS ++#define IEEE_8021QAZ_MAX_TCS 8 ++#endif ++#ifndef DCB_CAP_DCBX_HOST ++#define DCB_CAP_DCBX_HOST 0x01 ++#endif ++#ifndef DCB_CAP_DCBX_LLD_MANAGED ++#define DCB_CAP_DCBX_LLD_MANAGED 0x02 ++#endif ++#ifndef DCB_CAP_DCBX_VER_CEE ++#define DCB_CAP_DCBX_VER_CEE 0x04 ++#endif ++#ifndef DCB_CAP_DCBX_VER_IEEE ++#define DCB_CAP_DCBX_VER_IEEE 0x08 ++#endif ++#ifndef DCB_CAP_DCBX_STATIC ++#define DCB_CAP_DCBX_STATIC 0x10 ++#endif ++#endif /* CONFIG_DCB */ ++#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) ++#define CONFIG_XPS ++#endif /* RHEL_RELEASE_VERSION(6,2) */ ++#endif /* < 2.6.38 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) ++#ifndef TC_BITMASK ++#define TC_BITMASK 15 ++#endif ++#ifndef NETIF_F_RXCSUM ++#define NETIF_F_RXCSUM (1 << 29) ++#endif ++#ifndef skb_queue_reverse_walk_safe ++#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ ++ for (skb = (queue)->prev, tmp = skb->prev; \ ++ skb != 
(struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->prev) ++#endif ++ ++#ifndef udp_csum ++#define udp_csum __kc_udp_csum ++static inline __wsum __kc_udp_csum(struct sk_buff *skb) ++{ ++ __wsum csum = csum_partial(skb_transport_header(skb), ++ sizeof(struct udphdr), skb->csum); ++ ++ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { ++ csum = csum_add(csum, skb->csum); ++ } ++ return csum; ++} ++#endif /* udp_csum */ ++#else /* < 2.6.39 */ ++#ifndef HAVE_MQPRIO ++#define HAVE_MQPRIO ++#endif ++#ifndef HAVE_SETUP_TC ++#define HAVE_SETUP_TC ++#endif ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_IEEE ++#define HAVE_DCBNL_IEEE ++#endif ++#endif /* CONFIG_DCB */ ++#ifndef HAVE_NDO_SET_FEATURES ++#define HAVE_NDO_SET_FEATURES ++#endif ++#endif /* < 2.6.39 */ ++ ++/*****************************************************************************/ ++/* use < 2.6.40 because of a Fedora 15 kernel update where they ++ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features ++ * like set_phys_id for ethtool. ++ */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) ++#ifdef ETHTOOL_GRXRINGS ++#ifndef FLOW_EXT ++#define FLOW_EXT 0x80000000 ++union _kc_ethtool_flow_union { ++ struct ethtool_tcpip4_spec tcp_ip4_spec; ++ struct ethtool_usrip4_spec usr_ip4_spec; ++ __u8 hdata[60]; ++}; ++struct _kc_ethtool_flow_ext { ++ __be16 vlan_etype; ++ __be16 vlan_tci; ++ __be32 data[2]; ++}; ++struct _kc_ethtool_rx_flow_spec { ++ __u32 flow_type; ++ union _kc_ethtool_flow_union h_u; ++ struct _kc_ethtool_flow_ext h_ext; ++ union _kc_ethtool_flow_union m_u; ++ struct _kc_ethtool_flow_ext m_ext; ++ __u64 ring_cookie; ++ __u32 location; ++}; ++#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec ++#endif /* FLOW_EXT */ ++#endif ++ ++#define pci_disable_link_state_locked pci_disable_link_state ++ ++#ifndef PCI_LTR_VALUE_MASK ++#define PCI_LTR_VALUE_MASK 0x000003ff ++#endif ++#ifndef PCI_LTR_SCALE_MASK ++#define PCI_LTR_SCALE_MASK 0x00001c00 ++#endif ++#ifndef PCI_LTR_SCALE_SHIFT ++#define PCI_LTR_SCALE_SHIFT 10 ++#endif ++ ++#else /* < 2.6.40 */ ++#define HAVE_ETHTOOL_SET_PHYS_ID ++#endif /* < 2.6.40 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) ++#define USE_LEGACY_PM_SUPPORT ++#ifndef kfree_rcu ++#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) ++#endif /* kfree_rcu */ ++#ifndef kstrtol_from_user ++#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) ++static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, ++ unsigned int base, long *res) ++{ ++ /* sign, base 2 representation, newline, terminator */ ++ char buf[1 + sizeof(long) * 8 + 1 + 1]; ++ ++ count = min(count, sizeof(buf) - 1); ++ if (copy_from_user(buf, s, count)) ++ return -EFAULT; ++ buf[count] = '\0'; ++ return strict_strtol(buf, base, res); ++} ++#endif ++ ++/* 20000base_blah_full Supported and Advertised Registers */ ++#define SUPPORTED_20000baseMLD2_Full (1 << 21) ++#define SUPPORTED_20000baseKR2_Full (1 << 22) ++#define ADVERTISED_20000baseMLD2_Full (1 << 21) ++#define ADVERTISED_20000baseKR2_Full (1 << 22) ++#endif /* < 3.0.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) ++#ifndef __netdev_alloc_skb_ip_align ++#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) ++#endif /* __netdev_alloc_skb_ip_align */ ++#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) ++#define 
dcb_ieee_delapp(dev, app) 0 ++#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) ++ ++/* 1000BASE-T Control register */ ++#define CTL1000_AS_MASTER 0x0800 ++#define CTL1000_ENABLE_MASTER 0x1000 ++ ++/* kernels less than 3.0.0 don't have this */ ++#ifndef ETH_P_8021AD ++#define ETH_P_8021AD 0x88A8 ++#endif ++#else /* < 3.1.0 */ ++#ifndef HAVE_DCBNL_IEEE_DELAPP ++#define HAVE_DCBNL_IEEE_DELAPP ++#endif ++#endif /* < 3.1.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) ++#ifndef dma_zalloc_coherent ++#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) ++static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, ++ dma_addr_t *dma_handle, gfp_t flag) ++{ ++ void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif ++#ifdef ETHTOOL_GRXRINGS ++#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS ++#endif /* ETHTOOL_GRXRINGS */ ++ ++#ifndef skb_frag_size ++#define skb_frag_size(frag) _kc_skb_frag_size(frag) ++static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) ++{ ++ return frag->size; ++} ++#endif /* skb_frag_size */ ++ ++#ifndef skb_frag_size_sub ++#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) ++static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) ++{ ++ frag->size -= delta; ++} ++#endif /* skb_frag_size_sub */ ++ ++#ifndef skb_frag_page ++#define skb_frag_page(frag) _kc_skb_frag_page(frag) ++static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) ++{ ++ return frag->page; ++} ++#endif /* skb_frag_page */ ++ ++#ifndef skb_frag_address ++#define skb_frag_address(frag) _kc_skb_frag_address(frag) ++static inline void *_kc_skb_frag_address(const skb_frag_t *frag) ++{ ++ return page_address(skb_frag_page(frag)) + frag->page_offset; ++} ++#endif /* skb_frag_address */ ++ ++#ifndef skb_frag_dma_map ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++#include ++#endif ++#define skb_frag_dma_map(dev,frag,offset,size,dir) \ ++ _kc_skb_frag_dma_map(dev,frag,offset,size,dir) ++static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, ++ const skb_frag_t *frag, ++ size_t offset, size_t size, ++ enum dma_data_direction dir) ++{ ++ return dma_map_page(dev, skb_frag_page(frag), ++ frag->page_offset + offset, size, dir); ++} ++#endif /* skb_frag_dma_map */ ++ ++#ifndef __skb_frag_unref ++#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) ++static inline void __kc_skb_frag_unref(skb_frag_t *frag) ++{ ++ put_page(skb_frag_page(frag)); ++} ++#endif /* __skb_frag_unref */ ++ ++#ifndef SPEED_UNKNOWN ++#define SPEED_UNKNOWN -1 ++#endif ++#ifndef DUPLEX_UNKNOWN ++#define DUPLEX_UNKNOWN 0xff ++#endif ++#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) ++#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_PCI_DEV_FLAGS_ASSIGNED ++#endif ++#endif ++#else /* < 3.2.0 */ ++#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_VF_SPOOFCHK_CONFIGURE ++#endif ++#ifndef HAVE_SKB_L4_RXHASH ++#define HAVE_SKB_L4_RXHASH ++#endif ++#define HAVE_IOMMU_PRESENT ++#define HAVE_PM_QOS_REQUEST_LIST_NEW ++#endif /* < 3.2.0 */ ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) ++#undef ixgbe_get_netdev_tc_txq ++#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) ++#endif 
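++/* Editor's note (illustrative sketch, not part of the original kcompat
++ * source): the pre-3.2 skb_frag_* wrappers above simply dereference the old
++ * skb_frag_t layout, so fragment handling code can be written once against
++ * the 3.2+ API, e.g.
++ *     dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
++ *                            DMA_TO_DEVICE);
++ * and compile unchanged on older kernels, where these helpers fall back to
++ * frag->page / frag->size / frag->page_offset.
++ */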
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) ++/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than ++ * alloc_workqueue() to avoid compiler warning from -Wvarargs ++ */ ++static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) ++_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, ++ const char *fmt, ...) ++{ ++ struct workqueue_struct *wq; ++ va_list args, temp; ++ unsigned int len; ++ char *p; ++ ++ va_start(args, fmt); ++ va_copy(temp, args); ++ len = vsnprintf(NULL, 0, fmt, temp); ++ va_end(temp); ++ ++ p = kmalloc(len + 1, GFP_KERNEL); ++ if (!p) { ++ va_end(args); ++ return NULL; ++ } ++ ++ vsnprintf(p, len + 1, fmt, args); ++ va_end(args); ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++ wq = create_workqueue(p); ++#else ++ wq = alloc_workqueue(p, flags, max_active); ++#endif ++ kfree(p); ++ ++ return wq; ++} ++#ifdef alloc_workqueue ++#undef alloc_workqueue ++#endif ++#define alloc_workqueue(fmt, flags, max_active, args...) \ ++ _kc_alloc_workqueue(flags, max_active, fmt, ##args) ++ ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) ++typedef u32 netdev_features_t; ++#endif ++#undef PCI_EXP_TYPE_RC_EC ++#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ ++#ifndef CONFIG_BQL ++#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) ++#define netdev_completed_queue(_n, _p, _b) do {} while (0) ++#define netdev_tx_sent_queue(_q, _b) do {} while (0) ++#define netdev_sent_queue(_n, _b) do {} while (0) ++#define netdev_tx_reset_queue(_q) do {} while (0) ++#define netdev_reset_queue(_n) do {} while (0) ++#endif ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#endif /* SLE_VERSION(11,3,0) */ ++#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) ++static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, ++ u8 *nexthdrp, ++ __be16 __always_unused *frag_offp) ++{ ++ return ipv6_skip_exthdr(skb, start, nexthdrp); ++} ++#undef ipv6_skip_exthdr ++#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) ++#endif /* !SLES11sp4 or greater */ ++ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) ++static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) ++{ ++ return index % n_rx_rings; ++} ++#endif ++ ++#else /* ! 
< 3.3.0 */ ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#define HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef ETHTOOL_SRXNTUPLE ++#undef ETHTOOL_SRXNTUPLE ++#endif ++#endif /* < 3.3.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) ++#ifndef NETIF_F_RXFCS ++#define NETIF_F_RXFCS 0 ++#endif /* NETIF_F_RXFCS */ ++#ifndef NETIF_F_RXALL ++#define NETIF_F_RXALL 0 ++#endif /* NETIF_F_RXALL */ ++ ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define NUMTCS_RETURNS_U8 ++ ++int _kc_simple_open(struct inode *inode, struct file *file); ++#define simple_open _kc_simple_open ++#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ ++ ++#ifndef skb_add_rx_frag ++#define skb_add_rx_frag _kc_skb_add_rx_frag ++extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, ++ int, int, unsigned int); ++#endif ++#ifdef NET_ADDR_RANDOM ++#define eth_hw_addr_random(N) do { \ ++ eth_random_addr(N->dev_addr); \ ++ N->addr_assign_type |= NET_ADDR_RANDOM; \ ++ } while (0) ++#else /* NET_ADDR_RANDOM */ ++#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) ++#endif /* NET_ADDR_RANDOM */ ++ ++#ifndef for_each_set_bit_from ++#define for_each_set_bit_from(bit, addr, size) \ ++ for ((bit) = find_next_bit((addr), (size), (bit)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++#endif /* for_each_set_bit_from */ ++ ++#else /* < 3.4.0 */ ++#include ++#endif /* >= 3.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ ++ ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ++#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) ++#define HAVE_PTP_1588_CLOCK ++#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) ++ ++#ifndef ether_addr_equal ++static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) ++{ ++ return !compare_ether_addr(addr1, addr2); ++} ++#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) ++#endif ++ ++#else ++#define HAVE_FDB_OPS ++#define HAVE_ETHTOOL_GET_TS_INFO ++#endif /* < 3.5.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) ++#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ ++ ++#ifndef MDIO_EEE_100TX ++#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ ++#endif ++#ifndef MDIO_EEE_1000T ++#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GT ++#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ ++#endif ++#ifndef MDIO_EEE_1000KX ++#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GKX4 ++#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GKR ++#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ ++#endif ++ ++#ifndef __GFP_MEMALLOC ++#define __GFP_MEMALLOC 0 ++#endif ++ ++#ifndef eth_random_addr ++#define eth_random_addr _kc_eth_random_addr ++static inline void _kc_eth_random_addr(u8 *addr) ++{ ++ get_random_bytes(addr, ETH_ALEN); ++ addr[0] &= 0xfe; /* clear multicast */ ++ addr[0] |= 0x02; /* set local assignment */ ++} ++#endif /* eth_random_addr */ ++#else /* < 3.6.0 */ ++#define HAVE_STRUCT_PAGE_PFMEMALLOC 
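++/* Editor's note (illustrative, not part of the original kcompat source): in
++ * the pre-3.6.0 branch above, _kc_eth_random_addr() fills the buffer with
++ * random bytes, then clears the multicast bit and sets the locally
++ * administered bit, so the result is always a usable unicast LAA MAC, e.g.
++ *     u8 mac[ETH_ALEN];
++ *     eth_random_addr(mac);
++ */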
++#endif /* < 3.6.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) ++#ifndef ADVERTISED_40000baseKR4_Full ++/* these defines were all added in one commit, so should be safe ++ * to trigger activiation on one define ++ */ ++#define SUPPORTED_40000baseKR4_Full (1 << 23) ++#define SUPPORTED_40000baseCR4_Full (1 << 24) ++#define SUPPORTED_40000baseSR4_Full (1 << 25) ++#define SUPPORTED_40000baseLR4_Full (1 << 26) ++#define ADVERTISED_40000baseKR4_Full (1 << 23) ++#define ADVERTISED_40000baseCR4_Full (1 << 24) ++#define ADVERTISED_40000baseSR4_Full (1 << 25) ++#define ADVERTISED_40000baseLR4_Full (1 << 26) ++#endif ++ ++#ifndef mmd_eee_cap_to_ethtool_sup_t ++/** ++ * mmd_eee_cap_to_ethtool_sup_t ++ * @eee_cap: value of the MMD EEE Capability register ++ * ++ * A small helper function that translates MMD EEE Capability (3.20) bits ++ * to ethtool supported settings. ++ */ ++static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) ++{ ++ u32 supported = 0; ++ ++ if (eee_cap & MDIO_EEE_100TX) ++ supported |= SUPPORTED_100baseT_Full; ++ if (eee_cap & MDIO_EEE_1000T) ++ supported |= SUPPORTED_1000baseT_Full; ++ if (eee_cap & MDIO_EEE_10GT) ++ supported |= SUPPORTED_10000baseT_Full; ++ if (eee_cap & MDIO_EEE_1000KX) ++ supported |= SUPPORTED_1000baseKX_Full; ++ if (eee_cap & MDIO_EEE_10GKX4) ++ supported |= SUPPORTED_10000baseKX4_Full; ++ if (eee_cap & MDIO_EEE_10GKR) ++ supported |= SUPPORTED_10000baseKR_Full; ++ ++ return supported; ++} ++#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ ++ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) ++#endif /* mmd_eee_cap_to_ethtool_sup_t */ ++ ++#ifndef mmd_eee_adv_to_ethtool_adv_t ++/** ++ * mmd_eee_adv_to_ethtool_adv_t ++ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers ++ * ++ * A small helper function that translates the MMD EEE Advertisment (7.60) ++ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement ++ * settings. ++ */ ++static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) ++{ ++ u32 adv = 0; ++ ++ if (eee_adv & MDIO_EEE_100TX) ++ adv |= ADVERTISED_100baseT_Full; ++ if (eee_adv & MDIO_EEE_1000T) ++ adv |= ADVERTISED_1000baseT_Full; ++ if (eee_adv & MDIO_EEE_10GT) ++ adv |= ADVERTISED_10000baseT_Full; ++ if (eee_adv & MDIO_EEE_1000KX) ++ adv |= ADVERTISED_1000baseKX_Full; ++ if (eee_adv & MDIO_EEE_10GKX4) ++ adv |= ADVERTISED_10000baseKX4_Full; ++ if (eee_adv & MDIO_EEE_10GKR) ++ adv |= ADVERTISED_10000baseKR_Full; ++ ++ return adv; ++} ++ ++#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ ++ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) ++#endif /* mmd_eee_adv_to_ethtool_adv_t */ ++ ++#ifndef ethtool_adv_to_mmd_eee_adv_t ++/** ++ * ethtool_adv_to_mmd_eee_adv_t ++ * @adv: the ethtool advertisement settings ++ * ++ * A small helper function that translates ethtool advertisement settings ++ * to EEE advertisements for the MMD EEE Advertisement (7.60) and ++ * MMD EEE Link Partner Ability (7.61) registers. 
++ */ ++static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) ++{ ++ u16 reg = 0; ++ ++ if (adv & ADVERTISED_100baseT_Full) ++ reg |= MDIO_EEE_100TX; ++ if (adv & ADVERTISED_1000baseT_Full) ++ reg |= MDIO_EEE_1000T; ++ if (adv & ADVERTISED_10000baseT_Full) ++ reg |= MDIO_EEE_10GT; ++ if (adv & ADVERTISED_1000baseKX_Full) ++ reg |= MDIO_EEE_1000KX; ++ if (adv & ADVERTISED_10000baseKX4_Full) ++ reg |= MDIO_EEE_10GKX4; ++ if (adv & ADVERTISED_10000baseKR_Full) ++ reg |= MDIO_EEE_10GKR; ++ ++ return reg; ++} ++#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) ++#endif /* ethtool_adv_to_mmd_eee_adv_t */ ++ ++#ifndef pci_pcie_type ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++static inline u8 pci_pcie_type(struct pci_dev *pdev) ++{ ++ int pos; ++ u16 reg16; ++ ++ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ BUG_ON(!pos); ++ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); ++ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; ++} ++#else /* < 2.6.24 */ ++#define pci_pcie_type(x) (x)->pcie_type ++#endif /* < 2.6.24 */ ++#endif /* pci_pcie_type */ ++ ++#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ ++ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) ++#define ptp_clock_register(caps, args...) ptp_clock_register(caps) ++#endif ++ ++#ifndef pcie_capability_read_word ++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); ++#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) ++#endif /* pcie_capability_read_word */ ++ ++#ifndef pcie_capability_write_word ++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); ++#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) ++#endif /* pcie_capability_write_word */ ++ ++#ifndef pcie_capability_clear_and_set_word ++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, ++ u16 clear, u16 set); ++#define pcie_capability_clear_and_set_word(d,p,c,s) \ ++ __kc_pcie_capability_clear_and_set_word(d,p,c,s) ++#endif /* pcie_capability_clear_and_set_word */ ++ ++#ifndef pcie_capability_clear_word ++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, ++ u16 clear); ++#define pcie_capability_clear_word(d, p, c) \ ++ __kc_pcie_capability_clear_word(d, p, c) ++#endif /* pcie_capability_clear_word */ ++ ++#ifndef PCI_EXP_LNKSTA2 ++#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ ++#endif ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define USE_CONST_DEV_UC_CHAR ++#endif ++ ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) ++#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) ++#endif /* !RHEL6.8+ */ ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ++#include ++#else ++ ++#define DEFINE_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] = \ ++ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } ++ ++#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] __read_mostly = \ ++ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } ++ ++#define DECLARE_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] ++ ++#define HASH_SIZE(name) (ARRAY_SIZE(name)) ++#define HASH_BITS(name) ilog2(HASH_SIZE(name)) ++ ++/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ ++#define hash_min(val, bits) \ ++ (sizeof(val) <= 4 ? 
hash_32(val, bits) : hash_long(val, bits)) ++ ++static inline void __hash_init(struct hlist_head *ht, unsigned int sz) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < sz; i++) ++ INIT_HLIST_HEAD(&ht[i]); ++} ++ ++#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) ++ ++#define hash_add(hashtable, node, key) \ ++ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) ++ ++static inline bool hash_hashed(struct hlist_node *node) ++{ ++ return !hlist_unhashed(node); ++} ++ ++static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < sz; i++) ++ if (!hlist_empty(&ht[i])) ++ return false; ++ ++ return true; ++} ++ ++#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) ++ ++static inline void hash_del(struct hlist_node *node) ++{ ++ hlist_del_init(node); ++} ++#endif /* RHEL >= 6.6 */ ++ ++#else /* >= 3.7.0 */ ++#include ++#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS ++#define USE_CONST_DEV_UC_CHAR ++#endif /* >= 3.7.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) ++#ifndef pci_sriov_set_totalvfs ++static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) ++{ ++ return 0; ++} ++#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) ++#endif ++#ifndef PCI_EXP_LNKCTL_ASPM_L0S ++#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ ++#endif ++#ifndef PCI_EXP_LNKCTL_ASPM_L1 ++#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ ++#endif ++#define HAVE_CONFIG_HOTPLUG ++/* Reserved Ethernet Addresses per IEEE 802.1Q */ ++static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { ++ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; ++ ++#ifndef is_link_local_ether_addr ++static inline bool __kc_is_link_local_ether_addr(const u8 *addr) ++{ ++ __be16 *a = (__be16 *)addr; ++ static const __be16 *b = (const __be16 *)eth_reserved_addr_base; ++ static const __be16 m = cpu_to_be16(0xfff0); ++ ++ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; ++} ++#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) ++#endif /* is_link_local_ether_addr */ ++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, ++ int target, unsigned short *fragoff, int *flags); ++#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) ++ ++#ifndef FLOW_MAC_EXT ++#define FLOW_MAC_EXT 0x40000000 ++#endif /* FLOW_MAC_EXT */ ++ ++#else /* >= 3.8.0 */ ++#ifndef __devinit ++#define __devinit ++#endif ++ ++#ifndef __devinitdata ++#define __devinitdata ++#endif ++ ++#ifndef __devinitconst ++#define __devinitconst ++#endif ++ ++#ifndef __devexit ++#define __devexit ++#endif ++ ++#ifndef __devexit_p ++#define __devexit_p ++#endif ++ ++#ifndef HAVE_ENCAP_CSUM_OFFLOAD ++#define HAVE_ENCAP_CSUM_OFFLOAD ++#endif ++ ++#ifndef HAVE_GRE_ENCAP_OFFLOAD ++#define HAVE_GRE_ENCAP_OFFLOAD ++#endif ++ ++#ifndef HAVE_SRIOV_CONFIGURE ++#define HAVE_SRIOV_CONFIGURE ++#endif ++ ++#define HAVE_BRIDGE_ATTRIBS ++#ifndef BRIDGE_MODE_VEB ++#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ ++#endif /* BRIDGE_MODE_VEB */ ++#ifndef BRIDGE_MODE_VEPA ++#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ ++#endif /* BRIDGE_MODE_VEPA */ ++#endif /* >= 3.8.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) ++ ++#undef 
BUILD_BUG_ON ++#ifdef __CHECKER__ ++#define BUILD_BUG_ON(condition) (0) ++#else /* __CHECKER__ */ ++#ifndef __compiletime_warning ++#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) ++#define __compiletime_warning(message) __attribute__((warning(message))) ++#else /* __GNUC__ */ ++#define __compiletime_warning(message) ++#endif /* __GNUC__ */ ++#endif /* __compiletime_warning */ ++#ifndef __compiletime_error ++#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) ++#define __compiletime_error(message) __attribute__((error(message))) ++#define __compiletime_error_fallback(condition) do { } while (0) ++#else /* __GNUC__ */ ++#define __compiletime_error(message) ++#define __compiletime_error_fallback(condition) \ ++ do { ((void)sizeof(char[1 - 2 * condition])); } while (0) ++#endif /* __GNUC__ */ ++#else /* __compiletime_error */ ++#define __compiletime_error_fallback(condition) do { } while (0) ++#endif /* __compiletime_error */ ++#define __compiletime_assert(condition, msg, prefix, suffix) \ ++ do { \ ++ bool __cond = !(condition); \ ++ extern void prefix ## suffix(void) __compiletime_error(msg); \ ++ if (__cond) \ ++ prefix ## suffix(); \ ++ __compiletime_error_fallback(__cond); \ ++ } while (0) ++ ++#define _compiletime_assert(condition, msg, prefix, suffix) \ ++ __compiletime_assert(condition, msg, prefix, suffix) ++#define compiletime_assert(condition, msg) \ ++ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ++#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) ++#ifndef __OPTIMIZE__ ++#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) ++#else /* __OPTIMIZE__ */ ++#define BUILD_BUG_ON(condition) \ ++ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) ++#endif /* __OPTIMIZE__ */ ++#endif /* __CHECKER__ */ ++ ++#undef hlist_entry ++#define hlist_entry(ptr, type, member) container_of(ptr,type,member) ++ ++#undef hlist_entry_safe ++#define hlist_entry_safe(ptr, type, member) \ ++ ({ typeof(ptr) ____ptr = (ptr); \ ++ ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ ++ }) ++ ++#undef hlist_for_each_entry ++#define hlist_for_each_entry(pos, head, member) \ ++ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ ++ pos; \ ++ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) ++ ++#undef hlist_for_each_entry_safe ++#define hlist_for_each_entry_safe(pos, n, head, member) \ ++ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ ++ pos && ({ n = pos->member.next; 1; }); \ ++ pos = hlist_entry_safe(n, typeof(*pos), member)) ++ ++#undef hash_for_each ++#define hash_for_each(name, bkt, obj, member) \ ++ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ ++ (bkt)++)\ ++ hlist_for_each_entry(obj, &name[bkt], member) ++ ++#undef hash_for_each_safe ++#define hash_for_each_safe(name, bkt, tmp, obj, member) \ ++ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ ++ (bkt)++)\ ++ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) ++ ++#undef hash_for_each_possible ++#define hash_for_each_possible(name, obj, member, key) \ ++ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) ++ ++#undef hash_for_each_possible_safe ++#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ ++ hlist_for_each_entry_safe(obj, tmp,\ ++ &name[hash_min(key, HASH_BITS(name))], member) ++ ++#ifdef CONFIG_XPS ++extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16); ++#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) ++#else /* CONFIG_XPS */ ++#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) ++#endif /* CONFIG_XPS */ ++ ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#define _kc_hashrnd 0xd631614b /* not so random hash salt */ ++extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); ++#define __netdev_pick_tx __kc_netdev_pick_tx ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#else ++#define HAVE_BRIDGE_FILTER ++#define HAVE_FDB_DEL_NLATTR ++#endif /* < 3.9.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++#ifndef NAPI_POLL_WEIGHT ++#define NAPI_POLL_WEIGHT 64 ++#endif ++#ifdef CONFIG_PCI_IOV ++extern int __kc_pci_vfs_assigned(struct pci_dev *dev); ++#else ++static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) ++{ ++ return 0; ++} ++#endif ++#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) ++ ++#ifndef list_first_entry_or_null ++#define list_first_entry_or_null(ptr, type, member) \ ++ (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) ++#endif ++ ++#ifndef VLAN_TX_COOKIE_MAGIC ++static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, ++ u16 vlan_tci) ++{ ++#ifdef VLAN_TAG_PRESENT ++ vlan_tci |= VLAN_TAG_PRESENT; ++#endif ++ skb->vlan_tci = vlan_tci; ++ return skb; ++} ++#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ ++ __kc__vlan_hwaccel_put_tag(skb, vlan_tci) ++#endif ++ ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr, u16 flags); ++#ifdef HAVE_FDB_DEL_NLATTR ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr); ++#else ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ const unsigned char *addr); ++#endif ++#else ++extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr, u16 flags); ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr); ++#endif ++#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add ++#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del ++#endif /* HAVE_FDB_OPS */ ++ ++#ifndef PCI_DEVID ++#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) ++#endif ++#else /* >= 3.10.0 */ ++#define HAVE_ENCAP_TSO_OFFLOAD ++#define USE_DEFAULT_FDB_DEL_DUMP ++#define HAVE_SKB_INNER_NETWORK_HEADER ++#endif /* >= 3.10.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) ++#define netdev_notifier_info_to_dev(ptr) ptr ++#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) ++#define HAVE_NDO_SET_VF_LINK_STATE ++#endif ++#else /* >= 3.11.0 */ ++#define HAVE_NDO_SET_VF_LINK_STATE ++#define HAVE_SKB_INNER_PROTOCOL ++#define HAVE_MPLS_FEATURES ++#endif /* >= 3.11.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) ++extern int __kc_pcie_get_minimum_link(struct pci_dev *dev, ++ enum pci_bus_speed *speed, ++ enum pcie_link_width *width); ++#ifndef pcie_get_minimum_link ++#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) ++#endif ++#else /* >= 3.12.0 */ ++#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) ++#define HAVE_VXLAN_RX_OFFLOAD ++#endif /* < 4.8.0 */ ++#define HAVE_NDO_GET_PHYS_PORT_ID ++#endif /* >= 3.12.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) ++#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) ++extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); ++#ifndef u64_stats_init ++#define u64_stats_init(a) do { } while(0) ++#endif ++#ifndef BIT_ULL ++#define BIT_ULL(n) (1ULL << (n)) ++#endif ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) ++#undef HAVE_STRUCT_PAGE_PFMEMALLOC ++#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT ++#endif ++#ifndef list_next_entry ++#define list_next_entry(pos, member) \ ++ list_entry((pos)->member.next, typeof(*(pos)), member) ++#endif ++ ++#else /* >= 3.13.0 */ ++#define HAVE_VXLAN_CHECKS ++#if 
(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#else ++#define HAVE_NDO_SELECT_QUEUE_ACCEL ++#endif ++#define HAVE_NET_GET_RANDOM_ONCE ++#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS ++#endif ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) ++ ++#ifndef U16_MAX ++#define U16_MAX ((u16)~0U) ++#endif ++ ++#ifndef U32_MAX ++#define U32_MAX ((u32)~0U) ++#endif ++ ++#define dev_consume_skb_any(x) dev_kfree_skb_any(x) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) ++ ++/* it isn't expected that this would be a #define unless we made it so */ ++#ifndef skb_set_hash ++ ++#define PKT_HASH_TYPE_NONE 0 ++#define PKT_HASH_TYPE_L2 1 ++#define PKT_HASH_TYPE_L3 2 ++#define PKT_HASH_TYPE_L4 3 ++ ++#define skb_set_hash __kc_skb_set_hash ++static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, ++ u32 __maybe_unused hash, ++ int __maybe_unused type) ++{ ++#ifdef HAVE_SKB_L4_RXHASH ++ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); ++#endif ++#ifdef NETIF_F_RXHASH ++ skb->rxhash = hash; ++#endif ++} ++#endif /* !skb_set_hash */ ++ ++#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ ++ ++#ifndef HAVE_VXLAN_RX_OFFLOAD ++#define HAVE_VXLAN_RX_OFFLOAD ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ ++#ifndef HAVE_VXLAN_CHECKS ++#define HAVE_VXLAN_CHECKS ++#endif /* HAVE_VXLAN_CHECKS */ ++#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ ++ ++#ifndef pci_enable_msix_range ++extern int __kc_pci_enable_msix_range(struct pci_dev *dev, ++ struct msix_entry *entries, ++ int minvec, int maxvec); ++#define pci_enable_msix_range __kc_pci_enable_msix_range ++#endif ++ ++#ifndef ether_addr_copy ++#define ether_addr_copy __kc_ether_addr_copy ++static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) ++{ ++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ++ *(u32 *)dst = *(const u32 *)src; ++ *(u16 *)(dst + 4) = *(const u16 *)(src + 4); ++#else ++ u16 *a = (u16 *)dst; ++ const u16 *b = (const u16 *)src; ++ ++ a[0] = b[0]; ++ a[1] = b[1]; ++ a[2] = b[2]; ++#endif ++} ++#endif /* ether_addr_copy */ ++ ++#else /* >= 3.14.0 */ ++ ++/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ ++#ifndef HAVE_NDO_DFWD_OPS ++#define HAVE_NDO_DFWD_OPS ++#endif ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif /* 3.14.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ ++ !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) ++#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh ++#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh ++#endif ++ ++#else ++#define HAVE_PTP_1588_CLOCK_PINS ++#define HAVE_NETDEV_PORT ++#endif /* 3.15.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) ++#ifndef smp_mb__before_atomic ++#define smp_mb__before_atomic() smp_mb() ++#define smp_mb__after_atomic() smp_mb() ++#endif ++#ifndef __dev_uc_sync ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, 
const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)); ++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)); ++#endif ++#ifndef NETDEV_HW_ADDR_T_MULTICAST ++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)); ++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)); ++#endif ++#endif /* HAVE_SET_RX_MODE */ ++ ++static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); ++#elif defined(HAVE_SET_RX_MODE) ++ return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, ++ dev, sync, unsync); ++#else ++ return 0; ++#endif ++} ++#define __dev_uc_sync __kc_dev_uc_sync ++ ++static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); ++#else /* NETDEV_HW_ADDR_T_MULTICAST */ ++ __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); ++#endif /* NETDEV_HW_ADDR_T_UNICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++} ++#define __dev_uc_unsync __kc_dev_uc_unsync ++ ++static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); ++#elif defined(HAVE_SET_RX_MODE) ++ return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, ++ dev, sync, unsync); ++#else ++ return 0; ++#endif ++ ++} ++#define __dev_mc_sync __kc_dev_mc_sync ++ ++static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); ++#else /* NETDEV_HW_ADDR_T_MULTICAST */ ++ __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); ++#endif /* NETDEV_HW_ADDR_T_MULTICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++} ++#define __dev_mc_unsync __kc_dev_mc_unsync ++#endif /* __dev_uc_sync */ ++ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++#endif ++ ++#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM ++/* if someone backports this, hopefully they backport as a #define. 
++ * declare it as zero on older kernels so that if it get's or'd in ++ * it won't effect anything, therefore preventing core driver changes ++ */ ++#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 ++#define SKB_GSO_UDP_TUNNEL_CSUM 0 ++#endif ++ ++#else ++#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY ++#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++#endif /* 3.16.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ ++ !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) ++#ifndef timespec64 ++#define timespec64 timespec ++static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) ++{ ++ return ts; ++} ++static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) ++{ ++ return ts64; ++} ++#define timespec64_equal timespec_equal ++#define timespec64_compare timespec_compare ++#define set_normalized_timespec64 set_normalized_timespec ++#define timespec64_add_safe timespec_add_safe ++#define timespec64_add timespec_add ++#define timespec64_sub timespec_sub ++#define timespec64_valid timespec_valid ++#define timespec64_valid_strict timespec_valid_strict ++#define timespec64_to_ns timespec_to_ns ++#define ns_to_timespec64 ns_to_timespec ++#define ktime_to_timespec64 ktime_to_timespec ++#define timespec64_add_ns timespec_add_ns ++#endif /* timespec64 */ ++#endif /* !(RHEL6.8 ++extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); ++extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, ++ struct skb_shared_hwtstamps *hwtstamps); ++#define skb_clone_sk __kc_skb_clone_sk ++#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp ++#endif ++extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); ++#define eth_get_headlen __kc_eth_get_headlen ++#ifndef ETH_P_XDSA ++#define ETH_P_XDSA 0x00F8 ++#endif ++/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_SKBUFF_CSUM_LEVEL ++#endif /* >= RH 7.1 */ ++ ++#undef GENMASK ++#define GENMASK(h, l) \ ++ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++#undef GENMASK_ULL ++#define GENMASK_ULL(h, l) \ ++ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) ++ ++#else /* 3.18.0 */ ++#define HAVE_SKBUFF_CSUM_LEVEL ++#define HAVE_SKB_XMIT_MORE ++#define HAVE_SKB_INNER_PROTOCOL_TYPE ++#endif /* 3.18.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) ++#else ++#define HAVE_NDO_FEATURES_CHECK ++#endif /* 3.18.4 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) ++/* netdev_phys_port_id renamed to netdev_phys_item_id */ ++#define netdev_phys_item_id netdev_phys_port_id ++ ++static inline void _kc_napi_complete_done(struct napi_struct *napi, ++ int __always_unused work_done) { ++ napi_complete(napi); ++} ++#define napi_complete_done _kc_napi_complete_done ++ ++#ifndef NETDEV_RSS_KEY_LEN ++#define NETDEV_RSS_KEY_LEN (13 * 4) ++#endif ++#if ( !(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ) ++#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) ++#endif /* RHEL_RELEASE_CODE */ ++extern void __kc_netdev_rss_key_fill(void *buffer, size_t len); ++#define SPEED_20000 20000 ++#define SPEED_40000 40000 ++#ifndef dma_rmb ++#define dma_rmb() rmb() ++#endif ++#ifndef dev_alloc_pages ++#define dev_alloc_pages(_order) 
alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) ++#endif ++#ifndef dev_alloc_page ++#define dev_alloc_page() dev_alloc_pages(0) ++#endif ++#if !defined(eth_skb_pad) && !defined(skb_put_padto) ++/** ++ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size ++ * @skb: buffer to pad ++ * @len: minimal length ++ * ++ * Pads up a buffer to ensure the trailing bytes exist and are ++ * blanked. If the buffer already contains sufficient data it ++ * is untouched. Otherwise it is extended. Returns zero on ++ * success. The skb is freed on error. ++ */ ++static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int size = skb->len; ++ ++ if (unlikely(size < len)) { ++ len -= size; ++ if (skb_pad(skb, len)) ++ return -ENOMEM; ++ __skb_put(skb, len); ++ } ++ return 0; ++} ++#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) ++ ++static inline int __kc_eth_skb_pad(struct sk_buff *skb) ++{ ++ return __kc_skb_put_padto(skb, ETH_ZLEN); ++} ++#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) ++#endif /* eth_skb_pad && skb_put_padto */ ++ ++#ifndef SKB_ALLOC_NAPI ++/* RHEL 7.2 backported napi_alloc_skb and friends */ ++static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) ++{ ++ return netdev_alloc_skb_ip_align(napi->dev, length); ++} ++#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) ++#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) ++#endif /* SKB_ALLOC_NAPI */ ++#define HAVE_CONFIG_PM_RUNTIME ++#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RXFH_HASHFUNC ++#endif /* 6.7 < RHEL < 7.0 */ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_RXFH_HASHFUNC ++#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++#endif /* RHEL > 7.1 */ ++#ifndef napi_schedule_irqoff ++#define napi_schedule_irqoff napi_schedule ++#endif ++#ifndef READ_ONCE ++#define READ_ONCE(_x) ACCESS_ONCE(_x) ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_FDB_ADD_VID ++#endif ++#else /* 3.19.0 */ ++#define HAVE_NDO_FDB_ADD_VID ++#define HAVE_RXFH_HASHFUNC ++#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++#endif /* 3.19.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) ++/* vlan_tx_xx functions got renamed to skb_vlan */ ++#ifndef skb_vlan_tag_get ++#define skb_vlan_tag_get vlan_tx_tag_get ++#endif ++#ifndef skb_vlan_tag_present ++#define skb_vlan_tag_present vlan_tx_tag_present ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++#endif ++#else ++#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++#endif /* 3.20.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) ++#ifndef NO_PTP_SUPPORT ++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#include ++#else ++#include ++#endif ++static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) ++{ ++ tc->nsec += delta; ++} ++#define timecounter_adjtime __kc_timecounter_adjtime ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++#endif ++#else ++#define HAVE_PTP_CLOCK_INFO_GETTIME64 ++#define 
HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++#define HAVE_PASSTHRU_FEATURES_CHECK ++#define HAVE_NDO_SET_VF_RSS_QUERY_EN ++#endif /* 4,1,0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) ++#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) && \ ++ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) ++{ ++#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC ++ return page->pfmemalloc; ++#else ++ return false; ++#endif ++} ++#endif /* !SLES12sp1 */ ++#else ++#undef HAVE_STRUCT_PAGE_PFMEMALLOC ++#endif /* 4.1.9 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) ++#else ++#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++#endif /* 4.2.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) ++#ifndef CONFIG_64BIT ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) ++#include /* 32-bit readq/writeq */ ++#else /* 3.3.0 => 4.3.x */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) ++#include ++#endif /* 2.6.26 => 3.3.0 */ ++#ifndef readq ++static inline __u64 readq(const volatile void __iomem *addr) ++{ ++ const volatile u32 __iomem *p = addr; ++ u32 low, high; ++ ++ low = readl(p); ++ high = readl(p + 1); ++ ++ return low + ((u64)high << 32); ++} ++#define readq readq ++#endif ++ ++#ifndef writeq ++static inline void writeq(__u64 val, volatile void __iomem *addr) ++{ ++ writel(val, addr); ++ writel(val >> 32, addr + 4); ++} ++#define writeq writeq ++#endif ++#endif /* < 3.3.0 */ ++#endif /* !CONFIG_64BIT */ ++#else ++#define HAVE_NDO_SET_VF_TRUST ++ ++#ifndef CONFIG_64BIT ++#include /* 32-bit readq/writeq */ ++#endif /* !CONFIG_64BIT */ ++#endif /* 4.4.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) ++/* protect against a likely backport */ ++#ifndef NETIF_F_CSUM_MASK ++#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM ++#endif /* NETIF_F_CSUM_MASK */ ++#ifndef NETIF_F_SCTP_CRC ++#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM ++#endif /* NETIF_F_SCTP_CRC */ ++#else ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) ++#define HAVE_GENEVE_RX_OFFLOAD ++#endif /* < 4.8.0 */ ++#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD ++#endif /* 4.5.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) ++#if !(UBUNTU_VERSION_CODE && \ ++ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ ++ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline void napi_consume_skb(struct sk_buff *skb, ++ int __always_unused budget) ++{ ++ dev_consume_skb_any(skb); ++} ++ ++#endif /* UBUNTU_VERSION(4,4,0,21) */ ++static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) ++{ ++ * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); ++} ++ ++#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline void page_ref_inc(struct page *page) ++{ ++ atomic_inc(&page->_count); ++} ++ ++#endif ++ ++#endif /* 4.6.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) ++#else ++#define HAVE_NETIF_TRANS_UPDATE ++#endif /* 4.7.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) ++enum udp_parsable_tunnel_type { ++ UDP_TUNNEL_TYPE_VXLAN, ++ UDP_TUNNEL_TYPE_GENEVE, ++}; ++struct udp_tunnel_info { ++ unsigned short type; ++ sa_family_t sa_family; ++ __be16 port; ++}; ++#else ++#define HAVE_UDP_ENC_RX_OFFLOAD ++#endif /* 4.8.0 */ ++ ++#endif /* _KCOMPAT_H_ */ +diff -Nu a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c +--- a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c 1970-01-01 
00:00:00.000000000 +0000 ++++ b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c 2016-11-14 14:32:08.583567168 +0000 +@@ -0,0 +1,1169 @@ ++/******************************************************************************* ++ ++ Intel(R) Gigabit Ethernet Linux driver ++ Copyright(c) 2007-2015 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ++ * net/core/ethtool.c - Ethtool ioctl handler ++ * Copyright (c) 2003 Matthew Wilcox ++ * ++ * This file is where we call all the ethtool_ops commands to get ++ * the information ethtool needs. We fall back to calling do_ioctl() ++ * for drivers which haven't been converted to ethtool_ops yet. ++ * ++ * It's GPL, stupid. ++ * ++ * Modification by sfeldma@pobox.com to work as backward compat ++ * solution for pre-ethtool_ops kernels. ++ * - copied struct ethtool_ops from ethtool.h ++ * - defined SET_ETHTOOL_OPS ++ * - put in some #ifndef NETIF_F_xxx wrappers ++ * - changes refs to dev->ethtool_ops to ethtool_ops ++ * - changed dev_ethtool to ethtool_ioctl ++ * - remove EXPORT_SYMBOL()s ++ * - added _kc_ prefix in built-in ethtool_op_xxx ops. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "kcompat.h" ++ ++#undef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#undef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#undef SPEED_10000 ++#define SPEED_10000 10000 ++ ++#undef ethtool_ops ++#define ethtool_ops _kc_ethtool_ops ++ ++struct _kc_ethtool_ops { ++ int (*get_settings)(struct net_device *, struct ethtool_cmd *); ++ int (*set_settings)(struct net_device *, struct ethtool_cmd *); ++ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); ++ int (*get_regs_len)(struct net_device *); ++ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); ++ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); ++ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); ++ u32 (*get_msglevel)(struct net_device *); ++ void (*set_msglevel)(struct net_device *, u32); ++ int (*nway_reset)(struct net_device *); ++ u32 (*get_link)(struct net_device *); ++ int (*get_eeprom_len)(struct net_device *); ++ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ void (*get_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ int (*set_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ u32 (*get_rx_csum)(struct net_device *); ++ int (*set_rx_csum)(struct net_device *, u32); ++ u32 (*get_tx_csum)(struct net_device *); ++ int (*set_tx_csum)(struct net_device *, u32); ++ u32 (*get_sg)(struct net_device *); ++ int (*set_sg)(struct net_device *, u32); ++ u32 (*get_tso)(struct net_device *); ++ int (*set_tso)(struct net_device *, u32); ++ int (*self_test_count)(struct net_device *); ++ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); ++ void (*get_strings)(struct net_device *, u32 stringset, u8 *); ++ int (*phys_id)(struct net_device *, u32); ++ int (*get_stats_count)(struct net_device *); ++ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, ++ u64 *); ++} *ethtool_ops = NULL; ++ ++#undef SET_ETHTOOL_OPS ++#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) ++ ++/* ++ * Some useful ethtool_ops methods that are device independent. If we find that ++ * all drivers want to do the same thing here, we can turn these into dev_() ++ * function calls. ++ */ ++ ++#undef ethtool_op_get_link ++#define ethtool_op_get_link _kc_ethtool_op_get_link ++u32 _kc_ethtool_op_get_link(struct net_device *dev) ++{ ++ return netif_carrier_ok(dev) ? 
1 : 0; ++} ++ ++#undef ethtool_op_get_tx_csum ++#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum ++u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) ++{ ++#ifdef NETIF_F_IP_CSUM ++ return (dev->features & NETIF_F_IP_CSUM) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tx_csum ++#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum ++int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_IP_CSUM ++ if (data) ++#ifdef NETIF_F_IPV6_CSUM ++ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++ else ++ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++#else ++ dev->features |= NETIF_F_IP_CSUM; ++ else ++ dev->features &= ~NETIF_F_IP_CSUM; ++#endif ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_sg ++#define ethtool_op_get_sg _kc_ethtool_op_get_sg ++u32 _kc_ethtool_op_get_sg(struct net_device *dev) ++{ ++#ifdef NETIF_F_SG ++ return (dev->features & NETIF_F_SG) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_sg ++#define ethtool_op_set_sg _kc_ethtool_op_set_sg ++int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_SG ++ if (data) ++ dev->features |= NETIF_F_SG; ++ else ++ dev->features &= ~NETIF_F_SG; ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_tso ++#define ethtool_op_get_tso _kc_ethtool_op_get_tso ++u32 _kc_ethtool_op_get_tso(struct net_device *dev) ++{ ++#ifdef NETIF_F_TSO ++ return (dev->features & NETIF_F_TSO) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tso ++#define ethtool_op_set_tso _kc_ethtool_op_set_tso ++int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_TSO ++ if (data) ++ dev->features |= NETIF_F_TSO; ++ else ++ dev->features &= ~NETIF_F_TSO; ++#endif ++ ++ return 0; ++} ++ ++/* Handlers for each ethtool command */ ++ ++static int ethtool_get_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd = { ETHTOOL_GSET }; ++ int err; ++ ++ if (!ethtool_ops->get_settings) ++ return -EOPNOTSUPP; ++ ++ err = ethtool_ops->get_settings(dev, &cmd); ++ if (err < 0) ++ return err; ++ ++ if (copy_to_user(useraddr, &cmd, sizeof(cmd))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd; ++ ++ if (!ethtool_ops->set_settings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&cmd, useraddr, sizeof(cmd))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_settings(dev, &cmd); ++} ++ ++static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_drvinfo info; ++ struct ethtool_ops *ops = ethtool_ops; ++ ++ if (!ops->get_drvinfo) ++ return -EOPNOTSUPP; ++ ++ memset(&info, 0, sizeof(info)); ++ info.cmd = ETHTOOL_GDRVINFO; ++ ops->get_drvinfo(dev, &info); ++ ++ if (ops->self_test_count) ++ info.testinfo_len = ops->self_test_count(dev); ++ if (ops->get_stats_count) ++ info.n_stats = ops->get_stats_count(dev); ++ if (ops->get_regs_len) ++ info.regdump_len = ops->get_regs_len(dev); ++ if (ops->get_eeprom_len) ++ info.eedump_len = ops->get_eeprom_len(dev); ++ ++ if (copy_to_user(useraddr, &info, sizeof(info))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_regs(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_regs regs; ++ struct ethtool_ops *ops = ethtool_ops; ++ void *regbuf; ++ int reglen, ret; ++ ++ if (!ops->get_regs || !ops->get_regs_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&regs, useraddr, sizeof(regs))) ++ return -EFAULT;
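	/*
	 * At this point "regs" holds the request header copied in from user
	 * space. The code below clamps the requested length to what the
	 * driver reports via get_regs_len(), fills a kernel buffer through
	 * ops->get_regs(), and copies the header and then the register dump
	 * back out to user space.
	 */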
++ ++ reglen = ops->get_regs_len(dev); ++ if (regs.len > reglen) ++ regs.len = reglen; ++ ++ regbuf = kmalloc(reglen, GFP_USER); ++ if (!regbuf) ++ return -ENOMEM; ++ ++ ops->get_regs(dev, &regs, regbuf); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &regs, sizeof(regs))) ++ goto out; ++ useraddr += offsetof(struct ethtool_regs, data); ++ if (copy_to_user(useraddr, regbuf, reglen)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(regbuf); ++ return ret; ++} ++ ++static int ethtool_get_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; ++ ++ if (!ethtool_ops->get_wol) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_wol(dev, &wol); ++ ++ if (copy_to_user(useraddr, &wol, sizeof(wol))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol; ++ ++ if (!ethtool_ops->set_wol) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&wol, useraddr, sizeof(wol))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_wol(dev, &wol); ++} ++ ++static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GMSGLVL }; ++ ++ if (!ethtool_ops->get_msglevel) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_msglevel(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_msglevel) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_msglevel(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_nway_reset(struct net_device *dev) ++{ ++ if (!ethtool_ops->nway_reset) ++ return -EOPNOTSUPP; ++ ++ return ethtool_ops->nway_reset(dev); ++} ++ ++static int ethtool_get_link(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GLINK }; ++ ++ if (!ethtool_ops->get_link) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_link(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->get_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) ++ goto out; ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->set_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr,
sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->set_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ ret = -EFAULT; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_coalesce(dev, &coalesce); ++ ++ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_coalesce(dev, &coalesce); ++} ++ ++static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_ringparam(dev, &ringparam); ++ ++ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_ringparam(dev, &ringparam); ++} ++ ++static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_pauseparam(dev, &pauseparam); ++ ++ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_pauseparam(dev, &pauseparam); ++} ++ ++static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GRXCSUM }; ++ ++ if (!ethtool_ops->get_rx_csum) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_rx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_rx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_rx_csum(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTXCSUM }; ++ ++ if (!ethtool_ops->get_tx_csum) ++ 
return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tx_csum(dev, edata.data); ++} ++ ++static int ethtool_get_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GSG }; ++ ++ if (!ethtool_ops->get_sg) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_sg(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_sg) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_sg(dev, edata.data); ++} ++ ++static int ethtool_get_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTSO }; ++ ++ if (!ethtool_ops->get_tso) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tso(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tso) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tso(dev, edata.data); ++} ++ ++static int ethtool_self_test(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_test test; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->self_test || !ops->self_test_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&test, useraddr, sizeof(test))) ++ return -EFAULT; ++ ++ test.len = ops->self_test_count(dev); ++ data = kmalloc(test.len * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->self_test(dev, &test, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &test, sizeof(test))) ++ goto out; ++ useraddr += sizeof(test); ++ if (copy_to_user(useraddr, data, test.len * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_strings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_gstrings gstrings; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_strings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) ++ return -EFAULT; ++ ++ switch (gstrings.string_set) { ++ case ETH_SS_TEST: ++ if (!ops->self_test_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->self_test_count(dev); ++ break; ++ case ETH_SS_STATS: ++ if (!ops->get_stats_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->get_stats_count(dev); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_strings(dev, gstrings.string_set, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) ++ goto out; ++ useraddr += sizeof(gstrings); ++ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_phys_id(struct 
net_device *dev, void *useraddr) ++{ ++ struct ethtool_value id; ++ ++ if (!ethtool_ops->phys_id) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&id, useraddr, sizeof(id))) ++ return -EFAULT; ++ ++ return ethtool_ops->phys_id(dev, id.data); ++} ++ ++static int ethtool_get_stats(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_stats stats; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->get_ethtool_stats || !ops->get_stats_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&stats, useraddr, sizeof(stats))) ++ return -EFAULT; ++ ++ stats.n_stats = ops->get_stats_count(dev); ++ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_ethtool_stats(dev, &stats, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &stats, sizeof(stats))) ++ goto out; ++ useraddr += sizeof(stats); ++ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++/* The main entry point in this file. Called from net/core/dev.c */ ++ ++#define ETHTOOL_OPS_COMPAT ++int ethtool_ioctl(struct ifreq *ifr) ++{ ++ struct net_device *dev = __dev_get_by_name(ifr->ifr_name); ++ void *useraddr = (void *) ifr->ifr_data; ++ u32 ethcmd; ++ ++ /* ++ * XXX: This can be pushed down into the ethtool_* handlers that ++ * need it. Keep existing behavior for the moment. ++ */ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (!dev || !netif_device_present(dev)) ++ return -ENODEV; ++ ++ if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) ++ return -EFAULT; ++ ++ switch (ethcmd) { ++ case ETHTOOL_GSET: ++ return ethtool_get_settings(dev, useraddr); ++ case ETHTOOL_SSET: ++ return ethtool_set_settings(dev, useraddr); ++ case ETHTOOL_GDRVINFO: ++ return ethtool_get_drvinfo(dev, useraddr); ++ case ETHTOOL_GREGS: ++ return ethtool_get_regs(dev, useraddr); ++ case ETHTOOL_GWOL: ++ return ethtool_get_wol(dev, useraddr); ++ case ETHTOOL_SWOL: ++ return ethtool_set_wol(dev, useraddr); ++ case ETHTOOL_GMSGLVL: ++ return ethtool_get_msglevel(dev, useraddr); ++ case ETHTOOL_SMSGLVL: ++ return ethtool_set_msglevel(dev, useraddr); ++ case ETHTOOL_NWAY_RST: ++ return ethtool_nway_reset(dev); ++ case ETHTOOL_GLINK: ++ return ethtool_get_link(dev, useraddr); ++ case ETHTOOL_GEEPROM: ++ return ethtool_get_eeprom(dev, useraddr); ++ case ETHTOOL_SEEPROM: ++ return ethtool_set_eeprom(dev, useraddr); ++ case ETHTOOL_GCOALESCE: ++ return ethtool_get_coalesce(dev, useraddr); ++ case ETHTOOL_SCOALESCE: ++ return ethtool_set_coalesce(dev, useraddr); ++ case ETHTOOL_GRINGPARAM: ++ return ethtool_get_ringparam(dev, useraddr); ++ case ETHTOOL_SRINGPARAM: ++ return ethtool_set_ringparam(dev, useraddr); ++ case ETHTOOL_GPAUSEPARAM: ++ return ethtool_get_pauseparam(dev, useraddr); ++ case ETHTOOL_SPAUSEPARAM: ++ return ethtool_set_pauseparam(dev, useraddr); ++ case ETHTOOL_GRXCSUM: ++ return ethtool_get_rx_csum(dev, useraddr); ++ case ETHTOOL_SRXCSUM: ++ return ethtool_set_rx_csum(dev, useraddr); ++ case ETHTOOL_GTXCSUM: ++ return ethtool_get_tx_csum(dev, useraddr); ++ case ETHTOOL_STXCSUM: ++ return ethtool_set_tx_csum(dev, useraddr); ++ case ETHTOOL_GSG: ++ return ethtool_get_sg(dev, useraddr); ++ case ETHTOOL_SSG: ++ return ethtool_set_sg(dev, useraddr); ++ case ETHTOOL_GTSO: ++ return ethtool_get_tso(dev, useraddr); ++ case ETHTOOL_STSO: ++ return ethtool_set_tso(dev, useraddr); ++ case ETHTOOL_TEST: ++ return ethtool_self_test(dev, useraddr); ++ case ETHTOOL_GSTRINGS: ++
return ethtool_get_strings(dev, useraddr); ++ case ETHTOOL_PHYS_ID: ++ return ethtool_phys_id(dev, useraddr); ++ case ETHTOOL_GSTATS: ++ return ethtool_get_stats(dev, useraddr); ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++#define mii_if_info _kc_mii_if_info ++struct _kc_mii_if_info { ++ int phy_id; ++ int advertising; ++ int phy_id_mask; ++ int reg_num_mask; ++ ++ unsigned int full_duplex : 1; /* is full duplex? */ ++ unsigned int force_media : 1; /* is autoneg. disabled? */ ++ ++ struct net_device *dev; ++ int (*mdio_read) (struct net_device *dev, int phy_id, int location); ++ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); ++}; ++ ++struct ethtool_cmd; ++struct mii_ioctl_data; ++ ++#undef mii_link_ok ++#define mii_link_ok _kc_mii_link_ok ++#undef mii_nway_restart ++#define mii_nway_restart _kc_mii_nway_restart ++#undef mii_ethtool_gset ++#define mii_ethtool_gset _kc_mii_ethtool_gset ++#undef mii_ethtool_sset ++#define mii_ethtool_sset _kc_mii_ethtool_sset ++#undef mii_check_link ++#define mii_check_link _kc_mii_check_link ++extern int _kc_mii_link_ok (struct mii_if_info *mii); ++extern int _kc_mii_nway_restart (struct mii_if_info *mii); ++extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern void _kc_mii_check_link (struct mii_if_info *mii); ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++#undef generic_mii_ioctl ++#define generic_mii_ioctl _kc_generic_mii_ioctl ++extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_changed); ++#endif /* > 2.4.6 */ ++ ++ ++struct _kc_pci_dev_ext { ++ struct pci_dev *dev; ++ void *pci_drvdata; ++ struct pci_driver *driver; ++}; ++ ++struct _kc_net_dev_ext { ++ struct net_device *dev; ++ unsigned int carrier; ++}; ++ ++ ++/**************************************/ ++/* mii support */ ++ ++int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ u32 advert, bmcr, lpa, nego; ++ ++ ecmd->supported = ++ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | ++ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ++ ++ /* only supports twisted-pair */ ++ ecmd->port = PORT_MII; ++ ++ /* only supports internal transceiver */ ++ ecmd->transceiver = XCVR_INTERNAL; ++ ++ /* this isn't fully supported at higher layers */ ++ ecmd->phy_address = mii->phy_id; ++ ++ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ if (advert & ADVERTISE_10HALF) ++ ecmd->advertising |= ADVERTISED_10baseT_Half; ++ if (advert & ADVERTISE_10FULL) ++ ecmd->advertising |= ADVERTISED_10baseT_Full; ++ if (advert & ADVERTISE_100HALF) ++ ecmd->advertising |= ADVERTISED_100baseT_Half; ++ if (advert & ADVERTISE_100FULL) ++ ecmd->advertising |= ADVERTISED_100baseT_Full; ++ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); ++ if (bmcr & BMCR_ANENABLE) { ++ ecmd->advertising |= ADVERTISED_Autoneg; ++ ecmd->autoneg = AUTONEG_ENABLE; ++ ++ nego = mii_nway_result(advert & lpa); ++ if (nego == LPA_100FULL || nego == LPA_100HALF) ++ ecmd->speed = SPEED_100; ++ else ++ ecmd->speed = SPEED_10; ++ if (nego == LPA_100FULL || nego == LPA_10FULL) { ++ ecmd->duplex = DUPLEX_FULL; ++ mii->full_duplex = 1; ++ } 
else { ++ ecmd->duplex = DUPLEX_HALF; ++ mii->full_duplex = 0; ++ } ++ } else { ++ ecmd->autoneg = AUTONEG_DISABLE; ++ ++ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; ++ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; ++ } ++ ++ /* ignore maxtxpkt, maxrxpkt for now */ ++ ++ return 0; ++} ++ ++int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ ++ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) ++ return -EINVAL; ++ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) ++ return -EINVAL; ++ if (ecmd->port != PORT_MII) ++ return -EINVAL; ++ if (ecmd->transceiver != XCVR_INTERNAL) ++ return -EINVAL; ++ if (ecmd->phy_address != mii->phy_id) ++ return -EINVAL; ++ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) ++ return -EINVAL; ++ ++ /* ignore supported, maxtxpkt, maxrxpkt */ ++ ++ if (ecmd->autoneg == AUTONEG_ENABLE) { ++ u32 bmcr, advert, tmp; ++ ++ if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full)) == 0) ++ return -EINVAL; ++ ++ /* advertise only what has been requested */ ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); ++ if (ADVERTISED_10baseT_Half) ++ tmp |= ADVERTISE_10HALF; ++ if (ADVERTISED_10baseT_Full) ++ tmp |= ADVERTISE_10FULL; ++ if (ADVERTISED_100baseT_Half) ++ tmp |= ADVERTISE_100HALF; ++ if (ADVERTISED_100baseT_Full) ++ tmp |= ADVERTISE_100FULL; ++ if (advert != tmp) { ++ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); ++ mii->advertising = tmp; ++ } ++ ++ /* turn on autonegotiation, and force a renegotiate */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); ++ ++ mii->force_media = 0; ++ } else { ++ u32 bmcr, tmp; ++ ++ /* turn off auto negotiation, set speed and duplexity */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); ++ if (ecmd->speed == SPEED_100) ++ tmp |= BMCR_SPEED100; ++ if (ecmd->duplex == DUPLEX_FULL) { ++ tmp |= BMCR_FULLDPLX; ++ mii->full_duplex = 1; ++ } else ++ mii->full_duplex = 0; ++ if (bmcr != tmp) ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); ++ ++ mii->force_media = 1; ++ } ++ return 0; ++} ++ ++int _kc_mii_link_ok (struct mii_if_info *mii) ++{ ++ /* first, a dummy read, needed to latch some MII phys */ ++ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); ++ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) ++ return 1; ++ return 0; ++} ++ ++int _kc_mii_nway_restart (struct mii_if_info *mii) ++{ ++ int bmcr; ++ int r = -EINVAL; ++ ++ /* if autoneg is off, it's an error */ ++ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); ++ ++ if (bmcr & BMCR_ANENABLE) { ++ bmcr |= BMCR_ANRESTART; ++ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); ++ r = 0; ++ } ++ ++ return r; ++} ++ ++void _kc_mii_check_link (struct mii_if_info *mii) ++{ ++ int cur_link = mii_link_ok(mii); ++ int prev_link = netif_carrier_ok(mii->dev); ++ ++ if (cur_link && !prev_link) ++ netif_carrier_on(mii->dev); ++ else if (prev_link && !cur_link) ++ netif_carrier_off(mii->dev); ++} ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_chg_out) ++{ 
++ int rc = 0; ++ unsigned int duplex_changed = 0; ++ ++ if (duplex_chg_out) ++ *duplex_chg_out = 0; ++ ++ mii_data->phy_id &= mii_if->phy_id_mask; ++ mii_data->reg_num &= mii_if->reg_num_mask; ++ ++ switch(cmd) { ++ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ ++ case SIOCGMIIPHY: ++ mii_data->phy_id = mii_if->phy_id; ++ /* fall through */ ++ ++ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ ++ case SIOCGMIIREG: ++ mii_data->val_out = ++ mii_if->mdio_read(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num); ++ break; ++ ++ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ ++ case SIOCSMIIREG: { ++ u16 val = mii_data->val_in; ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (mii_data->phy_id == mii_if->phy_id) { ++ switch(mii_data->reg_num) { ++ case MII_BMCR: { ++ unsigned int new_duplex = 0; ++ if (val & (BMCR_RESET|BMCR_ANENABLE)) ++ mii_if->force_media = 0; ++ else ++ mii_if->force_media = 1; ++ if (mii_if->force_media && ++ (val & BMCR_FULLDPLX)) ++ new_duplex = 1; ++ if (mii_if->full_duplex != new_duplex) { ++ duplex_changed = 1; ++ mii_if->full_duplex = new_duplex; ++ } ++ break; ++ } ++ case MII_ADVERTISE: ++ mii_if->advertising = val; ++ break; ++ default: ++ /* do nothing */ ++ break; ++ } ++ } ++ ++ mii_if->mdio_write(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num, val); ++ break; ++ } ++ ++ default: ++ rc = -EOPNOTSUPP; ++ break; ++ } ++ ++ if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) ++ *duplex_chg_out = 1; ++ ++ return rc; ++} ++#endif /* > 2.4.6 */ ++ diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-support-intel-igb-bcm5461X-phy.patch b/packages/base/any/kernels/3.16-lts/patches/driver-support-intel-igb-bcm5461X-phy.patch new file mode 100644 index 00000000..5de8cb5b --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-support-intel-igb-bcm5461X-phy.patch @@ -0,0 +1,242 @@ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c +--- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 15:48:41.379628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-15 09:36:04.608478513 +0000 +@@ -302,6 +302,16 @@ + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; ++ case BCM5461S_PHY_ID: ++ phy->type = e1000_phy_bcm5461s; ++ phy->ops.check_polarity = NULL; ++ phy->ops.get_info = igb_get_phy_info_5461s; ++ phy->ops.get_cable_length = NULL; ++ phy->ops.force_speed_duplex = igb_e1000_phy_force_speed_duplex_82577; ++ break; ++ case BCM54616_E_PHY_ID: ++ phy->type = e1000_phy_bcm54616; ++ break; + default: + ret_val = -E1000_ERR_PHY; + goto out; +@@ -701,6 +711,17 @@ + break; + } + ret_val = e1000_get_phy_id(hw); ++ ++ if (ret_val && hw->mac.type == e1000_i354) { ++ /* we do a special check for bcm5461s phy by setting ++ * the phy->addr to 5 and doing the phy check again. 
This ++ * call will succeed and retrieve a valid phy id if we have ++ * the bcm5461s phy ++ */ ++ phy->addr = 5; ++ phy->type = e1000_phy_bcm5461s; ++ ret_val = e1000_get_phy_id(hw); ++ } + goto out; + } + +@@ -1148,6 +1169,9 @@ + (hw->phy.type == e1000_phy_igp_3)) + e1000_phy_init_script_igp3(hw); + ++ if (hw->phy.type == e1000_phy_bcm5461s) ++ igb_phy_init_script_5461s(hw); ++ + return E1000_SUCCESS; + } + +@@ -1557,6 +1581,7 @@ + case e1000_i350: + case e1000_i210: + case e1000_i211: ++ case e1000_i354: + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); +@@ -1602,6 +1627,10 @@ + case e1000_phy_82580: + ret_val = igb_e1000_copper_link_setup_82577(hw); + break; ++ case e1000_phy_bcm54616: ++ break; ++ case e1000_phy_bcm5461s: ++ break; + default: + ret_val = -E1000_ERR_PHY; + break; +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h +--- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 15:48:41.383628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 17:13:16.567695539 +0000 +@@ -1184,6 +1184,8 @@ + #define I350_I_PHY_ID 0x015403B0 + #define I210_I_PHY_ID 0x01410C00 + #define IGP04E1000_E_PHY_ID 0x02A80391 ++#define BCM54616_E_PHY_ID 0x3625D10 ++#define BCM5461S_PHY_ID 0x002060C0 + #define M88_VENDOR 0x0141 + + /* M88E1000 Specific Registers */ +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h +--- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 15:48:41.387628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 17:11:55.735694465 +0000 +@@ -133,6 +133,8 @@ + e1000_phy_82580, + e1000_phy_vf, + e1000_phy_i210, ++ e1000_phy_bcm54616, ++ e1000_phy_bcm5461s, + }; + + enum e1000_bus_type { +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c +--- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 15:48:41.403628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-15 09:48:09.668488140 +0000 +@@ -272,6 +272,13 @@ + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ ++ if (phy->type == e1000_phy_bcm5461s) { ++ mdic = E1000_READ_REG(hw, E1000_MDICNFG); ++ mdic &= ~E1000_MDICNFG_PHY_MASK; ++ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdic); ++ } ++ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); +@@ -331,6 +338,13 @@ + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ ++ if (phy->type == e1000_phy_bcm5461s) { ++ mdic = E1000_READ_REG(hw, E1000_MDICNFG); ++ mdic &= ~E1000_MDICNFG_PHY_MASK; ++ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); ++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdic); ++ } ++ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | +@@ -1614,10 +1628,12 @@ + * depending on user settings. 
+ */ + DEBUGOUT("Forcing Speed and Duplex\n"); +- ret_val = hw->phy.ops.force_speed_duplex(hw); +- if (ret_val) { +- DEBUGOUT("Error Forcing Speed and Duplex\n"); +- return ret_val; ++ if (hw->phy.ops.force_speed_duplex) { ++ ret_val = hw->phy.ops.force_speed_duplex(hw); ++ if (ret_val) { ++ DEBUGOUT("Error Forcing Speed and Duplex\n"); ++ return ret_val; ++ } + } + } + +@@ -3407,3 +3423,67 @@ + + return ready; + } ++ ++/** ++ * igb_phy_init_script_5461s - Inits the BCM5461S PHY ++ * @hw: pointer to the HW structure ++ * ++ * Initializes a Broadcom Gigabit PHY. ++ **/ ++s32 igb_phy_init_script_5461s(struct e1000_hw *hw) ++{ ++ u16 mii_reg_led = 0; ++ ++ /* 1. Speed LED (Set the Link LED mode), Shadow 00010, 0x1C.bit2=1 */ ++ hw->phy.ops.write_reg(hw, 0x1C, 0x0800); ++ hw->phy.ops.read_reg(hw, 0x1C, &mii_reg_led); ++ mii_reg_led |= 0x0004; ++ hw->phy.ops.write_reg(hw, 0x1C, mii_reg_led | 0x8000); ++ ++ /* 2. Active LED (Set the Link LED mode), Shadow 01001, 0x1C.bit4=1, 0x10.bit5=0 */ ++ hw->phy.ops.write_reg(hw, 0x1C, 0x2400); ++ hw->phy.ops.read_reg(hw, 0x1C, &mii_reg_led); ++ mii_reg_led |= 0x0010; ++ hw->phy.ops.write_reg(hw, 0x1C, mii_reg_led | 0x8000); ++ hw->phy.ops.read_reg(hw, 0x10, &mii_reg_led); ++ mii_reg_led &= 0xffdf; ++ hw->phy.ops.write_reg(hw, 0x10, mii_reg_led); ++ ++ return 0; ++} ++ ++ ++/** ++ * igb_get_phy_info_5461s - Retrieve 5461s PHY information ++ * @hw: pointer to the HW structure ++ * ++ * Read PHY status to determine if link is up. If link is up, then ++ * set/determine 10base-T extended distance and polarity correction. Read ++ * PHY port status to determine MDI/MDIx and speed. Based on the speed, ++ * determine on the cable length, local and remote receiver. ++ **/ ++s32 igb_get_phy_info_5461s(struct e1000_hw *hw) ++{ ++ struct e1000_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ bool link; ++ ++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); ++ if (ret_val) ++ goto out; ++ ++ if (!link) { ++ ret_val = -E1000_ERR_CONFIG; ++ goto out; ++ } ++ ++ phy->polarity_correction = true; ++ ++ phy->is_mdix = true; ++ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; ++ phy->local_rx = e1000_1000t_rx_status_ok; ++ phy->remote_rx = e1000_1000t_rx_status_ok; ++ ++out: ++ return ret_val; ++} +diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h +--- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 15:48:41.403628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 17:21:08.243701801 +0000 +@@ -74,6 +74,8 @@ + s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); + s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); ++s32 igb_phy_init_script_5461s(struct e1000_hw *hw); ++s32 igb_get_phy_info_5461s(struct e1000_hw *hw); + enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); + s32 e1000_determine_phy_address(struct e1000_hw *hw); + s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +--- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 15:48:41.411628151 +0000 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 19:07:51.867786828 +0000 +@@ -8607,11 +8607,19 @@ + case SIOCGMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; ++ adapter->hw.phy.addr = data->phy_id; + if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: ++ if 
(!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ adapter->hw.phy.addr = data->phy_id; ++ if (igb_e1000_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, ++ data->val_in)) ++ return -EIO; ++ break; + default: + return -EOPNOTSUPP; + } diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom-update.patch b/packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom-update.patch new file mode 100644 index 00000000..3deb7cdb --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom-update.patch @@ -0,0 +1,141 @@ +Update SFF8436 EEPROM driver + +From: Shuotian Cheng + +Support newer kernel and remove eeprom_class dependency +--- + drivers/misc/eeprom/sff_8436_eeprom.c | 27 +++++++-------------------- + include/linux/i2c/sff-8436.h | 2 -- + 2 files changed, 7 insertions(+), 22 deletions(-) + +diff --git a/drivers/misc/eeprom/sff_8436_eeprom.c b/drivers/misc/eeprom/sff_8436_eeprom.c +index 0b6bf31..f5627bf 100644 +--- a/drivers/misc/eeprom/sff_8436_eeprom.c ++++ b/drivers/misc/eeprom/sff_8436_eeprom.c +@@ -82,7 +82,6 @@ + #include + #include + #include +-#include + + #include + #include +@@ -116,7 +115,6 @@ struct sff_8436_data { + unsigned num_addresses; + + u8 data[SFF_8436_EEPROM_SIZE]; +- struct eeprom_device *eeprom_dev; + + struct i2c_client *client[]; + }; +@@ -421,10 +419,9 @@ static ssize_t sff_8436_eeprom_write(struct sff_8436_data *sff_8436, const char + { + struct i2c_client *client = sff_8436->client[0]; + struct i2c_msg msg; +- ssize_t status; + unsigned long timeout, write_time; + unsigned next_page; +- int i = 0; ++ int status, i = 0; + + /* write max is at most a page */ + if (count > sff_8436->write_max) +@@ -528,7 +525,7 @@ static ssize_t sff_8436_eeprom_update_client(struct sff_8436_data *sff_8436, + page = sff_8436_translate_offset(sff_8436, &phy_offset); + + dev_dbg(&client->dev, +- "sff_8436_eeprom_update_client off %lld page:%d phy_offset:%lld, count:%d, opcode:%d\n", ++ "sff_8436_eeprom_update_client off %lld page:%d phy_offset:%lld, count:%zu, opcode:%d\n", + off, page, phy_offset, count, opcode); + if (page > 0) { + ret = sff_8436_write_page_reg(sff_8436, page); +@@ -705,18 +702,18 @@ static ssize_t sff_8436_read_write(struct sff_8436_data *sff_8436, + pending_len = pending_len - page_len; + + dev_dbg(&client->dev, +- "sff_read off %lld len %d page_start_offset %lld page_offset %lld page_len %d pending_len %d\n", ++ "sff_read off %lld len %zu page_start_offset %lld page_offset %lld page_len %zu pending_len %zu\n", + off, len, page_start_offset, page_offset, page_len, pending_len); + + /* Refresh the data from offset for specified len */ + ret = sff_8436_eeprom_update_client(sff_8436, page_offset, page_len, opcode); + if (ret != page_len) { + if (err_timeout) { +- dev_dbg(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", ++ dev_dbg(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %zu failed %d!\n", + (page ? "Upper" : "Lower"), (page ? (page-1) : page), page_offset, page_len, ret); + goto err; + } else { +- dev_err(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", ++ dev_err(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %zu failed %d!\n", + (page ? "Upper" : "Lower"), (page ? 
(page-1) : page), page_offset, page_len, ret); + } + } +@@ -780,15 +777,13 @@ static ssize_t sff_8436_macc_write(struct memory_accessor *macc, const char *buf + + /*-------------------------------------------------------------------------*/ + +-static int __devexit sff_8436_remove(struct i2c_client *client) ++static int sff_8436_remove(struct i2c_client *client) + { + struct sff_8436_data *sff_8436; + + sff_8436 = i2c_get_clientdata(client); + sysfs_remove_bin_file(&client->dev.kobj, &sff_8436->bin); + +- eeprom_device_unregister(sff_8436->eeprom_dev); +- + kfree(sff_8436->writebuf); + kfree(sff_8436); + return 0; +@@ -821,7 +816,6 @@ static int sff_8436_eeprom_probe(struct i2c_client *client, + + chip.setup = NULL; + chip.context = NULL; +- chip.eeprom_data = NULL; + } + + if (!is_power_of_2(chip.byte_len)) +@@ -923,13 +917,6 @@ static int sff_8436_eeprom_probe(struct i2c_client *client, + if (err) + goto err_struct; + +- sff_8436->eeprom_dev = eeprom_device_register(&client->dev, chip.eeprom_data); +- if (IS_ERR(sff_8436->eeprom_dev)) { +- dev_err(&client->dev, "error registering eeprom device.\n"); +- err = PTR_ERR(sff_8436->eeprom_dev); +- goto err_sysfs_cleanup; +- } +- + i2c_set_clientdata(client, sff_8436); + + dev_info(&client->dev, "%zu byte %s EEPROM, %s\n", +@@ -968,7 +955,7 @@ static struct i2c_driver sff_8436_driver = { + .owner = THIS_MODULE, + }, + .probe = sff_8436_eeprom_probe, +- .remove = __devexit_p(sff_8436_remove), ++ .remove = sff_8436_remove, + .id_table = sff8436_ids, + }; + +diff --git a/include/linux/i2c/sff-8436.h b/include/linux/i2c/sff-8436.h +index cd46896..4df48ad 100644 +--- a/include/linux/i2c/sff-8436.h ++++ b/include/linux/i2c/sff-8436.h +@@ -3,7 +3,6 @@ + + #include + #include +-#include + + /* + * As seen through Linux I2C, differences between the most common types of I2C +@@ -27,7 +26,6 @@ struct sff_8436_platform_data { + + void (*setup)(struct memory_accessor *, void *context); + void *context; +- struct eeprom_platform_data *eeprom_data; /* extra data for the eeprom_class */ + }; + + #endif /* _LINUX_SFF_8436_H */ diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom.patch b/packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom.patch new file mode 100644 index 00000000..86d8c3e0 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-support-sff-8436-eeprom.patch @@ -0,0 +1,1086 @@ +Driver to expose eeprom information including DOM for QSFPs + +From: Cumulus Networks + + +--- + drivers/misc/eeprom/Kconfig | 12 + drivers/misc/eeprom/Makefile | 1 + drivers/misc/eeprom/sff_8436_eeprom.c | 995 +++++++++++++++++++++++++++++++++ + include/linux/i2c/sff-8436.h | 33 + + 4 files changed, 1041 insertions(+) + create mode 100644 drivers/misc/eeprom/sff_8436_eeprom.c + create mode 100644 include/linux/i2c/sff-8436.h + +diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig +index 9536852..484e3e1 100644 +--- a/drivers/misc/eeprom/Kconfig ++++ b/drivers/misc/eeprom/Kconfig +@@ -96,6 +96,18 @@ config EEPROM_DIGSY_MTC_CFG + + If unsure, say N. + ++config EEPROM_SFF_8436 ++ tristate "SFF-8436 QSFP EEPROMs support" ++ depends on I2C && SYSFS ++ help ++ If you say yes here you get read-only support for the EEPROM of ++ the QSFPs which are implemented as per SFF-8436. ++ ++ All other features of this chip should be accessed via i2c-dev. ++ ++ This driver can also be built as a module. If so, the module ++ will be called sff_8436. 
++ + config EEPROM_SUNXI_SID + tristate "Allwinner sunxi security ID support" + depends on ARCH_SUNXI && SYSFS +diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile +index 9507aec..235b5cc 100644 +--- a/drivers/misc/eeprom/Makefile ++++ b/drivers/misc/eeprom/Makefile +@@ -6,3 +6,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o + obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o + obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o + obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o ++obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o +diff --git a/drivers/misc/eeprom/sff_8436_eeprom.c b/drivers/misc/eeprom/sff_8436_eeprom.c +new file mode 100644 +index 0000000..0b6bf31 +--- /dev/null ++++ b/drivers/misc/eeprom/sff_8436_eeprom.c +@@ -0,0 +1,995 @@ ++/* ++ * sff_8436_eeprom.c - handle most SFF-8436 based QSFP EEPROMs ++ * ++ * Copyright (C) 2014 Cumulus networks Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Freeoftware Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++/* ++ * Description: ++ * a) SFF 8436 based qsfp read/write transactions are just like the at24 eeproms ++ * b) The register/memory layout is up to 5 128 byte pages defined by a "pages valid" ++ * register and switched via a "page select" register as explained in below diagram. ++ * c) 256 bytes are mapped at a time. page 0 is always mapped to the first 128 bytes and ++ * the other 4 pages are selectively mapped to the second 128 bytes ++ * ++ * SFF 8436 based QSFP Memory Map ++ * ++ * 2-Wire Serial Address: 1010000x ++ * ++ * Lower Page 00h (128 bytes) ++ * ===================== ++ * | | ++ * | | ++ * | | ++ * | | ++ * | | ++ * | | ++ * | | ++ * | | ++ * | | ++ * | | ++ * |Page Select Byte(127)| ++ * ===================== ++ * | ++ * | ++ * | ++ * | ++ * V ++ * ----------------------------------------------------------------- ++ * | | | | ++ * | | | | ++ * | | | | ++ * | | | | ++ * | | | | ++ * | | | | ++ * | | | | ++ * | | | | ++ * | | | | ++ * V V V V ++ * ------------- ---------------- ----------------- -------------- ++ * | | | | | | | | ++ * | Upper | | Upper | | Upper | | Upper | ++ * | Page 00h | | Page 01h | | Page 02h | | Page 03h | ++ * | | | (Optional) | | (Optional) | | (Optional | ++ * | | | | | | | for Cable | ++ * | | | | | | | Assemblies) | ++ * | ID | | AST | | User | | | ++ * | Fields | | Table | | EEPROM Data | | | ++ * | | | | | | | | ++ * | | | | | | | | ++ * | | | | | | | | ++ * ------------- ---------------- ----------------- -------------- ++ * ++ * ++ **/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define SFF_8436_EEPROM_SIZE 5*128 ++#define SFF_8436_MAX_PAGE_COUNT 5 ++#define SFF_8436_MMAP_SIZE 256 ++#define SFF_8436_PAGE_SELECT_REG 0x7F ++ ++#define SFF_8436_OPTION_4_OFFSET 0xC3 ++#define SFF_8436_PAGE_02_PRESENT (1 << 7) /* Memory Page 02 present */ ++#define SFF_8436_PAGE_01_PRESENT (1 << 6) /* Memory Page 01 present */ ++#define SFF_8436_STATUS_2_OFFSET 0x02 ++#define SFF_8436_STATUS_PAGE_03_PRESENT_L (1 << 2) /* Flat Memory:0- Paging, 1- Page 0 only */ ++ ++struct sff_8436_data { ++ struct sff_8436_platform_data chip; ++ struct memory_accessor macc; ++ int use_smbus; ++ ++ /* ++ * Lock protects against activities from other Linux tasks, ++ * but not from 
changes by other I2C masters. ++ */ ++ struct mutex lock; ++ struct bin_attribute bin; ++ ++ u8 *writebuf; ++ unsigned write_max; ++ ++ unsigned num_addresses; ++ ++ u8 data[SFF_8436_EEPROM_SIZE]; ++ struct eeprom_device *eeprom_dev; ++ ++ struct i2c_client *client[]; ++}; ++ ++typedef enum qsfp_opcode { ++ QSFP_READ_OP = 0, ++ QSFP_WRITE_OP = 1 ++} qsfp_opcode_e; ++ ++/* ++ * This parameter is to help this driver avoid blocking other drivers out ++ * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C ++ * clock, one 256 byte read takes about 1/43 second which is excessive; ++ * but the 1/170 second it takes at 400 kHz may be quite reasonable; and ++ * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible. ++ * ++ * This value is forced to be a power of two so that writes align on pages. ++ */ ++static unsigned io_limit = 128; ++ ++/* ++ *pecs often allow 5 msec for a page write, sometimes 20 msec; ++ * it's important to recover from write timeouts. ++ */ ++static unsigned write_timeout = 25; ++ ++#define SFF_8436_PAGE_SIZE 128 ++#define SFF_8436_SIZE_BYTELEN 5 ++#define SFF_8436_SIZE_FLAGS 8 ++ ++#define SFF_8436_BITMASK(x) (BIT(x) - 1) ++ ++ ++/* create non-zero magic value for given eeprom parameters */ ++#define SFF_8436_DEVICE_MAGIC(_len, _flags) \ ++ ((1 << SFF_8436_SIZE_FLAGS | (_flags)) \ ++ << SFF_8436_SIZE_BYTELEN | ilog2(_len)) ++ ++static const struct i2c_device_id sff8436_ids[] = { ++ { "sff8436",SFF_8436_DEVICE_MAGIC(2048 / 8, 0) }, ++ { /* END OF LIST */ } ++}; ++MODULE_DEVICE_TABLE(i2c, sff8436_ids); ++ ++/*-------------------------------------------------------------------------*/ ++/* ++ * This routine computes the addressing information to be used for a given r/w request. ++ * Assumes that sanity checks for offset happened at sysfs-layer. ++ * Offset within Lower Page 00h and Upper Page 00h are not recomputed ++ */ ++static uint8_t sff_8436_translate_offset(struct sff_8436_data *sff_8436, ++ loff_t *offset) ++{ ++ unsigned page = 0; ++ ++ if (*offset < SFF_8436_MMAP_SIZE) { ++ return 0; ++ } ++ ++ page = (*offset >> 7)-1; ++ ++ if (page > 0 ) { ++ *offset = 0x80 + (*offset & 0x7f); ++ } else { ++ *offset &= 0xff; ++ } ++ ++ return page; ++} ++ ++static int sff_8436_read_reg(struct sff_8436_data *sff_8436, ++ uint8_t reg, uint8_t *val) ++{ ++ int count = 1, i = 0; ++ struct i2c_client *client = sff_8436->client[0]; ++ struct i2c_msg msg[2]; ++ u8 msgbuf[2]; ++ ssize_t status; ++ unsigned long timeout, read_time; ++ ++ memset(msg, 0, sizeof(msg)); ++ ++ /* ++ * Writes fail if the previous one didn't complete yet. We may ++ * loop a few times until this one succeeds, waiting at least ++ * long enough for one entire page write to work. 
++ */ ++ timeout = jiffies + msecs_to_jiffies(write_timeout); ++ do { ++ read_time = jiffies; ++ switch (sff_8436->use_smbus) { ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ status = i2c_smbus_read_i2c_block_data(client, ++ reg, count, val); ++ break; ++ case I2C_SMBUS_WORD_DATA: ++ status = i2c_smbus_read_word_data(client, reg); ++ ++ if (status >= 0) { ++ *val = status & 0xff; ++ status = count; ++ } ++ break; ++ case I2C_SMBUS_BYTE_DATA: ++ status = i2c_smbus_read_byte_data(client, reg); ++ ++ if (status >= 0) { ++ *val = status; ++ status = count; ++ } ++ break; ++ ++ default: ++ i = 0; ++ msgbuf[i++] = reg; ++ ++ msg[0].addr = client->addr; ++ msg[0].buf = msgbuf; ++ msg[0].len = i; ++ ++ msg[1].addr = client->addr; ++ msg[1].flags = I2C_M_RD; ++ msg[1].buf = val; ++ msg[1].len = count; ++ ++ status = i2c_transfer(client->adapter, msg, 2); ++ if (status == 2) ++ status = count; ++ break; ++ } ++ dev_dbg(&client->dev, "read (using smbus %d) %d@%d --> %zd (%ld)\n", ++ sff_8436->use_smbus, count, reg, status, jiffies); ++ ++ if (status == count) ++ return count; ++ ++ /* REVISIT: at HZ=100, this is sloooow */ ++ msleep(1); ++ } while (time_before(read_time, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static int sff_8436_write_reg(struct sff_8436_data *sff_8436, ++ uint8_t reg, uint8_t val) ++{ ++ uint8_t data[2] = { reg, val }; ++ int count = 1; ++ struct i2c_client *client = sff_8436->client[0]; ++ struct i2c_msg msg; ++ ssize_t status; ++ unsigned long timeout, write_time; ++ ++ /* ++ * Writes fail if the previous one didn't complete yet. We may ++ * loop a few times until this one succeeds, waiting at least ++ * long enough for one entire page write to work. ++ */ ++ timeout = jiffies + msecs_to_jiffies(write_timeout); ++ do { ++ write_time = jiffies; ++ switch (sff_8436->use_smbus) { ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ status = i2c_smbus_write_i2c_block_data(client, ++ reg, count, &val); ++ if (status == 0) ++ status = count; ++ break; ++ case I2C_SMBUS_WORD_DATA: ++ case I2C_SMBUS_BYTE_DATA: ++ status = i2c_smbus_write_byte_data(client, reg, val); ++ ++ if (status == 0) ++ status = count; ++ break; ++ default: ++ msg.addr = client->addr; ++ msg.flags = 0; ++ msg.len = sizeof(data); ++ msg.buf = (char *) data; ++ ++ status = i2c_transfer(client->adapter, &msg, 1); ++ if (status == 1) ++ status = count; ++ break; ++ } ++ dev_dbg(&client->dev, "write (using smbus %d) %d@%d --> %zd (%ld)\n", ++ sff_8436->use_smbus, count, reg, status, jiffies); ++ ++ if (status == count) ++ return count; ++ ++ /* REVISIT: at HZ=100, this is sloooow */ ++ msleep(1); ++ } while (time_before(write_time, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static int sff_8436_write_page_reg(struct sff_8436_data *sff_8436, ++ uint8_t val) ++{ ++ return sff_8436_write_reg(sff_8436, SFF_8436_PAGE_SELECT_REG, val); ++} ++ ++static ssize_t sff_8436_eeprom_read(struct sff_8436_data *sff_8436, char *buf, ++ unsigned offset, size_t count) ++{ ++ struct i2c_msg msg[2]; ++ u8 msgbuf[2]; ++ struct i2c_client *client = sff_8436->client[0]; ++ unsigned long timeout, read_time; ++ int status, i; ++ ++ memset(msg, 0, sizeof(msg)); ++ ++ switch (sff_8436->use_smbus) { ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ /*smaller eeproms can work given some SMBus extension calls */ ++ if (count > I2C_SMBUS_BLOCK_MAX) ++ count = I2C_SMBUS_BLOCK_MAX; ++ break; ++ case I2C_SMBUS_WORD_DATA: ++ /* Check for odd length transaction */ ++ count = (count == 1) ? 
1 : 2; ++ break; ++ case I2C_SMBUS_BYTE_DATA: ++ count = 1; ++ break; ++ default: ++ /* ++ * When we have a better choice than SMBus calls, use a ++ * combined I2C message. Write address; then read up to ++ * io_limit data bytes. Note that read page rollover helps us ++ * here (unlike writes). msgbuf is u8 and will cast to our ++ * needs. ++ */ ++ i = 0; ++ msgbuf[i++] = offset; ++ ++ msg[0].addr = client->addr; ++ msg[0].buf = msgbuf; ++ msg[0].len = i; ++ ++ msg[1].addr = client->addr; ++ msg[1].flags = I2C_M_RD; ++ msg[1].buf = buf; ++ msg[1].len = count; ++ } ++ ++ /* ++ * Reads fail if the previous write didn't complete yet. We may ++ * loop a few times until this one succeeds, waiting at least ++ * long enough for one entire page write to work. ++ */ ++ timeout = jiffies + msecs_to_jiffies(write_timeout); ++ do { ++ read_time = jiffies; ++ ++ switch (sff_8436->use_smbus) { ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ status = i2c_smbus_read_i2c_block_data(client, offset, ++ count, buf); ++ break; ++ case I2C_SMBUS_WORD_DATA: ++ status = i2c_smbus_read_word_data(client, offset); ++ if (status >= 0) { ++ buf[0] = status & 0xff; ++ if (count == 2) ++ buf[1] = status >> 8; ++ status = count; ++ } ++ break; ++ case I2C_SMBUS_BYTE_DATA: ++ status = i2c_smbus_read_byte_data(client, offset); ++ if (status >= 0) { ++ buf[0] = status; ++ status = count; ++ } ++ break; ++ default: ++ status = i2c_transfer(client->adapter, msg, 2); ++ if (status == 2) ++ status = count; ++ } ++ ++ dev_dbg(&client->dev, "eeprom read %zu@%d --> %d (%ld)\n", ++ count, offset, status, jiffies); ++ ++ if (status == count) ++ return count; ++ ++ /* REVISIT: at HZ=100, this is sloooow */ ++ msleep(1); ++ } while (time_before(read_time, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static ssize_t sff_8436_eeprom_write(struct sff_8436_data *sff_8436, const char *buf, ++ unsigned offset, size_t count) ++{ ++ struct i2c_client *client = sff_8436->client[0]; ++ struct i2c_msg msg; ++ ssize_t status; ++ unsigned long timeout, write_time; ++ unsigned next_page; ++ int i = 0; ++ ++ /* write max is at most a page */ ++ if (count > sff_8436->write_max) ++ count = sff_8436->write_max; ++ ++ /* Never roll over backwards, to the start of this page */ ++ next_page = roundup(offset + 1, SFF_8436_PAGE_SIZE); ++ if (offset + count > next_page) ++ count = next_page - offset; ++ ++ switch (sff_8436->use_smbus) { ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ /*smaller eeproms can work given some SMBus extension calls */ ++ if (count > I2C_SMBUS_BLOCK_MAX) ++ count = I2C_SMBUS_BLOCK_MAX; ++ break; ++ case I2C_SMBUS_WORD_DATA: ++ /* Check for odd length transaction */ ++ count = (count == 1) ? 1 : 2; ++ break; ++ case I2C_SMBUS_BYTE_DATA: ++ count = 1; ++ break; ++ default: ++ /* If we'll use I2C calls for I/O, set up the message */ ++ msg.addr = client->addr; ++ msg.flags = 0; ++ ++ /* msg.buf is u8 and casts will mask the values */ ++ msg.buf = sff_8436->writebuf; ++ ++ msg.buf[i++] = offset; ++ memcpy(&msg.buf[i], buf, count); ++ msg.len = i + count; ++ break; ++ } ++ ++ /* ++ * Reads fail if the previous write didn't complete yet. We may ++ * loop a few times until this one succeeds, waiting at least ++ * long enough for one entire page write to work. 
++ */ ++ timeout = jiffies + msecs_to_jiffies(write_timeout); ++ do { ++ write_time = jiffies; ++ ++ switch (sff_8436->use_smbus) { ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ status = i2c_smbus_write_i2c_block_data(client, ++ offset, count, buf); ++ if (status == 0) ++ status = count; ++ break; ++ case I2C_SMBUS_WORD_DATA: ++ if (count == 2) { ++ status = i2c_smbus_write_word_data( ++ client,offset,(u16)((buf[0]) | ++ (buf[1] << 8))); ++ } else { ++ /* count = 1 */ ++ status = i2c_smbus_write_byte_data( ++ client, offset, buf[0]); ++ } ++ if (status == 0) ++ status = count; ++ break; ++ case I2C_SMBUS_BYTE_DATA: ++ status = i2c_smbus_write_byte_data(client, offset, buf[0]); ++ if (status == 0) ++ status = count; ++ break; ++ default: ++ status = i2c_transfer(client->adapter, &msg, 1); ++ if (status == 1) ++ status = count; ++ break; ++ } ++ ++ dev_dbg(&client->dev, "eeprom write %zu@%d --> %d (%ld)\n", ++ count, offset, status, jiffies); ++ ++ if (status == count) ++ return count; ++ ++ /* REVISIT: at HZ=100, this is sloooow */ ++ msleep(1); ++ } while (time_before(write_time, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static ssize_t sff_8436_eeprom_update_client(struct sff_8436_data *sff_8436, ++ loff_t off, size_t count, qsfp_opcode_e opcode) ++{ ++ struct i2c_client *client = sff_8436->client[0]; ++ ssize_t retval = 0; ++ u8 page = 0; ++ loff_t phy_offset = off; ++ int ret = 0; ++ ++ page = sff_8436_translate_offset(sff_8436, &phy_offset); ++ ++ dev_dbg(&client->dev, ++ "sff_8436_eeprom_update_client off %lld page:%d phy_offset:%lld, count:%d, opcode:%d\n", ++ off, page, phy_offset, count, opcode); ++ if (page > 0) { ++ ret = sff_8436_write_page_reg(sff_8436, page); ++ if (ret < 0) { ++ dev_err(&client->dev, ++ "sff_8436_write_page_reg for page %d failed ret:%d!\n", ++ page, ret); ++ return ret; ++ } ++ } ++ ++ while (count) { ++ ssize_t status; ++ ++ if (opcode == QSFP_READ_OP) { ++ status = sff_8436_eeprom_read(sff_8436, (char *)(&sff_8436->data[off]), phy_offset, count); ++ } else { ++ status = sff_8436_eeprom_write(sff_8436, (char *)(&sff_8436->data[off]), phy_offset, count); ++ } ++ if (status <= 0) { ++ if (retval == 0) ++ retval = status; ++ break; ++ } ++ phy_offset += status; ++ off += status; ++ count -= status; ++ retval += status; ++ } ++ ++ ++ if (page > 0) { ++ ret = sff_8436_write_page_reg(sff_8436, 0); ++ if (ret < 0) { ++ dev_err(&client->dev, ++ "sff_8436_write_page_reg for page 0 failed ret:%d!\n", ret); ++ return ret; ++ } ++ } ++ return retval; ++} ++ ++static ssize_t sff_8436_read_write(struct sff_8436_data *sff_8436, ++ char *buf, loff_t off, size_t len, qsfp_opcode_e opcode) ++{ ++ struct i2c_client *client = sff_8436->client[0]; ++ u8 page; ++ u8 refresh_page = 0; ++ int ret = 0; ++ u8 val = 0; ++ int err_timeout = 0; ++ size_t pending_len = 0, page_len = 0; ++ loff_t page_offset = 0, page_start_offset = 0; ++ ++ if (unlikely(!len)) ++ return len; ++ ++ if (off > SFF_8436_EEPROM_SIZE) ++ return 0; ++ ++ if (off + len > SFF_8436_EEPROM_SIZE) ++ len = SFF_8436_EEPROM_SIZE - off; ++ ++ if (opcode == QSFP_READ_OP) { ++ memset(sff_8436->data, 0xff, SFF_8436_EEPROM_SIZE); ++ } else if (opcode == QSFP_WRITE_OP) { ++ memcpy(&sff_8436->data[off], buf, len); ++ } ++ ++ /* ++ * Read data from chip, protecting against concurrent updates ++ * from this host, but not from other I2C masters. 
++ */ ++ mutex_lock(&sff_8436->lock); ++ ++ /* ++ * Refresh pages which covers the requested data ++ * from offset to off + len ++ * Only refresh pages which contain requested bytes ++ * ++ */ ++ ++ pending_len = len; ++ ++ for (page = off >> 7; page <= (off + len - 1) >> 7; page++) { ++ refresh_page = 0; ++ switch (page) { ++ case 0: ++ /* Lower page 00h */ ++ refresh_page = 1; ++ err_timeout = 1; ++ break; ++ case 1: ++ /* Upper page 00h */ ++ refresh_page = 1; ++ err_timeout = 1; ++ break; ++ case 2: ++ /* Upper page 01h */ ++ ret = sff_8436_read_reg(sff_8436, SFF_8436_OPTION_4_OFFSET, &val); ++ if (ret < 0) { ++ dev_dbg(&client->dev, ++ "sff_8436_read_reg for page 01h status failed %d!\n", ret); ++ goto err; ++ } ++ if (val & SFF_8436_PAGE_01_PRESENT) { ++ refresh_page = 1; ++ } ++ break; ++ case 3: ++ /* Upper page 02h */ ++ ret = sff_8436_read_reg(sff_8436, SFF_8436_OPTION_4_OFFSET, &val); ++ if (ret < 0) { ++ dev_dbg(&client->dev, ++ "sff_8436_read_reg for page 02h status failed %d!\n", ret); ++ goto err; ++ } ++ if (val & SFF_8436_PAGE_02_PRESENT) { ++ refresh_page = 1; ++ } ++ break; ++ case 4: ++ /* Upper page 03h */ ++ ret = sff_8436_read_reg(sff_8436, SFF_8436_STATUS_2_OFFSET, &val); ++ if (ret < 0) { ++ dev_dbg(&client->dev, ++ "sff_8436_read_reg for page 03h status failed %d!\n", ret); ++ goto err; ++ } ++ if (!(val & SFF_8436_STATUS_PAGE_03_PRESENT_L)) { ++ refresh_page = 1; ++ } ++ break; ++ default: ++ /* Invalid page index */ ++ dev_err(&client->dev, "Invalid page %d!\n", page); ++ ret = -EINVAL; ++ goto err; ++ } ++ ++ if (!refresh_page) { ++ /* if page is not valid or already refreshed */ ++ continue; ++ } ++ ++ /* ++ * Compute the offset and number of bytes to be read/write ++ * w.r.t requested page ++ * ++ * 1. start at offset 0 (within the page), and read/write the entire page ++ * 2. start at offset 0 (within the page) and read/write less than entire page ++ * 3. start at an offset not equal to 0 and read/write the rest of the page ++ * 4. start at an offset not equal to 0 and read/write less than (end of page - offset) ++ * ++ */ ++ page_start_offset = page * SFF_8436_PAGE_SIZE; ++ ++ if (page_start_offset < off) { ++ page_offset = off; ++ if (off + pending_len < page_start_offset + SFF_8436_PAGE_SIZE) { ++ page_len = pending_len; ++ } else { ++ page_len = SFF_8436_PAGE_SIZE - off; ++ } ++ } else { ++ page_offset = page_start_offset; ++ if (pending_len > SFF_8436_PAGE_SIZE) { ++ page_len = SFF_8436_PAGE_SIZE; ++ } else { ++ page_len = pending_len; ++ } ++ } ++ ++ pending_len = pending_len - page_len; ++ ++ dev_dbg(&client->dev, ++ "sff_read off %lld len %d page_start_offset %lld page_offset %lld page_len %d pending_len %d\n", ++ off, len, page_start_offset, page_offset, page_len, pending_len); ++ ++ /* Refresh the data from offset for specified len */ ++ ret = sff_8436_eeprom_update_client(sff_8436, page_offset, page_len, opcode); ++ if (ret != page_len) { ++ if (err_timeout) { ++ dev_dbg(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", ++ (page ? "Upper" : "Lower"), (page ? (page-1) : page), page_offset, page_len, ret); ++ goto err; ++ } else { ++ dev_err(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", ++ (page ? "Upper" : "Lower"), (page ? 
(page-1) : page), page_offset, page_len, ret); ++ } ++ } ++ } ++ mutex_unlock(&sff_8436->lock); ++ ++ if (opcode == QSFP_READ_OP) { ++ memcpy(buf, &sff_8436->data[off], len); ++ } ++ return len; ++ ++err: ++ mutex_unlock(&sff_8436->lock); ++ ++ return ret; ++} ++ ++static ssize_t sff_8436_bin_read(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count) ++{ ++ struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); ++ struct sff_8436_data *sff_8436 = i2c_get_clientdata(client); ++ ++ return sff_8436_read_write(sff_8436, buf, off, count, QSFP_READ_OP); ++} ++ ++ ++static ssize_t sff_8436_bin_write(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count) ++{ ++ struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); ++ struct sff_8436_data *sff_8436 = i2c_get_clientdata(client); ++ ++ return sff_8436_read_write(sff_8436, buf, off, count, QSFP_WRITE_OP); ++} ++/*-------------------------------------------------------------------------*/ ++ ++/* ++ * This lets other kernel code access the eeprom data. For example, it ++ * might hold a board's Ethernet address, or board-specific calibration ++ * data generated on the manufacturing floor. ++ */ ++ ++static ssize_t sff_8436_macc_read(struct memory_accessor *macc, char *buf, ++ off_t offset, size_t count) ++{ ++ struct sff_8436_data *sff_8436 = container_of(macc, struct sff_8436_data, macc); ++ ++ return sff_8436_read_write(sff_8436, buf, offset, count, QSFP_READ_OP); ++} ++ ++static ssize_t sff_8436_macc_write(struct memory_accessor *macc, const char *buf, ++ off_t offset, size_t count) ++{ ++ struct sff_8436_data *sff_8436 = container_of(macc, struct sff_8436_data, macc); ++ ++ return sff_8436_read_write(sff_8436, buf, offset, count, QSFP_WRITE_OP); ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static int __devexit sff_8436_remove(struct i2c_client *client) ++{ ++ struct sff_8436_data *sff_8436; ++ ++ sff_8436 = i2c_get_clientdata(client); ++ sysfs_remove_bin_file(&client->dev.kobj, &sff_8436->bin); ++ ++ eeprom_device_unregister(sff_8436->eeprom_dev); ++ ++ kfree(sff_8436->writebuf); ++ kfree(sff_8436); ++ return 0; ++} ++static int sff_8436_eeprom_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ int err; ++ int use_smbus = 0; ++ struct sff_8436_platform_data chip; ++ struct sff_8436_data *sff_8436; ++ kernel_ulong_t magic; ++ ++ if (client->dev.platform_data) { ++ chip = *(struct sff_8436_platform_data *)client->dev.platform_data; ++ } else { ++ /* ++ * SFF-8436 MMAP is 256 bytes long ++ */ ++ magic = SFF_8436_DEVICE_MAGIC(2048 / 8, 0); ++ chip.byte_len = BIT(magic & SFF_8436_BITMASK(SFF_8436_SIZE_BYTELEN)); ++ magic >>= SFF_8436_SIZE_BYTELEN; ++ chip.flags = magic & SFF_8436_BITMASK(SFF_8436_SIZE_FLAGS); ++ /* ++ * This is slow, but we can't know all eeproms, so we better ++ * play safe.pecifying custom eeprom-types via platform_data ++ * is recommended anyhow. 
++ */ ++ chip.page_size = 1; ++ ++ chip.setup = NULL; ++ chip.context = NULL; ++ chip.eeprom_data = NULL; ++ } ++ ++ if (!is_power_of_2(chip.byte_len)) ++ dev_warn(&client->dev, ++ "byte_len looks suspicious (no power of 2)!\n"); ++ ++ if (!chip.page_size) { ++ dev_err(&client->dev, "page_size must not be 0!\n"); ++ err = -EINVAL; ++ goto exit; ++ } ++ if (!is_power_of_2(chip.page_size)) ++ dev_warn(&client->dev, ++ "page_size looks suspicious (no power of 2)!\n"); ++ ++ /* Use I2C operations unless we're stuck with SMBus extensions. */ ++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { ++ if (i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { ++ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA; ++ } else if (i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_READ_WORD_DATA)) { ++ use_smbus = I2C_SMBUS_WORD_DATA; ++ } else if (i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_READ_BYTE_DATA)) { ++ use_smbus = I2C_SMBUS_BYTE_DATA; ++ } else { ++ err = -EPFNOSUPPORT; ++ goto exit; ++ } ++ } ++ ++ if (!(sff_8436 = kzalloc(sizeof(struct sff_8436_data) + sizeof(struct i2c_client *), GFP_KERNEL))) { ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ mutex_init(&sff_8436->lock); ++ sff_8436->use_smbus = use_smbus; ++ sff_8436->chip = chip; ++ ++ /* ++ * Export the EEPROM bytes through sysfs, since that's convenient. ++ * By default, only root should see the data (maybe passwords etc) ++ */ ++ sysfs_bin_attr_init(&sff_8436->bin); ++ sff_8436->bin.attr.name = "eeprom"; ++ sff_8436->bin.attr.mode = SFF_8436_FLAG_IRUGO; ++ sff_8436->bin.read = sff_8436_bin_read; ++ sff_8436->bin.size = SFF_8436_EEPROM_SIZE; ++ ++ sff_8436->macc.read = sff_8436_macc_read; ++ ++ if (!use_smbus || ++ (i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) || ++ i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_WRITE_WORD_DATA) || ++ i2c_check_functionality(client->adapter, ++ I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) { ++ //unsigned write_max = chip.page_size; ++ /* ++ * NOTE: AN-2079 ++ * Finisar recommends that the host implement 1 byte writes only, ++ * since this module only supports 32 byte page boundaries. ++ * 2 byte writes are acceptable for PE and Vout changes per ++ * Application Note AN-2071. 
++ */ ++ unsigned write_max = 1; ++ ++ sff_8436->macc.write = sff_8436_macc_write; ++ ++ sff_8436->bin.write = sff_8436_bin_write; ++ sff_8436->bin.attr.mode |= S_IWUSR; ++ ++ if (write_max > io_limit) ++ write_max = io_limit; ++ if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX) ++ write_max = I2C_SMBUS_BLOCK_MAX; ++ sff_8436->write_max = write_max; ++ ++ /* buffer (data + address at the beginning) */ ++ sff_8436->writebuf = kmalloc(write_max + 2, GFP_KERNEL); ++ if (!sff_8436->writebuf) { ++ err = -ENOMEM; ++ goto exit_kfree; ++ } ++ } else { ++ dev_warn(&client->dev, ++ "cannot write due to controller restrictions."); ++ } ++ ++ memset(sff_8436->data, 0xff, SFF_8436_EEPROM_SIZE); ++ ++ sff_8436->client[0] = client; ++ ++ /* create the sysfs eeprom file */ ++ err = sysfs_create_bin_file(&client->dev.kobj, &sff_8436->bin); ++ if (err) ++ goto err_struct; ++ ++ sff_8436->eeprom_dev = eeprom_device_register(&client->dev, chip.eeprom_data); ++ if (IS_ERR(sff_8436->eeprom_dev)) { ++ dev_err(&client->dev, "error registering eeprom device.\n"); ++ err = PTR_ERR(sff_8436->eeprom_dev); ++ goto err_sysfs_cleanup; ++ } ++ ++ i2c_set_clientdata(client, sff_8436); ++ ++ dev_info(&client->dev, "%zu byte %s EEPROM, %s\n", ++ sff_8436->bin.size, client->name, ++ "read-only"); ++ ++ if (use_smbus == I2C_SMBUS_WORD_DATA || ++ use_smbus == I2C_SMBUS_BYTE_DATA) { ++ dev_notice(&client->dev, "Falling back to %s reads, " ++ "performance will suffer\n", use_smbus == ++ I2C_SMBUS_WORD_DATA ? "word" : "byte"); ++ } ++ ++ if (chip.setup) ++ chip.setup(&sff_8436->macc, chip.context); ++ ++ return 0; ++ ++err_sysfs_cleanup: ++ sysfs_remove_bin_file(&client->dev.kobj, &sff_8436->bin); ++err_struct: ++ kfree(sff_8436->writebuf); ++exit_kfree: ++ kfree(sff_8436); ++exit: ++ dev_dbg(&client->dev, "probe error %d\n", err); ++ ++ return err; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static struct i2c_driver sff_8436_driver = { ++ .driver = { ++ .name = "sff8436", ++ .owner = THIS_MODULE, ++ }, ++ .probe = sff_8436_eeprom_probe, ++ .remove = __devexit_p(sff_8436_remove), ++ .id_table = sff8436_ids, ++}; ++ ++static int __init sff_8436_init(void) ++{ ++ if (!io_limit) { ++ pr_err("sff_8436: io_limit must not be 0!\n"); ++ return -EINVAL; ++ } ++ ++ io_limit = rounddown_pow_of_two(io_limit); ++ return i2c_add_driver(&sff_8436_driver); ++} ++module_init(sff_8436_init); ++ ++static void __exit sff_8436_exit(void) ++{ ++ i2c_del_driver(&sff_8436_driver); ++} ++module_exit(sff_8436_exit); ++ ++MODULE_DESCRIPTION("Driver for SFF-8436 based QSFP EEPROMs"); ++MODULE_AUTHOR("VIDYA RAVIPATI "); ++MODULE_LICENSE("GPL"); +diff --git a/include/linux/i2c/sff-8436.h b/include/linux/i2c/sff-8436.h +new file mode 100644 +index 0000000..cd46896 +--- /dev/null ++++ b/include/linux/i2c/sff-8436.h +@@ -0,0 +1,33 @@ ++#ifndef _LINUX_SFF_8436_H ++#define _LINUX_SFF_8436_H ++ ++#include ++#include ++#include ++ ++/* ++ * As seen through Linux I2C, differences between the most common types of I2C ++ * memory include: ++ * - How much memory is available (usually specified in bit)? ++ * - What write page size does it support? ++ * - Special flags (read_only, world readable...)? ++ * ++ * If you set up a custom eeprom type, please double-check the parameters. ++ * Especially page_size needs extra care, as you risk data loss if your value ++ * is bigger than what the chip actually supports! 
++ */ ++ ++struct sff_8436_platform_data { ++ u32 byte_len; /* size (sum of all addr) */ ++ u16 page_size; /* for writes */ ++ u8 flags; ++#define SFF_8436_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ ++#define SFF_8436_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ ++#define SFF_8436_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ ++ ++ void (*setup)(struct memory_accessor *, void *context); ++ void *context; ++ struct eeprom_platform_data *eeprom_data; /* extra data for the eeprom_class */ ++}; ++ ++#endif /* _LINUX_SFF_8436_H */ diff --git a/packages/base/any/kernels/3.16-lts/patches/overlayfs.patch b/packages/base/any/kernels/3.16-lts/patches/overlayfs.patch new file mode 100644 index 00000000..11f414c3 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/overlayfs.patch @@ -0,0 +1,4309 @@ +diff -urpN a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking +--- a/Documentation/filesystems/Locking 2016-11-20 01:17:41.000000000 +0000 ++++ b/Documentation/filesystems/Locking 2016-12-21 21:06:34.006677297 +0000 +@@ -67,6 +67,7 @@ prototypes: + struct file *, unsigned open_flag, + umode_t create_mode, int *opened); + int (*tmpfile) (struct inode *, struct dentry *, umode_t); ++ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); + + locking rules: + all may block +@@ -96,6 +97,7 @@ fiemap: no + update_time: no + atomic_open: yes + tmpfile: no ++dentry_open: no + + Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on + victim. +diff -urpN a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt +--- a/Documentation/filesystems/overlayfs.txt 1970-01-01 00:00:00.000000000 +0000 ++++ b/Documentation/filesystems/overlayfs.txt 2016-12-21 21:06:34.006677297 +0000 +@@ -0,0 +1,198 @@ ++Written by: Neil Brown ++ ++Overlay Filesystem ++================== ++ ++This document describes a prototype for a new approach to providing ++overlay-filesystem functionality in Linux (sometimes referred to as ++union-filesystems). An overlay-filesystem tries to present a ++filesystem which is the result over overlaying one filesystem on top ++of the other. ++ ++The result will inevitably fail to look exactly like a normal ++filesystem for various technical reasons. The expectation is that ++many use cases will be able to ignore these differences. ++ ++This approach is 'hybrid' because the objects that appear in the ++filesystem do not all appear to belong to that filesystem. In many ++cases an object accessed in the union will be indistinguishable ++from accessing the corresponding object from the original filesystem. ++This is most obvious from the 'st_dev' field returned by stat(2). ++ ++While directories will report an st_dev from the overlay-filesystem, ++all non-directory objects will report an st_dev from the lower or ++upper filesystem that is providing the object. Similarly st_ino will ++only be unique when combined with st_dev, and both of these can change ++over the lifetime of a non-directory object. Many applications and ++tools ignore these values and will not be affected. ++ ++Upper and Lower ++--------------- ++ ++An overlay filesystem combines two filesystems - an 'upper' filesystem ++and a 'lower' filesystem. When a name exists in both filesystems, the ++object in the 'upper' filesystem is visible while the object in the ++'lower' filesystem is either hidden or, in the case of directories, ++merged with the 'upper' object. 
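The st_dev/st_ino caveat from the introduction above is easy to observe from user space. A minimal sketch (not part of this patch), assuming an overlay is already mounted at /merged and contains a regular file /merged/some-file; both paths are hypothetical:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat dir_st, file_st;

	if (stat("/merged", &dir_st) || stat("/merged/some-file", &file_st)) {
		perror("stat");
		return 1;
	}

	/* The merged directory reports the overlay's st_dev; the regular
	 * file reports the st_dev of whichever underlying filesystem
	 * (upper or lower) is currently providing it, so the two values
	 * will normally differ. */
	printf("dir:  dev=%lu ino=%lu\n",
	       (unsigned long)dir_st.st_dev, (unsigned long)dir_st.st_ino);
	printf("file: dev=%lu ino=%lu\n",
	       (unsigned long)file_st.st_dev, (unsigned long)file_st.st_ino);
	return 0;
}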
++ ++It would be more correct to refer to an upper and lower 'directory ++tree' rather than 'filesystem' as it is quite possible for both ++directory trees to be in the same filesystem and there is no ++requirement that the root of a filesystem be given for either upper or ++lower. ++ ++The lower filesystem can be any filesystem supported by Linux and does ++not need to be writable. The lower filesystem can even be another ++overlayfs. The upper filesystem will normally be writable and if it ++is it must support the creation of trusted.* extended attributes, and ++must provide valid d_type in readdir responses, so NFS is not suitable. ++ ++A read-only overlay of two read-only filesystems may use any ++filesystem type. ++ ++Directories ++----------- ++ ++Overlaying mainly involves directories. If a given name appears in both ++upper and lower filesystems and refers to a non-directory in either, ++then the lower object is hidden - the name refers only to the upper ++object. ++ ++Where both upper and lower objects are directories, a merged directory ++is formed. ++ ++At mount time, the two directories given as mount options "lowerdir" and ++"upperdir" are combined into a merged directory: ++ ++ mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\ ++workdir=/work /merged ++ ++The "workdir" needs to be an empty directory on the same filesystem ++as upperdir. ++ ++Then whenever a lookup is requested in such a merged directory, the ++lookup is performed in each actual directory and the combined result ++is cached in the dentry belonging to the overlay filesystem. If both ++actual lookups find directories, both are stored and a merged ++directory is created, otherwise only one is stored: the upper if it ++exists, else the lower. ++ ++Only the lists of names from directories are merged. Other content ++such as metadata and extended attributes are reported for the upper ++directory only. These attributes of the lower directory are hidden. ++ ++whiteouts and opaque directories ++-------------------------------- ++ ++In order to support rm and rmdir without changing the lower ++filesystem, an overlay filesystem needs to record in the upper filesystem ++that files have been removed. This is done using whiteouts and opaque ++directories (non-directories are always opaque). ++ ++A whiteout is created as a character device with 0/0 device number. ++When a whiteout is found in the upper level of a merged directory, any ++matching name in the lower level is ignored, and the whiteout itself ++is also hidden. ++ ++A directory is made opaque by setting the xattr "trusted.overlay.opaque" ++to "y". Where the upper filesystem contains an opaque directory, any ++directory in the lower filesystem with the same name is ignored. ++ ++readdir ++------- ++ ++When a 'readdir' request is made on a merged directory, the upper and ++lower directories are each read and the name lists merged in the ++obvious way (upper is read first, then lower - entries that already ++exist are not re-added). This merged name list is cached in the ++'struct file' and so remains as long as the file is kept open. If the ++directory is opened and read by two processes at the same time, they ++will each have separate caches. A seekdir to the start of the ++directory (offset 0) followed by a readdir will cause the cache to be ++discarded and rebuilt. ++ ++This means that changes to the merged directory do not appear while a ++directory is being read. This is unlikely to be noticed by many ++programs. 
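To make the whiteout and opaque-directory markers described above concrete, here is a minimal user-space sketch (not part of this patch) that creates both by hand in an upper tree. The paths under /upper are hypothetical, it needs root privileges (mknod of a character device and a trusted.* xattr), and it is only sensible while the overlay is not mounted:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/xattr.h>

int main(void)
{
	/* A whiteout is a character device with device number 0/0; when the
	 * overlay finds it in the upper layer it hides any lower-layer entry
	 * of the same name. */
	if (mknod("/upper/dir/removed-file", S_IFCHR | 0000, makedev(0, 0))) {
		perror("mknod");
		return 1;
	}

	/* An opaque directory hides the entire lower-layer directory of the
	 * same name; the marker is the xattr trusted.overlay.opaque set to "y". */
	if (setxattr("/upper/dir", "trusted.overlay.opaque", "y", 1, 0)) {
		perror("setxattr");
		return 1;
	}

	return 0;
}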
++ ++seek offsets are assigned sequentially when the directories are read. ++Thus if ++ - read part of a directory ++ - remember an offset, and close the directory ++ - re-open the directory some time later ++ - seek to the remembered offset ++ ++there may be little correlation between the old and new locations in ++the list of filenames, particularly if anything has changed in the ++directory. ++ ++Readdir on directories that are not merged is simply handled by the ++underlying directory (upper or lower). ++ ++ ++Non-directories ++--------------- ++ ++Objects that are not directories (files, symlinks, device-special ++files etc.) are presented either from the upper or lower filesystem as ++appropriate. When a file in the lower filesystem is accessed in a way ++the requires write-access, such as opening for write access, changing ++some metadata etc., the file is first copied from the lower filesystem ++to the upper filesystem (copy_up). Note that creating a hard-link ++also requires copy_up, though of course creation of a symlink does ++not. ++ ++The copy_up may turn out to be unnecessary, for example if the file is ++opened for read-write but the data is not modified. ++ ++The copy_up process first makes sure that the containing directory ++exists in the upper filesystem - creating it and any parents as ++necessary. It then creates the object with the same metadata (owner, ++mode, mtime, symlink-target etc.) and then if the object is a file, the ++data is copied from the lower to the upper filesystem. Finally any ++extended attributes are copied up. ++ ++Once the copy_up is complete, the overlay filesystem simply ++provides direct access to the newly created file in the upper ++filesystem - future operations on the file are barely noticed by the ++overlay filesystem (though an operation on the name of the file such as ++rename or unlink will of course be noticed and handled). ++ ++ ++Non-standard behavior ++--------------------- ++ ++The copy_up operation essentially creates a new, identical file and ++moves it over to the old name. The new file may be on a different ++filesystem, so both st_dev and st_ino of the file may change. ++ ++Any open files referring to this inode will access the old data and ++metadata. Similarly any file locks obtained before copy_up will not ++apply to the copied up file. ++ ++On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and ++fsetxattr(2) will fail with EROFS. ++ ++If a file with multiple hard links is copied up, then this will ++"break" the link. Changes will not be propagated to other names ++referring to the same inode. ++ ++Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory ++object in overlayfs will not contain valid absolute paths, only ++relative paths leading up to the filesystem's root. This will be ++fixed in the future. ++ ++Some operations are not atomic, for example a crash during copy_up or ++rename will leave the filesystem in an inconsistent state. This will ++be addressed in the future. ++ ++Changes to underlying filesystems ++--------------------------------- ++ ++Offline changes, when the overlay is not mounted, are allowed to either ++the upper or the lower trees. ++ ++Changes to the underlying filesystems while part of a mounted overlay ++filesystem are not allowed. If the underlying filesystem is changed, ++the behavior of the overlay is undefined, though it will not result in ++a crash or deadlock. 
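The copy_up and st_dev/st_ino remarks above can also be demonstrated from user space. A minimal sketch (not part of this patch), assuming an overlay mounted at /merged and a hypothetical file /merged/lower-only that so far exists only in the lower layer:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat before, after;
	int fd;

	if (stat("/merged/lower-only", &before)) {
		perror("stat");
		return 1;
	}

	/* Opening for write requires write access, so it triggers copy_up
	 * even if nothing is ever written. */
	fd = open("/merged/lower-only", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (stat("/merged/lower-only", &after)) {
		perror("stat");
		return 1;
	}

	/* After copy_up the name refers to the new copy in the upper layer,
	 * so both values may differ from those recorded before the open. */
	printf("before: dev=%lu ino=%lu\n",
	       (unsigned long)before.st_dev, (unsigned long)before.st_ino);
	printf("after:  dev=%lu ino=%lu\n",
	       (unsigned long)after.st_dev, (unsigned long)after.st_ino);

	close(fd);
	return 0;
}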
+diff -urpN a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt +--- a/Documentation/filesystems/vfs.txt 2016-11-20 01:17:41.000000000 +0000 ++++ b/Documentation/filesystems/vfs.txt 2016-12-21 21:06:34.006677297 +0000 +@@ -364,6 +364,7 @@ struct inode_operations { + int (*atomic_open)(struct inode *, struct dentry *, struct file *, + unsigned open_flag, umode_t create_mode, int *opened); + int (*tmpfile) (struct inode *, struct dentry *, umode_t); ++ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); + }; + + Again, all methods are called without any locks being held, unless +@@ -696,6 +697,12 @@ struct address_space_operations { + but instead uses bmap to find out where the blocks in the file + are and uses those addresses directly. + ++ dentry_open: this is an alternative to f_op->open(), the difference is that ++ this method may open a file not necessarily originating from the same ++ filesystem as the one i_op->open() was called on. It may be ++ useful for stacking filesystems which want to allow native I/O directly ++ on underlying files. ++ + + invalidatepage: If a page has PagePrivate set, then invalidatepage + will be called when part or all of the page is to be removed +diff -urpN a/MAINTAINERS b/MAINTAINERS +--- a/MAINTAINERS 2016-11-20 01:17:41.000000000 +0000 ++++ b/MAINTAINERS 2016-12-21 21:06:34.010677297 +0000 +@@ -6654,6 +6654,13 @@ F: drivers/scsi/osd/ + F: include/scsi/osd_* + F: fs/exofs/ + ++OVERLAYFS FILESYSTEM ++M: Miklos Szeredi ++L: linux-fsdevel@vger.kernel.org ++S: Supported ++F: fs/overlayfs/* ++F: Documentation/filesystems/overlayfs.txt ++ + P54 WIRELESS DRIVER + M: Christian Lamparter + L: linux-wireless@vger.kernel.org +diff -urpN a/fs/Kconfig b/fs/Kconfig +--- a/fs/Kconfig 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/Kconfig 2016-12-21 21:06:34.010677297 +0000 +@@ -67,6 +67,7 @@ source "fs/quota/Kconfig" + + source "fs/autofs4/Kconfig" + source "fs/fuse/Kconfig" ++source "fs/overlayfs/Kconfig" + + menu "Caches" + +diff -urpN a/fs/Makefile b/fs/Makefile +--- a/fs/Makefile 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/Makefile 2016-12-21 21:06:34.010677297 +0000 +@@ -104,6 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/ + obj-$(CONFIG_AUTOFS4_FS) += autofs4/ + obj-$(CONFIG_ADFS_FS) += adfs/ + obj-$(CONFIG_FUSE_FS) += fuse/ ++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/ + obj-$(CONFIG_UDF_FS) += udf/ + obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ + obj-$(CONFIG_OMFS_FS) += omfs/ +diff -urpN a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +--- a/fs/btrfs/ioctl.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/btrfs/ioctl.c 2016-12-21 21:06:34.010677297 +0000 +@@ -766,23 +766,6 @@ out: + return ret; + } + +-/* copy of check_sticky in fs/namei.c() +-* It's inline, so penalty for filesystems that don't use sticky bit is +-* minimal. +-*/ +-static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode) +-{ +- kuid_t fsuid = current_fsuid(); +- +- if (!(dir->i_mode & S_ISVTX)) +- return 0; +- if (uid_eq(inode->i_uid, fsuid)) +- return 0; +- if (uid_eq(dir->i_uid, fsuid)) +- return 0; +- return !capable(CAP_FOWNER); +-} +- + /* copy of may_delete in fs/namei.c() + * Check whether we can remove a link victim from directory dir, check + * whether the type of victim is right. 
+@@ -818,8 +801,7 @@ static int btrfs_may_delete(struct inode + return error; + if (IS_APPEND(dir)) + return -EPERM; +- if (btrfs_check_sticky(dir, victim->d_inode)|| +- IS_APPEND(victim->d_inode)|| ++ if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) || + IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) + return -EPERM; + if (isdir) { +diff -urpN a/fs/dcache.c b/fs/dcache.c +--- a/fs/dcache.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/dcache.c 2016-12-21 21:06:34.010677297 +0000 +@@ -2637,6 +2637,7 @@ struct dentry *d_ancestor(struct dentry + } + return NULL; + } ++EXPORT_SYMBOL(d_ancestor); + + /* + * This helper attempts to cope with remotely renamed directories +diff -urpN a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c +--- a/fs/ecryptfs/main.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/ecryptfs/main.c 2016-12-21 21:06:34.010677297 +0000 +@@ -576,13 +576,6 @@ static struct dentry *ecryptfs_mount(str + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; + s->s_blocksize = path.dentry->d_sb->s_blocksize; + s->s_magic = ECRYPTFS_SUPER_MAGIC; +- s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1; +- +- rc = -EINVAL; +- if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { +- pr_err("eCryptfs: maximum fs stacking depth exceeded\n"); +- goto out_free; +- } + + inode = ecryptfs_get_inode(path.dentry->d_inode, s); + rc = PTR_ERR(inode); +diff -urpN a/fs/ext4/namei.c b/fs/ext4/namei.c +--- a/fs/ext4/namei.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/ext4/namei.c 2016-12-21 21:06:34.010677297 +0000 +@@ -1849,10 +1849,10 @@ static int make_indexed_dir(handle_t *ha + + retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); + if (retval) +- goto out_frames; ++ goto out_frames; + retval = ext4_handle_dirty_dirent_node(handle, dir, bh); + if (retval) +- goto out_frames; ++ goto out_frames; + + de = do_split(handle,dir, &bh, frame, &hinfo); + if (IS_ERR(de)) { +@@ -2905,7 +2905,7 @@ retry: + * for transaction commit if we are running out of space + * and thus we deadlock. So we have to stop transaction now + * and restart it when symlink contents is written. +- * ++ * + * To keep fs consistent in case of crash, we have to put inode + * to orphan list in the mean time. + */ +@@ -3186,6 +3186,39 @@ static void ext4_update_dir_count(handle + } + } + ++static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent, ++ int credits, handle_t **h) ++{ ++ struct inode *wh; ++ handle_t *handle; ++ int retries = 0; ++ ++ /* ++ * for inode block, sb block, group summaries, ++ * and inode bitmap ++ */ ++ credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) + ++ EXT4_XATTR_TRANS_BLOCKS + 4); ++retry: ++ wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE, ++ &ent->dentry->d_name, 0, NULL, ++ EXT4_HT_DIR, credits); ++ ++ handle = ext4_journal_current_handle(); ++ if (IS_ERR(wh)) { ++ if (handle) ++ ext4_journal_stop(handle); ++ if (PTR_ERR(wh) == -ENOSPC && ++ ext4_should_retry_alloc(ent->dir->i_sb, &retries)) ++ goto retry; ++ } else { ++ *h = handle; ++ init_special_inode(wh, wh->i_mode, WHITEOUT_DEV); ++ wh->i_op = &ext4_special_inode_operations; ++ } ++ return wh; ++} ++ + /* + * Anybody can rename anything with this: the permission checks are left to the + * higher-level routines. 
+@@ -3194,8 +3227,9 @@ static void ext4_update_dir_count(handle + * while new_{dentry,inode) refers to the destination dentry/inode + * This comes from rename(const char *oldpath, const char *newpath) + */ +-static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry) ++static int ext4_simple_rename(struct inode *old_dir, struct dentry *old_dentry, ++ struct inode *new_dir, struct dentry *new_dentry, ++ unsigned int flags) + { + handle_t *handle = NULL; + struct ext4_renament old = { +@@ -3210,6 +3244,9 @@ static int ext4_rename(struct inode *old + }; + int force_reread; + int retval; ++ struct inode *whiteout = NULL; ++ int credits; ++ u8 old_file_type; + + dquot_initialize(old.dir); + dquot_initialize(new.dir); +@@ -3248,11 +3285,17 @@ static int ext4_rename(struct inode *old + if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC)) + ext4_alloc_da_blocks(old.inode); + +- handle = ext4_journal_start(old.dir, EXT4_HT_DIR, +- (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + +- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); +- if (IS_ERR(handle)) +- return PTR_ERR(handle); ++ credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + ++ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); ++ if (!(flags & RENAME_WHITEOUT)) { ++ handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); ++ if (IS_ERR(handle)) ++ return PTR_ERR(handle); ++ } else { ++ whiteout = ext4_whiteout_for_rename(&old, credits, &handle); ++ if (IS_ERR(whiteout)) ++ return PTR_ERR(whiteout); ++ } + + if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) + ext4_handle_sync(handle); +@@ -3280,13 +3323,23 @@ static int ext4_rename(struct inode *old + */ + force_reread = (new.dir->i_ino == old.dir->i_ino && + ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); +- if (!new.bh) { ++ ++ old_file_type = old.de->file_type; ++ if (whiteout) { ++ retval = ext4_setent(handle, &old, whiteout->i_ino, ++ EXT4_FT_CHRDEV); ++ if (retval) ++ goto end_rename; ++ ext4_mark_inode_dirty(handle, whiteout); ++ } ++ ++ if (!new.bh) { + retval = ext4_add_entry(handle, new.dentry, old.inode); + if (retval) + goto end_rename; + } else { + retval = ext4_setent(handle, &new, +- old.inode->i_ino, old.de->file_type); ++ old.inode->i_ino, old_file_type); + if (retval) + goto end_rename; + } +@@ -3301,10 +3354,12 @@ static int ext4_rename(struct inode *old + old.inode->i_ctime = ext4_current_time(old.inode); + ext4_mark_inode_dirty(handle, old.inode); + +- /* +- * ok, that's it +- */ +- ext4_rename_delete(handle, &old, force_reread); ++ if (!whiteout) { ++ /* ++ * ok, that's it ++ */ ++ ext4_rename_delete(handle, &old, force_reread); ++ } + + if (new.inode) { + ext4_dec_count(handle, new.inode); +@@ -3340,6 +3395,12 @@ end_rename: + brelse(old.dir_bh); + brelse(old.bh); + brelse(new.bh); ++ if (whiteout) { ++ if (retval) ++ drop_nlink(whiteout); ++ unlock_new_inode(whiteout); ++ iput(whiteout); ++ } + if (handle) + ext4_journal_stop(handle); + return retval; +@@ -3468,22 +3529,26 @@ end_rename: + return retval; + } + ++static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, ++ struct inode *new_dir, struct dentry *new_dentry) ++{ ++ return ext4_simple_rename(old_dir, old_dentry, new_dir, new_dentry, 0); ++} ++ + static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) + { +- if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) ++ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) + return -EINVAL; 
+ + if (flags & RENAME_EXCHANGE) { + return ext4_cross_rename(old_dir, old_dentry, + new_dir, new_dentry); + } +- /* +- * Existence checking was done by the VFS, otherwise "RENAME_NOREPLACE" +- * is equivalent to regular rename. +- */ +- return ext4_rename(old_dir, old_dentry, new_dir, new_dentry); ++ ++ return ext4_simple_rename(old_dir, old_dentry, ++ new_dir, new_dentry, flags); + } + + /* +diff -urpN a/fs/internal.h b/fs/internal.h +--- a/fs/internal.h 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/internal.h 2016-12-21 21:06:34.010677297 +0000 +@@ -42,7 +42,6 @@ extern void __init chrdev_init(void); + /* + * namei.c + */ +-extern int __inode_permission(struct inode *, int); + extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *); + extern int vfs_path_lookup(struct dentry *, struct vfsmount *, + const char *, unsigned int, struct path *); +@@ -135,12 +134,6 @@ extern ssize_t __kernel_write(struct fil + extern int rw_verify_area(int, struct file *, const loff_t *, size_t); + + /* +- * splice.c +- */ +-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, +- loff_t *opos, size_t len, unsigned int flags); +- +-/* + * pipe.c + */ + extern const struct file_operations pipefifo_fops; +diff -urpN a/fs/namei.c b/fs/namei.c +--- a/fs/namei.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/namei.c 2016-12-21 21:06:34.010677297 +0000 +@@ -416,6 +416,7 @@ int __inode_permission(struct inode *ino + + return security_inode_permission(inode, mask); + } ++EXPORT_SYMBOL(__inode_permission); + + /** + * sb_permission - Check superblock-level permissions +@@ -2405,22 +2406,17 @@ kern_path_mountpoint(int dfd, const char + } + EXPORT_SYMBOL(kern_path_mountpoint); + +-/* +- * It's inline, so penalty for filesystems that don't use sticky bit is +- * minimal. 
+- */ +-static inline int check_sticky(struct inode *dir, struct inode *inode) ++int __check_sticky(struct inode *dir, struct inode *inode) + { + kuid_t fsuid = current_fsuid(); + +- if (!(dir->i_mode & S_ISVTX)) +- return 0; + if (uid_eq(inode->i_uid, fsuid)) + return 0; + if (uid_eq(dir->i_uid, fsuid)) + return 0; + return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); + } ++EXPORT_SYMBOL(__check_sticky); + + /* + * Check whether we can remove a link victim from directory dir, check +@@ -3075,9 +3071,12 @@ finish_open_created: + error = may_open(&nd->path, acc_mode, open_flag); + if (error) + goto out; +- file->f_path.mnt = nd->path.mnt; +- error = finish_open(file, nd->path.dentry, NULL, opened); +- if (error) { ++ ++ BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ ++ error = vfs_open(&nd->path, file, current_cred()); ++ if (!error) { ++ *opened |= FILE_OPENED; ++ } else { + if (error == -EOPENSTALE) + goto stale_open; + goto out; +@@ -4222,12 +4221,16 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, + bool should_retry = false; + int error; + +- if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) ++ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) + return -EINVAL; + +- if ((flags & RENAME_NOREPLACE) && (flags & RENAME_EXCHANGE)) ++ if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && ++ (flags & RENAME_EXCHANGE)) + return -EINVAL; + ++ if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) ++ return -EPERM; ++ + retry: + from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags); + if (IS_ERR(from)) { +@@ -4359,6 +4362,20 @@ SYSCALL_DEFINE2(rename, const char __use + return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); + } + ++int vfs_whiteout(struct inode *dir, struct dentry *dentry) ++{ ++ int error = may_create(dir, dentry); ++ if (error) ++ return error; ++ ++ if (!dir->i_op->mknod) ++ return -EPERM; ++ ++ return dir->i_op->mknod(dir, dentry, ++ S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); ++} ++EXPORT_SYMBOL(vfs_whiteout); ++ + int readlink_copy(char __user *buffer, int buflen, const char *link) + { + int len = PTR_ERR(link); +diff -urpN a/fs/namespace.c b/fs/namespace.c +--- a/fs/namespace.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/namespace.c 2016-12-21 21:06:34.010677297 +0000 +@@ -1599,6 +1599,33 @@ void drop_collected_mounts(struct vfsmou + namespace_unlock(); + } + ++/** ++ * clone_private_mount - create a private clone of a path ++ * ++ * This creates a new vfsmount, which will be the clone of @path. The new will ++ * not be attached anywhere in the namespace and will be private (i.e. changes ++ * to the originating mount won't be propagated into this). ++ * ++ * Release with mntput(). 
++ */ ++struct vfsmount *clone_private_mount(struct path *path) ++{ ++ struct mount *old_mnt = real_mount(path->mnt); ++ struct mount *new_mnt; ++ ++ if (IS_MNT_UNBINDABLE(old_mnt)) ++ return ERR_PTR(-EINVAL); ++ ++ down_read(&namespace_sem); ++ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); ++ up_read(&namespace_sem); ++ if (IS_ERR(new_mnt)) ++ return ERR_CAST(new_mnt); ++ ++ return &new_mnt->mnt; ++} ++EXPORT_SYMBOL_GPL(clone_private_mount); ++ + int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, + struct vfsmount *root) + { +diff -urpN a/fs/open.c b/fs/open.c +--- a/fs/open.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/open.c 2016-12-21 21:06:34.010677297 +0000 +@@ -823,8 +823,7 @@ struct file *dentry_open(const struct pa + f = get_empty_filp(); + if (!IS_ERR(f)) { + f->f_flags = flags; +- f->f_path = *path; +- error = do_dentry_open(f, NULL, cred); ++ error = vfs_open(path, f, cred); + if (!error) { + /* from now on we need fput() to dispose of f */ + error = open_check_o_direct(f); +@@ -841,6 +840,26 @@ struct file *dentry_open(const struct pa + } + EXPORT_SYMBOL(dentry_open); + ++/** ++ * vfs_open - open the file at the given path ++ * @path: path to open ++ * @filp: newly allocated file with f_flag initialized ++ * @cred: credentials to use ++ */ ++int vfs_open(const struct path *path, struct file *filp, ++ const struct cred *cred) ++{ ++ struct inode *inode = path->dentry->d_inode; ++ ++ if (inode->i_op->dentry_open) ++ return inode->i_op->dentry_open(path->dentry, filp, cred); ++ else { ++ filp->f_path = *path; ++ return do_dentry_open(filp, NULL, cred); ++ } ++} ++EXPORT_SYMBOL(vfs_open); ++ + static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) + { + int lookup_flags = 0; +diff -urpN a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig +--- a/fs/overlayfs/Kconfig 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/Kconfig 2016-12-21 21:06:34.010677297 +0000 +@@ -0,0 +1,10 @@ ++config OVERLAYFS_FS ++ tristate "Overlay filesystem support" ++ help ++ An overlay filesystem combines two filesystems - an 'upper' filesystem ++ and a 'lower' filesystem. When a name exists in both filesystems, the ++ object in the 'upper' filesystem is visible while the object in the ++ 'lower' filesystem is either hidden or, in the case of directories, ++ merged with the 'upper' object. ++ ++ For more information see Documentation/filesystems/overlayfs.txt +diff -urpN a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile +--- a/fs/overlayfs/Makefile 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/Makefile 2016-12-21 21:06:34.010677297 +0000 +@@ -0,0 +1,7 @@ ++# ++# Makefile for the overlay filesystem. ++# ++ ++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o ++ ++overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o +diff -urpN a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +--- a/fs/overlayfs/copy_up.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/copy_up.c 2016-12-21 21:06:34.010677297 +0000 +@@ -0,0 +1,431 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "overlayfs.h" ++ ++#define OVL_COPY_UP_CHUNK_SIZE (1 << 20) ++ ++int ovl_copy_xattr(struct dentry *old, struct dentry *new) ++{ ++ ssize_t list_size, size; ++ char *buf, *name, *value; ++ int error; ++ ++ if (!old->d_inode->i_op->getxattr || ++ !new->d_inode->i_op->getxattr) ++ return 0; ++ ++ list_size = vfs_listxattr(old, NULL, 0); ++ if (list_size <= 0) { ++ if (list_size == -EOPNOTSUPP) ++ return 0; ++ return list_size; ++ } ++ ++ buf = kzalloc(list_size, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ error = -ENOMEM; ++ value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL); ++ if (!value) ++ goto out; ++ ++ list_size = vfs_listxattr(old, buf, list_size); ++ if (list_size <= 0) { ++ error = list_size; ++ goto out_free_value; ++ } ++ ++ for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { ++ size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX); ++ if (size <= 0) { ++ error = size; ++ goto out_free_value; ++ } ++ error = vfs_setxattr(new, name, value, size, 0); ++ if (error) ++ goto out_free_value; ++ } ++ ++out_free_value: ++ kfree(value); ++out: ++ kfree(buf); ++ return error; ++} ++ ++static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) ++{ ++ struct file *old_file; ++ struct file *new_file; ++ loff_t old_pos = 0; ++ loff_t new_pos = 0; ++ int error = 0; ++ ++ if (len == 0) ++ return 0; ++ ++ old_file = ovl_path_open(old, O_RDONLY); ++ if (IS_ERR(old_file)) ++ return PTR_ERR(old_file); ++ ++ new_file = ovl_path_open(new, O_WRONLY); ++ if (IS_ERR(new_file)) { ++ error = PTR_ERR(new_file); ++ goto out_fput; ++ } ++ ++ /* FIXME: copy up sparse files efficiently */ ++ while (len) { ++ size_t this_len = OVL_COPY_UP_CHUNK_SIZE; ++ long bytes; ++ ++ if (len < this_len) ++ this_len = len; ++ ++ if (signal_pending_state(TASK_KILLABLE, current)) { ++ error = -EINTR; ++ break; ++ } ++ ++ bytes = do_splice_direct(old_file, &old_pos, ++ new_file, &new_pos, ++ this_len, SPLICE_F_MOVE); ++ if (bytes <= 0) { ++ error = bytes; ++ break; ++ } ++ WARN_ON(old_pos != new_pos); ++ ++ len -= bytes; ++ } ++ ++ fput(new_file); ++out_fput: ++ fput(old_file); ++ return error; ++} ++ ++static char *ovl_read_symlink(struct dentry *realdentry) ++{ ++ int res; ++ char *buf; ++ struct inode *inode = realdentry->d_inode; ++ mm_segment_t old_fs; ++ ++ res = -EINVAL; ++ if (!inode->i_op->readlink) ++ goto err; ++ ++ res = -ENOMEM; ++ buf = (char *) __get_free_page(GFP_KERNEL); ++ if (!buf) ++ goto err; ++ ++ old_fs = get_fs(); ++ set_fs(get_ds()); ++ /* The cast to a user pointer is valid due to the set_fs() */ ++ res = inode->i_op->readlink(realdentry, ++ (char __user *)buf, PAGE_SIZE - 1); ++ set_fs(old_fs); ++ if (res < 0) { ++ free_page((unsigned long) buf); ++ goto err; ++ } ++ buf[res] = '\0'; ++ ++ return buf; ++ ++err: ++ return ERR_PTR(res); ++} ++ ++static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat) ++{ ++ struct iattr attr = { ++ .ia_valid = ++ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, ++ .ia_atime = stat->atime, ++ .ia_mtime = stat->mtime, ++ }; ++ ++ return notify_change(upperdentry, &attr, NULL); ++} ++ ++int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) ++{ ++ int err = 0; ++ ++ mutex_lock(&upperdentry->d_inode->i_mutex); ++ if (!S_ISLNK(stat->mode)) { ++ struct iattr attr = { ++ .ia_valid = ATTR_MODE, ++ .ia_mode = stat->mode, ++ }; ++ err = notify_change(upperdentry, &attr, NULL); ++ } ++ if 
(!err) { ++ struct iattr attr = { ++ .ia_valid = ATTR_UID | ATTR_GID, ++ .ia_uid = stat->uid, ++ .ia_gid = stat->gid, ++ }; ++ err = notify_change(upperdentry, &attr, NULL); ++ } ++ if (!err) ++ ovl_set_timestamps(upperdentry, stat); ++ mutex_unlock(&upperdentry->d_inode->i_mutex); ++ ++ return err; ++ ++} ++ ++static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, ++ struct dentry *dentry, struct path *lowerpath, ++ struct kstat *stat, const char *link) ++{ ++ struct inode *wdir = workdir->d_inode; ++ struct inode *udir = upperdir->d_inode; ++ struct dentry *newdentry = NULL; ++ struct dentry *upper = NULL; ++ umode_t mode = stat->mode; ++ int err; ++ ++ newdentry = ovl_lookup_temp(workdir, dentry); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out; ++ ++ upper = lookup_one_len(dentry->d_name.name, upperdir, ++ dentry->d_name.len); ++ err = PTR_ERR(upper); ++ if (IS_ERR(upper)) ++ goto out; ++ ++ /* Can't properly set mode on creation because of the umask */ ++ stat->mode &= S_IFMT; ++ err = ovl_create_real(wdir, newdentry, stat, link, NULL, true); ++ stat->mode = mode; ++ if (err) ++ goto out; ++ ++ if (S_ISREG(stat->mode)) { ++ struct path upperpath; ++ ovl_path_upper(dentry, &upperpath); ++ BUG_ON(upperpath.dentry != NULL); ++ upperpath.dentry = newdentry; ++ ++ err = ovl_copy_up_data(lowerpath, &upperpath, stat->size); ++ if (err) ++ goto out_cleanup; ++ } ++ ++ err = ovl_copy_xattr(lowerpath->dentry, newdentry); ++ if (err) ++ goto out_cleanup; ++ ++ err = ovl_set_attr(newdentry, stat); ++ if (err) ++ goto out_cleanup; ++ ++ err = ovl_do_rename(wdir, newdentry, udir, upper, 0); ++ if (err) ++ goto out_cleanup; ++ ++ ovl_dentry_update(dentry, newdentry); ++ newdentry = NULL; ++ ++ /* ++ * Easiest way to get rid of the lower dentry reference is to ++ * drop this dentry. This is neither needed nor possible for ++ * directories. ++ * ++ * Non-directores become opaque when copied up. ++ */ ++ if (!S_ISDIR(stat->mode)) { ++ ovl_dentry_set_opaque(dentry, true); ++ d_drop(dentry); ++ } ++out: ++ dput(upper); ++ dput(newdentry); ++ return err; ++ ++out_cleanup: ++ ovl_cleanup(wdir, newdentry); ++ goto out; ++} ++ ++/* ++ * Copy up a single dentry ++ * ++ * Directory renames only allowed on "pure upper" (already created on ++ * upper filesystem, never copied up). Directories which are on lower or ++ * are merged may not be renamed. For these -EXDEV is returned and ++ * userspace has to deal with it. This means, when copying up a ++ * directory we can rely on it and ancestors being stable. ++ * ++ * Non-directory renames start with copy up of source if necessary. The ++ * actual rename will only proceed once the copy up was successful. Copy ++ * up uses upper parent i_mutex for exclusion. Since rename can change ++ * d_parent it is possible that the copy up will lock the old parent. At ++ * that point the file will have already been copied up anyway. 
++ */ ++static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, ++ struct path *lowerpath, struct kstat *stat) ++{ ++ struct dentry *workdir = ovl_workdir(dentry); ++ int err; ++ struct kstat pstat; ++ struct path parentpath; ++ struct dentry *upperdir; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ char *link = NULL; ++ ++ ovl_path_upper(parent, &parentpath); ++ upperdir = parentpath.dentry; ++ ++ err = vfs_getattr(&parentpath, &pstat); ++ if (err) ++ return err; ++ ++ if (S_ISLNK(stat->mode)) { ++ link = ovl_read_symlink(lowerpath->dentry); ++ if (IS_ERR(link)) ++ return PTR_ERR(link); ++ } ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_free_link; ++ ++ override_cred->fsuid = stat->uid; ++ override_cred->fsgid = stat->gid; ++ /* ++ * CAP_SYS_ADMIN for copying up extended attributes ++ * CAP_DAC_OVERRIDE for create ++ * CAP_FOWNER for chmod, timestamp update ++ * CAP_FSETID for chmod ++ * CAP_CHOWN for chown ++ * CAP_MKNOD for mknod ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ cap_raise(override_cred->cap_effective, CAP_FSETID); ++ cap_raise(override_cred->cap_effective, CAP_CHOWN); ++ cap_raise(override_cred->cap_effective, CAP_MKNOD); ++ old_cred = override_creds(override_cred); ++ ++ err = -EIO; ++ if (lock_rename(workdir, upperdir) != NULL) { ++ pr_err("overlayfs: failed to lock workdir+upperdir\n"); ++ goto out_unlock; ++ } ++ if (ovl_path_type(dentry) != OVL_PATH_LOWER) { ++ err = 0; ++ } else { ++ err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, ++ stat, link); ++ if (!err) { ++ /* Restore timestamps on parent (best effort) */ ++ ovl_set_timestamps(upperdir, &pstat); ++ } ++ } ++out_unlock: ++ unlock_rename(workdir, upperdir); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++out_free_link: ++ if (link) ++ free_page((unsigned long) link); ++ ++ return err; ++} ++ ++int ovl_copy_up(struct dentry *dentry) ++{ ++ int err; ++ ++ err = 0; ++ while (!err) { ++ struct dentry *next; ++ struct dentry *parent; ++ struct path lowerpath; ++ struct kstat stat; ++ enum ovl_path_type type = ovl_path_type(dentry); ++ ++ if (type != OVL_PATH_LOWER) ++ break; ++ ++ next = dget(dentry); ++ /* find the topmost dentry not yet copied up */ ++ for (;;) { ++ parent = dget_parent(next); ++ ++ type = ovl_path_type(parent); ++ if (type != OVL_PATH_LOWER) ++ break; ++ ++ dput(next); ++ next = parent; ++ } ++ ++ ovl_path_lower(next, &lowerpath); ++ err = vfs_getattr(&lowerpath, &stat); ++ if (!err) ++ err = ovl_copy_up_one(parent, next, &lowerpath, &stat); ++ ++ dput(parent); ++ dput(next); ++ } ++ ++ return err; ++} ++ ++/* Optimize by not copying up the file first and truncating later */ ++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size) ++{ ++ int err; ++ struct kstat stat; ++ struct path lowerpath; ++ struct dentry *parent = dget_parent(dentry); ++ ++ err = ovl_copy_up(parent); ++ if (err) ++ goto out_dput_parent; ++ ++ ovl_path_lower(dentry, &lowerpath); ++ err = vfs_getattr(&lowerpath, &stat); ++ if (err) ++ goto out_dput_parent; ++ ++ if (size < stat.size) ++ stat.size = size; ++ ++ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat); ++ ++out_dput_parent: ++ dput(parent); ++ return err; ++} +diff -urpN a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c +--- a/fs/overlayfs/dir.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/dir.c 2016-12-21 
21:06:34.014677298 +0000 +@@ -0,0 +1,922 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include "overlayfs.h" ++ ++void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) ++{ ++ int err; ++ ++ if (S_ISDIR(wdentry->d_inode->i_mode)) ++ err = ovl_do_rmdir(wdir, wdentry); ++ else ++ err = ovl_do_unlink(wdir, wdentry); ++ ++ if (err) { ++ pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n", ++ wdentry, err); ++ } ++} ++ ++struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) ++{ ++ struct dentry *temp; ++ char name[20]; ++ ++ snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry); ++ ++ temp = lookup_one_len(name, workdir, strlen(name)); ++ if (!IS_ERR(temp) && temp->d_inode) { ++ pr_err("overlayfs: workdir/%s already exists\n", name); ++ dput(temp); ++ temp = ERR_PTR(-EIO); ++ } ++ ++ return temp; ++} ++ ++/* caller holds i_mutex on workdir */ ++static struct dentry *ovl_whiteout(struct dentry *workdir, ++ struct dentry *dentry) ++{ ++ int err; ++ struct dentry *whiteout; ++ struct inode *wdir = workdir->d_inode; ++ ++ whiteout = ovl_lookup_temp(workdir, dentry); ++ if (IS_ERR(whiteout)) ++ return whiteout; ++ ++ err = ovl_do_whiteout(wdir, whiteout); ++ if (err) { ++ dput(whiteout); ++ whiteout = ERR_PTR(err); ++ } ++ ++ return whiteout; ++} ++ ++int ovl_create_real(struct inode *dir, struct dentry *newdentry, ++ struct kstat *stat, const char *link, ++ struct dentry *hardlink, bool debug) ++{ ++ int err; ++ ++ if (newdentry->d_inode) ++ return -ESTALE; ++ ++ if (hardlink) { ++ err = ovl_do_link(hardlink, dir, newdentry, debug); ++ } else { ++ switch (stat->mode & S_IFMT) { ++ case S_IFREG: ++ err = ovl_do_create(dir, newdentry, stat->mode, debug); ++ break; ++ ++ case S_IFDIR: ++ err = ovl_do_mkdir(dir, newdentry, stat->mode, debug); ++ break; ++ ++ case S_IFCHR: ++ case S_IFBLK: ++ case S_IFIFO: ++ case S_IFSOCK: ++ err = ovl_do_mknod(dir, newdentry, ++ stat->mode, stat->rdev, debug); ++ break; ++ ++ case S_IFLNK: ++ err = ovl_do_symlink(dir, newdentry, link, debug); ++ break; ++ ++ default: ++ err = -EPERM; ++ } ++ } ++ if (!err && WARN_ON(!newdentry->d_inode)) { ++ /* ++ * Not quite sure if non-instantiated dentry is legal or not. ++ * VFS doesn't seem to care so check and warn here. ++ */ ++ err = -ENOENT; ++ } ++ return err; ++} ++ ++static int ovl_set_opaque(struct dentry *upperdentry) ++{ ++ return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); ++} ++ ++static void ovl_remove_opaque(struct dentry *upperdentry) ++{ ++ int err; ++ ++ err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr); ++ if (err) { ++ pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n", ++ upperdentry->d_name.name, err); ++ } ++} ++ ++static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, ++ struct kstat *stat) ++{ ++ int err; ++ enum ovl_path_type type; ++ struct path realpath; ++ ++ type = ovl_path_real(dentry, &realpath); ++ err = vfs_getattr(&realpath, stat); ++ if (err) ++ return err; ++ ++ stat->dev = dentry->d_sb->s_dev; ++ stat->ino = dentry->d_inode->i_ino; ++ ++ /* ++ * It's probably not worth it to count subdirs to get the ++ * correct link count. nlink=1 seems to pacify 'find' and ++ * other utilities. 
++ */ ++ if (type == OVL_PATH_MERGE) ++ stat->nlink = 1; ++ ++ return 0; ++} ++ ++static int ovl_create_upper(struct dentry *dentry, struct inode *inode, ++ struct kstat *stat, const char *link, ++ struct dentry *hardlink) ++{ ++ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); ++ struct inode *udir = upperdir->d_inode; ++ struct dentry *newdentry; ++ int err; ++ ++ mutex_lock_nested(&udir->i_mutex, I_MUTEX_PARENT); ++ newdentry = lookup_one_len(dentry->d_name.name, upperdir, ++ dentry->d_name.len); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out_unlock; ++ err = ovl_create_real(udir, newdentry, stat, link, hardlink, false); ++ if (err) ++ goto out_dput; ++ ++ ovl_dentry_version_inc(dentry->d_parent); ++ ovl_dentry_update(dentry, newdentry); ++ ovl_copyattr(newdentry->d_inode, inode); ++ d_instantiate(dentry, inode); ++ newdentry = NULL; ++out_dput: ++ dput(newdentry); ++out_unlock: ++ mutex_unlock(&udir->i_mutex); ++ return err; ++} ++ ++static int ovl_lock_rename_workdir(struct dentry *workdir, ++ struct dentry *upperdir) ++{ ++ /* Workdir should not be subdir of upperdir and vice versa */ ++ if (lock_rename(workdir, upperdir) != NULL) { ++ unlock_rename(workdir, upperdir); ++ pr_err("overlayfs: failed to lock workdir+upperdir\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ ++static struct dentry *ovl_clear_empty(struct dentry *dentry, ++ struct list_head *list) ++{ ++ struct dentry *workdir = ovl_workdir(dentry); ++ struct inode *wdir = workdir->d_inode; ++ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); ++ struct inode *udir = upperdir->d_inode; ++ struct path upperpath; ++ struct dentry *upper; ++ struct dentry *opaquedir; ++ struct kstat stat; ++ int err; ++ ++ err = ovl_lock_rename_workdir(workdir, upperdir); ++ if (err) ++ goto out; ++ ++ ovl_path_upper(dentry, &upperpath); ++ err = vfs_getattr(&upperpath, &stat); ++ if (err) ++ goto out; ++ ++ err = -ESTALE; ++ if (!S_ISDIR(stat.mode)) ++ goto out; ++ ++ opaquedir = ovl_lookup_temp(workdir, dentry); ++ err = PTR_ERR(opaquedir); ++ if (IS_ERR(opaquedir)) ++ goto out_unlock; ++ ++ err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true); ++ if (err) ++ goto out_dput; ++ ++ upper = upperpath.dentry; ++ err = ovl_copy_xattr(upper, opaquedir); ++ if (err) ++ goto out_cleanup; ++ ++ err = ovl_set_opaque(opaquedir); ++ if (err) ++ goto out_cleanup; ++ ++ err = ovl_set_attr(opaquedir, &stat); ++ if (err) ++ goto out_cleanup; ++ ++ err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE); ++ if (err) ++ goto out_cleanup; ++ ++ unlock_rename(workdir, upperdir); ++ ovl_cleanup_whiteouts(upper, list); ++ mutex_lock_nested(&wdir->i_mutex, I_MUTEX_PARENT); ++ ovl_cleanup(wdir, upper); ++ mutex_unlock(&wdir->i_mutex); ++ ++ /* dentry's upper doesn't match now, get rid of it */ ++ d_drop(dentry); ++ ++ return opaquedir; ++ ++out_cleanup: ++ ovl_cleanup(wdir, opaquedir); ++out_dput: ++ dput(opaquedir); ++out_unlock: ++ unlock_rename(workdir, upperdir); ++out: ++ return ERR_PTR(err); ++} ++ ++static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry, ++ enum ovl_path_type type) ++{ ++ int err; ++ struct dentry *ret = NULL; ++ LIST_HEAD(list); ++ ++ err = ovl_check_empty_dir(dentry, &list); ++ if (err) ++ ret = ERR_PTR(err); ++ else if (type == OVL_PATH_MERGE) ++ ret = ovl_clear_empty(dentry, &list); ++ ++ ovl_cache_free(&list); ++ ++ return ret; ++} ++ ++static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, ++ struct kstat *stat, const char *link, ++ 
struct dentry *hardlink) ++{ ++ struct dentry *workdir = ovl_workdir(dentry); ++ struct inode *wdir = workdir->d_inode; ++ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); ++ struct inode *udir = upperdir->d_inode; ++ struct dentry *upper; ++ struct dentry *newdentry; ++ int err; ++ ++ err = ovl_lock_rename_workdir(workdir, upperdir); ++ if (err) ++ goto out; ++ ++ newdentry = ovl_lookup_temp(workdir, dentry); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out_unlock; ++ ++ upper = lookup_one_len(dentry->d_name.name, upperdir, ++ dentry->d_name.len); ++ err = PTR_ERR(upper); ++ if (IS_ERR(upper)) ++ goto out_dput; ++ ++ err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true); ++ if (err) ++ goto out_dput2; ++ ++ if (S_ISDIR(stat->mode)) { ++ err = ovl_set_opaque(newdentry); ++ if (err) ++ goto out_cleanup; ++ ++ err = ovl_do_rename(wdir, newdentry, udir, upper, ++ RENAME_EXCHANGE); ++ if (err) ++ goto out_cleanup; ++ ++ ovl_cleanup(wdir, upper); ++ } else { ++ err = ovl_do_rename(wdir, newdentry, udir, upper, 0); ++ if (err) ++ goto out_cleanup; ++ } ++ ovl_dentry_version_inc(dentry->d_parent); ++ ovl_dentry_update(dentry, newdentry); ++ ovl_copyattr(newdentry->d_inode, inode); ++ d_instantiate(dentry, inode); ++ newdentry = NULL; ++out_dput2: ++ dput(upper); ++out_dput: ++ dput(newdentry); ++out_unlock: ++ unlock_rename(workdir, upperdir); ++out: ++ return err; ++ ++out_cleanup: ++ ovl_cleanup(wdir, newdentry); ++ goto out_dput2; ++} ++ ++static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev, ++ const char *link, struct dentry *hardlink) ++{ ++ int err; ++ struct inode *inode; ++ struct kstat stat = { ++ .mode = mode, ++ .rdev = rdev, ++ }; ++ ++ err = -ENOMEM; ++ inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata); ++ if (!inode) ++ goto out; ++ ++ err = ovl_copy_up(dentry->d_parent); ++ if (err) ++ goto out_iput; ++ ++ if (!ovl_dentry_is_opaque(dentry)) { ++ err = ovl_create_upper(dentry, inode, &stat, link, hardlink); ++ } else { ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_iput; ++ ++ /* ++ * CAP_SYS_ADMIN for setting opaque xattr ++ * CAP_DAC_OVERRIDE for create in workdir, rename ++ * CAP_FOWNER for removing whiteout from sticky dir ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ old_cred = override_creds(override_cred); ++ ++ err = ovl_create_over_whiteout(dentry, inode, &stat, link, ++ hardlink); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ } ++ ++ if (!err) ++ inode = NULL; ++out_iput: ++ iput(inode); ++out: ++ return err; ++} ++ ++static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, ++ const char *link) ++{ ++ int err; ++ ++ err = ovl_want_write(dentry); ++ if (!err) { ++ err = ovl_create_or_link(dentry, mode, rdev, link, NULL); ++ ovl_drop_write(dentry); ++ } ++ ++ return err; ++} ++ ++static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ bool excl) ++{ ++ return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL); ++} ++ ++static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ++{ ++ return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL); ++} ++ ++static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, ++ dev_t rdev) ++{ ++ /* Don't allow 
creation of "whiteout" on overlay */ ++ if (S_ISCHR(mode) && rdev == WHITEOUT_DEV) ++ return -EPERM; ++ ++ return ovl_create_object(dentry, mode, rdev, NULL); ++} ++ ++static int ovl_symlink(struct inode *dir, struct dentry *dentry, ++ const char *link) ++{ ++ return ovl_create_object(dentry, S_IFLNK, 0, link); ++} ++ ++static int ovl_link(struct dentry *old, struct inode *newdir, ++ struct dentry *new) ++{ ++ int err; ++ struct dentry *upper; ++ ++ err = ovl_want_write(old); ++ if (err) ++ goto out; ++ ++ err = ovl_copy_up(old); ++ if (err) ++ goto out_drop_write; ++ ++ upper = ovl_dentry_upper(old); ++ err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper); ++ ++out_drop_write: ++ ovl_drop_write(old); ++out: ++ return err; ++} ++ ++static int ovl_remove_and_whiteout(struct dentry *dentry, ++ enum ovl_path_type type, bool is_dir) ++{ ++ struct dentry *workdir = ovl_workdir(dentry); ++ struct inode *wdir = workdir->d_inode; ++ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); ++ struct inode *udir = upperdir->d_inode; ++ struct dentry *whiteout; ++ struct dentry *upper; ++ struct dentry *opaquedir = NULL; ++ int err; ++ ++ if (is_dir) { ++ opaquedir = ovl_check_empty_and_clear(dentry, type); ++ err = PTR_ERR(opaquedir); ++ if (IS_ERR(opaquedir)) ++ goto out; ++ } ++ ++ err = ovl_lock_rename_workdir(workdir, upperdir); ++ if (err) ++ goto out_dput; ++ ++ whiteout = ovl_whiteout(workdir, dentry); ++ err = PTR_ERR(whiteout); ++ if (IS_ERR(whiteout)) ++ goto out_unlock; ++ ++ if (type == OVL_PATH_LOWER) { ++ upper = lookup_one_len(dentry->d_name.name, upperdir, ++ dentry->d_name.len); ++ err = PTR_ERR(upper); ++ if (IS_ERR(upper)) ++ goto kill_whiteout; ++ ++ err = ovl_do_rename(wdir, whiteout, udir, upper, 0); ++ dput(upper); ++ if (err) ++ goto kill_whiteout; ++ } else { ++ int flags = 0; ++ ++ upper = ovl_dentry_upper(dentry); ++ if (opaquedir) ++ upper = opaquedir; ++ err = -ESTALE; ++ if (upper->d_parent != upperdir) ++ goto kill_whiteout; ++ ++ if (is_dir) ++ flags |= RENAME_EXCHANGE; ++ ++ err = ovl_do_rename(wdir, whiteout, udir, upper, flags); ++ if (err) ++ goto kill_whiteout; ++ ++ if (is_dir) ++ ovl_cleanup(wdir, upper); ++ } ++ ovl_dentry_version_inc(dentry->d_parent); ++out_d_drop: ++ d_drop(dentry); ++ dput(whiteout); ++out_unlock: ++ unlock_rename(workdir, upperdir); ++out_dput: ++ dput(opaquedir); ++out: ++ return err; ++ ++kill_whiteout: ++ ovl_cleanup(wdir, whiteout); ++ goto out_d_drop; ++} ++ ++static int ovl_remove_upper(struct dentry *dentry, bool is_dir) ++{ ++ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); ++ struct inode *dir = upperdir->d_inode; ++ struct dentry *upper = ovl_dentry_upper(dentry); ++ int err; ++ ++ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); ++ err = -ESTALE; ++ if (upper->d_parent == upperdir) { ++ if (is_dir) ++ err = vfs_rmdir(dir, upper); ++ else ++ err = vfs_unlink(dir, upper, NULL); ++ ++ ovl_dentry_version_inc(dentry->d_parent); ++ } ++ ++ /* ++ * Keeping this dentry hashed would mean having to release ++ * upperpath/lowerpath, which could only be done if we are the ++ * sole user of this dentry. Too tricky... Just unhash for ++ * now. 
++ */ ++ d_drop(dentry); ++ mutex_unlock(&dir->i_mutex); ++ ++ return err; ++} ++ ++static inline int ovl_check_sticky(struct dentry *dentry) ++{ ++ struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode; ++ struct inode *inode = ovl_dentry_real(dentry)->d_inode; ++ ++ if (check_sticky(dir, inode)) ++ return -EPERM; ++ ++ return 0; ++} ++ ++static int ovl_do_remove(struct dentry *dentry, bool is_dir) ++{ ++ enum ovl_path_type type; ++ int err; ++ ++ err = ovl_check_sticky(dentry); ++ if (err) ++ goto out; ++ ++ err = ovl_want_write(dentry); ++ if (err) ++ goto out; ++ ++ err = ovl_copy_up(dentry->d_parent); ++ if (err) ++ goto out_drop_write; ++ ++ type = ovl_path_type(dentry); ++ if (type == OVL_PATH_UPPER && !ovl_dentry_is_opaque(dentry)) { ++ err = ovl_remove_upper(dentry, is_dir); ++ } else { ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_drop_write; ++ ++ /* ++ * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir ++ * CAP_DAC_OVERRIDE for create in workdir, rename ++ * CAP_FOWNER for removing whiteout from sticky dir ++ * CAP_FSETID for chmod of opaque dir ++ * CAP_CHOWN for chown of opaque dir ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ cap_raise(override_cred->cap_effective, CAP_FSETID); ++ cap_raise(override_cred->cap_effective, CAP_CHOWN); ++ old_cred = override_creds(override_cred); ++ ++ err = ovl_remove_and_whiteout(dentry, type, is_dir); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ } ++out_drop_write: ++ ovl_drop_write(dentry); ++out: ++ return err; ++} ++ ++static int ovl_unlink(struct inode *dir, struct dentry *dentry) ++{ ++ return ovl_do_remove(dentry, false); ++} ++ ++static int ovl_rmdir(struct inode *dir, struct dentry *dentry) ++{ ++ return ovl_do_remove(dentry, true); ++} ++ ++static int ovl_rename2(struct inode *olddir, struct dentry *old, ++ struct inode *newdir, struct dentry *new, ++ unsigned int flags) ++{ ++ int err; ++ enum ovl_path_type old_type; ++ enum ovl_path_type new_type; ++ struct dentry *old_upperdir; ++ struct dentry *new_upperdir; ++ struct dentry *olddentry; ++ struct dentry *newdentry; ++ struct dentry *trap; ++ bool old_opaque; ++ bool new_opaque; ++ bool new_create = false; ++ bool cleanup_whiteout = false; ++ bool overwrite = !(flags & RENAME_EXCHANGE); ++ bool is_dir = S_ISDIR(old->d_inode->i_mode); ++ bool new_is_dir = false; ++ struct dentry *opaquedir = NULL; ++ const struct cred *old_cred = NULL; ++ struct cred *override_cred = NULL; ++ ++ err = -EINVAL; ++ if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) ++ goto out; ++ ++ flags &= ~RENAME_NOREPLACE; ++ ++ err = ovl_check_sticky(old); ++ if (err) ++ goto out; ++ ++ /* Don't copy up directory trees */ ++ old_type = ovl_path_type(old); ++ err = -EXDEV; ++ if (old_type != OVL_PATH_UPPER && is_dir) ++ goto out; ++ ++ if (new->d_inode) { ++ err = ovl_check_sticky(new); ++ if (err) ++ goto out; ++ ++ if (S_ISDIR(new->d_inode->i_mode)) ++ new_is_dir = true; ++ ++ new_type = ovl_path_type(new); ++ err = -EXDEV; ++ if (!overwrite && new_type != OVL_PATH_UPPER && new_is_dir) ++ goto out; ++ ++ err = 0; ++ if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { ++ if (ovl_dentry_lower(old)->d_inode == ++ ovl_dentry_lower(new)->d_inode) ++ goto out; ++ } ++ if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) 
{ ++ if (ovl_dentry_upper(old)->d_inode == ++ ovl_dentry_upper(new)->d_inode) ++ goto out; ++ } ++ } else { ++ new_type = OVL_PATH_UPPER; ++ } ++ ++ err = ovl_want_write(old); ++ if (err) ++ goto out; ++ ++ err = ovl_copy_up(old); ++ if (err) ++ goto out_drop_write; ++ ++ err = ovl_copy_up(new->d_parent); ++ if (err) ++ goto out_drop_write; ++ if (!overwrite) { ++ err = ovl_copy_up(new); ++ if (err) ++ goto out_drop_write; ++ } ++ ++ old_opaque = ovl_dentry_is_opaque(old) || old_type != OVL_PATH_UPPER; ++ new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER; ++ ++ if (old_opaque || new_opaque) { ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_drop_write; ++ ++ /* ++ * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir ++ * CAP_DAC_OVERRIDE for create in workdir ++ * CAP_FOWNER for removing whiteout from sticky dir ++ * CAP_FSETID for chmod of opaque dir ++ * CAP_CHOWN for chown of opaque dir ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ cap_raise(override_cred->cap_effective, CAP_FSETID); ++ cap_raise(override_cred->cap_effective, CAP_CHOWN); ++ old_cred = override_creds(override_cred); ++ } ++ ++ if (overwrite && new_type != OVL_PATH_UPPER && new_is_dir) { ++ opaquedir = ovl_check_empty_and_clear(new, new_type); ++ err = PTR_ERR(opaquedir); ++ if (IS_ERR(opaquedir)) { ++ opaquedir = NULL; ++ goto out_revert_creds; ++ } ++ } ++ ++ if (overwrite) { ++ if (old_opaque) { ++ if (new->d_inode || !new_opaque) { ++ /* Whiteout source */ ++ flags |= RENAME_WHITEOUT; ++ } else { ++ /* Switch whiteouts */ ++ flags |= RENAME_EXCHANGE; ++ } ++ } else if (is_dir && !new->d_inode && new_opaque) { ++ flags |= RENAME_EXCHANGE; ++ cleanup_whiteout = true; ++ } ++ } ++ ++ old_upperdir = ovl_dentry_upper(old->d_parent); ++ new_upperdir = ovl_dentry_upper(new->d_parent); ++ ++ trap = lock_rename(new_upperdir, old_upperdir); ++ ++ olddentry = ovl_dentry_upper(old); ++ newdentry = ovl_dentry_upper(new); ++ if (newdentry) { ++ if (opaquedir) { ++ newdentry = opaquedir; ++ opaquedir = NULL; ++ } else { ++ dget(newdentry); ++ } ++ } else { ++ new_create = true; ++ newdentry = lookup_one_len(new->d_name.name, new_upperdir, ++ new->d_name.len); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out_unlock; ++ } ++ ++ err = -ESTALE; ++ if (olddentry->d_parent != old_upperdir) ++ goto out_dput; ++ if (newdentry->d_parent != new_upperdir) ++ goto out_dput; ++ if (olddentry == trap) ++ goto out_dput; ++ if (newdentry == trap) ++ goto out_dput; ++ ++ if (is_dir && !old_opaque && new_opaque) { ++ err = ovl_set_opaque(olddentry); ++ if (err) ++ goto out_dput; ++ } ++ if (!overwrite && new_is_dir && old_opaque && !new_opaque) { ++ err = ovl_set_opaque(newdentry); ++ if (err) ++ goto out_dput; ++ } ++ ++ if (old_opaque || new_opaque) { ++ err = ovl_do_rename(old_upperdir->d_inode, olddentry, ++ new_upperdir->d_inode, newdentry, ++ flags); ++ } else { ++ /* No debug for the plain case */ ++ BUG_ON(flags & ~RENAME_EXCHANGE); ++ err = vfs_rename(old_upperdir->d_inode, olddentry, ++ new_upperdir->d_inode, newdentry, ++ NULL, flags); ++ } ++ ++ if (err) { ++ if (is_dir && !old_opaque && new_opaque) ++ ovl_remove_opaque(olddentry); ++ if (!overwrite && new_is_dir && old_opaque && !new_opaque) ++ ovl_remove_opaque(newdentry); ++ goto out_dput; ++ } ++ ++ if (is_dir && old_opaque && !new_opaque) ++ 
ovl_remove_opaque(olddentry); ++ if (!overwrite && new_is_dir && !old_opaque && new_opaque) ++ ovl_remove_opaque(newdentry); ++ ++ if (old_opaque != new_opaque) { ++ ovl_dentry_set_opaque(old, new_opaque); ++ if (!overwrite) ++ ovl_dentry_set_opaque(new, old_opaque); ++ } ++ ++ if (cleanup_whiteout) ++ ovl_cleanup(old_upperdir->d_inode, newdentry); ++ ++ /* ++ * Copy-up already unhashed it, but then vfs_rename() rehashed it. ++ * See comment in ovl_copy_up_locked() as to why we drop the dentry(). ++ */ ++ if (!is_dir && old_type == OVL_PATH_LOWER) ++ d_drop(old); ++ ++ if (!overwrite && !new_is_dir && new_type == OVL_PATH_LOWER) ++ d_drop(new); ++ ++ ovl_dentry_version_inc(old->d_parent); ++ ovl_dentry_version_inc(new->d_parent); ++ ++out_dput: ++ dput(newdentry); ++out_unlock: ++ unlock_rename(new_upperdir, old_upperdir); ++out_revert_creds: ++ if (old_opaque || new_opaque) { ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ } ++out_drop_write: ++ ovl_drop_write(old); ++out: ++ dput(opaquedir); ++ return err; ++} ++ ++static int ovl_rename(struct inode *olddir, struct dentry *old, ++ struct inode *newdir, struct dentry *new) ++{ ++ return ovl_rename2(olddir, old, newdir, new, 0); ++} ++ ++ ++const struct inode_operations ovl_dir_inode_operations = { ++ .lookup = ovl_lookup, ++ .mkdir = ovl_mkdir, ++ .symlink = ovl_symlink, ++ .unlink = ovl_unlink, ++ .rmdir = ovl_rmdir, ++ .rename = ovl_rename, ++ .rename2 = ovl_rename2, ++ .link = ovl_link, ++ .setattr = ovl_setattr, ++ .create = ovl_create, ++ .mknod = ovl_mknod, ++ .permission = ovl_permission, ++ .getattr = ovl_dir_getattr, ++ .setxattr = ovl_setxattr, ++ .getxattr = ovl_getxattr, ++ .listxattr = ovl_listxattr, ++ .removexattr = ovl_removexattr, ++}; +diff -urpN a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c +--- a/fs/overlayfs/inode.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/inode.c 2016-12-21 21:06:34.014677298 +0000 +@@ -0,0 +1,408 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include "overlayfs.h" ++ ++int ovl_setattr(struct dentry *dentry, struct iattr *attr) ++{ ++ struct dentry *upperdentry; ++ int err; ++ ++ err = ovl_want_write(dentry); ++ if (err) ++ goto out; ++ ++ if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry)) ++ err = ovl_copy_up_truncate(dentry, attr->ia_size); ++ else ++ err = ovl_copy_up(dentry); ++ if (err) ++ goto out_drop_write; ++ ++ upperdentry = ovl_dentry_upper(dentry); ++ ++ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) ++ attr->ia_valid &= ~ATTR_MODE; ++ ++ mutex_lock(&upperdentry->d_inode->i_mutex); ++ err = notify_change(upperdentry, attr, NULL); ++ if (!err) ++ ovl_copyattr(upperdentry->d_inode, dentry->d_inode); ++ mutex_unlock(&upperdentry->d_inode->i_mutex); ++ ++out_drop_write: ++ ovl_drop_write(dentry); ++out: ++ return err; ++} ++ ++static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry, ++ struct kstat *stat) ++{ ++ struct path realpath; ++ ++ ovl_path_real(dentry, &realpath); ++ return vfs_getattr(&realpath, stat); ++} ++ ++int ovl_permission(struct inode *inode, int mask) ++{ ++ struct ovl_entry *oe; ++ struct dentry *alias = NULL; ++ struct inode *realinode; ++ struct dentry *realdentry; ++ bool is_upper; ++ int err; ++ ++ if (S_ISDIR(inode->i_mode)) { ++ oe = inode->i_private; ++ } else if (mask & MAY_NOT_BLOCK) { ++ return -ECHILD; ++ } else { ++ /* ++ * For non-directories find an alias and get the info ++ * from there. ++ */ ++ alias = d_find_any_alias(inode); ++ if (WARN_ON(!alias)) ++ return -ENOENT; ++ ++ oe = alias->d_fsdata; ++ } ++ ++ realdentry = ovl_entry_real(oe, &is_upper); ++ ++ /* Careful in RCU walk mode */ ++ realinode = ACCESS_ONCE(realdentry->d_inode); ++ if (!realinode) { ++ WARN_ON(!(mask & MAY_NOT_BLOCK)); ++ err = -ENOENT; ++ goto out_dput; ++ } ++ ++ if (mask & MAY_WRITE) { ++ umode_t mode = realinode->i_mode; ++ ++ /* ++ * Writes will always be redirected to upper layer, so ++ * ignore lower layer being read-only. ++ * ++ * If the overlay itself is read-only then proceed ++ * with the permission check, don't return EROFS. ++ * This will only happen if this is the lower layer of ++ * another overlayfs. ++ * ++ * If upper fs becomes read-only after the overlay was ++ * constructed return EROFS to prevent modification of ++ * upper layer. 
++ */ ++ err = -EROFS; ++ if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) && ++ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) ++ goto out_dput; ++ } ++ ++ err = __inode_permission(realinode, mask); ++out_dput: ++ dput(alias); ++ return err; ++} ++ ++ ++struct ovl_link_data { ++ struct dentry *realdentry; ++ void *cookie; ++}; ++ ++static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd) ++{ ++ void *ret; ++ struct dentry *realdentry; ++ struct inode *realinode; ++ ++ realdentry = ovl_dentry_real(dentry); ++ realinode = realdentry->d_inode; ++ ++ if (WARN_ON(!realinode->i_op->follow_link)) ++ return ERR_PTR(-EPERM); ++ ++ ret = realinode->i_op->follow_link(realdentry, nd); ++ if (IS_ERR(ret)) ++ return ret; ++ ++ if (realinode->i_op->put_link) { ++ struct ovl_link_data *data; ++ ++ data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL); ++ if (!data) { ++ realinode->i_op->put_link(realdentry, nd, ret); ++ return ERR_PTR(-ENOMEM); ++ } ++ data->realdentry = realdentry; ++ data->cookie = ret; ++ ++ return data; ++ } else { ++ return NULL; ++ } ++} ++ ++static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c) ++{ ++ struct inode *realinode; ++ struct ovl_link_data *data = c; ++ ++ if (!data) ++ return; ++ ++ realinode = data->realdentry->d_inode; ++ realinode->i_op->put_link(data->realdentry, nd, data->cookie); ++ kfree(data); ++} ++ ++static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) ++{ ++ struct path realpath; ++ struct inode *realinode; ++ ++ ovl_path_real(dentry, &realpath); ++ realinode = realpath.dentry->d_inode; ++ ++ if (!realinode->i_op->readlink) ++ return -EINVAL; ++ ++ touch_atime(&realpath); ++ ++ return realinode->i_op->readlink(realpath.dentry, buf, bufsiz); ++} ++ ++ ++static bool ovl_is_private_xattr(const char *name) ++{ ++ return strncmp(name, "trusted.overlay.", 14) == 0; ++} ++ ++int ovl_setxattr(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags) ++{ ++ int err; ++ struct dentry *upperdentry; ++ ++ err = ovl_want_write(dentry); ++ if (err) ++ goto out; ++ ++ err = -EPERM; ++ if (ovl_is_private_xattr(name)) ++ goto out_drop_write; ++ ++ err = ovl_copy_up(dentry); ++ if (err) ++ goto out_drop_write; ++ ++ upperdentry = ovl_dentry_upper(dentry); ++ err = vfs_setxattr(upperdentry, name, value, size, flags); ++ ++out_drop_write: ++ ovl_drop_write(dentry); ++out: ++ return err; ++} ++ ++ssize_t ovl_getxattr(struct dentry *dentry, const char *name, ++ void *value, size_t size) ++{ ++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && ++ ovl_is_private_xattr(name)) ++ return -ENODATA; ++ ++ return vfs_getxattr(ovl_dentry_real(dentry), name, value, size); ++} ++ ++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) ++{ ++ ssize_t res; ++ int off; ++ ++ res = vfs_listxattr(ovl_dentry_real(dentry), list, size); ++ if (res <= 0 || size == 0) ++ return res; ++ ++ if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE) ++ return res; ++ ++ /* filter out private xattrs */ ++ for (off = 0; off < res;) { ++ char *s = list + off; ++ size_t slen = strlen(s) + 1; ++ ++ BUG_ON(off + slen > res); ++ ++ if (ovl_is_private_xattr(s)) { ++ res -= slen; ++ memmove(s, s + slen, res - off); ++ } else { ++ off += slen; ++ } ++ } ++ ++ return res; ++} ++ ++int ovl_removexattr(struct dentry *dentry, const char *name) ++{ ++ int err; ++ struct path realpath; ++ enum ovl_path_type type; ++ ++ err = ovl_want_write(dentry); ++ if (err) ++ goto out; ++ ++ if 
(ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && ++ ovl_is_private_xattr(name)) ++ goto out_drop_write; ++ ++ type = ovl_path_real(dentry, &realpath); ++ if (type == OVL_PATH_LOWER) { ++ err = vfs_getxattr(realpath.dentry, name, NULL, 0); ++ if (err < 0) ++ goto out_drop_write; ++ ++ err = ovl_copy_up(dentry); ++ if (err) ++ goto out_drop_write; ++ ++ ovl_path_upper(dentry, &realpath); ++ } ++ ++ err = vfs_removexattr(realpath.dentry, name); ++out_drop_write: ++ ovl_drop_write(dentry); ++out: ++ return err; ++} ++ ++static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, ++ struct dentry *realdentry) ++{ ++ if (type != OVL_PATH_LOWER) ++ return false; ++ ++ if (special_file(realdentry->d_inode->i_mode)) ++ return false; ++ ++ if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC)) ++ return false; ++ ++ return true; ++} ++ ++static int ovl_dentry_open(struct dentry *dentry, struct file *file, ++ const struct cred *cred) ++{ ++ int err; ++ struct path realpath; ++ enum ovl_path_type type; ++ bool want_write = false; ++ ++ type = ovl_path_real(dentry, &realpath); ++ if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) { ++ want_write = true; ++ err = ovl_want_write(dentry); ++ if (err) ++ goto out; ++ ++ if (file->f_flags & O_TRUNC) ++ err = ovl_copy_up_truncate(dentry, 0); ++ else ++ err = ovl_copy_up(dentry); ++ if (err) ++ goto out_drop_write; ++ ++ ovl_path_upper(dentry, &realpath); ++ } ++ ++ err = vfs_open(&realpath, file, cred); ++out_drop_write: ++ if (want_write) ++ ovl_drop_write(dentry); ++out: ++ return err; ++} ++ ++static const struct inode_operations ovl_file_inode_operations = { ++ .setattr = ovl_setattr, ++ .permission = ovl_permission, ++ .getattr = ovl_getattr, ++ .setxattr = ovl_setxattr, ++ .getxattr = ovl_getxattr, ++ .listxattr = ovl_listxattr, ++ .removexattr = ovl_removexattr, ++ .dentry_open = ovl_dentry_open, ++}; ++ ++static const struct inode_operations ovl_symlink_inode_operations = { ++ .setattr = ovl_setattr, ++ .follow_link = ovl_follow_link, ++ .put_link = ovl_put_link, ++ .readlink = ovl_readlink, ++ .getattr = ovl_getattr, ++ .setxattr = ovl_setxattr, ++ .getxattr = ovl_getxattr, ++ .listxattr = ovl_listxattr, ++ .removexattr = ovl_removexattr, ++}; ++ ++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, ++ struct ovl_entry *oe) ++{ ++ struct inode *inode; ++ ++ inode = new_inode(sb); ++ if (!inode) ++ return NULL; ++ ++ mode &= S_IFMT; ++ ++ inode->i_ino = get_next_ino(); ++ inode->i_mode = mode; ++ inode->i_flags |= S_NOATIME | S_NOCMTIME; ++ ++ switch (mode) { ++ case S_IFDIR: ++ inode->i_private = oe; ++ inode->i_op = &ovl_dir_inode_operations; ++ inode->i_fop = &ovl_dir_operations; ++ break; ++ ++ case S_IFLNK: ++ inode->i_op = &ovl_symlink_inode_operations; ++ break; ++ ++ case S_IFREG: ++ case S_IFSOCK: ++ case S_IFBLK: ++ case S_IFCHR: ++ case S_IFIFO: ++ inode->i_op = &ovl_file_inode_operations; ++ break; ++ ++ default: ++ WARN(1, "illegal file type: %i\n", mode); ++ iput(inode); ++ inode = NULL; ++ } ++ ++ return inode; ++ ++} +diff -urpN a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h +--- a/fs/overlayfs/overlayfs.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/overlayfs.h 2016-12-21 21:06:34.014677298 +0000 +@@ -0,0 +1,187 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
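The xattr handlers in the inode.c hunk above hide the overlay's private attributes: ovl_is_private_xattr() compares only the first 14 bytes of "trusted.overlay.", so any name sharing that shorter prefix is treated as private, and ovl_getxattr()/ovl_listxattr() filter such names out when the parent directory is a merged one. A small, hypothetical user-space check of that filtering (the path /merged/dir is invented, and trusted.* attributes are only visible to privileged callers in the first place):

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
        char buf[4096];
        ssize_t len = listxattr("/merged/dir", buf, sizeof(buf));

        if (len < 0) {
            perror("listxattr");
            return 1;
        }
        /* A "trusted.overlay.opaque" attribute set on the upper directory
         * is expected to be filtered out of this listing by ovl_listxattr(). */
        for (ssize_t off = 0; off < len; off += strlen(buf + off) + 1)
            printf("%s\n", buf + off);
        return 0;
    }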
++ */ ++ ++#include ++ ++struct ovl_entry; ++ ++enum ovl_path_type { ++ OVL_PATH_UPPER, ++ OVL_PATH_MERGE, ++ OVL_PATH_LOWER, ++}; ++ ++extern const char *ovl_opaque_xattr; ++extern const struct dentry_operations ovl_dentry_operations; ++ ++static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) ++{ ++ int err = vfs_rmdir(dir, dentry); ++ pr_debug("rmdir(%pd2) = %i\n", dentry, err); ++ return err; ++} ++ ++static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry) ++{ ++ int err = vfs_unlink(dir, dentry, NULL); ++ pr_debug("unlink(%pd2) = %i\n", dentry, err); ++ return err; ++} ++ ++static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir, ++ struct dentry *new_dentry, bool debug) ++{ ++ int err = vfs_link(old_dentry, dir, new_dentry, NULL); ++ if (debug) { ++ pr_debug("link(%pd2, %pd2) = %i\n", ++ old_dentry, new_dentry, err); ++ } ++ return err; ++} ++ ++static inline int ovl_do_create(struct inode *dir, struct dentry *dentry, ++ umode_t mode, bool debug) ++{ ++ int err = vfs_create(dir, dentry, mode, true); ++ if (debug) ++ pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err); ++ return err; ++} ++ ++static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry, ++ umode_t mode, bool debug) ++{ ++ int err = vfs_mkdir(dir, dentry, mode); ++ if (debug) ++ pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err); ++ return err; ++} ++ ++static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry, ++ umode_t mode, dev_t dev, bool debug) ++{ ++ int err = vfs_mknod(dir, dentry, mode, dev); ++ if (debug) { ++ pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", ++ dentry, mode, dev, err); ++ } ++ return err; ++} ++ ++static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry, ++ const char *oldname, bool debug) ++{ ++ int err = vfs_symlink(dir, dentry, oldname); ++ if (debug) ++ pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err); ++ return err; ++} ++ ++static inline int ovl_do_setxattr(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags) ++{ ++ int err = vfs_setxattr(dentry, name, value, size, flags); ++ pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n", ++ dentry, name, (int) size, (char *) value, flags, err); ++ return err; ++} ++ ++static inline int ovl_do_removexattr(struct dentry *dentry, const char *name) ++{ ++ int err = vfs_removexattr(dentry, name); ++ pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err); ++ return err; ++} ++ ++static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry, ++ struct inode *newdir, struct dentry *newdentry, ++ unsigned int flags) ++{ ++ int err; ++ ++ pr_debug("rename2(%pd2, %pd2, 0x%x)\n", ++ olddentry, newdentry, flags); ++ ++ err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags); ++ ++ if (err) { ++ pr_debug("...rename2(%pd2, %pd2, ...) 
= %i\n", ++ olddentry, newdentry, err); ++ } ++ return err; ++} ++ ++static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry) ++{ ++ int err = vfs_whiteout(dir, dentry); ++ pr_debug("whiteout(%pd2) = %i\n", dentry, err); ++ return err; ++} ++ ++enum ovl_path_type ovl_path_type(struct dentry *dentry); ++u64 ovl_dentry_version_get(struct dentry *dentry); ++void ovl_dentry_version_inc(struct dentry *dentry); ++void ovl_path_upper(struct dentry *dentry, struct path *path); ++void ovl_path_lower(struct dentry *dentry, struct path *path); ++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); ++struct dentry *ovl_dentry_upper(struct dentry *dentry); ++struct dentry *ovl_dentry_lower(struct dentry *dentry); ++struct dentry *ovl_dentry_real(struct dentry *dentry); ++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper); ++struct dentry *ovl_workdir(struct dentry *dentry); ++int ovl_want_write(struct dentry *dentry); ++void ovl_drop_write(struct dentry *dentry); ++bool ovl_dentry_is_opaque(struct dentry *dentry); ++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque); ++bool ovl_is_whiteout(struct dentry *dentry); ++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); ++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int flags); ++struct file *ovl_path_open(struct path *path, int flags); ++ ++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry, ++ struct kstat *stat, const char *link); ++ ++/* readdir.c */ ++extern const struct file_operations ovl_dir_operations; ++int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list); ++void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list); ++void ovl_cache_free(struct list_head *list); ++ ++/* inode.c */ ++int ovl_setattr(struct dentry *dentry, struct iattr *attr); ++int ovl_permission(struct inode *inode, int mask); ++int ovl_setxattr(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags); ++ssize_t ovl_getxattr(struct dentry *dentry, const char *name, ++ void *value, size_t size); ++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); ++int ovl_removexattr(struct dentry *dentry, const char *name); ++ ++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, ++ struct ovl_entry *oe); ++static inline void ovl_copyattr(struct inode *from, struct inode *to) ++{ ++ to->i_uid = from->i_uid; ++ to->i_gid = from->i_gid; ++} ++ ++/* dir.c */ ++extern const struct inode_operations ovl_dir_inode_operations; ++struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); ++int ovl_create_real(struct inode *dir, struct dentry *newdentry, ++ struct kstat *stat, const char *link, ++ struct dentry *hardlink, bool debug); ++void ovl_cleanup(struct inode *dir, struct dentry *dentry); ++ ++/* copy_up.c */ ++int ovl_copy_up(struct dentry *dentry); ++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size); ++int ovl_copy_xattr(struct dentry *old, struct dentry *new); ++int ovl_set_attr(struct dentry *upper, struct kstat *stat); +diff -urpN a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c +--- a/fs/overlayfs/readdir.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/readdir.c 2016-12-21 21:06:34.014677298 +0000 +@@ -0,0 +1,518 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. 
++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "overlayfs.h" ++ ++struct ovl_cache_entry { ++ const char *name; ++ unsigned int len; ++ unsigned int type; ++ u64 ino; ++ bool is_whiteout; ++ struct list_head l_node; ++ struct rb_node node; ++}; ++ ++struct ovl_readdir_data { ++ struct dir_context ctx; ++ bool is_merge; ++ struct rb_root *root; ++ struct list_head *list; ++ struct list_head *middle; ++ int count; ++ int err; ++}; ++ ++struct ovl_dir_file { ++ bool is_real; ++ bool is_cached; ++ struct list_head cursor; ++ u64 cache_version; ++ struct list_head cache; ++ struct file *realfile; ++}; ++ ++static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) ++{ ++ return container_of(n, struct ovl_cache_entry, node); ++} ++ ++static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, ++ const char *name, int len) ++{ ++ struct rb_node *node = root->rb_node; ++ int cmp; ++ ++ while (node) { ++ struct ovl_cache_entry *p = ovl_cache_entry_from_node(node); ++ ++ cmp = strncmp(name, p->name, len); ++ if (cmp > 0) ++ node = p->node.rb_right; ++ else if (cmp < 0 || len < p->len) ++ node = p->node.rb_left; ++ else ++ return p; ++ } ++ ++ return NULL; ++} ++ ++static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, ++ u64 ino, unsigned int d_type) ++{ ++ struct ovl_cache_entry *p; ++ ++ p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL); ++ if (p) { ++ char *name_copy = (char *) (p + 1); ++ memcpy(name_copy, name, len); ++ name_copy[len] = '\0'; ++ p->name = name_copy; ++ p->len = len; ++ p->type = d_type; ++ p->ino = ino; ++ p->is_whiteout = false; ++ } ++ ++ return p; ++} ++ ++static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, ++ const char *name, int len, u64 ino, ++ unsigned int d_type) ++{ ++ struct rb_node **newp = &rdd->root->rb_node; ++ struct rb_node *parent = NULL; ++ struct ovl_cache_entry *p; ++ ++ while (*newp) { ++ int cmp; ++ struct ovl_cache_entry *tmp; ++ ++ parent = *newp; ++ tmp = ovl_cache_entry_from_node(*newp); ++ cmp = strncmp(name, tmp->name, len); ++ if (cmp > 0) ++ newp = &tmp->node.rb_right; ++ else if (cmp < 0 || len < tmp->len) ++ newp = &tmp->node.rb_left; ++ else ++ return 0; ++ } ++ ++ p = ovl_cache_entry_new(name, len, ino, d_type); ++ if (p == NULL) ++ return -ENOMEM; ++ ++ list_add_tail(&p->l_node, rdd->list); ++ rb_link_node(&p->node, parent, newp); ++ rb_insert_color(&p->node, rdd->root); ++ ++ return 0; ++} ++ ++static int ovl_fill_lower(struct ovl_readdir_data *rdd, ++ const char *name, int namelen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct ovl_cache_entry *p; ++ ++ p = ovl_cache_entry_find(rdd->root, name, namelen); ++ if (p) { ++ list_move_tail(&p->l_node, rdd->middle); ++ } else { ++ p = ovl_cache_entry_new(name, namelen, ino, d_type); ++ if (p == NULL) ++ rdd->err = -ENOMEM; ++ else ++ list_add_tail(&p->l_node, rdd->middle); ++ } ++ ++ return rdd->err; ++} ++ ++void ovl_cache_free(struct list_head *list) ++{ ++ struct ovl_cache_entry *p; ++ struct ovl_cache_entry *n; ++ ++ list_for_each_entry_safe(p, n, list, l_node) ++ kfree(p); ++ ++ INIT_LIST_HEAD(list); ++} ++ ++static int ovl_fill_merge(void *buf, const char *name, int namelen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct ovl_readdir_data *rdd = 
buf; ++ ++ rdd->count++; ++ if (!rdd->is_merge) ++ return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type); ++ else ++ return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type); ++} ++ ++static inline int ovl_dir_read(struct path *realpath, ++ struct ovl_readdir_data *rdd) ++{ ++ struct file *realfile; ++ int err; ++ ++ realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY); ++ if (IS_ERR(realfile)) ++ return PTR_ERR(realfile); ++ ++ rdd->ctx.pos = 0; ++ do { ++ rdd->count = 0; ++ rdd->err = 0; ++ err = iterate_dir(realfile, &rdd->ctx); ++ if (err >= 0) ++ err = rdd->err; ++ } while (!err && rdd->count); ++ fput(realfile); ++ ++ return err; ++} ++ ++static void ovl_dir_reset(struct file *file) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ enum ovl_path_type type = ovl_path_type(file->f_path.dentry); ++ ++ if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) { ++ list_del_init(&od->cursor); ++ ovl_cache_free(&od->cache); ++ od->is_cached = false; ++ } ++ WARN_ON(!od->is_real && type != OVL_PATH_MERGE); ++ if (od->is_real && type == OVL_PATH_MERGE) { ++ fput(od->realfile); ++ od->realfile = NULL; ++ od->is_real = false; ++ } ++} ++ ++static int ovl_dir_mark_whiteouts(struct dentry *dir, ++ struct ovl_readdir_data *rdd) ++{ ++ struct ovl_cache_entry *p; ++ struct dentry *dentry; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ override_cred = prepare_creds(); ++ if (!override_cred) { ++ ovl_cache_free(rdd->list); ++ return -ENOMEM; ++ } ++ ++ /* ++ * CAP_DAC_OVERRIDE for lookup ++ */ ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ old_cred = override_creds(override_cred); ++ ++ mutex_lock(&dir->d_inode->i_mutex); ++ list_for_each_entry(p, rdd->list, l_node) { ++ if (p->type != DT_CHR) ++ continue; ++ ++ dentry = lookup_one_len(p->name, dir, p->len); ++ if (IS_ERR(dentry)) ++ continue; ++ ++ p->is_whiteout = ovl_is_whiteout(dentry); ++ dput(dentry); ++ } ++ mutex_unlock(&dir->d_inode->i_mutex); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++ return 0; ++} ++ ++static inline int ovl_dir_read_merged(struct path *upperpath, ++ struct path *lowerpath, ++ struct list_head *list) ++{ ++ int err; ++ struct rb_root root = RB_ROOT; ++ struct list_head middle; ++ struct ovl_readdir_data rdd = { ++ .ctx.actor = ovl_fill_merge, ++ .list = list, ++ .root = &root, ++ .is_merge = false, ++ }; ++ ++ if (upperpath->dentry) { ++ err = ovl_dir_read(upperpath, &rdd); ++ if (err) ++ goto out; ++ ++ if (lowerpath->dentry) { ++ err = ovl_dir_mark_whiteouts(upperpath->dentry, &rdd); ++ if (err) ++ goto out; ++ } ++ } ++ if (lowerpath->dentry) { ++ /* ++ * Insert lowerpath entries before upperpath ones, this allows ++ * offsets to be reasonably constant ++ */ ++ list_add(&middle, rdd.list); ++ rdd.middle = &middle; ++ rdd.is_merge = true; ++ err = ovl_dir_read(lowerpath, &rdd); ++ list_del(&middle); ++ } ++out: ++ return err; ++ ++} ++ ++static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) ++{ ++ struct list_head *l; ++ loff_t off; ++ ++ l = od->cache.next; ++ for (off = 0; off < pos; off++) { ++ if (l == &od->cache) ++ break; ++ l = l->next; ++ } ++ list_move_tail(&od->cursor, l); ++} ++ ++static int ovl_iterate(struct file *file, struct dir_context *ctx) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ int res; ++ ++ if (!ctx->pos) ++ ovl_dir_reset(file); ++ ++ if (od->is_real) { ++ res = iterate_dir(od->realfile, ctx); ++ ++ return res; ++ } ++ ++ if (!od->is_cached) { ++ struct path lowerpath; ++ 
struct path upperpath; ++ ++ ovl_path_lower(file->f_path.dentry, &lowerpath); ++ ovl_path_upper(file->f_path.dentry, &upperpath); ++ ++ res = ovl_dir_read_merged(&upperpath, &lowerpath, &od->cache); ++ if (res) { ++ ovl_cache_free(&od->cache); ++ return res; ++ } ++ ++ od->cache_version = ovl_dentry_version_get(file->f_path.dentry); ++ od->is_cached = true; ++ ++ ovl_seek_cursor(od, ctx->pos); ++ } ++ ++ while (od->cursor.next != &od->cache) { ++ struct ovl_cache_entry *p; ++ ++ p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node); ++ if (!p->is_whiteout) { ++ if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) ++ break; ++ } ++ ctx->pos++; ++ list_move(&od->cursor, &p->l_node); ++ } ++ ++ return 0; ++} ++ ++static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) ++{ ++ loff_t res; ++ struct ovl_dir_file *od = file->private_data; ++ ++ mutex_lock(&file_inode(file)->i_mutex); ++ if (!file->f_pos) ++ ovl_dir_reset(file); ++ ++ if (od->is_real) { ++ res = vfs_llseek(od->realfile, offset, origin); ++ file->f_pos = od->realfile->f_pos; ++ } else { ++ res = -EINVAL; ++ ++ switch (origin) { ++ case SEEK_CUR: ++ offset += file->f_pos; ++ break; ++ case SEEK_SET: ++ break; ++ default: ++ goto out_unlock; ++ } ++ if (offset < 0) ++ goto out_unlock; ++ ++ if (offset != file->f_pos) { ++ file->f_pos = offset; ++ if (od->is_cached) ++ ovl_seek_cursor(od, offset); ++ } ++ res = offset; ++ } ++out_unlock: ++ mutex_unlock(&file_inode(file)->i_mutex); ++ ++ return res; ++} ++ ++static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, ++ int datasync) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ ++ /* May need to reopen directory if it got copied up */ ++ if (!od->realfile) { ++ struct path upperpath; ++ ++ ovl_path_upper(file->f_path.dentry, &upperpath); ++ od->realfile = ovl_path_open(&upperpath, O_RDONLY); ++ if (IS_ERR(od->realfile)) ++ return PTR_ERR(od->realfile); ++ } ++ ++ return vfs_fsync_range(od->realfile, start, end, datasync); ++} ++ ++static int ovl_dir_release(struct inode *inode, struct file *file) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ ++ list_del(&od->cursor); ++ ovl_cache_free(&od->cache); ++ if (od->realfile) ++ fput(od->realfile); ++ kfree(od); ++ ++ return 0; ++} ++ ++static int ovl_dir_open(struct inode *inode, struct file *file) ++{ ++ struct path realpath; ++ struct file *realfile; ++ struct ovl_dir_file *od; ++ enum ovl_path_type type; ++ ++ od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL); ++ if (!od) ++ return -ENOMEM; ++ ++ type = ovl_path_real(file->f_path.dentry, &realpath); ++ realfile = ovl_path_open(&realpath, file->f_flags); ++ if (IS_ERR(realfile)) { ++ kfree(od); ++ return PTR_ERR(realfile); ++ } ++ INIT_LIST_HEAD(&od->cache); ++ INIT_LIST_HEAD(&od->cursor); ++ od->is_cached = false; ++ od->realfile = realfile; ++ od->is_real = (type != OVL_PATH_MERGE); ++ file->private_data = od; ++ ++ return 0; ++} ++ ++const struct file_operations ovl_dir_operations = { ++ .read = generic_read_dir, ++ .open = ovl_dir_open, ++ .iterate = ovl_iterate, ++ .llseek = ovl_dir_llseek, ++ .fsync = ovl_dir_fsync, ++ .release = ovl_dir_release, ++}; ++ ++int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) ++{ ++ int err; ++ struct path lowerpath; ++ struct path upperpath; ++ struct ovl_cache_entry *p; ++ ++ ovl_path_upper(dentry, &upperpath); ++ ovl_path_lower(dentry, &lowerpath); ++ ++ err = ovl_dir_read_merged(&upperpath, &lowerpath, list); ++ if (err) ++ return err; ++ ++ err = 0; ++ ++ 
list_for_each_entry(p, list, l_node) { ++ if (p->is_whiteout) ++ continue; ++ ++ if (p->name[0] == '.') { ++ if (p->len == 1) ++ continue; ++ if (p->len == 2 && p->name[1] == '.') ++ continue; ++ } ++ err = -ENOTEMPTY; ++ break; ++ } ++ ++ return err; ++} ++ ++void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list) ++{ ++ struct ovl_cache_entry *p; ++ ++ mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_PARENT); ++ list_for_each_entry(p, list, l_node) { ++ struct dentry *dentry; ++ ++ if (!p->is_whiteout) ++ continue; ++ ++ dentry = lookup_one_len(p->name, upper, p->len); ++ if (IS_ERR(dentry)) { ++ pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n", ++ upper->d_name.name, p->len, p->name, ++ (int) PTR_ERR(dentry)); ++ continue; ++ } ++ ovl_cleanup(upper->d_inode, dentry); ++ dput(dentry); ++ } ++ mutex_unlock(&upper->d_inode->i_mutex); ++} +diff -urpN a/fs/overlayfs/super.c b/fs/overlayfs/super.c +--- a/fs/overlayfs/super.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/fs/overlayfs/super.c 2016-12-21 21:06:34.014677298 +0000 +@@ -0,0 +1,762 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "overlayfs.h" ++ ++MODULE_AUTHOR("Miklos Szeredi "); ++MODULE_DESCRIPTION("Overlay filesystem"); ++MODULE_LICENSE("GPL"); ++ ++#define OVERLAYFS_SUPER_MAGIC 0x794c764f ++ ++struct ovl_config { ++ char *lowerdir; ++ char *upperdir; ++ char *workdir; ++}; ++ ++/* private information held for overlayfs's superblock */ ++struct ovl_fs { ++ struct vfsmount *upper_mnt; ++ struct vfsmount *lower_mnt; ++ struct dentry *workdir; ++ long lower_namelen; ++ /* pathnames of lower and upper dirs, for show_options */ ++ struct ovl_config config; ++}; ++ ++/* private information held for every overlayfs dentry */ ++struct ovl_entry { ++ /* ++ * Keep "double reference" on upper dentries, so that ++ * d_delete() doesn't think it's OK to reset d_inode to NULL. 
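The readdir code above merges the two layers into one listing: upper entries are collected first into an rb-tree keyed by name, DT_CHR entries that turn out to be whiteouts are flagged, and lower entries are added only when the name has not been seen yet. A toy stand-alone sketch of that visibility rule, with invented directory contents and no attempt to reproduce the kernel's ordering, caching, or rb-tree:

    #include <stdio.h>
    #include <string.h>

    static const char *upper[]     = { "a", "b" };       /* upper layer      */
    static const char *whiteouts[] = { "c" };            /* deleted in upper */
    static const char *lower[]     = { "a", "c", "d" };  /* lower layer      */

    static int in(const char *name, const char **set, int n)
    {
        for (int i = 0; i < n; i++)
            if (strcmp(name, set[i]) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        /* Upper entries are always visible. */
        for (int i = 0; i < 2; i++)
            printf("%s\n", upper[i]);
        /* Lower entries show through unless shadowed or whited out. */
        for (int i = 0; i < 3; i++)
            if (!in(lower[i], upper, 2) && !in(lower[i], whiteouts, 1))
                printf("%s\n", lower[i]);
        return 0;    /* prints: a, b, d */
    }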
++ */ ++ struct dentry *__upperdentry; ++ struct dentry *lowerdentry; ++ union { ++ struct { ++ u64 version; ++ bool opaque; ++ }; ++ struct rcu_head rcu; ++ }; ++}; ++ ++const char *ovl_opaque_xattr = "trusted.overlay.opaque"; ++ ++ ++enum ovl_path_type ovl_path_type(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ if (oe->__upperdentry) { ++ if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode)) ++ return OVL_PATH_MERGE; ++ else ++ return OVL_PATH_UPPER; ++ } else { ++ return OVL_PATH_LOWER; ++ } ++} ++ ++static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) ++{ ++ struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry); ++ /* ++ * Make sure to order reads to upperdentry wrt ovl_dentry_update() ++ */ ++ smp_read_barrier_depends(); ++ return upperdentry; ++} ++ ++void ovl_path_upper(struct dentry *dentry, struct path *path) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ path->mnt = ofs->upper_mnt; ++ path->dentry = ovl_upperdentry_dereference(oe); ++} ++ ++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) ++{ ++ ++ enum ovl_path_type type = ovl_path_type(dentry); ++ ++ if (type == OVL_PATH_LOWER) ++ ovl_path_lower(dentry, path); ++ else ++ ovl_path_upper(dentry, path); ++ ++ return type; ++} ++ ++struct dentry *ovl_dentry_upper(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ return ovl_upperdentry_dereference(oe); ++} ++ ++struct dentry *ovl_dentry_lower(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ return oe->lowerdentry; ++} ++ ++struct dentry *ovl_dentry_real(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ struct dentry *realdentry; ++ ++ realdentry = ovl_upperdentry_dereference(oe); ++ if (!realdentry) ++ realdentry = oe->lowerdentry; ++ ++ return realdentry; ++} ++ ++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) ++{ ++ struct dentry *realdentry; ++ ++ realdentry = ovl_upperdentry_dereference(oe); ++ if (realdentry) { ++ *is_upper = true; ++ } else { ++ realdentry = oe->lowerdentry; ++ *is_upper = false; ++ } ++ return realdentry; ++} ++ ++void ovl_path_lower(struct dentry *dentry, struct path *path) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ path->mnt = ofs->lower_mnt; ++ path->dentry = oe->lowerdentry; ++} ++ ++int ovl_want_write(struct dentry *dentry) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ return mnt_want_write(ofs->upper_mnt); ++} ++ ++void ovl_drop_write(struct dentry *dentry) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ mnt_drop_write(ofs->upper_mnt); ++} ++ ++struct dentry *ovl_workdir(struct dentry *dentry) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ return ofs->workdir; ++} ++ ++bool ovl_dentry_is_opaque(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ return oe->opaque; ++} ++ ++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ oe->opaque = opaque; ++} ++ ++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex)); ++ WARN_ON(oe->__upperdentry); ++ BUG_ON(!upperdentry->d_inode); ++ /* ++ * Make sure upperdentry is consistent before making it visible to ++ * ovl_upperdentry_dereference(). 
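ovl_dentry_update() below publishes the upper dentry behind an smp_wmb(), and ovl_upperdentry_dereference() above pairs that with a dependent-read barrier, so a reader that observes the pointer also observes the fully initialised dentry behind it. A rough user-space analogue of the same publication pattern, using C11 release/acquire atomics in place of the kernel primitives (a sketch only, kept single-threaded so it actually runs):

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { int ready; };

    static struct obj slot;
    static _Atomic(struct obj *) published;

    static void publish(void)
    {
        slot.ready = 1;    /* initialise everything first ...             */
        atomic_store_explicit(&published, &slot, memory_order_release);
    }                      /* ... then make the pointer visible           */

    static struct obj *lookup(void)
    {
        /* The acquire load pairs with the release store above; after it,
         * the object's fields are guaranteed to be visible. */
        return atomic_load_explicit(&published, memory_order_acquire);
    }

    int main(void)
    {
        publish();
        struct obj *o = lookup();
        printf("%d\n", o ? o->ready : -1);    /* prints 1 */
        return 0;
    }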
++ */ ++ smp_wmb(); ++ oe->__upperdentry = dget(upperdentry); ++} ++ ++void ovl_dentry_version_inc(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); ++ oe->version++; ++} ++ ++u64 ovl_dentry_version_get(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); ++ return oe->version; ++} ++ ++bool ovl_is_whiteout(struct dentry *dentry) ++{ ++ struct inode *inode = dentry->d_inode; ++ ++ return inode && IS_WHITEOUT(inode); ++} ++ ++static bool ovl_is_opaquedir(struct dentry *dentry) ++{ ++ int res; ++ char val; ++ struct inode *inode = dentry->d_inode; ++ ++ if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) ++ return false; ++ ++ res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); ++ if (res == 1 && val == 'y') ++ return true; ++ ++ return false; ++} ++ ++static void ovl_entry_free(struct rcu_head *head) ++{ ++ struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu); ++ kfree(oe); ++} ++ ++static void ovl_dentry_release(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ if (oe) { ++ dput(oe->__upperdentry); ++ dput(oe->__upperdentry); ++ dput(oe->lowerdentry); ++ call_rcu(&oe->rcu, ovl_entry_free); ++ } ++} ++ ++const struct dentry_operations ovl_dentry_operations = { ++ .d_release = ovl_dentry_release, ++}; ++ ++static struct ovl_entry *ovl_alloc_entry(void) ++{ ++ return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); ++} ++ ++static inline struct dentry *ovl_lookup_real(struct dentry *dir, ++ struct qstr *name) ++{ ++ struct dentry *dentry; ++ ++ mutex_lock(&dir->d_inode->i_mutex); ++ dentry = lookup_one_len(name->name, dir, name->len); ++ mutex_unlock(&dir->d_inode->i_mutex); ++ ++ if (IS_ERR(dentry)) { ++ if (PTR_ERR(dentry) == -ENOENT) ++ dentry = NULL; ++ } else if (!dentry->d_inode) { ++ dput(dentry); ++ dentry = NULL; ++ } ++ return dentry; ++} ++ ++static int ovl_do_lookup(struct dentry *dentry) ++{ ++ struct ovl_entry *oe; ++ struct dentry *upperdir; ++ struct dentry *lowerdir; ++ struct dentry *upperdentry = NULL; ++ struct dentry *lowerdentry = NULL; ++ struct inode *inode = NULL; ++ int err; ++ ++ err = -ENOMEM; ++ oe = ovl_alloc_entry(); ++ if (!oe) ++ goto out; ++ ++ upperdir = ovl_dentry_upper(dentry->d_parent); ++ lowerdir = ovl_dentry_lower(dentry->d_parent); ++ ++ if (upperdir) { ++ upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); ++ err = PTR_ERR(upperdentry); ++ if (IS_ERR(upperdentry)) ++ goto out_put_dir; ++ ++ if (lowerdir && upperdentry) { ++ if (ovl_is_whiteout(upperdentry)) { ++ dput(upperdentry); ++ upperdentry = NULL; ++ oe->opaque = true; ++ } else if (ovl_is_opaquedir(upperdentry)) { ++ oe->opaque = true; ++ } ++ } ++ } ++ if (lowerdir && !oe->opaque) { ++ lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); ++ err = PTR_ERR(lowerdentry); ++ if (IS_ERR(lowerdentry)) ++ goto out_dput_upper; ++ } ++ ++ if (lowerdentry && upperdentry && ++ (!S_ISDIR(upperdentry->d_inode->i_mode) || ++ !S_ISDIR(lowerdentry->d_inode->i_mode))) { ++ dput(lowerdentry); ++ lowerdentry = NULL; ++ oe->opaque = true; ++ } ++ ++ if (lowerdentry || upperdentry) { ++ struct dentry *realdentry; ++ ++ realdentry = upperdentry ? 
upperdentry : lowerdentry; ++ err = -ENOMEM; ++ inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, ++ oe); ++ if (!inode) ++ goto out_dput; ++ ovl_copyattr(realdentry->d_inode, inode); ++ } ++ ++ if (upperdentry) ++ oe->__upperdentry = dget(upperdentry); ++ ++ if (lowerdentry) ++ oe->lowerdentry = lowerdentry; ++ ++ dentry->d_fsdata = oe; ++ dentry->d_op = &ovl_dentry_operations; ++ d_add(dentry, inode); ++ ++ return 0; ++ ++out_dput: ++ dput(lowerdentry); ++out_dput_upper: ++ dput(upperdentry); ++out_put_dir: ++ kfree(oe); ++out: ++ return err; ++} ++ ++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int flags) ++{ ++ int err = ovl_do_lookup(dentry); ++ ++ if (err) ++ return ERR_PTR(err); ++ ++ return NULL; ++} ++ ++struct file *ovl_path_open(struct path *path, int flags) ++{ ++ return dentry_open(path, flags, current_cred()); ++} ++ ++static void ovl_put_super(struct super_block *sb) ++{ ++ struct ovl_fs *ufs = sb->s_fs_info; ++ ++ dput(ufs->workdir); ++ mntput(ufs->upper_mnt); ++ mntput(ufs->lower_mnt); ++ ++ kfree(ufs->config.lowerdir); ++ kfree(ufs->config.upperdir); ++ kfree(ufs->config.workdir); ++ kfree(ufs); ++} ++ ++/** ++ * ovl_statfs ++ * @sb: The overlayfs super block ++ * @buf: The struct kstatfs to fill in with stats ++ * ++ * Get the filesystem statistics. As writes always target the upper layer ++ * filesystem pass the statfs to the same filesystem. ++ */ ++static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ struct dentry *root_dentry = dentry->d_sb->s_root; ++ struct path path; ++ int err; ++ ++ ovl_path_upper(root_dentry, &path); ++ ++ err = vfs_statfs(&path, buf); ++ if (!err) { ++ buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen); ++ buf->f_type = OVERLAYFS_SUPER_MAGIC; ++ } ++ ++ return err; ++} ++ ++/** ++ * ovl_show_options ++ * ++ * Prints the mount options for a given superblock. ++ * Returns zero; does not fail. 
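The lowerdir/upperdir/workdir options handled by ovl_parse_opt() below are what a mount request has to supply, and ovl_show_options() prints the same three strings back out. A minimal mount call matching that format, with invented directory paths (all three must already exist, workdir and upperdir must be on the same filesystem, and the caller needs CAP_SYS_ADMIN):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

        /* Filesystem type "overlay" matches ovl_fs_type in this patch. */
        if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
            perror("mount");
            return 1;
        }
        printf("mounted overlay at /merged\n");
        return 0;
    }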
++ */ ++static int ovl_show_options(struct seq_file *m, struct dentry *dentry) ++{ ++ struct super_block *sb = dentry->d_sb; ++ struct ovl_fs *ufs = sb->s_fs_info; ++ ++ seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); ++ seq_printf(m, ",upperdir=%s", ufs->config.upperdir); ++ seq_printf(m, ",workdir=%s", ufs->config.workdir); ++ return 0; ++} ++ ++static const struct super_operations ovl_super_operations = { ++ .put_super = ovl_put_super, ++ .statfs = ovl_statfs, ++ .show_options = ovl_show_options, ++}; ++ ++enum { ++ OPT_LOWERDIR, ++ OPT_UPPERDIR, ++ OPT_WORKDIR, ++ OPT_ERR, ++}; ++ ++static const match_table_t ovl_tokens = { ++ {OPT_LOWERDIR, "lowerdir=%s"}, ++ {OPT_UPPERDIR, "upperdir=%s"}, ++ {OPT_WORKDIR, "workdir=%s"}, ++ {OPT_ERR, NULL} ++}; ++ ++static int ovl_parse_opt(char *opt, struct ovl_config *config) ++{ ++ char *p; ++ ++ config->upperdir = NULL; ++ config->lowerdir = NULL; ++ config->workdir = NULL; ++ ++ while ((p = strsep(&opt, ",")) != NULL) { ++ int token; ++ substring_t args[MAX_OPT_ARGS]; ++ ++ if (!*p) ++ continue; ++ ++ token = match_token(p, ovl_tokens, args); ++ switch (token) { ++ case OPT_UPPERDIR: ++ kfree(config->upperdir); ++ config->upperdir = match_strdup(&args[0]); ++ if (!config->upperdir) ++ return -ENOMEM; ++ break; ++ ++ case OPT_LOWERDIR: ++ kfree(config->lowerdir); ++ config->lowerdir = match_strdup(&args[0]); ++ if (!config->lowerdir) ++ return -ENOMEM; ++ break; ++ ++ case OPT_WORKDIR: ++ kfree(config->workdir); ++ config->workdir = match_strdup(&args[0]); ++ if (!config->workdir) ++ return -ENOMEM; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ } ++ return 0; ++} ++ ++#define OVL_WORKDIR_NAME "work" ++ ++static struct dentry *ovl_workdir_create(struct vfsmount *mnt, ++ struct dentry *dentry) ++{ ++ struct inode *dir = dentry->d_inode; ++ struct dentry *work; ++ int err; ++ bool retried = false; ++ ++ err = mnt_want_write(mnt); ++ if (err) ++ return ERR_PTR(err); ++ ++ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); ++retry: ++ work = lookup_one_len(OVL_WORKDIR_NAME, dentry, ++ strlen(OVL_WORKDIR_NAME)); ++ ++ if (!IS_ERR(work)) { ++ struct kstat stat = { ++ .mode = S_IFDIR | 0, ++ }; ++ ++ if (work->d_inode) { ++ err = -EEXIST; ++ if (retried) ++ goto out_dput; ++ ++ retried = true; ++ ovl_cleanup(dir, work); ++ dput(work); ++ goto retry; ++ } ++ ++ err = ovl_create_real(dir, work, &stat, NULL, NULL, true); ++ if (err) ++ goto out_dput; ++ } ++out_unlock: ++ mutex_unlock(&dir->i_mutex); ++ mnt_drop_write(mnt); ++ ++ return work; ++ ++out_dput: ++ dput(work); ++ work = ERR_PTR(err); ++ goto out_unlock; ++} ++ ++static int ovl_mount_dir(const char *name, struct path *path) ++{ ++ int err; ++ ++ err = kern_path(name, LOOKUP_FOLLOW, path); ++ if (err) { ++ pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); ++ err = -EINVAL; ++ } ++ return err; ++} ++ ++static int ovl_fill_super(struct super_block *sb, void *data, int silent) ++{ ++ struct path lowerpath; ++ struct path upperpath; ++ struct path workpath; ++ struct inode *root_inode; ++ struct dentry *root_dentry; ++ struct ovl_entry *oe; ++ struct ovl_fs *ufs; ++ struct kstatfs statfs; ++ int err; ++ ++ err = -ENOMEM; ++ ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL); ++ if (!ufs) ++ goto out; ++ ++ err = ovl_parse_opt((char *) data, &ufs->config); ++ if (err) ++ goto out_free_ufs; ++ ++ /* FIXME: workdir is not needed for a R/O mount */ ++ err = -EINVAL; ++ if (!ufs->config.upperdir || !ufs->config.lowerdir || ++ !ufs->config.workdir) { ++ pr_err("overlayfs: missing upperdir 
or lowerdir or workdir\n"); ++ goto out_free_config; ++ } ++ ++ oe = ovl_alloc_entry(); ++ if (oe == NULL) ++ goto out_free_config; ++ ++ err = ovl_mount_dir(ufs->config.upperdir, &upperpath); ++ if (err) ++ goto out_free_oe; ++ ++ err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); ++ if (err) ++ goto out_put_upperpath; ++ ++ err = ovl_mount_dir(ufs->config.workdir, &workpath); ++ if (err) ++ goto out_put_lowerpath; ++ ++ err = -EINVAL; ++ if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || ++ !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || ++ !S_ISDIR(workpath.dentry->d_inode->i_mode)) { ++ pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); ++ goto out_put_workpath; ++ } ++ ++ if (upperpath.mnt != workpath.mnt) { ++ pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); ++ goto out_put_workpath; ++ } ++ if (upperpath.dentry == workpath.dentry || ++ d_ancestor(upperpath.dentry, workpath.dentry) || ++ d_ancestor(workpath.dentry, upperpath.dentry)) { ++ pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); ++ goto out_put_workpath; ++ } ++ ++ err = vfs_statfs(&lowerpath, &statfs); ++ if (err) { ++ pr_err("overlayfs: statfs failed on lowerpath\n"); ++ goto out_put_workpath; ++ } ++ ufs->lower_namelen = statfs.f_namelen; ++ ++ sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, ++ lowerpath.mnt->mnt_sb->s_stack_depth) + 1; ++ ++ err = -EINVAL; ++ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { ++ pr_err("overlayfs: maximum fs stacking depth exceeded\n"); ++ goto out_put_lowerpath; ++ } ++ ++ ++ ufs->upper_mnt = clone_private_mount(&upperpath); ++ err = PTR_ERR(ufs->upper_mnt); ++ if (IS_ERR(ufs->upper_mnt)) { ++ pr_err("overlayfs: failed to clone upperpath\n"); ++ goto out_put_workpath; ++ } ++ ++ ufs->lower_mnt = clone_private_mount(&lowerpath); ++ err = PTR_ERR(ufs->lower_mnt); ++ if (IS_ERR(ufs->lower_mnt)) { ++ pr_err("overlayfs: failed to clone lowerpath\n"); ++ goto out_put_upper_mnt; ++ } ++ ++ ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); ++ err = PTR_ERR(ufs->workdir); ++ if (IS_ERR(ufs->workdir)) { ++ pr_err("overlayfs: failed to create directory %s/%s\n", ++ ufs->config.workdir, OVL_WORKDIR_NAME); ++ goto out_put_lower_mnt; ++ } ++ ++ /* ++ * Make lower_mnt R/O. That way fchmod/fchown on lower file ++ * will fail instead of modifying lower fs. 
++ */ ++ ufs->lower_mnt->mnt_flags |= MNT_READONLY; ++ ++ /* If the upper fs is r/o, we mark overlayfs r/o too */ ++ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) ++ sb->s_flags |= MS_RDONLY; ++ ++ err = -ENOMEM; ++ root_inode = ovl_new_inode(sb, S_IFDIR, oe); ++ if (!root_inode) ++ goto out_put_workdir; ++ ++ root_dentry = d_make_root(root_inode); ++ if (!root_dentry) ++ goto out_put_workdir; ++ ++ mntput(upperpath.mnt); ++ mntput(lowerpath.mnt); ++ path_put(&workpath); ++ ++ oe->__upperdentry = dget(upperpath.dentry); ++ oe->lowerdentry = lowerpath.dentry; ++ ++ root_dentry->d_fsdata = oe; ++ root_dentry->d_op = &ovl_dentry_operations; ++ ++ sb->s_magic = OVERLAYFS_SUPER_MAGIC; ++ sb->s_op = &ovl_super_operations; ++ sb->s_root = root_dentry; ++ sb->s_fs_info = ufs; ++ ++ return 0; ++ ++out_put_workdir: ++ dput(ufs->workdir); ++out_put_lower_mnt: ++ mntput(ufs->lower_mnt); ++out_put_upper_mnt: ++ mntput(ufs->upper_mnt); ++out_put_workpath: ++ path_put(&workpath); ++out_put_lowerpath: ++ path_put(&lowerpath); ++out_put_upperpath: ++ path_put(&upperpath); ++out_free_oe: ++ kfree(oe); ++out_free_config: ++ kfree(ufs->config.lowerdir); ++ kfree(ufs->config.upperdir); ++ kfree(ufs->config.workdir); ++out_free_ufs: ++ kfree(ufs); ++out: ++ return err; ++} ++ ++static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, ++ const char *dev_name, void *raw_data) ++{ ++ return mount_nodev(fs_type, flags, raw_data, ovl_fill_super); ++} ++ ++static struct file_system_type ovl_fs_type = { ++ .owner = THIS_MODULE, ++ .name = "overlay", ++ .mount = ovl_mount, ++ .kill_sb = kill_anon_super, ++}; ++MODULE_ALIAS_FS("overlay"); ++ ++static int __init ovl_init(void) ++{ ++ return register_filesystem(&ovl_fs_type); ++} ++ ++static void __exit ovl_exit(void) ++{ ++ unregister_filesystem(&ovl_fs_type); ++} ++ ++module_init(ovl_init); ++module_exit(ovl_exit); +diff -urpN a/fs/splice.c b/fs/splice.c +--- a/fs/splice.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/fs/splice.c 2016-12-21 21:06:34.014677298 +0000 +@@ -1351,6 +1351,7 @@ long do_splice_direct(struct file *in, l + + return ret; + } ++EXPORT_SYMBOL(do_splice_direct); + + static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, + struct pipe_inode_info *opipe, +diff -urpN a/include/linux/fs.h b/include/linux/fs.h +--- a/include/linux/fs.h 2016-11-20 01:17:41.000000000 +0000 ++++ b/include/linux/fs.h 2016-12-21 21:07:23.538678372 +0000 +@@ -225,6 +225,20 @@ typedef void (dio_iodone_t)(struct kiocb + #define ATTR_TIMES_SET (1 << 16) + + /* ++ * Whiteout is represented by a char device. The following constants define the ++ * mode and device number to use. ++ */ ++#define WHITEOUT_MODE 0 ++#define WHITEOUT_DEV 0 ++ ++/* ++ * Whiteout is represented by a char device. The following constants define the ++ * mode and device number to use. ++ */ ++#define WHITEOUT_MODE 0 ++#define WHITEOUT_DEV 0 ++ ++/* + * This is the Inode Attributes structure, used for notify_change(). It + * uses the above definitions as flags, to know which values have changed. + * Also, in this manner, a Filesystem can look at only the values it cares +@@ -262,7 +276,13 @@ struct iattr { + */ + #define FILESYSTEM_MAX_STACK_DEPTH 2 + +-/** ++/* ++ * Maximum number of layers of fs stack. 
Needs to be limited to ++ * prevent kernel stack overflow ++ */ ++#define FILESYSTEM_MAX_STACK_DEPTH 2 ++ ++/** + * enum positive_aop_returns - aop return codes with specific semantics + * + * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has +@@ -272,7 +292,7 @@ struct iattr { + * be a candidate for writeback again in the near + * future. Other callers must be careful to unlock + * the page if they get this return. Returned by +- * writepage(); ++ * writepage(); + * + * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has + * unlocked it and the page might have been truncated. +@@ -806,10 +826,10 @@ static inline struct file *get_file(stru + + #define MAX_NON_LFS ((1UL<<31) - 1) + +-/* Page cache limit. The filesystems should put that into their s_maxbytes +- limits, otherwise bad things can happen in VM. */ ++/* Page cache limit. The filesystems should put that into their s_maxbytes ++ limits, otherwise bad things can happen in VM. */ + #if BITS_PER_LONG==32 +-#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) ++#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) + #elif BITS_PER_LONG==64 + #define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) + #endif +@@ -1401,6 +1421,7 @@ extern int vfs_link(struct dentry *, str + extern int vfs_rmdir(struct inode *, struct dentry *); + extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); + extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); ++extern int vfs_whiteout(struct inode *, struct dentry *); + + /* + * VFS dentry helper functions. +@@ -1531,6 +1552,7 @@ struct inode_operations { + umode_t create_mode, int *opened); + int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*set_acl)(struct inode *, struct posix_acl *, int); ++ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); + } ____cacheline_aligned; + + ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, +@@ -1628,6 +1650,9 @@ struct super_operations { + #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) + #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) + ++#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ ++ (inode)->i_rdev == WHITEOUT_DEV) ++ + /* + * Inode state bits. 
Protected by inode->i_lock + * +@@ -1761,7 +1786,7 @@ int sync_inode_metadata(struct inode *in + struct file_system_type { + const char *name; + int fs_flags; +-#define FS_REQUIRES_DEV 1 ++#define FS_REQUIRES_DEV 1 + #define FS_BINARY_MOUNTDATA 2 + #define FS_HAS_SUBTYPE 4 + #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ +@@ -2042,6 +2067,7 @@ extern struct file *file_open_name(struc + extern struct file *filp_open(const char *, int, umode_t); + extern struct file *file_open_root(struct dentry *, struct vfsmount *, + const char *, int, umode_t); ++extern int vfs_open(const struct path *, struct file *, const struct cred *); + extern struct file * dentry_open(const struct path *, int, const struct cred *); + extern int filp_close(struct file *, fl_owner_t id); + +@@ -2255,7 +2281,9 @@ extern sector_t bmap(struct inode *, sec + #endif + extern int notify_change(struct dentry *, struct iattr *, struct inode **); + extern int inode_permission(struct inode *, int); ++extern int __inode_permission(struct inode *, int); + extern int generic_permission(struct inode *, int); ++extern int __check_sticky(struct inode *dir, struct inode *inode); + + static inline bool execute_ok(struct inode *inode) + { +@@ -2347,7 +2375,7 @@ extern int do_pipe_flags(int *, int); + extern int kernel_read(struct file *, loff_t, char *, unsigned long); + extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t); + extern struct file * open_exec(const char *); +- ++ + /* fs/dcache.c -- generic fs support functions */ + extern int is_subdir(struct dentry *, struct dentry *); + extern int path_is_under(struct path *, struct path *); +@@ -2453,6 +2481,9 @@ extern ssize_t iter_file_splice_write(st + struct file *, loff_t *, size_t, unsigned int); + extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, + struct file *out, loff_t *, size_t len, unsigned int flags); ++extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, ++ loff_t *opos, size_t len, unsigned int flags); ++ + + extern void + file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); +@@ -2739,6 +2770,14 @@ static inline int is_sxid(umode_t mode) + return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); + } + ++static inline int check_sticky(struct inode *dir, struct inode *inode) ++{ ++ if (!(dir->i_mode & S_ISVTX)) ++ return 0; ++ ++ return __check_sticky(dir, inode); ++} ++ + static inline void inode_has_no_xattr(struct inode *inode) + { + if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) +diff -urpN a/include/linux/mount.h b/include/linux/mount.h +--- a/include/linux/mount.h 2016-11-20 01:17:41.000000000 +0000 ++++ b/include/linux/mount.h 2016-12-21 21:06:34.014677298 +0000 +@@ -81,6 +81,9 @@ extern void mnt_pin(struct vfsmount *mnt + extern void mnt_unpin(struct vfsmount *mnt); + extern int __mnt_is_readonly(struct vfsmount *mnt); + ++struct path; ++extern struct vfsmount *clone_private_mount(struct path *path); ++ + struct file_system_type; + extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, + int flags, const char *name, +diff -urpN a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h +--- a/include/uapi/linux/fs.h 2016-11-20 01:17:41.000000000 +0000 ++++ b/include/uapi/linux/fs.h 2016-12-21 21:06:34.014677298 +0000 +@@ -37,6 +37,7 @@ + + #define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */ + #define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ ++#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */ + + 
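RENAME_WHITEOUT above asks the filesystem to leave a whiteout behind when the source is renamed away, and the WHITEOUT_MODE/WHITEOUT_DEV constants added to fs.h earlier in this patch define how a whiteout is stored: a character device with device number 0. A hypothetical user-space check for that on-disk representation in an upper layer (the path is invented):

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct stat st;

        if (lstat("/upper/deleted-name", &st) != 0) {
            perror("lstat");
            return 1;
        }
        /* Mirrors IS_WHITEOUT(): a char device whose st_rdev equals
         * WHITEOUT_DEV (0). */
        if (S_ISCHR(st.st_mode) && st.st_rdev == 0)
            printf("overlayfs whiteout\n");
        return 0;
    }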
struct fstrim_range { + __u64 start; +diff -urpN a/mm/shmem.c b/mm/shmem.c +--- a/mm/shmem.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/mm/shmem.c 2016-12-21 21:06:34.014677298 +0000 +@@ -2050,20 +2050,82 @@ static int shmem_rmdir(struct inode *dir + return shmem_unlink(dir, dentry); + } + ++static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) ++{ ++ bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); ++ bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); ++ ++ if (old_dir != new_dir && old_is_dir != new_is_dir) { ++ if (old_is_dir) { ++ drop_nlink(old_dir); ++ inc_nlink(new_dir); ++ } else { ++ drop_nlink(new_dir); ++ inc_nlink(old_dir); ++ } ++ } ++ old_dir->i_ctime = old_dir->i_mtime = ++ new_dir->i_ctime = new_dir->i_mtime = ++ old_dentry->d_inode->i_ctime = ++ new_dentry->d_inode->i_ctime = CURRENT_TIME; ++ ++ return 0; ++} ++ ++static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) ++{ ++ struct dentry *whiteout; ++ int error; ++ ++ whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); ++ if (!whiteout) ++ return -ENOMEM; ++ ++ error = shmem_mknod(old_dir, whiteout, ++ S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); ++ dput(whiteout); ++ if (error) ++ return error; ++ ++ /* ++ * Cheat and hash the whiteout while the old dentry is still in ++ * place, instead of playing games with FS_RENAME_DOES_D_MOVE. ++ * ++ * d_lookup() will consistently find one of them at this point, ++ * not sure which one, but that isn't even important. ++ */ ++ d_rehash(whiteout); ++ return 0; ++} ++ + /* + * The VFS layer already does all the dentry stuff for rename, + * we just have to decrement the usage count for the target if + * it exists so that the VFS layer correctly free's it when it + * gets overwritten. 
+ */ +-static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) ++static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) + { + struct inode *inode = old_dentry->d_inode; + int they_are_dirs = S_ISDIR(inode->i_mode); + ++ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) ++ return -EINVAL; ++ ++ if (flags & RENAME_EXCHANGE) ++ return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); ++ + if (!simple_empty(new_dentry)) + return -ENOTEMPTY; + ++ if (flags & RENAME_WHITEOUT) { ++ int error; ++ ++ error = shmem_whiteout(old_dir, old_dentry); ++ if (error) ++ return error; ++ } ++ + if (new_dentry->d_inode) { + (void) shmem_unlink(new_dir, new_dentry); + if (they_are_dirs) { +@@ -2083,6 +2145,11 @@ static int shmem_rename(struct inode *ol + return 0; + } + ++static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) ++{ ++ return shmem_rename2(old_dir, old_dentry, new_dir, new_dentry, 0); ++} ++ + static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) + { + int error; +@@ -2746,6 +2813,7 @@ static const struct inode_operations shm + .rmdir = shmem_rmdir, + .mknod = shmem_mknod, + .rename = shmem_rename, ++ .rename2 = shmem_rename2, + .tmpfile = shmem_tmpfile, + #endif + #ifdef CONFIG_TMPFS_XATTR diff --git a/packages/base/any/kernels/3.16-lts/patches/series b/packages/base/any/kernels/3.16-lts/patches/series new file mode 100644 index 00000000..e72c4459 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/series @@ -0,0 +1,16 @@ +overlayfs.patch +driver-at24-fix-odd-length-two-byte-access.patch +driver-hwmon-max6620.patch +driver-hwmon-max6620-fix-rpm-calc.patch +driver-hwmon-max6620-update.patch +driver-hwmon-pmbus-dni_dps460.patch +driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch +driver-i2c-bus-intel-ismt-add-delay-param.patch +driver-support-sff-8436-eeprom.patch +driver-support-sff-8436-eeprom-update.patch +driver-hwmon-pmbus-add-dps460-support.patch +driver-hwmon-pmbus-ucd9200-mlnx.patch +driver-arista-piix4-mux-patch.patch +driver-igb-version-5.3.54.patch +driver-support-intel-igb-bcm5461X-phy.patch +driver-i2c-bus-intel-ismt-enable-param.patch From 89eab6b3c5ec827ad115cc88d2619d2228704b78 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 22:44:03 +0000 Subject: [PATCH 179/255] 3.16 LTS package for x86_64. 
--- .../kernel-3.16-lts-x86-64-all/Makefile | 1 + .../kernel-3.16-lts-x86-64-all/PKG.yml | 18 ++++++++++++++++++ .../builds/.gitignore | 2 ++ .../builds/Makefile | 19 +++++++++++++++++++ 4 files changed, 40 insertions(+) create mode 100644 packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/Makefile create mode 100644 packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml create mode 100644 packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore create mode 100644 packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml new file mode 100644 index 00000000..47496c0e --- /dev/null +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml @@ -0,0 +1,18 @@ + +common: + arch: amd64 + version: 1.0.0 + copyright: Copyright 2013, 2014, 2015 Big Switch Networks + maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com + +packages: + - name: onl-kernel-3.16-lts-x86-64-all + version: 1.0.0 + summary: Open Network Linux Kernel 3.16 LTS for X86_64 Platforms. + + files: + builds/kernel-3.16* : $$PKG_INSTALL/ + builds/linux-*mbuild : $$PKG_INSTALL/mbuilds + + changelog: Change changes changes., diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore new file mode 100644 index 00000000..ef51fa80 --- /dev/null +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore @@ -0,0 +1,2 @@ +linux-* +kernel-* diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile new file mode 100644 index 00000000..e198a046 --- /dev/null +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile @@ -0,0 +1,19 @@ +# -*- Makefile -*- +############################################################ +# +# +# Copyright 2013, 2014 BigSwitch Networks, Inc. +# +# +# +# +############################################################ +THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) + +include $(ONL)/make/config.mk + +kernel: + $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) + +clean: + rm -rf linux-3.16* kernel-3.16* From 44a1a6393629c46a9954df8ad9776ea8268b74e5 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 22:44:45 +0000 Subject: [PATCH 180/255] Add 3.16 LTS as a kernel option. 
--- .../src/lib/platform-config-defaults-x86-64.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml b/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml index 6e3c841a..7588a9e2 100644 --- a/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml +++ b/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml @@ -35,6 +35,10 @@ default: =: kernel-3.16+deb8-x86_64-all package: onl-kernel-3.16+deb8-x86-64-all:amd64 + kernel-3.16-lts: &kernel-3-16-lts + =: kernel-3.16-lts-x86_64-all + package: onl-kernel-3.16-lts-x86-64-all:amd64 + # pick one of the above kernels kernel: <<: *kernel-3-2 From 5a6205527657cffeb9b3d47545271fc03f77bf0b Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 22:45:08 +0000 Subject: [PATCH 181/255] Add 3.16 LTS. --- packages/base/amd64/upgrade/builds/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/base/amd64/upgrade/builds/Makefile b/packages/base/amd64/upgrade/builds/Makefile index f514ac8c..890edcf7 100644 --- a/packages/base/amd64/upgrade/builds/Makefile +++ b/packages/base/amd64/upgrade/builds/Makefile @@ -5,6 +5,7 @@ KERNELS := $(shell $(ONLPM) --find-file onl-kernel-3.9.6-x86-64-all:amd64 ker $(shell $(ONLPM) --find-file onl-kernel-3.2-deb7-x86-64-all:amd64 kernel-3.2-deb7-x86_64-all) \ $(shell $(ONLPM) --find-file onl-kernel-3.18-x86-64-all:amd64 kernel-3.18-x86_64-all) \ $(shell $(ONLPM) --find-file onl-kernel-3.16+deb8-x86-64-all:amd64 kernel-3.16+deb8-x86_64-all) \ + $(shell $(ONLPM) --find-file onl-kernel-3.16-lts-x86-64-all:amd64 kernel-3.16-lts-x86_64-all) \ # Loader initrd @@ -16,5 +17,3 @@ all: cp $(KERNELS) files cp $(INITRD) files cp $(MANIFEST) files - - From 500cfa094f05eac76e481bfe4f8cfb6c3b248303 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Wed, 21 Dec 2016 22:46:51 +0000 Subject: [PATCH 182/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 87021ef3..84454401 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 87021ef339b28f8556b9013722752ca0520b17f9 +Subproject commit 84454401a4d88f0d1b37e72ce670c2bbe75511b2 From 7af648fbf02da158e9f9fc834f81dadfe62ba554 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 22 Dec 2016 16:39:49 +0000 Subject: [PATCH 183/255] This version has been superceded by the standard 3.16 LTS version. 
--- .../3.16+deb8/configs/x86_64-all/.gitignore | 3 - .../3.16+deb8/configs/x86_64-all/Makefile | 42 - .../configs/x86_64-all/x86_64-all.config | 3567 -- .../base/any/kernels/3.16+deb8/kconfig.mk | 33 - .../3.16+deb8/patches/3.16-fs-overlayfs.patch | 4278 -- .../kernels/3.16+deb8/patches/changelog.patch | 18 - .../driver-arista-piix4-mux-patch.patch | 146 - ...-at24-fix-odd-length-two-byte-access.patch | 34 - .../driver-hwmon-max6620-fix-rpm-calc.patch | 196 - .../patches/driver-hwmon-max6620-update.patch | 113 - .../patches/driver-hwmon-max6620.patch | 753 - ...river-hwmon-pmbus-add-dps460-support.patch | 78 - ...n-pmbus-dni_dps460-update-pmbus-core.patch | 96 - .../driver-hwmon-pmbus-dni_dps460.patch | 304 - .../driver-hwmon-pmbus-ucd9200-mlnx.patch | 89 - ...r-i2c-bus-intel-ismt-add-delay-param.patch | 57 - ...iver-i2c-bus-intel-ismt-enable-param.patch | 27 - .../patches/driver-igb-version-5.3.54.patch | 48795 ---------------- ...river-support-intel-igb-bcm5461X-phy.patch | 242 - ...river-support-sff-8436-eeprom-update.patch | 141 - .../driver-support-sff-8436-eeprom.patch | 1086 - .../base/any/kernels/3.16+deb8/patches/series | 16 - 22 files changed, 60114 deletions(-) delete mode 100644 packages/base/any/kernels/3.16+deb8/configs/x86_64-all/.gitignore delete mode 100644 packages/base/any/kernels/3.16+deb8/configs/x86_64-all/Makefile delete mode 100644 packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config delete mode 100644 packages/base/any/kernels/3.16+deb8/kconfig.mk delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/3.16-fs-overlayfs.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/changelog.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-arista-piix4-mux-patch.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-fix-rpm-calc.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-update.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-add-dps460-support.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom-update.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom.patch delete mode 100644 packages/base/any/kernels/3.16+deb8/patches/series diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/.gitignore b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/.gitignore deleted file mode 100644 index 5dbdc5b9..00000000 --- 
a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -kernel-3.16* -linux-* - diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/Makefile b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/Makefile deleted file mode 100644 index dae79278..00000000 --- a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -############################################################ -# -# -# Copyright 2015 Big Switch Networks, Inc. -# -# Licensed under the Eclipse Public License, Version 1.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.eclipse.org/legal/epl-v10.html -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the -# License. -# -# -############################################################ -# -# Default 3.18.25 configuration for x86_64 platforms. -# -############################################################ -THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) -include $(ONL)/make/config.mk - -export ARCH := x86_64 -ifndef K_TARGET_DIR -K_TARGET_DIR := $(THIS_DIR) -endif - -include ../../kconfig.mk -K_CONFIG := x86_64-all.config -K_BUILD_TARGET := bzImage -K_COPY_SRC := arch/x86/boot/bzImage -ifndef K_COPY_DST -K_COPY_DST := kernel-3.16+deb8-x86_64-all -endif - -include $(ONL)/make/kbuild.mk - diff --git a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config deleted file mode 100644 index 94506877..00000000 --- a/packages/base/any/kernels/3.16+deb8/configs/x86_64-all/x86_64-all.config +++ /dev/null @@ -1,3567 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86_64 3.16.7-ckt25 Kernel Configuration -# -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_MMU=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_X86_64_SMP=y -CONFIG_X86_HT=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="-OpenNetworkLinux" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -# CONFIG_KERNEL_GZIP is not set -CONFIG_KERNEL_BZIP2=y -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -CONFIG_FHANDLE=y -CONFIG_USELIB=y -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_IRQ_DOMAIN=y -# CONFIG_IRQ_DOMAIN_DEBUG is not set -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_PREEMPT_RCU is not set -CONFIG_RCU_STALL_COMMON=y -# CONFIG_RCU_USER_QS is not set -CONFIG_RCU_FANOUT=64 -CONFIG_RCU_FANOUT_LEAF=16 -# CONFIG_RCU_FANOUT_EXACT is not set 
-CONFIG_RCU_FAST_NO_HZ=y -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_RCU_NOCB_CPU is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=17 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_MEMCG=y -# CONFIG_MEMCG_DISABLED is not set -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_MEMCG_KMEM=y -# CONFIG_CGROUP_HUGETLB is not set -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_CFS_BANDWIDTH is not set -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -# CONFIG_CHECKPOINT_RESTORE is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -# CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_EXPERT=y -CONFIG_UID16=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_PCI_QUIRKS=y -CONFIG_EMBEDDED=y -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SLOB is not set -# CONFIG_SYSTEM_TRUSTED_KEYRING is not set -# CONFIG_PROFILING is not set -CONFIG_TRACEPOINTS=y -CONFIG_HAVE_OPROFILE=y -CONFIG_OPROFILE_NMI_TIMER=y -# CONFIG_KPROBES is not set -# CONFIG_JUMP_LABEL is not set -# CONFIG_UPROBES is not set -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR is not set -CONFIG_CC_STACKPROTECTOR_NONE=y -# CONFIG_CC_STACKPROTECTOR_REGULAR is not set -# CONFIG_CC_STACKPROTECTOR_STRONG is not set -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y 
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -# CONFIG_MODULE_SIG is not set -CONFIG_STOP_MACHINE=y -CONFIG_BLOCK=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_THROTTLING is not set -# CONFIG_BLK_CMDLINE_PARSER is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_CUMANA=y -CONFIG_ACORN_PARTITION_EESOX=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_ADFS=y -CONFIG_ACORN_PARTITION_POWERTEC=y -CONFIG_ACORN_PARTITION_RISCIX=y -# CONFIG_AIX_PARTITION is not set -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -CONFIG_BLOCK_COMPAT=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUE_RWLOCK=y -CONFIG_QUEUE_RWLOCK=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_ZONE_DMA=y -CONFIG_SMP=y -CONFIG_X86_MPPARSE=y -# CONFIG_X86_EXTENDED_PLATFORM is not set -# CONFIG_X86_INTEL_LPSS is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -# CONFIG_HYPERVISOR_GUEST is not set -CONFIG_NO_BOOTMEM=y -CONFIG_MEMTEST=y -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -# CONFIG_PROCESSOR_SELECT is not set -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -CONFIG_GART_IOMMU=y -CONFIG_CALGARY_IOMMU=y -CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS=512 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -CONFIG_X86_UP_APIC_MSI=y -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y 
-CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y -# CONFIG_X86_16BIT is not set -# CONFIG_I8K is not set -# CONFIG_MICROCODE is not set -# CONFIG_MICROCODE_INTEL_EARLY is not set -# CONFIG_MICROCODE_AMD_EARLY is not set -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DIRECT_GBPAGES=y -# CONFIG_NUMA is not set -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_NEED_BOUNCE_POOL=y -CONFIG_VIRT_TO_BUS=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=y -CONFIG_TRANSPARENT_HUGEPAGE=y -# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set -CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y -# CONFIG_CLEANCACHE is not set -# CONFIG_FRONTSWAP is not set -# CONFIG_CMA is not set -# CONFIG_ZBUD is not set -# CONFIG_ZSMALLOC is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set -CONFIG_X86_RESERVE_LOW=64 -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_ARCH_RANDOM=y -CONFIG_X86_SMAP=y -# CONFIG_EFI is not set -CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -# CONFIG_RANDOMIZE_BASE is not set -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_HOTPLUG_CPU=y -# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set -# CONFIG_DEBUG_HOTPLUG_CPU0 is not set -# CONFIG_COMPAT_VDSO is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y - -# -# Power management and ACPI options -# -# CONFIG_SUSPEND is not set -# CONFIG_HIBERNATION is not set -# CONFIG_PM_RUNTIME is not set -CONFIG_ACPI=y -# CONFIG_ACPI_PROCFS_POWER is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_FAN=y -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_HOTPLUG_CPU=y -# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set -CONFIG_ACPI_THERMAL=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set -# CONFIG_ACPI_DEBUG is not set -# CONFIG_ACPI_PCI_SLOT is not set -CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=y -# CONFIG_ACPI_HOTPLUG_MEMORY is not set -# CONFIG_ACPI_SBS is not set -# CONFIG_ACPI_HED is not set -CONFIG_ACPI_CUSTOM_METHOD=y -# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not 
set -# CONFIG_ACPI_APEI is not set -# CONFIG_ACPI_EXTLOG is not set -# CONFIG_SFI is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_STAT_DETAILS is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y - -# -# x86 CPU frequency scaling drivers -# -# CONFIG_X86_INTEL_PSTATE is not set -# CONFIG_X86_PCC_CPUFREQ is not set -# CONFIG_X86_ACPI_CPUFREQ is not set -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_P4_CLOCKMOD=y - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=y - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set -CONFIG_INTEL_IDLE=y - -# -# Memory power savings -# -CONFIG_I7300_IDLE_IOAT_CHANNEL=y -CONFIG_I7300_IDLE=y - -# -# Bus options (PCI etc.) -# -CONFIG_PCI=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -# CONFIG_PCIE_ECRC is not set -CONFIG_PCIEAER_INJECT=y -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCI_MSI=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -# CONFIG_PCI_STUB is not set -CONFIG_HT_IRQ=y -CONFIG_PCI_ATS=y -CONFIG_PCI_IOV=y -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -CONFIG_PCI_IOAPIC=y -CONFIG_PCI_LABEL=y - -# -# PCI host controller drivers -# -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=y -CONFIG_PCMCIA=y -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -# CONFIG_YENTA is not set -CONFIG_PD6729=y -CONFIG_I82092=y -CONFIG_PCCARD_NONSTATIC=y -CONFIG_HOTPLUG_PCI=y -# CONFIG_HOTPLUG_PCI_ACPI is not set -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_CPCI_ZT5550=y -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=y -CONFIG_HOTPLUG_PCI_SHPC=y -# CONFIG_RAPIDIO is not set -# CONFIG_X86_SYSFB is not set - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -# CONFIG_HAVE_AOUT is not set -CONFIG_BINFMT_MISC=y -CONFIG_COREDUMP=y -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=y -# CONFIG_X86_X32 is not set -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_KEYS_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y -CONFIG_IOSF_MBI=y -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -# CONFIG_PACKET_DIAG is not set -CONFIG_UNIX=y -# CONFIG_UNIX_DIAG is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -# CONFIG_XFRM_STATISTICS is not set -CONFIG_XFRM_IPCOMP=y -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -# 
CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -CONFIG_NET_IP_TUNNEL=y -# CONFIG_IP_MROUTE is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -CONFIG_INET_TUNNEL=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_LRO=y -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -# CONFIG_INET_UDP_DIAG is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=y -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=y -CONFIG_TCP_CONG_HTCP=y -CONFIG_TCP_CONG_HSTCP=y -CONFIG_TCP_CONG_HYBLA=y -CONFIG_TCP_CONG_VEGAS=y -CONFIG_TCP_CONG_SCALABLE=y -CONFIG_TCP_CONG_LP=y -CONFIG_TCP_CONG_VENO=y -CONFIG_TCP_CONG_YEAH=y -CONFIG_TCP_CONG_ILLINOIS=y -# CONFIG_DEFAULT_BIC is not set -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_HTCP is not set -# CONFIG_DEFAULT_HYBLA is not set -# CONFIG_DEFAULT_VEGAS is not set -# CONFIG_DEFAULT_VENO is not set -# CONFIG_DEFAULT_WESTWOOD is not set -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_INET6_XFRM_TUNNEL=y -CONFIG_INET6_TUNNEL=y -CONFIG_INET6_XFRM_MODE_TRANSPORT=y -CONFIG_INET6_XFRM_MODE_TUNNEL=y -CONFIG_INET6_XFRM_MODE_BEET=y -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y -# CONFIG_IPV6_VTI is not set -CONFIG_IPV6_SIT=y -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=y -# CONFIG_IPV6_GRE is not set -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_NETLINK=y -CONFIG_NETFILTER_NETLINK_ACCT=y -CONFIG_NETFILTER_NETLINK_QUEUE=y -CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -# CONFIG_NF_CONNTRACK_TIMEOUT is not set -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_BROADCAST=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NF_CT_NETLINK_TIMEOUT=y -CONFIG_NF_CT_NETLINK_HELPER=y -CONFIG_NETFILTER_NETLINK_QUEUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -CONFIG_NF_NAT_SIP=y -CONFIG_NF_NAT_TFTP=y -# CONFIG_NF_TABLES is not set -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=y -CONFIG_NETFILTER_XT_CONNMARK=y -CONFIG_NETFILTER_XT_SET=y - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=y 
-CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_CT=y -CONFIG_NETFILTER_XT_TARGET_DSCP=y -CONFIG_NETFILTER_XT_TARGET_HL=y -CONFIG_NETFILTER_XT_TARGET_HMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -# CONFIG_NETFILTER_XT_TARGET_LED is not set -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NETMAP=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_RATEEST=y -CONFIG_NETFILTER_XT_TARGET_REDIRECT=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_CGROUP=y -CONFIG_NETFILTER_XT_MATCH_CLUSTER=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_CPU=y -CONFIG_NETFILTER_XT_MATCH_DCCP=y -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ECN=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_HL=y -CONFIG_NETFILTER_XT_MATCH_IPCOMP=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_IPVS=y -CONFIG_NETFILTER_XT_MATCH_L2TP=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_NFACCT=y -CONFIG_NETFILTER_XT_MATCH_OSF=y -CONFIG_NETFILTER_XT_MATCH_OWNER=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_RATEEST=y -CONFIG_NETFILTER_XT_MATCH_REALM=y -CONFIG_NETFILTER_XT_MATCH_RECENT=y -CONFIG_NETFILTER_XT_MATCH_SCTP=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_IP_SET=y -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -# CONFIG_IP_SET_HASH_IPMARK is not set -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -# CONFIG_IP_SET_HASH_NETPORTNET is not set -CONFIG_IP_SET_HASH_NET=y -# CONFIG_IP_SET_HASH_NETNET is not set -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y -CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_LC=y -CONFIG_IP_VS_WLC=y -CONFIG_IP_VS_LBLC=y -CONFIG_IP_VS_LBLCR=y -CONFIG_IP_VS_DH=y -CONFIG_IP_VS_SH=y 
-CONFIG_IP_VS_SED=y -CONFIG_IP_VS_NQ=y - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS application helper -# -# CONFIG_IP_VS_FTP is not set -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=y - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_CONNTRACK_PROC_COMPAT=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -# CONFIG_IP_NF_MATCH_RPFILTER is not set -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -# CONFIG_IP_NF_TARGET_SYNPROXY is not set -# CONFIG_IP_NF_TARGET_ULOG is not set -CONFIG_NF_NAT_IPV4=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -# CONFIG_IP_NF_TARGET_NETMAP is not set -# CONFIG_IP_NF_TARGET_REDIRECT is not set -CONFIG_NF_NAT_SNMP_BASIC=y -CONFIG_NF_NAT_PROTO_GRE=y -CONFIG_NF_NAT_PPTP=y -CONFIG_NF_NAT_H323=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_TARGET_CLUSTERIP=y -CONFIG_IP_NF_TARGET_ECN=y -CONFIG_IP_NF_TARGET_TTL=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_AH=y -CONFIG_IP6_NF_MATCH_EUI64=y -CONFIG_IP6_NF_MATCH_FRAG=y -CONFIG_IP6_NF_MATCH_OPTS=y -CONFIG_IP6_NF_MATCH_HL=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_MATCH_MH=y -# CONFIG_IP6_NF_MATCH_RPFILTER is not set -CONFIG_IP6_NF_MATCH_RT=y -CONFIG_IP6_NF_TARGET_HL=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -# CONFIG_IP6_NF_TARGET_SYNPROXY is not set -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -# CONFIG_NF_NAT_IPV6 is not set -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_802_3=y -CONFIG_BRIDGE_EBT_AMONG=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_LIMIT=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_PKTTYPE=y -CONFIG_BRIDGE_EBT_STP=y -CONFIG_BRIDGE_EBT_VLAN=y -CONFIG_BRIDGE_EBT_ARPREPLY=y -CONFIG_BRIDGE_EBT_DNAT=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_REDIRECT=y -CONFIG_BRIDGE_EBT_SNAT=y -CONFIG_BRIDGE_EBT_LOG=y -# CONFIG_BRIDGE_EBT_ULOG is not set -CONFIG_BRIDGE_EBT_NFLOG=y -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=y -# CONFIG_SCTP_DBG_OBJCNT is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -CONFIG_STP=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -CONFIG_VLAN_8021Q=y -# CONFIG_VLAN_8021Q_GVRP is not set -# CONFIG_VLAN_8021Q_MVRP is not set -# CONFIG_DECNET is not set -CONFIG_LLC=y -CONFIG_LLC2=y -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_IEEE802154 is not set -# CONFIG_NET_SCHED is not set -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -# CONFIG_OPENVSWITCH is not set -# CONFIG_VSOCKETS is not set -CONFIG_NETLINK_MMAP=y -CONFIG_NETLINK_DIAG=y -# CONFIG_NET_MPLS_GSO is not set -# CONFIG_HSR is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -# CONFIG_CGROUP_NET_PRIO is not set -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -# CONFIG_BPF_JIT is not set 
-CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=y -CONFIG_NET_DROP_MONITOR=y -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -CONFIG_AF_RXRPC=y -# CONFIG_AF_RXRPC_DEBUG is not set -# CONFIG_RXKAD is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -# CONFIG_LIB80211 is not set - -# -# CFG80211 needs to be enabled for MAC80211 -# -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=y -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set -# CONFIG_NFC is not set -CONFIG_HAVE_BPF_JIT=y - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_DEVTMPFS=y -# CONFIG_DEVTMPFS_MOUNT is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_GENERIC_CPU_DEVICES is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -# CONFIG_DMA_SHARED_BUFFER is not set - -# -# Bus devices -# -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_MTD is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -# CONFIG_BLK_DEV_FD is not set -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -# CONFIG_BLK_CPQ_CISS_DA is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set -# CONFIG_BLK_DEV_DRBD is not set -CONFIG_BLK_DEV_NBD=y -# CONFIG_BLK_DEV_NVME is not set -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_OSD is not set -CONFIG_BLK_DEV_SX8=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=65536 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=y -# CONFIG_BLK_DEV_HD is not set -# CONFIG_BLK_DEV_RBD is not set -# CONFIG_BLK_DEV_RSXX is not set - -# -# Misc devices -# -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_AD525X_DPOT is not set -CONFIG_DUMMY_IRQ=y -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set -# CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1780 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -CONFIG_TI_DAC7512=y -# CONFIG_BMP085_I2C is not set -# CONFIG_BMP085_SPI is not set -# CONFIG_USB_SWITCH_FSA9480 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=y -CONFIG_EEPROM_AT25=y -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -CONFIG_EEPROM_93CX6=y -# CONFIG_EEPROM_93XX46 is not set -CONFIG_EEPROM_SFF_8436=y -CONFIG_CB710_CORE=y -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments 
shared transport line discipline -# -# CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module -# -# CONFIG_ALTERA_STAPL is not set -# CONFIG_VMWARE_VMCI is not set - -# -# Intel MIC Host Driver -# -# CONFIG_INTEL_MIC_HOST is not set - -# -# Intel MIC Card Driver -# -# CONFIG_INTEL_MIC_CARD is not set -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -# CONFIG_SCSI_TGT is not set -CONFIG_SCSI_NETLINK=y -# CONFIG_SCSI_PROC_FS is not set - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -# CONFIG_BLK_DEV_SR is not set -CONFIG_CHR_DEV_SG=y -# CONFIG_CHR_DEV_SCH is not set -# CONFIG_SCSI_MULTI_LUN is not set -# CONFIG_SCSI_CONSTANTS is not set -# CONFIG_SCSI_LOGGING is not set -# CONFIG_SCSI_SCAN_ASYNC is not set - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=y -CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=y -CONFIG_SCSI_SAS_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=y -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=y -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=y -CONFIG_ISCSI_BOOT_SYSFS=y -CONFIG_SCSI_CXGB3_ISCSI=y -CONFIG_SCSI_CXGB4_ISCSI=y -CONFIG_SCSI_BNX2_ISCSI=y -CONFIG_SCSI_BNX2X_FCOE=y -CONFIG_BE2ISCSI=y -CONFIG_BLK_DEV_3W_XXXX_RAID=y -CONFIG_SCSI_HPSA=y -CONFIG_SCSI_3W_9XXX=y -CONFIG_SCSI_3W_SAS=y -CONFIG_SCSI_ACARD=y -CONFIG_SCSI_AACRAID=y -CONFIG_SCSI_AIC7XXX=y -CONFIG_AIC7XXX_CMDS_PER_DEVICE=8 -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -CONFIG_AIC7XXX_DEBUG_ENABLE=y -CONFIG_AIC7XXX_DEBUG_MASK=0 -CONFIG_AIC7XXX_REG_PRETTY_PRINT=y -CONFIG_SCSI_AIC79XX=y -CONFIG_AIC79XX_CMDS_PER_DEVICE=32 -CONFIG_AIC79XX_RESET_DELAY_MS=15000 -CONFIG_AIC79XX_DEBUG_ENABLE=y -CONFIG_AIC79XX_DEBUG_MASK=0 -CONFIG_AIC79XX_REG_PRETTY_PRINT=y -CONFIG_SCSI_AIC94XX=y -# CONFIG_AIC94XX_DEBUG is not set -CONFIG_SCSI_MVSAS=y -# CONFIG_SCSI_MVSAS_DEBUG is not set -# CONFIG_SCSI_MVSAS_TASKLET is not set -CONFIG_SCSI_MVUMI=y -CONFIG_SCSI_DPT_I2O=y -CONFIG_SCSI_ADVANSYS=y -CONFIG_SCSI_ARCMSR=y -# CONFIG_SCSI_ESAS2R is not set -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=y -CONFIG_MEGARAID_MAILBOX=y -CONFIG_MEGARAID_LEGACY=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT2SAS=y -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS_LOGGING is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_UFSHCD is not set -CONFIG_SCSI_HPTIOP=y -CONFIG_SCSI_BUSLOGIC=y -# CONFIG_SCSI_FLASHPOINT is not set -CONFIG_VMWARE_PVSCSI=y -CONFIG_LIBFC=y -CONFIG_LIBFCOE=y -CONFIG_FCOE=y -CONFIG_FCOE_FNIC=y -CONFIG_SCSI_DMX3191D=y -CONFIG_SCSI_EATA=y -CONFIG_SCSI_EATA_TAGGED_QUEUE=y -CONFIG_SCSI_EATA_LINKED_COMMANDS=y -CONFIG_SCSI_EATA_MAX_TAGS=16 -CONFIG_SCSI_FUTURE_DOMAIN=y -CONFIG_SCSI_GDTH=y -CONFIG_SCSI_ISCI=y -CONFIG_SCSI_IPS=y -CONFIG_SCSI_INITIO=y -CONFIG_SCSI_INIA100=y -CONFIG_SCSI_STEX=y -CONFIG_SCSI_SYM53C8XX_2=y -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 -CONFIG_SCSI_SYM53C8XX_MMIO=y -CONFIG_SCSI_IPR=y -# CONFIG_SCSI_IPR_TRACE is not set -# CONFIG_SCSI_IPR_DUMP is not set -CONFIG_SCSI_QLOGIC_1280=y -CONFIG_SCSI_QLA_FC=y -CONFIG_SCSI_QLA_ISCSI=y -CONFIG_SCSI_LPFC=y -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -CONFIG_SCSI_DC395x=y -CONFIG_SCSI_DC390T=y -CONFIG_SCSI_DEBUG=y -CONFIG_SCSI_PMCRAID=y -CONFIG_SCSI_PM8001=y -# CONFIG_SCSI_SRP is not set 
-CONFIG_SCSI_BFA_FC=y -# CONFIG_SCSI_VIRTIO is not set -# CONFIG_SCSI_CHELSIO_FCOE is not set -CONFIG_SCSI_LOWLEVEL_PCMCIA=y -# CONFIG_PCMCIA_AHA152X is not set -# CONFIG_PCMCIA_FDOMAIN is not set -# CONFIG_PCMCIA_QLOGIC is not set -# CONFIG_PCMCIA_SYM53C500 is not set -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -CONFIG_SCSI_OSD_INITIATOR=y -CONFIG_SCSI_OSD_ULD=y -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=y -# CONFIG_SATA_INIC162X is not set -CONFIG_SATA_ACARD_AHCI=y -CONFIG_SATA_SIL24=y -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=y -CONFIG_SATA_QSTOR=y -CONFIG_SATA_SX4=y -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=y -CONFIG_SATA_MV=y -CONFIG_SATA_NV=y -CONFIG_SATA_PROMISE=y -CONFIG_SATA_SIL=y -CONFIG_SATA_SIS=y -CONFIG_SATA_SVW=y -CONFIG_SATA_ULI=y -CONFIG_SATA_VIA=y -CONFIG_SATA_VITESSE=y - -# -# PATA SFF controllers with BMDMA -# -CONFIG_PATA_ALI=y -CONFIG_PATA_AMD=y -CONFIG_PATA_ARTOP=y -CONFIG_PATA_ATIIXP=y -CONFIG_PATA_ATP867X=y -CONFIG_PATA_CMD64X=y -# CONFIG_PATA_CYPRESS is not set -CONFIG_PATA_EFAR=y -CONFIG_PATA_HPT366=y -CONFIG_PATA_HPT37X=y -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -CONFIG_PATA_IT8213=y -CONFIG_PATA_IT821X=y -CONFIG_PATA_JMICRON=y -CONFIG_PATA_MARVELL=y -CONFIG_PATA_NETCELL=y -CONFIG_PATA_NINJA32=y -CONFIG_PATA_NS87415=y -CONFIG_PATA_OLDPIIX=y -# CONFIG_PATA_OPTIDMA is not set -CONFIG_PATA_PDC2027X=y -CONFIG_PATA_PDC_OLD=y -# CONFIG_PATA_RADISYS is not set -CONFIG_PATA_RDC=y -CONFIG_PATA_SCH=y -CONFIG_PATA_SERVERWORKS=y -CONFIG_PATA_SIL680=y -CONFIG_PATA_SIS=y -CONFIG_PATA_TOSHIBA=y -CONFIG_PATA_TRIFLEX=y -CONFIG_PATA_VIA=y -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -CONFIG_PATA_MPIIX=y -CONFIG_PATA_NS87410=y -# CONFIG_PATA_OPTI is not set -CONFIG_PATA_PCMCIA=y -CONFIG_PATA_PLATFORM=y -CONFIG_PATA_RZ1000=y - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=y -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -# CONFIG_MD_AUTODETECT is not set -# CONFIG_MD_LINEAR is not set -# CONFIG_MD_RAID0 is not set -# CONFIG_MD_RAID1 is not set -# CONFIG_MD_RAID10 is not set -# CONFIG_MD_RAID456 is not set -# CONFIG_MD_MULTIPATH is not set -# CONFIG_MD_FAULTY is not set -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -CONFIG_DM_CRYPT=y -# CONFIG_DM_SNAPSHOT is not set -# CONFIG_DM_THIN_PROVISIONING is not set -# CONFIG_DM_CACHE is not set -# CONFIG_DM_ERA is not set -# CONFIG_DM_MIRROR is not set -# CONFIG_DM_RAID is not set -# CONFIG_DM_ZERO is not set -# CONFIG_DM_MULTIPATH is not set -# CONFIG_DM_DELAY is not set -# CONFIG_DM_UEVENT is not set -# CONFIG_DM_FLAKEY is not set -# CONFIG_DM_VERITY is not set -# CONFIG_DM_SWITCH is not set -# CONFIG_TARGET_CORE is not set -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=y -CONFIG_FIREWIRE_OHCI=y -CONFIG_FIREWIRE_SBP2=y -CONFIG_FIREWIRE_NET=y -CONFIG_FIREWIRE_NOSY=y -# CONFIG_I2O is not set -# CONFIG_MACINTOSH_DRIVERS is not set -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y -# CONFIG_BONDING is 
not set -CONFIG_DUMMY=y -# CONFIG_EQUALIZER is not set -# CONFIG_NET_FC is not set -# CONFIG_NET_TEAM is not set -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -# CONFIG_VXLAN is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -CONFIG_TUN=y -CONFIG_VETH=y -CONFIG_VIRTIO_NET=y -# CONFIG_NLMON is not set -# CONFIG_ARCNET is not set - -# -# CAIF transport drivers -# - -# -# Distributed Switch Architecture drivers -# -# CONFIG_NET_DSA_MV88E6XXX is not set -# CONFIG_NET_DSA_MV88E6060 is not set -# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set -# CONFIG_NET_DSA_MV88E6131 is not set -# CONFIG_NET_DSA_MV88E6123_61_65 is not set -CONFIG_ETHERNET=y -CONFIG_MDIO=y -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_XGENE is not set -CONFIG_NET_VENDOR_ARC=y -# CONFIG_NET_VENDOR_ATHEROS is not set -CONFIG_NET_VENDOR_BROADCOM=y -CONFIG_B44=y -CONFIG_B44_PCI_AUTOSELECT=y -CONFIG_B44_PCICORE_AUTOSELECT=y -CONFIG_B44_PCI=y -CONFIG_BNX2=y -CONFIG_CNIC=y -CONFIG_TIGON3=y -CONFIG_BNX2X=y -CONFIG_BNX2X_SRIOV=y -# CONFIG_NET_VENDOR_BROCADE is not set -# CONFIG_NET_CALXEDA_XGMAC is not set -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -CONFIG_CHELSIO_T3=y -CONFIG_CHELSIO_T4=y -CONFIG_CHELSIO_T4VF=y -# CONFIG_NET_VENDOR_CISCO is not set -# CONFIG_CX_ECAT is not set -# CONFIG_DNET is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set -# CONFIG_NET_VENDOR_FUJITSU is not set -# CONFIG_NET_VENDOR_HP is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=y -CONFIG_IXGB=y -CONFIG_IXGBE=y -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBEVF=y -# CONFIG_I40E is not set -# CONFIG_I40EVF is not set -CONFIG_NET_VENDOR_I825XX=y -# CONFIG_IP1000 is not set -# CONFIG_JME is not set -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -# CONFIG_MLX4_EN is not set -# CONFIG_MLX4_CORE is not set -# CONFIG_MLX5_CORE is not set -# CONFIG_NET_VENDOR_MICREL is not set -CONFIG_NET_VENDOR_MICROCHIP=y -CONFIG_ENC28J60=y -CONFIG_ENC28J60_WRITEVERIFY=y -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_ETHOC is not set -# CONFIG_NET_PACKET_ENGINE is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -CONFIG_NET_VENDOR_REALTEK=y -# CONFIG_8139CP is not set -# CONFIG_8139TOO is not set -CONFIG_R8169=y -# CONFIG_SH_ETH is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_SAMSUNG=y -# CONFIG_SXGBE_ETH is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_SFC is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WIZNET=y -# CONFIG_WIZNET_W5100 is not set -# CONFIG_WIZNET_W5300 is not set -# CONFIG_NET_VENDOR_XIRCOM is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -# CONFIG_AT803X_PHY is not set -# CONFIG_AMD_PHY is not set 
-CONFIG_MARVELL_PHY=y -CONFIG_DAVICOM_PHY=y -CONFIG_QSEMI_PHY=y -CONFIG_LXT_PHY=y -CONFIG_CICADA_PHY=y -CONFIG_VITESSE_PHY=y -CONFIG_SMSC_PHY=y -CONFIG_BROADCOM_PHY=y -# CONFIG_BCM7XXX_PHY is not set -# CONFIG_BCM87XX_PHY is not set -# CONFIG_ICPLUS_PHY is not set -CONFIG_REALTEK_PHY=y -CONFIG_NATIONAL_PHY=y -CONFIG_STE10XP=y -CONFIG_LSI_ET1011C_PHY=y -CONFIG_MICREL_PHY=y -CONFIG_FIXED_PHY=y -CONFIG_MDIO_BITBANG=y -# CONFIG_MDIO_GPIO is not set -# CONFIG_MICREL_KS8995MA is not set -CONFIG_PPP=y -# CONFIG_PPP_BSDCOMP is not set -# CONFIG_PPP_DEFLATE is not set -# CONFIG_PPP_FILTER is not set -# CONFIG_PPP_MPPE is not set -# CONFIG_PPP_MULTILINK is not set -# CONFIG_PPPOE is not set -# CONFIG_PPP_ASYNC is not set -# CONFIG_PPP_SYNC_TTY is not set -# CONFIG_SLIP is not set -CONFIG_SLHC=y - -# -# USB Network Adapters -# -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set -# CONFIG_USB_RTL8152 is not set -CONFIG_USB_USBNET=y -# CONFIG_USB_NET_AX8817X is not set -# CONFIG_USB_NET_AX88179_178A is not set -CONFIG_USB_NET_CDCETHER=y -# CONFIG_USB_NET_CDC_EEM is not set -CONFIG_USB_NET_CDC_NCM=y -# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -# CONFIG_USB_NET_CDC_MBIM is not set -# CONFIG_USB_NET_DM9601 is not set -# CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set -# CONFIG_USB_NET_SMSC75XX is not set -# CONFIG_USB_NET_SMSC95XX is not set -# CONFIG_USB_NET_GL620A is not set -CONFIG_USB_NET_NET1080=y -# CONFIG_USB_NET_PLUSB is not set -# CONFIG_USB_NET_MCS7830 is not set -# CONFIG_USB_NET_RNDIS_HOST is not set -CONFIG_USB_NET_CDC_SUBSET=y -# CONFIG_USB_ALI_M5632 is not set -# CONFIG_USB_AN2720 is not set -CONFIG_USB_BELKIN=y -# CONFIG_USB_ARMLINUX is not set -# CONFIG_USB_EPSON2888 is not set -# CONFIG_USB_KC2190 is not set -CONFIG_USB_NET_ZAURUS=y -# CONFIG_USB_NET_CX82310_ETH is not set -# CONFIG_USB_NET_KALMIA is not set -# CONFIG_USB_NET_QMI_WWAN is not set -# CONFIG_USB_NET_INT51X1 is not set -# CONFIG_USB_IPHETH is not set -# CONFIG_USB_SIERRA_NET is not set -# CONFIG_USB_VL600 is not set -# CONFIG_WLAN is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set -# CONFIG_VMXNET3 is not set -# CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_POLLDEV=y -CONFIG_INPUT_SPARSEKMAP=y -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=y -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_ROCKETPORT is not set -# CONFIG_CYCLADES is not set -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -# CONFIG_SYNCLINK is not set -# CONFIG_SYNCLINKMP is not set -# CONFIG_SYNCLINK_GT is 
not set -# CONFIG_NOZOMI is not set -# CONFIG_ISI is not set -# CONFIG_N_HDLC is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -# CONFIG_DEVKMEM is not set - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_CS=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y -# CONFIG_SERIAL_8250_DW is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -CONFIG_SERIAL_MFD_HSU=y -# CONFIG_SERIAL_MFD_HSU_CONSOLE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_SERIAL_JSM=y -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_IFX6X60 is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_TTY_PRINTK is not set -CONFIG_HVC_DRIVER=y -CONFIG_VIRTIO_CONSOLE=y -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y -CONFIG_HW_RANDOM_INTEL=y -CONFIG_HW_RANDOM_AMD=y -CONFIG_HW_RANDOM_VIA=y -# CONFIG_HW_RANDOM_VIRTIO is not set -CONFIG_NVRAM=y -# CONFIG_R3964 is not set -# CONFIG_APPLICOM is not set - -# -# PCMCIA character devices -# -CONFIG_SYNCLINK_CS=y -CONFIG_CARDMAN_4000=y -CONFIG_CARDMAN_4040=y -CONFIG_IPWIRELESS=y -# CONFIG_MWAVE is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_HPET is not set -# CONFIG_HANGCHECK_TIMER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_TELCLOCK is not set -CONFIG_DEVPORT=y -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_MUX_GPIO=y -CONFIG_I2C_MUX_PCA9541=y -CONFIG_I2C_MUX_PCA954x=y -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=y -CONFIG_I2C_ALGOPCA=y - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -CONFIG_I2C_I801=y -CONFIG_I2C_ISCH=y -CONFIG_I2C_ISMT=y -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CBUS_GPIO is not set -# CONFIG_I2C_DESIGNWARE_PLATFORM is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=y -# CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -CONFIG_SPI=y -# 
CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_PXA2XX_PCI is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_DESIGNWARE is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPMI is not set -# CONFIG_HSI is not set - -# -# PPS support -# -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -# CONFIG_PPS_CLIENT_LDISC is not set -# CONFIG_PPS_CLIENT_GPIO is not set - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y - -# -# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. -# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -CONFIG_GPIO_DEVRES=y -CONFIG_GPIO_ACPI=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_GENERIC=y -CONFIG_GPIO_MAX730X=y - -# -# Memory mapped GPIO drivers: -# -CONFIG_GPIO_GENERIC_PLATFORM=y -# CONFIG_GPIO_IT8761E is not set -# CONFIG_GPIO_F7188X is not set -# CONFIG_GPIO_SCH311X is not set -CONFIG_GPIO_SCH=y -# CONFIG_GPIO_ICH is not set -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_LYNXPOINT is not set - -# -# I2C GPIO expanders: -# -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -CONFIG_GPIO_PCA953X=y -# CONFIG_GPIO_PCA953X_IRQ is not set -CONFIG_GPIO_PCF857X=y -# CONFIG_GPIO_SX150X is not set -# CONFIG_GPIO_ADP5588 is not set - -# -# PCI GPIO expanders: -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_INTEL_MID is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI GPIO expanders: -# -CONFIG_GPIO_MAX7301=y -CONFIG_GPIO_MC33880=y - -# -# AC97 GPIO expanders: -# - -# -# LPC GPIO expanders: -# - -# -# MODULbus GPIO expanders: -# - -# -# USB GPIO expanders: -# -# CONFIG_W1 is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_BATTERY_BQ27x00 is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_POWER_RESET is not set -# CONFIG_POWER_AVS is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=y -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_ABITUGURU is not set -# CONFIG_SENSORS_ABITUGURU3 is not set -# CONFIG_SENSORS_AD7314 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -CONFIG_SENSORS_ADM1021=y -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# 
CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_K8TEMP is not set -# CONFIG_SENSORS_K10TEMP is not set -# CONFIG_SENSORS_FAM15H_POWER is not set -# CONFIG_SENSORS_APPLESMC is not set -# CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -CONFIG_SENSORS_GPIO_FAN=y -# CONFIG_SENSORS_HIH6130 is not set -CONFIG_SENSORS_CORETEMP=y -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -CONFIG_SENSORS_LTC4151=y -CONFIG_SENSORS_LTC4215=y -# CONFIG_SENSORS_LTC4222 is not set -CONFIG_SENSORS_LTC4245=y -# CONFIG_SENSORS_LTC4260 is not set -CONFIG_SENSORS_LTC4261=y -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -CONFIG_SENSORS_MAX6650=y -CONFIG_SENSORS_MAX6620=y -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_HTU21 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_ADCXX is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM70 is not set -# CONFIG_SENSORS_LM73 is not set -CONFIG_SENSORS_LM75=y -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -CONFIG_SENSORS_LM85=y -# CONFIG_SENSORS_LM87 is not set -CONFIG_SENSORS_LM90=y -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_PCF8591 is not set -CONFIG_PMBUS=y -CONFIG_SENSORS_PMBUS=y -# CONFIG_SENSORS_ADM1275 is not set -# CONFIG_SENSORS_LM25066 is not set -# CONFIG_SENSORS_LTC2978 is not set -# CONFIG_SENSORS_MAX16064 is not set -# CONFIG_SENSORS_MAX34440 is not set -CONFIG_SENSORS_DNI_DPS460=y -# CONFIG_SENSORS_MAX8688 is not set -# CONFIG_SENSORS_UCD9000 is not set -CONFIG_SENSORS_UCD9200=y -# CONFIG_SENSORS_ZL6100 is not set -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH56XX_COMMON is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS1015 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_ADS7871 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not 
set -# CONFIG_SENSORS_VIA_CPUTEMP is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -CONFIG_SENSORS_W83781D=y -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set - -# -# ACPI drivers -# -# CONFIG_SENSORS_ACPI_POWER is not set -# CONFIG_SENSORS_ATK0110 is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_GOV_FAIR_SHARE is not set -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_INTEL_POWERCLAMP is not set -CONFIG_X86_PKG_TEMP_THERMAL=m -# CONFIG_ACPI_INT3403_THERMAL is not set -# CONFIG_INTEL_SOC_DTS_THERMAL is not set - -# -# Texas Instruments thermal drivers -# -# CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -CONFIG_SSB=y -CONFIG_SSB_SPROM=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -# CONFIG_SSB_B43_PCI_BRIDGE is not set -CONFIG_SSB_PCMCIAHOST_POSSIBLE=y -CONFIG_SSB_PCMCIAHOST=y -CONFIG_SSB_SDIOHOST_POSSIBLE=y -CONFIG_SSB_SDIOHOST=y -# CONFIG_SSB_SILENT is not set -# CONFIG_SSB_DEBUG is not set -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y -CONFIG_SSB_DRIVER_PCICORE=y -# CONFIG_SSB_DRIVER_GPIO is not set -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -CONFIG_BCMA=y -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -# CONFIG_BCMA_DRIVER_GMAC_CMN is not set -# CONFIG_BCMA_DRIVER_GPIO is not set -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_CS5535 is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_AXP20X is not set -# CONFIG_MFD_CROS_EC is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -CONFIG_LPC_SCH=y -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RTSX_PCI is not set -# CONFIG_MFD_RTSX_USB is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SMSC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_PALMAS is not set -# 
CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS80031 is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -CONFIG_MFD_WL1273_CORE=y -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TIMBERDALE is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_REGULATOR is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -# CONFIG_AGP is not set -# CONFIG_VGA_ARB is not set -# CONFIG_VGA_SWITCHEROO is not set - -# -# Direct Rendering Manager -# -# CONFIG_DRM is not set - -# -# Frame buffer Devices -# -# CONFIG_FB is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set -# CONFIG_VGASTATE is not set - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -# CONFIG_VGACON_SOFT_SCROLLBACK is not set -CONFIG_DUMMY_CONSOLE=y -# CONFIG_SOUND is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -# CONFIG_HIDRAW is not set -# CONFIG_UHID is not set -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -# CONFIG_HID_A4TECH is not set -# CONFIG_HID_ACRUX is not set -# CONFIG_HID_APPLE is not set -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_AUREAL is not set -# CONFIG_HID_BELKIN is not set -# CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CYPRESS is not set -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -# CONFIG_HID_ELO is not set -# CONFIG_HID_EZKEY is not set -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_HUION is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_ICADE is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LENOVO_TPKBD is not set -# CONFIG_HID_LOGITECH is not set -# CONFIG_HID_MAGICMOUSE is not set -# CONFIG_HID_MICROSOFT is not set -# CONFIG_HID_MONTEREY is not set -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SONY is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THINGM is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_WIIMOTE is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set - -# -# USB HID 
support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -# CONFIG_USB_HIDDEV is not set - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -# CONFIG_USB_OTG_FSM is not set -# CONFIG_USB_MON is not set -# CONFIG_USB_WUSB_CBAF is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -# CONFIG_USB_XHCI_PLATFORM is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1760_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FUSBH200_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_SSB is not set -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_SSB is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=y -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -# CONFIG_USB_UAS is not set - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -# CONFIG_USB_SERIAL_GENERIC is not set -# CONFIG_USB_SERIAL_SIMPLE is not set -# CONFIG_USB_SERIAL_AIRCABLE is not set -# CONFIG_USB_SERIAL_ARK3116 is not set -# CONFIG_USB_SERIAL_BELKIN is not set -# CONFIG_USB_SERIAL_CH341 is not set -# CONFIG_USB_SERIAL_WHITEHEAT is not set -# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set -# CONFIG_USB_SERIAL_CP210X is not set -# CONFIG_USB_SERIAL_CYPRESS_M8 is not set -# CONFIG_USB_SERIAL_EMPEG is not set -# CONFIG_USB_SERIAL_FTDI_SIO is not set -# CONFIG_USB_SERIAL_VISOR is not set -# CONFIG_USB_SERIAL_IPAQ is not set -# CONFIG_USB_SERIAL_IR is not set -# CONFIG_USB_SERIAL_EDGEPORT is not set -# CONFIG_USB_SERIAL_EDGEPORT_TI is not set -# CONFIG_USB_SERIAL_F81232 is not set -# CONFIG_USB_SERIAL_GARMIN is not set -# CONFIG_USB_SERIAL_IPW is not set -# CONFIG_USB_SERIAL_IUU is not set -# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set 
-# CONFIG_USB_SERIAL_KEYSPAN is not set -# CONFIG_USB_SERIAL_KLSI is not set -# CONFIG_USB_SERIAL_KOBIL_SCT is not set -# CONFIG_USB_SERIAL_MCT_U232 is not set -# CONFIG_USB_SERIAL_METRO is not set -# CONFIG_USB_SERIAL_MOS7720 is not set -# CONFIG_USB_SERIAL_MOS7840 is not set -# CONFIG_USB_SERIAL_MXUPORT is not set -# CONFIG_USB_SERIAL_NAVMAN is not set -# CONFIG_USB_SERIAL_PL2303 is not set -# CONFIG_USB_SERIAL_OTI6858 is not set -# CONFIG_USB_SERIAL_QCAUX is not set -# CONFIG_USB_SERIAL_QUALCOMM is not set -# CONFIG_USB_SERIAL_SPCP8X5 is not set -# CONFIG_USB_SERIAL_SAFE is not set -# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set -# CONFIG_USB_SERIAL_SYMBOL is not set -# CONFIG_USB_SERIAL_TI is not set -# CONFIG_USB_SERIAL_CYBERJACK is not set -# CONFIG_USB_SERIAL_XIRCOM is not set -# CONFIG_USB_SERIAL_OPTION is not set -# CONFIG_USB_SERIAL_OMNINET is not set -# CONFIG_USB_SERIAL_OPTICON is not set -# CONFIG_USB_SERIAL_XSENS_MT is not set -# CONFIG_USB_SERIAL_WISHBONE is not set -# CONFIG_USB_SERIAL_ZTE is not set -# CONFIG_USB_SERIAL_SSU100 is not set -# CONFIG_USB_SERIAL_QT2 is not set -# CONFIG_USB_SERIAL_DEBUG is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_RIO500 is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_LED is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_EZUSB_FX2 is not set -# CONFIG_USB_HSIC_USB3503 is not set - -# -# USB Physical Layer drivers -# -# CONFIG_USB_PHY is not set -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_SAMSUNG_USB2PHY is not set -# CONFIG_SAMSUNG_USB3PHY is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_GADGET is not set -# CONFIG_UWB is not set -CONFIG_MMC=y -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_CLKGATE is not set - -# -# MMC/SD/SDIO Card Drivers -# -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_MMC_BLOCK_BOUNCE=y -# CONFIG_SDIO_UART is not set -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PCI=y -# CONFIG_MMC_RICOH_MMC is not set -# CONFIG_MMC_SDHCI_ACPI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_WBSD is not set -# CONFIG_MMC_TIFM_SD is not set -CONFIG_MMC_SPI=y -# CONFIG_MMC_SDRICOH_CS is not set -# CONFIG_MMC_CB710 is not set -# CONFIG_MMC_VIA_SDMMC is not set -# CONFIG_MMC_VUB300 is not set -# CONFIG_MMC_USHC is not set -# CONFIG_MMC_USDHI6ROL0 is not set -# CONFIG_MEMSTICK is not set -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y - -# -# LED drivers -# -# CONFIG_LEDS_LM3530 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -# CONFIG_LEDS_LP3944 is not set -# CONFIG_LEDS_LP5521 is not set -# CONFIG_LEDS_LP5523 is not set -# CONFIG_LEDS_LP5562 is not set -# CONFIG_LEDS_LP8501 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_BD2802 is not set -# CONFIG_LEDS_INTEL_SS4200 is not set -# 
CONFIG_LEDS_LT3593 is not set -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_LM355x is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -# CONFIG_LEDS_BLINKM is not set - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=y -# CONFIG_LEDS_TRIGGER_ONESHOT is not set -# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set -# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set -# CONFIG_LEDS_TRIGGER_CPU is not set -CONFIG_LEDS_TRIGGER_GPIO=y -# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set - -# -# iptables trigger is under Netfilter config (LED target) -# -# CONFIG_LEDS_TRIGGER_TRANSIENT is not set -# CONFIG_LEDS_TRIGGER_CAMERA is not set -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -# CONFIG_EDAC is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_DS1307=y -CONFIG_RTC_DRV_DS1374=y -CONFIG_RTC_DRV_DS1672=y -CONFIG_RTC_DRV_DS3232=y -CONFIG_RTC_DRV_MAX6900=y -CONFIG_RTC_DRV_RS5C372=y -CONFIG_RTC_DRV_ISL1208=y -CONFIG_RTC_DRV_ISL12022=y -# CONFIG_RTC_DRV_ISL12057 is not set -CONFIG_RTC_DRV_X1205=y -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -CONFIG_RTC_DRV_PCF8563=y -CONFIG_RTC_DRV_PCF8583=y -CONFIG_RTC_DRV_M41T80=y -# CONFIG_RTC_DRV_M41T80_WDT is not set -CONFIG_RTC_DRV_BQ32K=y -CONFIG_RTC_DRV_S35390A=y -CONFIG_RTC_DRV_FM3130=y -CONFIG_RTC_DRV_RX8581=y -CONFIG_RTC_DRV_RX8025=y -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_DS3234 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_MCP795 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=y -CONFIG_RTC_DRV_DS1511=y -CONFIG_RTC_DRV_DS1553=y -CONFIG_RTC_DRV_DS1742=y -CONFIG_RTC_DRV_STK17TA8=y -CONFIG_RTC_DRV_M48T86=y -CONFIG_RTC_DRV_M48T35=y -CONFIG_RTC_DRV_M48T59=y -CONFIG_RTC_DRV_MSM6242=y -CONFIG_RTC_DRV_BQ4802=y -CONFIG_RTC_DRV_RP5C01=y -CONFIG_RTC_DRV_V3020=y -# CONFIG_RTC_DRV_DS2404 is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_MOXART is not set -# CONFIG_RTC_DRV_XGENE is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -# CONFIG_DMADEVICES is not set -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=y -# CONFIG_UIO_CIF is not set -# CONFIG_UIO_PDRV_GENIRQ is not set -# CONFIG_UIO_DMEM_GENIRQ is not set -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -# CONFIG_UIO_PCI_GENERIC is not set -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_MF624 is not set -CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO=y - -# -# Virtio drivers -# -CONFIG_VIRTIO_PCI=y -# CONFIG_VIRTIO_BALLOON is not set -# CONFIG_VIRTIO_MMIO is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_STAGING is not set -CONFIG_X86_PLATFORM_DEVICES=y -# CONFIG_ACERHDF is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_DELL_SMO8800 is not set 
-# CONFIG_FUJITSU_TABLET is not set -# CONFIG_HP_ACCEL is not set -# CONFIG_HP_WIRELESS is not set -# CONFIG_THINKPAD_ACPI is not set -# CONFIG_SENSORS_HDAPS is not set -# CONFIG_INTEL_MENLOW is not set -# CONFIG_EEEPC_LAPTOP is not set -# CONFIG_ACPI_WMI is not set -# CONFIG_TOPSTAR_LAPTOP is not set -# CONFIG_TOSHIBA_BT_RFKILL is not set -# CONFIG_ACPI_CMPC is not set -# CONFIG_INTEL_IPS is not set -# CONFIG_IBM_RTL is not set -# CONFIG_SAMSUNG_Q10 is not set -# CONFIG_INTEL_RST is not set -# CONFIG_INTEL_SMARTCONNECT is not set -# CONFIG_PVPANIC is not set -# CONFIG_CHROME_PLATFORMS is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Hardware Spinlock drivers -# -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -# CONFIG_SH_TIMER_CMT is not set -# CONFIG_SH_TIMER_MTU2 is not set -# CONFIG_SH_TIMER_TMU is not set -# CONFIG_EM_TIMER_STI is not set -# CONFIG_MAILBOX is not set -CONFIG_IOMMU_SUPPORT=y -# CONFIG_AMD_IOMMU is not set -# CONFIG_INTEL_IOMMU is not set -# CONFIG_IRQ_REMAP is not set - -# -# Remoteproc drivers -# -# CONFIG_STE_MODEM_RPROC is not set - -# -# Rpmsg drivers -# -# CONFIG_PM_DEVFREQ is not set -# CONFIG_EXTCON is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set -# CONFIG_PWM is not set -# CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set -# CONFIG_FMC is not set - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -# CONFIG_BCM_KONA_USB2_PHY is not set -# CONFIG_PHY_SAMSUNG_USB2 is not set -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set -# CONFIG_THUNDERBOLT is not set - -# -# Firmware Drivers -# -CONFIG_EDD=y -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=y -CONFIG_DCDBAS=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=y -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -# CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=y -CONFIG_EXT3_DEFAULTS_TO_ORDERED=y -CONFIG_EXT3_FS_XATTR=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD=y -# CONFIG_JBD_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=y -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_NILFS2_FS is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -# CONFIG_QUOTA is not set -# CONFIG_QUOTACTL is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set -CONFIG_OVERLAYFS_FS=y - -# -# Caches -# -CONFIG_FSCACHE=y -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=y -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=y -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT 
Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_PSTORE is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EXOFS_FS is not set -# CONFIG_F2FS_FS is not set -CONFIG_AUFS_FS=y -CONFIG_AUFS_BRANCH_MAX_127=y -# CONFIG_AUFS_BRANCH_MAX_511 is not set -# CONFIG_AUFS_BRANCH_MAX_1023 is not set -# CONFIG_AUFS_BRANCH_MAX_32767 is not set -CONFIG_AUFS_SBILIST=y -# CONFIG_AUFS_HNOTIFY is not set -# CONFIG_AUFS_EXPORT is not set -# CONFIG_AUFS_FHSM is not set -# CONFIG_AUFS_RDU is not set -# CONFIG_AUFS_SHWH is not set -# CONFIG_AUFS_BR_RAMFS is not set -CONFIG_AUFS_BDEV_LOOP=y -# CONFIG_AUFS_DEBUG is not set -CONFIG_ORE=y -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V2=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -# CONFIG_NFS_V4_2 is not set -CONFIG_PNFS_FILE_LAYOUT=y -CONFIG_PNFS_BLOCK=y -CONFIG_PNFS_OBJLAYOUT=y -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -# CONFIG_NFS_FSCACHE is not set -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFSD=y -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -# CONFIG_NFSD_FAULT_INJECTION is not set -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -CONFIG_SUNRPC_BACKCHANNEL=y -# CONFIG_RPCSEC_GSS_KRB5 is not set -# CONFIG_SUNRPC_DEBUG is not set -# CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# 
CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_MAC_ROMAN is not set -# CONFIG_NLS_MAC_CELTIC is not set -# CONFIG_NLS_MAC_CENTEURO is not set -# CONFIG_NLS_MAC_CROATIAN is not set -# CONFIG_NLS_MAC_CYRILLIC is not set -# CONFIG_NLS_MAC_GAELIC is not set -# CONFIG_NLS_MAC_GREEK is not set -# CONFIG_NLS_MAC_ICELAND is not set -# CONFIG_NLS_MAC_INUIT is not set -# CONFIG_NLS_MAC_ROMANIAN is not set -# CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -# CONFIG_DLM is not set - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y - -# -# printk and dmesg options -# -# CONFIG_PRINTK_TIME is not set -CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 -CONFIG_BOOT_PRINTK_DELAY=y -# CONFIG_DYNAMIC_DEBUG is not set - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y -# CONFIG_FRAME_POINTER is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -# CONFIG_DEBUG_STACKOVERFLOW is not set -CONFIG_HAVE_ARCH_KMEMCHECK=y -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -CONFIG_LOCKUP_DETECTOR=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -CONFIG_TIMER_STATS=y - -# -# Lock Debugging (spinlocks, mutexes, etc...) 
-# -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -CONFIG_STACKTRACE=y -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_TORTURE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_CPU_STALL_INFO is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y -CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_FTRACE_SYSCALLS is not set -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_UPROBE_EVENT is not set -# CONFIG_PROBE_EVENTS is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set - -# -# Runtime Testing -# -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_TEST_MODULE is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_STRICT_DEVMEM=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -# CONFIG_EARLY_PRINTK_DBGP is not set -# CONFIG_X86_PTDUMP is not set -CONFIG_DEBUG_RODATA=y -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set -# CONFIG_DEBUG_NX_TEST is not set -CONFIG_DOUBLEFAULT=y -# CONFIG_DEBUG_TLBFLUSH is not set -# CONFIG_IOMMU_DEBUG is not set -# CONFIG_IOMMU_STRESS is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 
-CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -# CONFIG_DEBUG_BOOT_PARAMS is not set -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_NMI_SELFTEST is not set -# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_PERSISTENT_KEYRINGS is not set -# CONFIG_BIG_KEYS is not set -# CONFIG_ENCRYPTED_KEYS is not set -# CONFIG_KEYS_DEBUG_PROC_KEYS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITYFS is not set -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEFAULT_SECURITY="" -CONFIG_XOR_BLOCKS=y -CONFIG_ASYNC_CORE=y -CONFIG_ASYNC_XOR=y -CONFIG_ASYNC_PQ=y -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -# CONFIG_CRYPTO_FIPS is not set -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=y -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_USER is not set -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_NULL=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=y -# CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_ABLK_HELPER=y -CONFIG_CRYPTO_GLUE_HELPER_X86=y - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_SEQIV=y - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_XTS=y - -# -# Hash modes -# -# CONFIG_CRYPTO_CMAC is not set -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_VMAC=y - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32C_INTEL=y -# CONFIG_CRYPTO_CRC32 is not set -# CONFIG_CRYPTO_CRC32_PCLMUL is not set -CONFIG_CRYPTO_CRCT10DIF=y -# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_RMD128=y -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_RMD256=y -CONFIG_CRYPTO_RMD320=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_SSSE3=y -# CONFIG_CRYPTO_SHA256_SSSE3 is not set -# CONFIG_CRYPTO_SHA512_SSSE3 is not set -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_TGR192=y -CONFIG_CRYPTO_WP512=y -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_X86_64=y -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_ANUBIS=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_BLOWFISH=y -CONFIG_CRYPTO_BLOWFISH_COMMON=y -CONFIG_CRYPTO_BLOWFISH_X86_64=y -CONFIG_CRYPTO_CAMELLIA=y -# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set -# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set -# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set -CONFIG_CRYPTO_CAST_COMMON=y -CONFIG_CRYPTO_CAST5=y -# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set -CONFIG_CRYPTO_CAST6=y -# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=y -CONFIG_CRYPTO_KHAZAD=y -CONFIG_CRYPTO_SALSA20=y -CONFIG_CRYPTO_SALSA20_X86_64=y -CONFIG_CRYPTO_SEED=y -CONFIG_CRYPTO_SERPENT=y -# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set -# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set -# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set 
-CONFIG_CRYPTO_TEA=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_TWOFISH_COMMON=y -CONFIG_CRYPTO_TWOFISH_X86_64=y -CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y -# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_ZLIB=y -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_LZ4 is not set -# CONFIG_CRYPTO_LZ4HC is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=y -CONFIG_CRYPTO_DEV_PADLOCK_AES=y -CONFIG_CRYPTO_DEV_PADLOCK_SHA=y -# CONFIG_CRYPTO_DEV_CCP is not set -# CONFIG_ASYMMETRIC_KEY_TYPE is not set -CONFIG_HAVE_KVM=y -# CONFIG_VIRTUALIZATION is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=y -CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_IO=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=y -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC7=y -CONFIG_LIBCRC32C=y -CONFIG_CRC8=y -# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=y -CONFIG_TEXTSEARCH_BM=y -CONFIG_TEXTSEARCH_FSM=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_NLATTR=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_AVERAGE=y -CONFIG_CORDIC=y -# CONFIG_DDR is not set -CONFIG_OID_REGISTRY=y diff --git a/packages/base/any/kernels/3.16+deb8/kconfig.mk b/packages/base/any/kernels/3.16+deb8/kconfig.mk deleted file mode 100644 index 06a25085..00000000 --- a/packages/base/any/kernels/3.16+deb8/kconfig.mk +++ /dev/null @@ -1,33 +0,0 @@ -############################################################ -# -# -# Copyright 2015 Big Switch Networks, Inc. -# -# Licensed under the Eclipse Public License, Version 1.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.eclipse.org/legal/epl-v10.html -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the -# License. 
-# -# -############################################################ -# -# 3.16 Kernel Builds -# -############################################################ -THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) -K_MAJOR_VERSION := 3 -K_PATCH_LEVEL := 16 -K_SUB_LEVEL := 7 -K_SUFFIX := -K_PATCH_DIR := $(THIS_DIR)/patches -K_ARCHIVE_NAME := linux-3.16.7-ckt25.tgz -K_NAME := linux-3.16.7-ckt25 -K_ARCHIVE_URL := http://opennetlinux.org/tarballs/linux-3.16.7-ckt25.tgz diff --git a/packages/base/any/kernels/3.16+deb8/patches/3.16-fs-overlayfs.patch b/packages/base/any/kernels/3.16+deb8/patches/3.16-fs-overlayfs.patch deleted file mode 100644 index 93c59266..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/3.16-fs-overlayfs.patch +++ /dev/null @@ -1,4278 +0,0 @@ -diff -urNp a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking ---- a/Documentation/filesystems/Locking 2016-03-02 02:31:21.000000000 -0800 -+++ b/Documentation/filesystems/Locking 2016-07-22 08:49:33.265290309 -0700 -@@ -67,6 +67,7 @@ prototypes: - struct file *, unsigned open_flag, - umode_t create_mode, int *opened); - int (*tmpfile) (struct inode *, struct dentry *, umode_t); -+ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); - - locking rules: - all may block -@@ -96,6 +97,7 @@ fiemap: no - update_time: no - atomic_open: yes - tmpfile: no -+dentry_open: no - - Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on - victim. -diff -urNp a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt ---- a/Documentation/filesystems/overlayfs.txt 1969-12-31 16:00:00.000000000 -0800 -+++ b/Documentation/filesystems/overlayfs.txt 2016-07-22 08:49:33.265290309 -0700 -@@ -0,0 +1,198 @@ -+Written by: Neil Brown -+ -+Overlay Filesystem -+================== -+ -+This document describes a prototype for a new approach to providing -+overlay-filesystem functionality in Linux (sometimes referred to as -+union-filesystems). An overlay-filesystem tries to present a -+filesystem which is the result over overlaying one filesystem on top -+of the other. -+ -+The result will inevitably fail to look exactly like a normal -+filesystem for various technical reasons. The expectation is that -+many use cases will be able to ignore these differences. -+ -+This approach is 'hybrid' because the objects that appear in the -+filesystem do not all appear to belong to that filesystem. In many -+cases an object accessed in the union will be indistinguishable -+from accessing the corresponding object from the original filesystem. -+This is most obvious from the 'st_dev' field returned by stat(2). -+ -+While directories will report an st_dev from the overlay-filesystem, -+all non-directory objects will report an st_dev from the lower or -+upper filesystem that is providing the object. Similarly st_ino will -+only be unique when combined with st_dev, and both of these can change -+over the lifetime of a non-directory object. Many applications and -+tools ignore these values and will not be affected. -+ -+Upper and Lower -+--------------- -+ -+An overlay filesystem combines two filesystems - an 'upper' filesystem -+and a 'lower' filesystem. When a name exists in both filesystems, the -+object in the 'upper' filesystem is visible while the object in the -+'lower' filesystem is either hidden or, in the case of directories, -+merged with the 'upper' object. 
-+ -+It would be more correct to refer to an upper and lower 'directory -+tree' rather than 'filesystem' as it is quite possible for both -+directory trees to be in the same filesystem and there is no -+requirement that the root of a filesystem be given for either upper or -+lower. -+ -+The lower filesystem can be any filesystem supported by Linux and does -+not need to be writable. The lower filesystem can even be another -+overlayfs. The upper filesystem will normally be writable and if it -+is it must support the creation of trusted.* extended attributes, and -+must provide valid d_type in readdir responses, so NFS is not suitable. -+ -+A read-only overlay of two read-only filesystems may use any -+filesystem type. -+ -+Directories -+----------- -+ -+Overlaying mainly involves directories. If a given name appears in both -+upper and lower filesystems and refers to a non-directory in either, -+then the lower object is hidden - the name refers only to the upper -+object. -+ -+Where both upper and lower objects are directories, a merged directory -+is formed. -+ -+At mount time, the two directories given as mount options "lowerdir" and -+"upperdir" are combined into a merged directory: -+ -+ mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\ -+workdir=/work /merged -+ -+The "workdir" needs to be an empty directory on the same filesystem -+as upperdir. -+ -+Then whenever a lookup is requested in such a merged directory, the -+lookup is performed in each actual directory and the combined result -+is cached in the dentry belonging to the overlay filesystem. If both -+actual lookups find directories, both are stored and a merged -+directory is created, otherwise only one is stored: the upper if it -+exists, else the lower. -+ -+Only the lists of names from directories are merged. Other content -+such as metadata and extended attributes are reported for the upper -+directory only. These attributes of the lower directory are hidden. -+ -+whiteouts and opaque directories -+-------------------------------- -+ -+In order to support rm and rmdir without changing the lower -+filesystem, an overlay filesystem needs to record in the upper filesystem -+that files have been removed. This is done using whiteouts and opaque -+directories (non-directories are always opaque). -+ -+A whiteout is created as a character device with 0/0 device number. -+When a whiteout is found in the upper level of a merged directory, any -+matching name in the lower level is ignored, and the whiteout itself -+is also hidden. -+ -+A directory is made opaque by setting the xattr "trusted.overlay.opaque" -+to "y". Where the upper filesystem contains an opaque directory, any -+directory in the lower filesystem with the same name is ignored. -+ -+readdir -+------- -+ -+When a 'readdir' request is made on a merged directory, the upper and -+lower directories are each read and the name lists merged in the -+obvious way (upper is read first, then lower - entries that already -+exist are not re-added). This merged name list is cached in the -+'struct file' and so remains as long as the file is kept open. If the -+directory is opened and read by two processes at the same time, they -+will each have separate caches. A seekdir to the start of the -+directory (offset 0) followed by a readdir will cause the cache to be -+discarded and rebuilt. -+ -+This means that changes to the merged directory do not appear while a -+directory is being read. This is unlikely to be noticed by many -+programs. 
-+ -+seek offsets are assigned sequentially when the directories are read. -+Thus if -+ - read part of a directory -+ - remember an offset, and close the directory -+ - re-open the directory some time later -+ - seek to the remembered offset -+ -+there may be little correlation between the old and new locations in -+the list of filenames, particularly if anything has changed in the -+directory. -+ -+Readdir on directories that are not merged is simply handled by the -+underlying directory (upper or lower). -+ -+ -+Non-directories -+--------------- -+ -+Objects that are not directories (files, symlinks, device-special -+files etc.) are presented either from the upper or lower filesystem as -+appropriate. When a file in the lower filesystem is accessed in a way -+the requires write-access, such as opening for write access, changing -+some metadata etc., the file is first copied from the lower filesystem -+to the upper filesystem (copy_up). Note that creating a hard-link -+also requires copy_up, though of course creation of a symlink does -+not. -+ -+The copy_up may turn out to be unnecessary, for example if the file is -+opened for read-write but the data is not modified. -+ -+The copy_up process first makes sure that the containing directory -+exists in the upper filesystem - creating it and any parents as -+necessary. It then creates the object with the same metadata (owner, -+mode, mtime, symlink-target etc.) and then if the object is a file, the -+data is copied from the lower to the upper filesystem. Finally any -+extended attributes are copied up. -+ -+Once the copy_up is complete, the overlay filesystem simply -+provides direct access to the newly created file in the upper -+filesystem - future operations on the file are barely noticed by the -+overlay filesystem (though an operation on the name of the file such as -+rename or unlink will of course be noticed and handled). -+ -+ -+Non-standard behavior -+--------------------- -+ -+The copy_up operation essentially creates a new, identical file and -+moves it over to the old name. The new file may be on a different -+filesystem, so both st_dev and st_ino of the file may change. -+ -+Any open files referring to this inode will access the old data and -+metadata. Similarly any file locks obtained before copy_up will not -+apply to the copied up file. -+ -+On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and -+fsetxattr(2) will fail with EROFS. -+ -+If a file with multiple hard links is copied up, then this will -+"break" the link. Changes will not be propagated to other names -+referring to the same inode. -+ -+Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory -+object in overlayfs will not contain valid absolute paths, only -+relative paths leading up to the filesystem's root. This will be -+fixed in the future. -+ -+Some operations are not atomic, for example a crash during copy_up or -+rename will leave the filesystem in an inconsistent state. This will -+be addressed in the future. -+ -+Changes to underlying filesystems -+--------------------------------- -+ -+Offline changes, when the overlay is not mounted, are allowed to either -+the upper or the lower trees. -+ -+Changes to the underlying filesystems while part of a mounted overlay -+filesystem are not allowed. If the underlying filesystem is changed, -+the behavior of the overlay is undefined, though it will not result in -+a crash or deadlock. 
-diff -urNp a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt ---- a/Documentation/filesystems/vfs.txt 2016-03-02 02:31:21.000000000 -0800 -+++ b/Documentation/filesystems/vfs.txt 2016-07-22 08:49:33.265290309 -0700 -@@ -364,6 +364,7 @@ struct inode_operations { - int (*atomic_open)(struct inode *, struct dentry *, struct file *, - unsigned open_flag, umode_t create_mode, int *opened); - int (*tmpfile) (struct inode *, struct dentry *, umode_t); -+ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); - }; - - Again, all methods are called without any locks being held, unless -@@ -696,6 +697,12 @@ struct address_space_operations { - but instead uses bmap to find out where the blocks in the file - are and uses those addresses directly. - -+ dentry_open: this is an alternative to f_op->open(), the difference is that -+ this method may open a file not necessarily originating from the same -+ filesystem as the one i_op->open() was called on. It may be -+ useful for stacking filesystems which want to allow native I/O directly -+ on underlying files. -+ - - invalidatepage: If a page has PagePrivate set, then invalidatepage - will be called when part or all of the page is to be removed -diff -urNp a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c ---- a/fs/btrfs/ioctl.c 2016-03-02 02:31:21.000000000 -0800 -+++ b/fs/btrfs/ioctl.c 2016-07-22 08:49:33.265290309 -0700 -@@ -766,23 +766,6 @@ out: - return ret; - } - --/* copy of check_sticky in fs/namei.c() --* It's inline, so penalty for filesystems that don't use sticky bit is --* minimal. --*/ --static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode) --{ -- kuid_t fsuid = current_fsuid(); -- -- if (!(dir->i_mode & S_ISVTX)) -- return 0; -- if (uid_eq(inode->i_uid, fsuid)) -- return 0; -- if (uid_eq(dir->i_uid, fsuid)) -- return 0; -- return !capable(CAP_FOWNER); --} -- - /* copy of may_delete in fs/namei.c() - * Check whether we can remove a link victim from directory dir, check - * whether the type of victim is right. 
-@@ -818,8 +801,7 @@ static int btrfs_may_delete(struct inode - return error; - if (IS_APPEND(dir)) - return -EPERM; -- if (btrfs_check_sticky(dir, victim->d_inode)|| -- IS_APPEND(victim->d_inode)|| -+ if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) || - IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) - return -EPERM; - if (isdir) { -diff -urNp a/fs/dcache.c b/fs/dcache.c ---- a/fs/dcache.c 2016-07-02 04:41:41.000000000 -0700 -+++ b/fs/dcache.c 2016-07-22 08:49:33.269290309 -0700 -@@ -2655,6 +2655,7 @@ struct dentry *d_ancestor(struct dentry - } - return NULL; - } -+EXPORT_SYMBOL(d_ancestor); - - /* - * This helper attempts to cope with remotely renamed directories -diff -urNp a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c ---- a/fs/ecryptfs/main.c 2016-03-02 02:31:21.000000000 -0800 -+++ b/fs/ecryptfs/main.c 2016-07-22 08:49:33.269290309 -0700 -@@ -576,6 +576,13 @@ static struct dentry *ecryptfs_mount(str - s->s_maxbytes = path.dentry->d_sb->s_maxbytes; - s->s_blocksize = path.dentry->d_sb->s_blocksize; - s->s_magic = ECRYPTFS_SUPER_MAGIC; -+ s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1; -+ -+ rc = -EINVAL; -+ if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { -+ pr_err("eCryptfs: maximum fs stacking depth exceeded\n"); -+ goto out_free; -+ } - - inode = ecryptfs_get_inode(path.dentry->d_inode, s); - rc = PTR_ERR(inode); -diff -urNp a/fs/ext4/namei.c b/fs/ext4/namei.c ---- a/fs/ext4/namei.c 2016-03-02 02:31:21.000000000 -0800 -+++ b/fs/ext4/namei.c 2016-07-22 08:55:33.477298125 -0700 -@@ -1849,10 +1849,10 @@ static int make_indexed_dir(handle_t *ha - - retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); - if (retval) -- goto out_frames; -+ goto out_frames; - retval = ext4_handle_dirty_dirent_node(handle, dir, bh); - if (retval) -- goto out_frames; -+ goto out_frames; - - de = do_split(handle,dir, &bh, frame, &hinfo); - if (IS_ERR(de)) { -@@ -2905,7 +2905,7 @@ retry: - * for transaction commit if we are running out of space - * and thus we deadlock. So we have to stop transaction now - * and restart it when symlink contents is written. -- * -+ * - * To keep fs consistent in case of crash, we have to put inode - * to orphan list in the mean time. - */ -@@ -3186,6 +3186,39 @@ static void ext4_update_dir_count(handle - } - } - -+static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent, -+ int credits, handle_t **h) -+{ -+ struct inode *wh; -+ handle_t *handle; -+ int retries = 0; -+ -+ /* -+ * for inode block, sb block, group summaries, -+ * and inode bitmap -+ */ -+ credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) + -+ EXT4_XATTR_TRANS_BLOCKS + 4); -+retry: -+ wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE, -+ &ent->dentry->d_name, 0, NULL, -+ EXT4_HT_DIR, credits); -+ -+ handle = ext4_journal_current_handle(); -+ if (IS_ERR(wh)) { -+ if (handle) -+ ext4_journal_stop(handle); -+ if (PTR_ERR(wh) == -ENOSPC && -+ ext4_should_retry_alloc(ent->dir->i_sb, &retries)) -+ goto retry; -+ } else { -+ *h = handle; -+ init_special_inode(wh, wh->i_mode, WHITEOUT_DEV); -+ wh->i_op = &ext4_special_inode_operations; -+ } -+ return wh; -+} -+ - /* - * Anybody can rename anything with this: the permission checks are left to the - * higher-level routines. 
-@@ -3194,8 +3227,9 @@ static void ext4_update_dir_count(handle - * while new_{dentry,inode) refers to the destination dentry/inode - * This comes from rename(const char *oldpath, const char *newpath) - */ --static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, -- struct inode *new_dir, struct dentry *new_dentry) -+static int ext4_simple_rename(struct inode *old_dir, struct dentry *old_dentry, -+ struct inode *new_dir, struct dentry *new_dentry, -+ unsigned int flags) - { - handle_t *handle = NULL; - struct ext4_renament old = { -@@ -3210,6 +3244,9 @@ static int ext4_rename(struct inode *old - }; - int force_reread; - int retval; -+ struct inode *whiteout = NULL; -+ int credits; -+ u8 old_file_type; - - dquot_initialize(old.dir); - dquot_initialize(new.dir); -@@ -3248,11 +3285,17 @@ static int ext4_rename(struct inode *old - if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC)) - ext4_alloc_da_blocks(old.inode); - -- handle = ext4_journal_start(old.dir, EXT4_HT_DIR, -- (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + -- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); -- if (IS_ERR(handle)) -- return PTR_ERR(handle); -+ credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + -+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); -+ if (!(flags & RENAME_WHITEOUT)) { -+ handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); -+ if (IS_ERR(handle)) -+ return PTR_ERR(handle); -+ } else { -+ whiteout = ext4_whiteout_for_rename(&old, credits, &handle); -+ if (IS_ERR(whiteout)) -+ return PTR_ERR(whiteout); -+ } - - if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) - ext4_handle_sync(handle); -@@ -3280,13 +3323,23 @@ static int ext4_rename(struct inode *old - */ - force_reread = (new.dir->i_ino == old.dir->i_ino && - ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); -- if (!new.bh) { -+ -+ old_file_type = old.de->file_type; -+ if (whiteout) { -+ retval = ext4_setent(handle, &old, whiteout->i_ino, -+ EXT4_FT_CHRDEV); -+ if (retval) -+ goto end_rename; -+ ext4_mark_inode_dirty(handle, whiteout); -+ } -+ -+ if (!new.bh) { - retval = ext4_add_entry(handle, new.dentry, old.inode); - if (retval) - goto end_rename; - } else { - retval = ext4_setent(handle, &new, -- old.inode->i_ino, old.de->file_type); -+ old.inode->i_ino, old_file_type); - if (retval) - goto end_rename; - } -@@ -3301,10 +3354,12 @@ static int ext4_rename(struct inode *old - old.inode->i_ctime = ext4_current_time(old.inode); - ext4_mark_inode_dirty(handle, old.inode); - -- /* -- * ok, that's it -- */ -- ext4_rename_delete(handle, &old, force_reread); -+ if (!whiteout) { -+ /* -+ * ok, that's it -+ */ -+ ext4_rename_delete(handle, &old, force_reread); -+ } - - if (new.inode) { - ext4_dec_count(handle, new.inode); -@@ -3340,6 +3395,12 @@ end_rename: - brelse(old.dir_bh); - brelse(old.bh); - brelse(new.bh); -+ if (whiteout) { -+ if (retval) -+ drop_nlink(whiteout); -+ unlock_new_inode(whiteout); -+ iput(whiteout); -+ } - if (handle) - ext4_journal_stop(handle); - return retval; -@@ -3468,22 +3529,26 @@ end_rename: - return retval; - } - -+static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, -+ struct inode *new_dir, struct dentry *new_dentry) -+{ -+ return ext4_simple_rename(old_dir, old_dentry, new_dir, new_dentry, 0); -+} -+ - static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry, - unsigned int flags) - { -- if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) -+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) - return -EINVAL; 
-
- if (flags & RENAME_EXCHANGE) {
- return ext4_cross_rename(old_dir, old_dentry,
- new_dir, new_dentry);
- }
-- /*
-- * Existence checking was done by the VFS, otherwise "RENAME_NOREPLACE"
-- * is equivalent to regular rename.
-- */
-- return ext4_rename(old_dir, old_dentry, new_dir, new_dentry);
-+
-+ return ext4_simple_rename(old_dir, old_dentry,
-+ new_dir, new_dentry, flags);
- }
-
- /*
-diff -urNp a/fs/internal.h b/fs/internal.h
---- a/fs/internal.h 2016-03-02 02:31:21.000000000 -0800
-+++ b/fs/internal.h 2016-07-22 08:49:33.269290309 -0700
-@@ -42,7 +42,6 @@ extern void __init chrdev_init(void);
- /*
- * namei.c
- */
--extern int __inode_permission(struct inode *, int);
- extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
- extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
- const char *, unsigned int, struct path *);
-@@ -135,12 +134,6 @@ extern ssize_t __kernel_write(struct fil
- extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
-
- /*
-- * splice.c
-- */
--extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-- loff_t *opos, size_t len, unsigned int flags);
--
--/*
- * pipe.c
- */
- extern const struct file_operations pipefifo_fops;
-diff -urNp a/fs/Kconfig b/fs/Kconfig
---- a/fs/Kconfig 2016-07-02 04:41:41.000000000 -0700
-+++ b/fs/Kconfig 2016-07-22 08:49:33.265290309 -0700
-@@ -67,6 +67,7 @@ source "fs/quota/Kconfig"
-
- source "fs/autofs4/Kconfig"
- source "fs/fuse/Kconfig"
-+source "fs/overlayfs/Kconfig"
-
- menu "Caches"
-
-diff -urNp a/fs/Makefile b/fs/Makefile
---- a/fs/Makefile 2016-07-02 04:41:41.000000000 -0700
-+++ b/fs/Makefile 2016-07-22 08:49:33.265290309 -0700
-@@ -104,6 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/
- obj-$(CONFIG_AUTOFS4_FS) += autofs4/
- obj-$(CONFIG_ADFS_FS) += adfs/
- obj-$(CONFIG_FUSE_FS) += fuse/
-+obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/
- obj-$(CONFIG_UDF_FS) += udf/
- obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
- obj-$(CONFIG_OMFS_FS) += omfs/
-diff -urNp a/fs/namei.c b/fs/namei.c
---- a/fs/namei.c 2016-07-02 04:41:41.000000000 -0700
-+++ b/fs/namei.c 2016-07-22 08:49:33.269290309 -0700
-@@ -416,6 +416,7 @@ int __inode_permission(struct inode *ino
-
- return security_inode_permission(inode, mask);
- }
-+EXPORT_SYMBOL(__inode_permission);
-
- /**
- * sb_permission - Check superblock-level permissions
-@@ -2405,22 +2406,17 @@ kern_path_mountpoint(int dfd, const char
- }
- EXPORT_SYMBOL(kern_path_mountpoint);
-
--/*
-- * It's inline, so penalty for filesystems that don't use sticky bit is
-- * minimal.
-- */ --static inline int check_sticky(struct inode *dir, struct inode *inode) -+int __check_sticky(struct inode *dir, struct inode *inode) - { - kuid_t fsuid = current_fsuid(); - -- if (!(dir->i_mode & S_ISVTX)) -- return 0; - if (uid_eq(inode->i_uid, fsuid)) - return 0; - if (uid_eq(dir->i_uid, fsuid)) - return 0; - return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); - } -+EXPORT_SYMBOL(__check_sticky); - - /* - * Check whether we can remove a link victim from directory dir, check -@@ -3087,9 +3083,12 @@ finish_open_created: - error = may_open(&nd->path, acc_mode, open_flag); - if (error) - goto out; -- file->f_path.mnt = nd->path.mnt; -- error = finish_open(file, nd->path.dentry, NULL, opened); -- if (error) { -+ -+ BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ -+ error = vfs_open(&nd->path, file, current_cred()); -+ if (!error) { -+ *opened |= FILE_OPENED; -+ } else { - if (error == -EOPENSTALE) - goto stale_open; - goto out; -@@ -4230,12 +4229,16 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, - bool should_retry = false; - int error; - -- if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) -+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) - return -EINVAL; - -- if ((flags & RENAME_NOREPLACE) && (flags & RENAME_EXCHANGE)) -+ if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && -+ (flags & RENAME_EXCHANGE)) - return -EINVAL; - -+ if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) -+ return -EPERM; -+ - retry: - from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags); - if (IS_ERR(from)) { -@@ -4367,6 +4370,20 @@ SYSCALL_DEFINE2(rename, const char __use - return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); - } - -+int vfs_whiteout(struct inode *dir, struct dentry *dentry) -+{ -+ int error = may_create(dir, dentry); -+ if (error) -+ return error; -+ -+ if (!dir->i_op->mknod) -+ return -EPERM; -+ -+ return dir->i_op->mknod(dir, dentry, -+ S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); -+} -+EXPORT_SYMBOL(vfs_whiteout); -+ - int readlink_copy(char __user *buffer, int buflen, const char *link) - { - int len = PTR_ERR(link); -diff -urNp a/fs/namespace.c b/fs/namespace.c ---- a/fs/namespace.c 2016-07-02 04:41:41.000000000 -0700 -+++ b/fs/namespace.c 2016-07-22 08:49:33.269290309 -0700 -@@ -1600,6 +1600,33 @@ void drop_collected_mounts(struct vfsmou - namespace_unlock(); - } - -+/** -+ * clone_private_mount - create a private clone of a path -+ * -+ * This creates a new vfsmount, which will be the clone of @path. The new will -+ * not be attached anywhere in the namespace and will be private (i.e. changes -+ * to the originating mount won't be propagated into this). -+ * -+ * Release with mntput(). 
-+ */ -+struct vfsmount *clone_private_mount(struct path *path) -+{ -+ struct mount *old_mnt = real_mount(path->mnt); -+ struct mount *new_mnt; -+ -+ if (IS_MNT_UNBINDABLE(old_mnt)) -+ return ERR_PTR(-EINVAL); -+ -+ down_read(&namespace_sem); -+ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); -+ up_read(&namespace_sem); -+ if (IS_ERR(new_mnt)) -+ return ERR_CAST(new_mnt); -+ -+ return &new_mnt->mnt; -+} -+EXPORT_SYMBOL_GPL(clone_private_mount); -+ - int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, - struct vfsmount *root) - { -diff -urNp a/fs/open.c b/fs/open.c ---- a/fs/open.c 2016-07-02 04:41:41.000000000 -0700 -+++ b/fs/open.c 2016-07-22 08:49:33.269290309 -0700 -@@ -825,8 +825,7 @@ struct file *dentry_open(const struct pa - f = get_empty_filp(); - if (!IS_ERR(f)) { - f->f_flags = flags; -- f->f_path = *path; -- error = do_dentry_open(f, NULL, cred); -+ error = vfs_open(path, f, cred); - if (!error) { - /* from now on we need fput() to dispose of f */ - error = open_check_o_direct(f); -@@ -843,6 +842,26 @@ struct file *dentry_open(const struct pa - } - EXPORT_SYMBOL(dentry_open); - -+/** -+ * vfs_open - open the file at the given path -+ * @path: path to open -+ * @filp: newly allocated file with f_flag initialized -+ * @cred: credentials to use -+ */ -+int vfs_open(const struct path *path, struct file *filp, -+ const struct cred *cred) -+{ -+ struct inode *inode = path->dentry->d_inode; -+ -+ if (inode->i_op->dentry_open) -+ return inode->i_op->dentry_open(path->dentry, filp, cred); -+ else { -+ filp->f_path = *path; -+ return do_dentry_open(filp, NULL, cred); -+ } -+} -+EXPORT_SYMBOL(vfs_open); -+ - static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) - { - int lookup_flags = 0; -diff -urNp a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c ---- a/fs/overlayfs/copy_up.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/copy_up.c 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,431 @@ -+/* -+ * -+ * Copyright (C) 2011 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "overlayfs.h" -+ -+#define OVL_COPY_UP_CHUNK_SIZE (1 << 20) -+ -+int ovl_copy_xattr(struct dentry *old, struct dentry *new) -+{ -+ ssize_t list_size, size; -+ char *buf, *name, *value; -+ int error; -+ -+ if (!old->d_inode->i_op->getxattr || -+ !new->d_inode->i_op->getxattr) -+ return 0; -+ -+ list_size = vfs_listxattr(old, NULL, 0); -+ if (list_size <= 0) { -+ if (list_size == -EOPNOTSUPP) -+ return 0; -+ return list_size; -+ } -+ -+ buf = kzalloc(list_size, GFP_KERNEL); -+ if (!buf) -+ return -ENOMEM; -+ -+ error = -ENOMEM; -+ value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL); -+ if (!value) -+ goto out; -+ -+ list_size = vfs_listxattr(old, buf, list_size); -+ if (list_size <= 0) { -+ error = list_size; -+ goto out_free_value; -+ } -+ -+ for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { -+ size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX); -+ if (size <= 0) { -+ error = size; -+ goto out_free_value; -+ } -+ error = vfs_setxattr(new, name, value, size, 0); -+ if (error) -+ goto out_free_value; -+ } -+ -+out_free_value: -+ kfree(value); -+out: -+ kfree(buf); -+ return error; -+} -+ -+static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) -+{ -+ struct file *old_file; -+ struct file *new_file; -+ loff_t old_pos = 0; -+ loff_t new_pos = 0; -+ int error = 0; -+ -+ if (len == 0) -+ return 0; -+ -+ old_file = ovl_path_open(old, O_RDONLY); -+ if (IS_ERR(old_file)) -+ return PTR_ERR(old_file); -+ -+ new_file = ovl_path_open(new, O_WRONLY); -+ if (IS_ERR(new_file)) { -+ error = PTR_ERR(new_file); -+ goto out_fput; -+ } -+ -+ /* FIXME: copy up sparse files efficiently */ -+ while (len) { -+ size_t this_len = OVL_COPY_UP_CHUNK_SIZE; -+ long bytes; -+ -+ if (len < this_len) -+ this_len = len; -+ -+ if (signal_pending_state(TASK_KILLABLE, current)) { -+ error = -EINTR; -+ break; -+ } -+ -+ bytes = do_splice_direct(old_file, &old_pos, -+ new_file, &new_pos, -+ this_len, SPLICE_F_MOVE); -+ if (bytes <= 0) { -+ error = bytes; -+ break; -+ } -+ WARN_ON(old_pos != new_pos); -+ -+ len -= bytes; -+ } -+ -+ fput(new_file); -+out_fput: -+ fput(old_file); -+ return error; -+} -+ -+static char *ovl_read_symlink(struct dentry *realdentry) -+{ -+ int res; -+ char *buf; -+ struct inode *inode = realdentry->d_inode; -+ mm_segment_t old_fs; -+ -+ res = -EINVAL; -+ if (!inode->i_op->readlink) -+ goto err; -+ -+ res = -ENOMEM; -+ buf = (char *) __get_free_page(GFP_KERNEL); -+ if (!buf) -+ goto err; -+ -+ old_fs = get_fs(); -+ set_fs(get_ds()); -+ /* The cast to a user pointer is valid due to the set_fs() */ -+ res = inode->i_op->readlink(realdentry, -+ (char __user *)buf, PAGE_SIZE - 1); -+ set_fs(old_fs); -+ if (res < 0) { -+ free_page((unsigned long) buf); -+ goto err; -+ } -+ buf[res] = '\0'; -+ -+ return buf; -+ -+err: -+ return ERR_PTR(res); -+} -+ -+static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat) -+{ -+ struct iattr attr = { -+ .ia_valid = -+ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, -+ .ia_atime = stat->atime, -+ .ia_mtime = stat->mtime, -+ }; -+ -+ return notify_change(upperdentry, &attr, NULL); -+} -+ -+int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) -+{ -+ int err = 0; -+ -+ mutex_lock(&upperdentry->d_inode->i_mutex); -+ if (!S_ISLNK(stat->mode)) { -+ struct iattr attr = { -+ .ia_valid = ATTR_MODE, -+ .ia_mode = stat->mode, -+ }; -+ err = notify_change(upperdentry, &attr, NULL); -+ } -+ if 
(!err) { -+ struct iattr attr = { -+ .ia_valid = ATTR_UID | ATTR_GID, -+ .ia_uid = stat->uid, -+ .ia_gid = stat->gid, -+ }; -+ err = notify_change(upperdentry, &attr, NULL); -+ } -+ if (!err) -+ ovl_set_timestamps(upperdentry, stat); -+ mutex_unlock(&upperdentry->d_inode->i_mutex); -+ -+ return err; -+ -+} -+ -+static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, -+ struct dentry *dentry, struct path *lowerpath, -+ struct kstat *stat, const char *link) -+{ -+ struct inode *wdir = workdir->d_inode; -+ struct inode *udir = upperdir->d_inode; -+ struct dentry *newdentry = NULL; -+ struct dentry *upper = NULL; -+ umode_t mode = stat->mode; -+ int err; -+ -+ newdentry = ovl_lookup_temp(workdir, dentry); -+ err = PTR_ERR(newdentry); -+ if (IS_ERR(newdentry)) -+ goto out; -+ -+ upper = lookup_one_len(dentry->d_name.name, upperdir, -+ dentry->d_name.len); -+ err = PTR_ERR(upper); -+ if (IS_ERR(upper)) -+ goto out; -+ -+ /* Can't properly set mode on creation because of the umask */ -+ stat->mode &= S_IFMT; -+ err = ovl_create_real(wdir, newdentry, stat, link, NULL, true); -+ stat->mode = mode; -+ if (err) -+ goto out; -+ -+ if (S_ISREG(stat->mode)) { -+ struct path upperpath; -+ ovl_path_upper(dentry, &upperpath); -+ BUG_ON(upperpath.dentry != NULL); -+ upperpath.dentry = newdentry; -+ -+ err = ovl_copy_up_data(lowerpath, &upperpath, stat->size); -+ if (err) -+ goto out_cleanup; -+ } -+ -+ err = ovl_copy_xattr(lowerpath->dentry, newdentry); -+ if (err) -+ goto out_cleanup; -+ -+ err = ovl_set_attr(newdentry, stat); -+ if (err) -+ goto out_cleanup; -+ -+ err = ovl_do_rename(wdir, newdentry, udir, upper, 0); -+ if (err) -+ goto out_cleanup; -+ -+ ovl_dentry_update(dentry, newdentry); -+ newdentry = NULL; -+ -+ /* -+ * Easiest way to get rid of the lower dentry reference is to -+ * drop this dentry. This is neither needed nor possible for -+ * directories. -+ * -+ * Non-directores become opaque when copied up. -+ */ -+ if (!S_ISDIR(stat->mode)) { -+ ovl_dentry_set_opaque(dentry, true); -+ d_drop(dentry); -+ } -+out: -+ dput(upper); -+ dput(newdentry); -+ return err; -+ -+out_cleanup: -+ ovl_cleanup(wdir, newdentry); -+ goto out; -+} -+ -+/* -+ * Copy up a single dentry -+ * -+ * Directory renames only allowed on "pure upper" (already created on -+ * upper filesystem, never copied up). Directories which are on lower or -+ * are merged may not be renamed. For these -EXDEV is returned and -+ * userspace has to deal with it. This means, when copying up a -+ * directory we can rely on it and ancestors being stable. -+ * -+ * Non-directory renames start with copy up of source if necessary. The -+ * actual rename will only proceed once the copy up was successful. Copy -+ * up uses upper parent i_mutex for exclusion. Since rename can change -+ * d_parent it is possible that the copy up will lock the old parent. At -+ * that point the file will have already been copied up anyway. 
-+ */ -+static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, -+ struct path *lowerpath, struct kstat *stat) -+{ -+ struct dentry *workdir = ovl_workdir(dentry); -+ int err; -+ struct kstat pstat; -+ struct path parentpath; -+ struct dentry *upperdir; -+ const struct cred *old_cred; -+ struct cred *override_cred; -+ char *link = NULL; -+ -+ ovl_path_upper(parent, &parentpath); -+ upperdir = parentpath.dentry; -+ -+ err = vfs_getattr(&parentpath, &pstat); -+ if (err) -+ return err; -+ -+ if (S_ISLNK(stat->mode)) { -+ link = ovl_read_symlink(lowerpath->dentry); -+ if (IS_ERR(link)) -+ return PTR_ERR(link); -+ } -+ -+ err = -ENOMEM; -+ override_cred = prepare_creds(); -+ if (!override_cred) -+ goto out_free_link; -+ -+ override_cred->fsuid = stat->uid; -+ override_cred->fsgid = stat->gid; -+ /* -+ * CAP_SYS_ADMIN for copying up extended attributes -+ * CAP_DAC_OVERRIDE for create -+ * CAP_FOWNER for chmod, timestamp update -+ * CAP_FSETID for chmod -+ * CAP_CHOWN for chown -+ * CAP_MKNOD for mknod -+ */ -+ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); -+ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); -+ cap_raise(override_cred->cap_effective, CAP_FOWNER); -+ cap_raise(override_cred->cap_effective, CAP_FSETID); -+ cap_raise(override_cred->cap_effective, CAP_CHOWN); -+ cap_raise(override_cred->cap_effective, CAP_MKNOD); -+ old_cred = override_creds(override_cred); -+ -+ err = -EIO; -+ if (lock_rename(workdir, upperdir) != NULL) { -+ pr_err("overlayfs: failed to lock workdir+upperdir\n"); -+ goto out_unlock; -+ } -+ if (ovl_path_type(dentry) != OVL_PATH_LOWER) { -+ err = 0; -+ } else { -+ err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, -+ stat, link); -+ if (!err) { -+ /* Restore timestamps on parent (best effort) */ -+ ovl_set_timestamps(upperdir, &pstat); -+ } -+ } -+out_unlock: -+ unlock_rename(workdir, upperdir); -+ -+ revert_creds(old_cred); -+ put_cred(override_cred); -+ -+out_free_link: -+ if (link) -+ free_page((unsigned long) link); -+ -+ return err; -+} -+ -+int ovl_copy_up(struct dentry *dentry) -+{ -+ int err; -+ -+ err = 0; -+ while (!err) { -+ struct dentry *next; -+ struct dentry *parent; -+ struct path lowerpath; -+ struct kstat stat; -+ enum ovl_path_type type = ovl_path_type(dentry); -+ -+ if (type != OVL_PATH_LOWER) -+ break; -+ -+ next = dget(dentry); -+ /* find the topmost dentry not yet copied up */ -+ for (;;) { -+ parent = dget_parent(next); -+ -+ type = ovl_path_type(parent); -+ if (type != OVL_PATH_LOWER) -+ break; -+ -+ dput(next); -+ next = parent; -+ } -+ -+ ovl_path_lower(next, &lowerpath); -+ err = vfs_getattr(&lowerpath, &stat); -+ if (!err) -+ err = ovl_copy_up_one(parent, next, &lowerpath, &stat); -+ -+ dput(parent); -+ dput(next); -+ } -+ -+ return err; -+} -+ -+/* Optimize by not copying up the file first and truncating later */ -+int ovl_copy_up_truncate(struct dentry *dentry, loff_t size) -+{ -+ int err; -+ struct kstat stat; -+ struct path lowerpath; -+ struct dentry *parent = dget_parent(dentry); -+ -+ err = ovl_copy_up(parent); -+ if (err) -+ goto out_dput_parent; -+ -+ ovl_path_lower(dentry, &lowerpath); -+ err = vfs_getattr(&lowerpath, &stat); -+ if (err) -+ goto out_dput_parent; -+ -+ if (size < stat.size) -+ stat.size = size; -+ -+ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat); -+ -+out_dput_parent: -+ dput(parent); -+ return err; -+} -diff -urNp a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c ---- a/fs/overlayfs/dir.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/dir.c 2016-07-22 
08:49:33.269290309 -0700 -@@ -0,0 +1,922 @@ -+/* -+ * -+ * Copyright (C) 2011 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "overlayfs.h" -+ -+void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) -+{ -+ int err; -+ -+ if (S_ISDIR(wdentry->d_inode->i_mode)) -+ err = ovl_do_rmdir(wdir, wdentry); -+ else -+ err = ovl_do_unlink(wdir, wdentry); -+ -+ if (err) { -+ pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n", -+ wdentry, err); -+ } -+} -+ -+struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) -+{ -+ struct dentry *temp; -+ char name[20]; -+ -+ snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry); -+ -+ temp = lookup_one_len(name, workdir, strlen(name)); -+ if (!IS_ERR(temp) && temp->d_inode) { -+ pr_err("overlayfs: workdir/%s already exists\n", name); -+ dput(temp); -+ temp = ERR_PTR(-EIO); -+ } -+ -+ return temp; -+} -+ -+/* caller holds i_mutex on workdir */ -+static struct dentry *ovl_whiteout(struct dentry *workdir, -+ struct dentry *dentry) -+{ -+ int err; -+ struct dentry *whiteout; -+ struct inode *wdir = workdir->d_inode; -+ -+ whiteout = ovl_lookup_temp(workdir, dentry); -+ if (IS_ERR(whiteout)) -+ return whiteout; -+ -+ err = ovl_do_whiteout(wdir, whiteout); -+ if (err) { -+ dput(whiteout); -+ whiteout = ERR_PTR(err); -+ } -+ -+ return whiteout; -+} -+ -+int ovl_create_real(struct inode *dir, struct dentry *newdentry, -+ struct kstat *stat, const char *link, -+ struct dentry *hardlink, bool debug) -+{ -+ int err; -+ -+ if (newdentry->d_inode) -+ return -ESTALE; -+ -+ if (hardlink) { -+ err = ovl_do_link(hardlink, dir, newdentry, debug); -+ } else { -+ switch (stat->mode & S_IFMT) { -+ case S_IFREG: -+ err = ovl_do_create(dir, newdentry, stat->mode, debug); -+ break; -+ -+ case S_IFDIR: -+ err = ovl_do_mkdir(dir, newdentry, stat->mode, debug); -+ break; -+ -+ case S_IFCHR: -+ case S_IFBLK: -+ case S_IFIFO: -+ case S_IFSOCK: -+ err = ovl_do_mknod(dir, newdentry, -+ stat->mode, stat->rdev, debug); -+ break; -+ -+ case S_IFLNK: -+ err = ovl_do_symlink(dir, newdentry, link, debug); -+ break; -+ -+ default: -+ err = -EPERM; -+ } -+ } -+ if (!err && WARN_ON(!newdentry->d_inode)) { -+ /* -+ * Not quite sure if non-instantiated dentry is legal or not. -+ * VFS doesn't seem to care so check and warn here. -+ */ -+ err = -ENOENT; -+ } -+ return err; -+} -+ -+static int ovl_set_opaque(struct dentry *upperdentry) -+{ -+ return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); -+} -+ -+static void ovl_remove_opaque(struct dentry *upperdentry) -+{ -+ int err; -+ -+ err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr); -+ if (err) { -+ pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n", -+ upperdentry->d_name.name, err); -+ } -+} -+ -+static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, -+ struct kstat *stat) -+{ -+ int err; -+ enum ovl_path_type type; -+ struct path realpath; -+ -+ type = ovl_path_real(dentry, &realpath); -+ err = vfs_getattr(&realpath, stat); -+ if (err) -+ return err; -+ -+ stat->dev = dentry->d_sb->s_dev; -+ stat->ino = dentry->d_inode->i_ino; -+ -+ /* -+ * It's probably not worth it to count subdirs to get the -+ * correct link count. nlink=1 seems to pacify 'find' and -+ * other utilities. 
-+ */ -+ if (type == OVL_PATH_MERGE) -+ stat->nlink = 1; -+ -+ return 0; -+} -+ -+static int ovl_create_upper(struct dentry *dentry, struct inode *inode, -+ struct kstat *stat, const char *link, -+ struct dentry *hardlink) -+{ -+ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); -+ struct inode *udir = upperdir->d_inode; -+ struct dentry *newdentry; -+ int err; -+ -+ mutex_lock_nested(&udir->i_mutex, I_MUTEX_PARENT); -+ newdentry = lookup_one_len(dentry->d_name.name, upperdir, -+ dentry->d_name.len); -+ err = PTR_ERR(newdentry); -+ if (IS_ERR(newdentry)) -+ goto out_unlock; -+ err = ovl_create_real(udir, newdentry, stat, link, hardlink, false); -+ if (err) -+ goto out_dput; -+ -+ ovl_dentry_version_inc(dentry->d_parent); -+ ovl_dentry_update(dentry, newdentry); -+ ovl_copyattr(newdentry->d_inode, inode); -+ d_instantiate(dentry, inode); -+ newdentry = NULL; -+out_dput: -+ dput(newdentry); -+out_unlock: -+ mutex_unlock(&udir->i_mutex); -+ return err; -+} -+ -+static int ovl_lock_rename_workdir(struct dentry *workdir, -+ struct dentry *upperdir) -+{ -+ /* Workdir should not be subdir of upperdir and vice versa */ -+ if (lock_rename(workdir, upperdir) != NULL) { -+ unlock_rename(workdir, upperdir); -+ pr_err("overlayfs: failed to lock workdir+upperdir\n"); -+ return -EIO; -+ } -+ return 0; -+} -+ -+static struct dentry *ovl_clear_empty(struct dentry *dentry, -+ struct list_head *list) -+{ -+ struct dentry *workdir = ovl_workdir(dentry); -+ struct inode *wdir = workdir->d_inode; -+ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); -+ struct inode *udir = upperdir->d_inode; -+ struct path upperpath; -+ struct dentry *upper; -+ struct dentry *opaquedir; -+ struct kstat stat; -+ int err; -+ -+ err = ovl_lock_rename_workdir(workdir, upperdir); -+ if (err) -+ goto out; -+ -+ ovl_path_upper(dentry, &upperpath); -+ err = vfs_getattr(&upperpath, &stat); -+ if (err) -+ goto out; -+ -+ err = -ESTALE; -+ if (!S_ISDIR(stat.mode)) -+ goto out; -+ -+ opaquedir = ovl_lookup_temp(workdir, dentry); -+ err = PTR_ERR(opaquedir); -+ if (IS_ERR(opaquedir)) -+ goto out_unlock; -+ -+ err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true); -+ if (err) -+ goto out_dput; -+ -+ upper = upperpath.dentry; -+ err = ovl_copy_xattr(upper, opaquedir); -+ if (err) -+ goto out_cleanup; -+ -+ err = ovl_set_opaque(opaquedir); -+ if (err) -+ goto out_cleanup; -+ -+ err = ovl_set_attr(opaquedir, &stat); -+ if (err) -+ goto out_cleanup; -+ -+ err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE); -+ if (err) -+ goto out_cleanup; -+ -+ unlock_rename(workdir, upperdir); -+ ovl_cleanup_whiteouts(upper, list); -+ mutex_lock_nested(&wdir->i_mutex, I_MUTEX_PARENT); -+ ovl_cleanup(wdir, upper); -+ mutex_unlock(&wdir->i_mutex); -+ -+ /* dentry's upper doesn't match now, get rid of it */ -+ d_drop(dentry); -+ -+ return opaquedir; -+ -+out_cleanup: -+ ovl_cleanup(wdir, opaquedir); -+out_dput: -+ dput(opaquedir); -+out_unlock: -+ unlock_rename(workdir, upperdir); -+out: -+ return ERR_PTR(err); -+} -+ -+static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry, -+ enum ovl_path_type type) -+{ -+ int err; -+ struct dentry *ret = NULL; -+ LIST_HEAD(list); -+ -+ err = ovl_check_empty_dir(dentry, &list); -+ if (err) -+ ret = ERR_PTR(err); -+ else if (type == OVL_PATH_MERGE) -+ ret = ovl_clear_empty(dentry, &list); -+ -+ ovl_cache_free(&list); -+ -+ return ret; -+} -+ -+static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, -+ struct kstat *stat, const char *link, -+ 
struct dentry *hardlink) -+{ -+ struct dentry *workdir = ovl_workdir(dentry); -+ struct inode *wdir = workdir->d_inode; -+ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); -+ struct inode *udir = upperdir->d_inode; -+ struct dentry *upper; -+ struct dentry *newdentry; -+ int err; -+ -+ err = ovl_lock_rename_workdir(workdir, upperdir); -+ if (err) -+ goto out; -+ -+ newdentry = ovl_lookup_temp(workdir, dentry); -+ err = PTR_ERR(newdentry); -+ if (IS_ERR(newdentry)) -+ goto out_unlock; -+ -+ upper = lookup_one_len(dentry->d_name.name, upperdir, -+ dentry->d_name.len); -+ err = PTR_ERR(upper); -+ if (IS_ERR(upper)) -+ goto out_dput; -+ -+ err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true); -+ if (err) -+ goto out_dput2; -+ -+ if (S_ISDIR(stat->mode)) { -+ err = ovl_set_opaque(newdentry); -+ if (err) -+ goto out_cleanup; -+ -+ err = ovl_do_rename(wdir, newdentry, udir, upper, -+ RENAME_EXCHANGE); -+ if (err) -+ goto out_cleanup; -+ -+ ovl_cleanup(wdir, upper); -+ } else { -+ err = ovl_do_rename(wdir, newdentry, udir, upper, 0); -+ if (err) -+ goto out_cleanup; -+ } -+ ovl_dentry_version_inc(dentry->d_parent); -+ ovl_dentry_update(dentry, newdentry); -+ ovl_copyattr(newdentry->d_inode, inode); -+ d_instantiate(dentry, inode); -+ newdentry = NULL; -+out_dput2: -+ dput(upper); -+out_dput: -+ dput(newdentry); -+out_unlock: -+ unlock_rename(workdir, upperdir); -+out: -+ return err; -+ -+out_cleanup: -+ ovl_cleanup(wdir, newdentry); -+ goto out_dput2; -+} -+ -+static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev, -+ const char *link, struct dentry *hardlink) -+{ -+ int err; -+ struct inode *inode; -+ struct kstat stat = { -+ .mode = mode, -+ .rdev = rdev, -+ }; -+ -+ err = -ENOMEM; -+ inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata); -+ if (!inode) -+ goto out; -+ -+ err = ovl_copy_up(dentry->d_parent); -+ if (err) -+ goto out_iput; -+ -+ if (!ovl_dentry_is_opaque(dentry)) { -+ err = ovl_create_upper(dentry, inode, &stat, link, hardlink); -+ } else { -+ const struct cred *old_cred; -+ struct cred *override_cred; -+ -+ err = -ENOMEM; -+ override_cred = prepare_creds(); -+ if (!override_cred) -+ goto out_iput; -+ -+ /* -+ * CAP_SYS_ADMIN for setting opaque xattr -+ * CAP_DAC_OVERRIDE for create in workdir, rename -+ * CAP_FOWNER for removing whiteout from sticky dir -+ */ -+ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); -+ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); -+ cap_raise(override_cred->cap_effective, CAP_FOWNER); -+ old_cred = override_creds(override_cred); -+ -+ err = ovl_create_over_whiteout(dentry, inode, &stat, link, -+ hardlink); -+ -+ revert_creds(old_cred); -+ put_cred(override_cred); -+ } -+ -+ if (!err) -+ inode = NULL; -+out_iput: -+ iput(inode); -+out: -+ return err; -+} -+ -+static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, -+ const char *link) -+{ -+ int err; -+ -+ err = ovl_want_write(dentry); -+ if (!err) { -+ err = ovl_create_or_link(dentry, mode, rdev, link, NULL); -+ ovl_drop_write(dentry); -+ } -+ -+ return err; -+} -+ -+static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode, -+ bool excl) -+{ -+ return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL); -+} -+ -+static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) -+{ -+ return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL); -+} -+ -+static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, -+ dev_t rdev) -+{ -+ /* Don't allow 
creation of "whiteout" on overlay */ -+ if (S_ISCHR(mode) && rdev == WHITEOUT_DEV) -+ return -EPERM; -+ -+ return ovl_create_object(dentry, mode, rdev, NULL); -+} -+ -+static int ovl_symlink(struct inode *dir, struct dentry *dentry, -+ const char *link) -+{ -+ return ovl_create_object(dentry, S_IFLNK, 0, link); -+} -+ -+static int ovl_link(struct dentry *old, struct inode *newdir, -+ struct dentry *new) -+{ -+ int err; -+ struct dentry *upper; -+ -+ err = ovl_want_write(old); -+ if (err) -+ goto out; -+ -+ err = ovl_copy_up(old); -+ if (err) -+ goto out_drop_write; -+ -+ upper = ovl_dentry_upper(old); -+ err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper); -+ -+out_drop_write: -+ ovl_drop_write(old); -+out: -+ return err; -+} -+ -+static int ovl_remove_and_whiteout(struct dentry *dentry, -+ enum ovl_path_type type, bool is_dir) -+{ -+ struct dentry *workdir = ovl_workdir(dentry); -+ struct inode *wdir = workdir->d_inode; -+ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); -+ struct inode *udir = upperdir->d_inode; -+ struct dentry *whiteout; -+ struct dentry *upper; -+ struct dentry *opaquedir = NULL; -+ int err; -+ -+ if (is_dir) { -+ opaquedir = ovl_check_empty_and_clear(dentry, type); -+ err = PTR_ERR(opaquedir); -+ if (IS_ERR(opaquedir)) -+ goto out; -+ } -+ -+ err = ovl_lock_rename_workdir(workdir, upperdir); -+ if (err) -+ goto out_dput; -+ -+ whiteout = ovl_whiteout(workdir, dentry); -+ err = PTR_ERR(whiteout); -+ if (IS_ERR(whiteout)) -+ goto out_unlock; -+ -+ if (type == OVL_PATH_LOWER) { -+ upper = lookup_one_len(dentry->d_name.name, upperdir, -+ dentry->d_name.len); -+ err = PTR_ERR(upper); -+ if (IS_ERR(upper)) -+ goto kill_whiteout; -+ -+ err = ovl_do_rename(wdir, whiteout, udir, upper, 0); -+ dput(upper); -+ if (err) -+ goto kill_whiteout; -+ } else { -+ int flags = 0; -+ -+ upper = ovl_dentry_upper(dentry); -+ if (opaquedir) -+ upper = opaquedir; -+ err = -ESTALE; -+ if (upper->d_parent != upperdir) -+ goto kill_whiteout; -+ -+ if (is_dir) -+ flags |= RENAME_EXCHANGE; -+ -+ err = ovl_do_rename(wdir, whiteout, udir, upper, flags); -+ if (err) -+ goto kill_whiteout; -+ -+ if (is_dir) -+ ovl_cleanup(wdir, upper); -+ } -+ ovl_dentry_version_inc(dentry->d_parent); -+out_d_drop: -+ d_drop(dentry); -+ dput(whiteout); -+out_unlock: -+ unlock_rename(workdir, upperdir); -+out_dput: -+ dput(opaquedir); -+out: -+ return err; -+ -+kill_whiteout: -+ ovl_cleanup(wdir, whiteout); -+ goto out_d_drop; -+} -+ -+static int ovl_remove_upper(struct dentry *dentry, bool is_dir) -+{ -+ struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); -+ struct inode *dir = upperdir->d_inode; -+ struct dentry *upper = ovl_dentry_upper(dentry); -+ int err; -+ -+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); -+ err = -ESTALE; -+ if (upper->d_parent == upperdir) { -+ if (is_dir) -+ err = vfs_rmdir(dir, upper); -+ else -+ err = vfs_unlink(dir, upper, NULL); -+ -+ ovl_dentry_version_inc(dentry->d_parent); -+ } -+ -+ /* -+ * Keeping this dentry hashed would mean having to release -+ * upperpath/lowerpath, which could only be done if we are the -+ * sole user of this dentry. Too tricky... Just unhash for -+ * now. 
-+ */ -+ d_drop(dentry); -+ mutex_unlock(&dir->i_mutex); -+ -+ return err; -+} -+ -+static inline int ovl_check_sticky(struct dentry *dentry) -+{ -+ struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode; -+ struct inode *inode = ovl_dentry_real(dentry)->d_inode; -+ -+ if (check_sticky(dir, inode)) -+ return -EPERM; -+ -+ return 0; -+} -+ -+static int ovl_do_remove(struct dentry *dentry, bool is_dir) -+{ -+ enum ovl_path_type type; -+ int err; -+ -+ err = ovl_check_sticky(dentry); -+ if (err) -+ goto out; -+ -+ err = ovl_want_write(dentry); -+ if (err) -+ goto out; -+ -+ err = ovl_copy_up(dentry->d_parent); -+ if (err) -+ goto out_drop_write; -+ -+ type = ovl_path_type(dentry); -+ if (type == OVL_PATH_UPPER && !ovl_dentry_is_opaque(dentry)) { -+ err = ovl_remove_upper(dentry, is_dir); -+ } else { -+ const struct cred *old_cred; -+ struct cred *override_cred; -+ -+ err = -ENOMEM; -+ override_cred = prepare_creds(); -+ if (!override_cred) -+ goto out_drop_write; -+ -+ /* -+ * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir -+ * CAP_DAC_OVERRIDE for create in workdir, rename -+ * CAP_FOWNER for removing whiteout from sticky dir -+ * CAP_FSETID for chmod of opaque dir -+ * CAP_CHOWN for chown of opaque dir -+ */ -+ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); -+ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); -+ cap_raise(override_cred->cap_effective, CAP_FOWNER); -+ cap_raise(override_cred->cap_effective, CAP_FSETID); -+ cap_raise(override_cred->cap_effective, CAP_CHOWN); -+ old_cred = override_creds(override_cred); -+ -+ err = ovl_remove_and_whiteout(dentry, type, is_dir); -+ -+ revert_creds(old_cred); -+ put_cred(override_cred); -+ } -+out_drop_write: -+ ovl_drop_write(dentry); -+out: -+ return err; -+} -+ -+static int ovl_unlink(struct inode *dir, struct dentry *dentry) -+{ -+ return ovl_do_remove(dentry, false); -+} -+ -+static int ovl_rmdir(struct inode *dir, struct dentry *dentry) -+{ -+ return ovl_do_remove(dentry, true); -+} -+ -+static int ovl_rename2(struct inode *olddir, struct dentry *old, -+ struct inode *newdir, struct dentry *new, -+ unsigned int flags) -+{ -+ int err; -+ enum ovl_path_type old_type; -+ enum ovl_path_type new_type; -+ struct dentry *old_upperdir; -+ struct dentry *new_upperdir; -+ struct dentry *olddentry; -+ struct dentry *newdentry; -+ struct dentry *trap; -+ bool old_opaque; -+ bool new_opaque; -+ bool new_create = false; -+ bool cleanup_whiteout = false; -+ bool overwrite = !(flags & RENAME_EXCHANGE); -+ bool is_dir = S_ISDIR(old->d_inode->i_mode); -+ bool new_is_dir = false; -+ struct dentry *opaquedir = NULL; -+ const struct cred *old_cred = NULL; -+ struct cred *override_cred = NULL; -+ -+ err = -EINVAL; -+ if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) -+ goto out; -+ -+ flags &= ~RENAME_NOREPLACE; -+ -+ err = ovl_check_sticky(old); -+ if (err) -+ goto out; -+ -+ /* Don't copy up directory trees */ -+ old_type = ovl_path_type(old); -+ err = -EXDEV; -+ if (old_type != OVL_PATH_UPPER && is_dir) -+ goto out; -+ -+ if (new->d_inode) { -+ err = ovl_check_sticky(new); -+ if (err) -+ goto out; -+ -+ if (S_ISDIR(new->d_inode->i_mode)) -+ new_is_dir = true; -+ -+ new_type = ovl_path_type(new); -+ err = -EXDEV; -+ if (!overwrite && new_type != OVL_PATH_UPPER && new_is_dir) -+ goto out; -+ -+ err = 0; -+ if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { -+ if (ovl_dentry_lower(old)->d_inode == -+ ovl_dentry_lower(new)->d_inode) -+ goto out; -+ } -+ if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) 
{ -+ if (ovl_dentry_upper(old)->d_inode == -+ ovl_dentry_upper(new)->d_inode) -+ goto out; -+ } -+ } else { -+ new_type = OVL_PATH_UPPER; -+ } -+ -+ err = ovl_want_write(old); -+ if (err) -+ goto out; -+ -+ err = ovl_copy_up(old); -+ if (err) -+ goto out_drop_write; -+ -+ err = ovl_copy_up(new->d_parent); -+ if (err) -+ goto out_drop_write; -+ if (!overwrite) { -+ err = ovl_copy_up(new); -+ if (err) -+ goto out_drop_write; -+ } -+ -+ old_opaque = ovl_dentry_is_opaque(old) || old_type != OVL_PATH_UPPER; -+ new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER; -+ -+ if (old_opaque || new_opaque) { -+ err = -ENOMEM; -+ override_cred = prepare_creds(); -+ if (!override_cred) -+ goto out_drop_write; -+ -+ /* -+ * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir -+ * CAP_DAC_OVERRIDE for create in workdir -+ * CAP_FOWNER for removing whiteout from sticky dir -+ * CAP_FSETID for chmod of opaque dir -+ * CAP_CHOWN for chown of opaque dir -+ */ -+ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); -+ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); -+ cap_raise(override_cred->cap_effective, CAP_FOWNER); -+ cap_raise(override_cred->cap_effective, CAP_FSETID); -+ cap_raise(override_cred->cap_effective, CAP_CHOWN); -+ old_cred = override_creds(override_cred); -+ } -+ -+ if (overwrite && new_type != OVL_PATH_UPPER && new_is_dir) { -+ opaquedir = ovl_check_empty_and_clear(new, new_type); -+ err = PTR_ERR(opaquedir); -+ if (IS_ERR(opaquedir)) { -+ opaquedir = NULL; -+ goto out_revert_creds; -+ } -+ } -+ -+ if (overwrite) { -+ if (old_opaque) { -+ if (new->d_inode || !new_opaque) { -+ /* Whiteout source */ -+ flags |= RENAME_WHITEOUT; -+ } else { -+ /* Switch whiteouts */ -+ flags |= RENAME_EXCHANGE; -+ } -+ } else if (is_dir && !new->d_inode && new_opaque) { -+ flags |= RENAME_EXCHANGE; -+ cleanup_whiteout = true; -+ } -+ } -+ -+ old_upperdir = ovl_dentry_upper(old->d_parent); -+ new_upperdir = ovl_dentry_upper(new->d_parent); -+ -+ trap = lock_rename(new_upperdir, old_upperdir); -+ -+ olddentry = ovl_dentry_upper(old); -+ newdentry = ovl_dentry_upper(new); -+ if (newdentry) { -+ if (opaquedir) { -+ newdentry = opaquedir; -+ opaquedir = NULL; -+ } else { -+ dget(newdentry); -+ } -+ } else { -+ new_create = true; -+ newdentry = lookup_one_len(new->d_name.name, new_upperdir, -+ new->d_name.len); -+ err = PTR_ERR(newdentry); -+ if (IS_ERR(newdentry)) -+ goto out_unlock; -+ } -+ -+ err = -ESTALE; -+ if (olddentry->d_parent != old_upperdir) -+ goto out_dput; -+ if (newdentry->d_parent != new_upperdir) -+ goto out_dput; -+ if (olddentry == trap) -+ goto out_dput; -+ if (newdentry == trap) -+ goto out_dput; -+ -+ if (is_dir && !old_opaque && new_opaque) { -+ err = ovl_set_opaque(olddentry); -+ if (err) -+ goto out_dput; -+ } -+ if (!overwrite && new_is_dir && old_opaque && !new_opaque) { -+ err = ovl_set_opaque(newdentry); -+ if (err) -+ goto out_dput; -+ } -+ -+ if (old_opaque || new_opaque) { -+ err = ovl_do_rename(old_upperdir->d_inode, olddentry, -+ new_upperdir->d_inode, newdentry, -+ flags); -+ } else { -+ /* No debug for the plain case */ -+ BUG_ON(flags & ~RENAME_EXCHANGE); -+ err = vfs_rename(old_upperdir->d_inode, olddentry, -+ new_upperdir->d_inode, newdentry, -+ NULL, flags); -+ } -+ -+ if (err) { -+ if (is_dir && !old_opaque && new_opaque) -+ ovl_remove_opaque(olddentry); -+ if (!overwrite && new_is_dir && old_opaque && !new_opaque) -+ ovl_remove_opaque(newdentry); -+ goto out_dput; -+ } -+ -+ if (is_dir && old_opaque && !new_opaque) -+ 
ovl_remove_opaque(olddentry); -+ if (!overwrite && new_is_dir && !old_opaque && new_opaque) -+ ovl_remove_opaque(newdentry); -+ -+ if (old_opaque != new_opaque) { -+ ovl_dentry_set_opaque(old, new_opaque); -+ if (!overwrite) -+ ovl_dentry_set_opaque(new, old_opaque); -+ } -+ -+ if (cleanup_whiteout) -+ ovl_cleanup(old_upperdir->d_inode, newdentry); -+ -+ /* -+ * Copy-up already unhashed it, but then vfs_rename() rehashed it. -+ * See comment in ovl_copy_up_locked() as to why we drop the dentry(). -+ */ -+ if (!is_dir && old_type == OVL_PATH_LOWER) -+ d_drop(old); -+ -+ if (!overwrite && !new_is_dir && new_type == OVL_PATH_LOWER) -+ d_drop(new); -+ -+ ovl_dentry_version_inc(old->d_parent); -+ ovl_dentry_version_inc(new->d_parent); -+ -+out_dput: -+ dput(newdentry); -+out_unlock: -+ unlock_rename(new_upperdir, old_upperdir); -+out_revert_creds: -+ if (old_opaque || new_opaque) { -+ revert_creds(old_cred); -+ put_cred(override_cred); -+ } -+out_drop_write: -+ ovl_drop_write(old); -+out: -+ dput(opaquedir); -+ return err; -+} -+ -+static int ovl_rename(struct inode *olddir, struct dentry *old, -+ struct inode *newdir, struct dentry *new) -+{ -+ return ovl_rename2(olddir, old, newdir, new, 0); -+} -+ -+ -+const struct inode_operations ovl_dir_inode_operations = { -+ .lookup = ovl_lookup, -+ .mkdir = ovl_mkdir, -+ .symlink = ovl_symlink, -+ .unlink = ovl_unlink, -+ .rmdir = ovl_rmdir, -+ .rename = ovl_rename, -+ .rename2 = ovl_rename2, -+ .link = ovl_link, -+ .setattr = ovl_setattr, -+ .create = ovl_create, -+ .mknod = ovl_mknod, -+ .permission = ovl_permission, -+ .getattr = ovl_dir_getattr, -+ .setxattr = ovl_setxattr, -+ .getxattr = ovl_getxattr, -+ .listxattr = ovl_listxattr, -+ .removexattr = ovl_removexattr, -+}; -diff -urNp a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c ---- a/fs/overlayfs/inode.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/inode.c 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,408 @@ -+/* -+ * -+ * Copyright (C) 2011 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include "overlayfs.h" -+ -+int ovl_setattr(struct dentry *dentry, struct iattr *attr) -+{ -+ struct dentry *upperdentry; -+ int err; -+ -+ err = ovl_want_write(dentry); -+ if (err) -+ goto out; -+ -+ if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry)) -+ err = ovl_copy_up_truncate(dentry, attr->ia_size); -+ else -+ err = ovl_copy_up(dentry); -+ if (err) -+ goto out_drop_write; -+ -+ upperdentry = ovl_dentry_upper(dentry); -+ -+ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) -+ attr->ia_valid &= ~ATTR_MODE; -+ -+ mutex_lock(&upperdentry->d_inode->i_mutex); -+ err = notify_change(upperdentry, attr, NULL); -+ if (!err) -+ ovl_copyattr(upperdentry->d_inode, dentry->d_inode); -+ mutex_unlock(&upperdentry->d_inode->i_mutex); -+ -+out_drop_write: -+ ovl_drop_write(dentry); -+out: -+ return err; -+} -+ -+static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry, -+ struct kstat *stat) -+{ -+ struct path realpath; -+ -+ ovl_path_real(dentry, &realpath); -+ return vfs_getattr(&realpath, stat); -+} -+ -+int ovl_permission(struct inode *inode, int mask) -+{ -+ struct ovl_entry *oe; -+ struct dentry *alias = NULL; -+ struct inode *realinode; -+ struct dentry *realdentry; -+ bool is_upper; -+ int err; -+ -+ if (S_ISDIR(inode->i_mode)) { -+ oe = inode->i_private; -+ } else if (mask & MAY_NOT_BLOCK) { -+ return -ECHILD; -+ } else { -+ /* -+ * For non-directories find an alias and get the info -+ * from there. -+ */ -+ alias = d_find_any_alias(inode); -+ if (WARN_ON(!alias)) -+ return -ENOENT; -+ -+ oe = alias->d_fsdata; -+ } -+ -+ realdentry = ovl_entry_real(oe, &is_upper); -+ -+ /* Careful in RCU walk mode */ -+ realinode = ACCESS_ONCE(realdentry->d_inode); -+ if (!realinode) { -+ WARN_ON(!(mask & MAY_NOT_BLOCK)); -+ err = -ENOENT; -+ goto out_dput; -+ } -+ -+ if (mask & MAY_WRITE) { -+ umode_t mode = realinode->i_mode; -+ -+ /* -+ * Writes will always be redirected to upper layer, so -+ * ignore lower layer being read-only. -+ * -+ * If the overlay itself is read-only then proceed -+ * with the permission check, don't return EROFS. -+ * This will only happen if this is the lower layer of -+ * another overlayfs. -+ * -+ * If upper fs becomes read-only after the overlay was -+ * constructed return EROFS to prevent modification of -+ * upper layer. 
-+ */ -+ err = -EROFS; -+ if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) && -+ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) -+ goto out_dput; -+ } -+ -+ err = __inode_permission(realinode, mask); -+out_dput: -+ dput(alias); -+ return err; -+} -+ -+ -+struct ovl_link_data { -+ struct dentry *realdentry; -+ void *cookie; -+}; -+ -+static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd) -+{ -+ void *ret; -+ struct dentry *realdentry; -+ struct inode *realinode; -+ -+ realdentry = ovl_dentry_real(dentry); -+ realinode = realdentry->d_inode; -+ -+ if (WARN_ON(!realinode->i_op->follow_link)) -+ return ERR_PTR(-EPERM); -+ -+ ret = realinode->i_op->follow_link(realdentry, nd); -+ if (IS_ERR(ret)) -+ return ret; -+ -+ if (realinode->i_op->put_link) { -+ struct ovl_link_data *data; -+ -+ data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL); -+ if (!data) { -+ realinode->i_op->put_link(realdentry, nd, ret); -+ return ERR_PTR(-ENOMEM); -+ } -+ data->realdentry = realdentry; -+ data->cookie = ret; -+ -+ return data; -+ } else { -+ return NULL; -+ } -+} -+ -+static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c) -+{ -+ struct inode *realinode; -+ struct ovl_link_data *data = c; -+ -+ if (!data) -+ return; -+ -+ realinode = data->realdentry->d_inode; -+ realinode->i_op->put_link(data->realdentry, nd, data->cookie); -+ kfree(data); -+} -+ -+static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) -+{ -+ struct path realpath; -+ struct inode *realinode; -+ -+ ovl_path_real(dentry, &realpath); -+ realinode = realpath.dentry->d_inode; -+ -+ if (!realinode->i_op->readlink) -+ return -EINVAL; -+ -+ touch_atime(&realpath); -+ -+ return realinode->i_op->readlink(realpath.dentry, buf, bufsiz); -+} -+ -+ -+static bool ovl_is_private_xattr(const char *name) -+{ -+ return strncmp(name, "trusted.overlay.", 14) == 0; -+} -+ -+int ovl_setxattr(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags) -+{ -+ int err; -+ struct dentry *upperdentry; -+ -+ err = ovl_want_write(dentry); -+ if (err) -+ goto out; -+ -+ err = -EPERM; -+ if (ovl_is_private_xattr(name)) -+ goto out_drop_write; -+ -+ err = ovl_copy_up(dentry); -+ if (err) -+ goto out_drop_write; -+ -+ upperdentry = ovl_dentry_upper(dentry); -+ err = vfs_setxattr(upperdentry, name, value, size, flags); -+ -+out_drop_write: -+ ovl_drop_write(dentry); -+out: -+ return err; -+} -+ -+ssize_t ovl_getxattr(struct dentry *dentry, const char *name, -+ void *value, size_t size) -+{ -+ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && -+ ovl_is_private_xattr(name)) -+ return -ENODATA; -+ -+ return vfs_getxattr(ovl_dentry_real(dentry), name, value, size); -+} -+ -+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) -+{ -+ ssize_t res; -+ int off; -+ -+ res = vfs_listxattr(ovl_dentry_real(dentry), list, size); -+ if (res <= 0 || size == 0) -+ return res; -+ -+ if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE) -+ return res; -+ -+ /* filter out private xattrs */ -+ for (off = 0; off < res;) { -+ char *s = list + off; -+ size_t slen = strlen(s) + 1; -+ -+ BUG_ON(off + slen > res); -+ -+ if (ovl_is_private_xattr(s)) { -+ res -= slen; -+ memmove(s, s + slen, res - off); -+ } else { -+ off += slen; -+ } -+ } -+ -+ return res; -+} -+ -+int ovl_removexattr(struct dentry *dentry, const char *name) -+{ -+ int err; -+ struct path realpath; -+ enum ovl_path_type type; -+ -+ err = ovl_want_write(dentry); -+ if (err) -+ goto out; -+ -+ if 
(ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && -+ ovl_is_private_xattr(name)) -+ goto out_drop_write; -+ -+ type = ovl_path_real(dentry, &realpath); -+ if (type == OVL_PATH_LOWER) { -+ err = vfs_getxattr(realpath.dentry, name, NULL, 0); -+ if (err < 0) -+ goto out_drop_write; -+ -+ err = ovl_copy_up(dentry); -+ if (err) -+ goto out_drop_write; -+ -+ ovl_path_upper(dentry, &realpath); -+ } -+ -+ err = vfs_removexattr(realpath.dentry, name); -+out_drop_write: -+ ovl_drop_write(dentry); -+out: -+ return err; -+} -+ -+static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, -+ struct dentry *realdentry) -+{ -+ if (type != OVL_PATH_LOWER) -+ return false; -+ -+ if (special_file(realdentry->d_inode->i_mode)) -+ return false; -+ -+ if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC)) -+ return false; -+ -+ return true; -+} -+ -+static int ovl_dentry_open(struct dentry *dentry, struct file *file, -+ const struct cred *cred) -+{ -+ int err; -+ struct path realpath; -+ enum ovl_path_type type; -+ bool want_write = false; -+ -+ type = ovl_path_real(dentry, &realpath); -+ if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) { -+ want_write = true; -+ err = ovl_want_write(dentry); -+ if (err) -+ goto out; -+ -+ if (file->f_flags & O_TRUNC) -+ err = ovl_copy_up_truncate(dentry, 0); -+ else -+ err = ovl_copy_up(dentry); -+ if (err) -+ goto out_drop_write; -+ -+ ovl_path_upper(dentry, &realpath); -+ } -+ -+ err = vfs_open(&realpath, file, cred); -+out_drop_write: -+ if (want_write) -+ ovl_drop_write(dentry); -+out: -+ return err; -+} -+ -+static const struct inode_operations ovl_file_inode_operations = { -+ .setattr = ovl_setattr, -+ .permission = ovl_permission, -+ .getattr = ovl_getattr, -+ .setxattr = ovl_setxattr, -+ .getxattr = ovl_getxattr, -+ .listxattr = ovl_listxattr, -+ .removexattr = ovl_removexattr, -+ .dentry_open = ovl_dentry_open, -+}; -+ -+static const struct inode_operations ovl_symlink_inode_operations = { -+ .setattr = ovl_setattr, -+ .follow_link = ovl_follow_link, -+ .put_link = ovl_put_link, -+ .readlink = ovl_readlink, -+ .getattr = ovl_getattr, -+ .setxattr = ovl_setxattr, -+ .getxattr = ovl_getxattr, -+ .listxattr = ovl_listxattr, -+ .removexattr = ovl_removexattr, -+}; -+ -+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, -+ struct ovl_entry *oe) -+{ -+ struct inode *inode; -+ -+ inode = new_inode(sb); -+ if (!inode) -+ return NULL; -+ -+ mode &= S_IFMT; -+ -+ inode->i_ino = get_next_ino(); -+ inode->i_mode = mode; -+ inode->i_flags |= S_NOATIME | S_NOCMTIME; -+ -+ switch (mode) { -+ case S_IFDIR: -+ inode->i_private = oe; -+ inode->i_op = &ovl_dir_inode_operations; -+ inode->i_fop = &ovl_dir_operations; -+ break; -+ -+ case S_IFLNK: -+ inode->i_op = &ovl_symlink_inode_operations; -+ break; -+ -+ case S_IFREG: -+ case S_IFSOCK: -+ case S_IFBLK: -+ case S_IFCHR: -+ case S_IFIFO: -+ inode->i_op = &ovl_file_inode_operations; -+ break; -+ -+ default: -+ WARN(1, "illegal file type: %i\n", mode); -+ iput(inode); -+ inode = NULL; -+ } -+ -+ return inode; -+ -+} -diff -urNp a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig ---- a/fs/overlayfs/Kconfig 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/Kconfig 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,10 @@ -+config OVERLAYFS_FS -+ tristate "Overlay filesystem support" -+ help -+ An overlay filesystem combines two filesystems - an 'upper' filesystem -+ and a 'lower' filesystem. 
When a name exists in both filesystems, the -+ object in the 'upper' filesystem is visible while the object in the -+ 'lower' filesystem is either hidden or, in the case of directories, -+ merged with the 'upper' object. -+ -+ For more information see Documentation/filesystems/overlayfs.txt -diff -urNp a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile ---- a/fs/overlayfs/Makefile 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/Makefile 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,7 @@ -+# -+# Makefile for the overlay filesystem. -+# -+ -+obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o -+ -+overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o -diff -urNp a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h ---- a/fs/overlayfs/overlayfs.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/overlayfs.h 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,187 @@ -+/* -+ * -+ * Copyright (C) 2011 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ */ -+ -+#include -+ -+struct ovl_entry; -+ -+enum ovl_path_type { -+ OVL_PATH_UPPER, -+ OVL_PATH_MERGE, -+ OVL_PATH_LOWER, -+}; -+ -+extern const char *ovl_opaque_xattr; -+extern const struct dentry_operations ovl_dentry_operations; -+ -+static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) -+{ -+ int err = vfs_rmdir(dir, dentry); -+ pr_debug("rmdir(%pd2) = %i\n", dentry, err); -+ return err; -+} -+ -+static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry) -+{ -+ int err = vfs_unlink(dir, dentry, NULL); -+ pr_debug("unlink(%pd2) = %i\n", dentry, err); -+ return err; -+} -+ -+static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir, -+ struct dentry *new_dentry, bool debug) -+{ -+ int err = vfs_link(old_dentry, dir, new_dentry, NULL); -+ if (debug) { -+ pr_debug("link(%pd2, %pd2) = %i\n", -+ old_dentry, new_dentry, err); -+ } -+ return err; -+} -+ -+static inline int ovl_do_create(struct inode *dir, struct dentry *dentry, -+ umode_t mode, bool debug) -+{ -+ int err = vfs_create(dir, dentry, mode, true); -+ if (debug) -+ pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err); -+ return err; -+} -+ -+static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry, -+ umode_t mode, bool debug) -+{ -+ int err = vfs_mkdir(dir, dentry, mode); -+ if (debug) -+ pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err); -+ return err; -+} -+ -+static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry, -+ umode_t mode, dev_t dev, bool debug) -+{ -+ int err = vfs_mknod(dir, dentry, mode, dev); -+ if (debug) { -+ pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", -+ dentry, mode, dev, err); -+ } -+ return err; -+} -+ -+static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry, -+ const char *oldname, bool debug) -+{ -+ int err = vfs_symlink(dir, dentry, oldname); -+ if (debug) -+ pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err); -+ return err; -+} -+ -+static inline int ovl_do_setxattr(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags) -+{ -+ int err = vfs_setxattr(dentry, name, value, size, flags); -+ pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n", -+ dentry, name, (int) size, (char *) value, flags, err); -+ return err; -+} -+ -+static inline int ovl_do_removexattr(struct dentry *dentry, const char *name) -+{ -+ int err = vfs_removexattr(dentry, name); -+ 
pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err); -+ return err; -+} -+ -+static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry, -+ struct inode *newdir, struct dentry *newdentry, -+ unsigned int flags) -+{ -+ int err; -+ -+ pr_debug("rename2(%pd2, %pd2, 0x%x)\n", -+ olddentry, newdentry, flags); -+ -+ err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags); -+ -+ if (err) { -+ pr_debug("...rename2(%pd2, %pd2, ...) = %i\n", -+ olddentry, newdentry, err); -+ } -+ return err; -+} -+ -+static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry) -+{ -+ int err = vfs_whiteout(dir, dentry); -+ pr_debug("whiteout(%pd2) = %i\n", dentry, err); -+ return err; -+} -+ -+enum ovl_path_type ovl_path_type(struct dentry *dentry); -+u64 ovl_dentry_version_get(struct dentry *dentry); -+void ovl_dentry_version_inc(struct dentry *dentry); -+void ovl_path_upper(struct dentry *dentry, struct path *path); -+void ovl_path_lower(struct dentry *dentry, struct path *path); -+enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); -+struct dentry *ovl_dentry_upper(struct dentry *dentry); -+struct dentry *ovl_dentry_lower(struct dentry *dentry); -+struct dentry *ovl_dentry_real(struct dentry *dentry); -+struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper); -+struct dentry *ovl_workdir(struct dentry *dentry); -+int ovl_want_write(struct dentry *dentry); -+void ovl_drop_write(struct dentry *dentry); -+bool ovl_dentry_is_opaque(struct dentry *dentry); -+void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque); -+bool ovl_is_whiteout(struct dentry *dentry); -+void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); -+struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, -+ unsigned int flags); -+struct file *ovl_path_open(struct path *path, int flags); -+ -+struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry, -+ struct kstat *stat, const char *link); -+ -+/* readdir.c */ -+extern const struct file_operations ovl_dir_operations; -+int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list); -+void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list); -+void ovl_cache_free(struct list_head *list); -+ -+/* inode.c */ -+int ovl_setattr(struct dentry *dentry, struct iattr *attr); -+int ovl_permission(struct inode *inode, int mask); -+int ovl_setxattr(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags); -+ssize_t ovl_getxattr(struct dentry *dentry, const char *name, -+ void *value, size_t size); -+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); -+int ovl_removexattr(struct dentry *dentry, const char *name); -+ -+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, -+ struct ovl_entry *oe); -+static inline void ovl_copyattr(struct inode *from, struct inode *to) -+{ -+ to->i_uid = from->i_uid; -+ to->i_gid = from->i_gid; -+} -+ -+/* dir.c */ -+extern const struct inode_operations ovl_dir_inode_operations; -+struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); -+int ovl_create_real(struct inode *dir, struct dentry *newdentry, -+ struct kstat *stat, const char *link, -+ struct dentry *hardlink, bool debug); -+void ovl_cleanup(struct inode *dir, struct dentry *dentry); -+ -+/* copy_up.c */ -+int ovl_copy_up(struct dentry *dentry); -+int ovl_copy_up_truncate(struct dentry *dentry, loff_t size); -+int ovl_copy_xattr(struct dentry *old, struct dentry 
*new); -+int ovl_set_attr(struct dentry *upper, struct kstat *stat); -diff -urNp a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c ---- a/fs/overlayfs/readdir.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/readdir.c 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,518 @@ -+/* -+ * -+ * Copyright (C) 2011 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "overlayfs.h" -+ -+struct ovl_cache_entry { -+ const char *name; -+ unsigned int len; -+ unsigned int type; -+ u64 ino; -+ bool is_whiteout; -+ struct list_head l_node; -+ struct rb_node node; -+}; -+ -+struct ovl_readdir_data { -+ struct dir_context ctx; -+ bool is_merge; -+ struct rb_root *root; -+ struct list_head *list; -+ struct list_head *middle; -+ int count; -+ int err; -+}; -+ -+struct ovl_dir_file { -+ bool is_real; -+ bool is_cached; -+ struct list_head cursor; -+ u64 cache_version; -+ struct list_head cache; -+ struct file *realfile; -+}; -+ -+static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) -+{ -+ return container_of(n, struct ovl_cache_entry, node); -+} -+ -+static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, -+ const char *name, int len) -+{ -+ struct rb_node *node = root->rb_node; -+ int cmp; -+ -+ while (node) { -+ struct ovl_cache_entry *p = ovl_cache_entry_from_node(node); -+ -+ cmp = strncmp(name, p->name, len); -+ if (cmp > 0) -+ node = p->node.rb_right; -+ else if (cmp < 0 || len < p->len) -+ node = p->node.rb_left; -+ else -+ return p; -+ } -+ -+ return NULL; -+} -+ -+static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, -+ u64 ino, unsigned int d_type) -+{ -+ struct ovl_cache_entry *p; -+ -+ p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL); -+ if (p) { -+ char *name_copy = (char *) (p + 1); -+ memcpy(name_copy, name, len); -+ name_copy[len] = '\0'; -+ p->name = name_copy; -+ p->len = len; -+ p->type = d_type; -+ p->ino = ino; -+ p->is_whiteout = false; -+ } -+ -+ return p; -+} -+ -+static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, -+ const char *name, int len, u64 ino, -+ unsigned int d_type) -+{ -+ struct rb_node **newp = &rdd->root->rb_node; -+ struct rb_node *parent = NULL; -+ struct ovl_cache_entry *p; -+ -+ while (*newp) { -+ int cmp; -+ struct ovl_cache_entry *tmp; -+ -+ parent = *newp; -+ tmp = ovl_cache_entry_from_node(*newp); -+ cmp = strncmp(name, tmp->name, len); -+ if (cmp > 0) -+ newp = &tmp->node.rb_right; -+ else if (cmp < 0 || len < tmp->len) -+ newp = &tmp->node.rb_left; -+ else -+ return 0; -+ } -+ -+ p = ovl_cache_entry_new(name, len, ino, d_type); -+ if (p == NULL) -+ return -ENOMEM; -+ -+ list_add_tail(&p->l_node, rdd->list); -+ rb_link_node(&p->node, parent, newp); -+ rb_insert_color(&p->node, rdd->root); -+ -+ return 0; -+} -+ -+static int ovl_fill_lower(struct ovl_readdir_data *rdd, -+ const char *name, int namelen, -+ loff_t offset, u64 ino, unsigned int d_type) -+{ -+ struct ovl_cache_entry *p; -+ -+ p = ovl_cache_entry_find(rdd->root, name, namelen); -+ if (p) { -+ list_move_tail(&p->l_node, rdd->middle); -+ } else { -+ p = ovl_cache_entry_new(name, namelen, ino, d_type); -+ if (p == NULL) -+ rdd->err = -ENOMEM; -+ else -+ list_add_tail(&p->l_node, rdd->middle); -+ } -+ -+ return rdd->err; -+} -+ -+void ovl_cache_free(struct list_head 
*list) -+{ -+ struct ovl_cache_entry *p; -+ struct ovl_cache_entry *n; -+ -+ list_for_each_entry_safe(p, n, list, l_node) -+ kfree(p); -+ -+ INIT_LIST_HEAD(list); -+} -+ -+static int ovl_fill_merge(void *buf, const char *name, int namelen, -+ loff_t offset, u64 ino, unsigned int d_type) -+{ -+ struct ovl_readdir_data *rdd = buf; -+ -+ rdd->count++; -+ if (!rdd->is_merge) -+ return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type); -+ else -+ return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type); -+} -+ -+static inline int ovl_dir_read(struct path *realpath, -+ struct ovl_readdir_data *rdd) -+{ -+ struct file *realfile; -+ int err; -+ -+ realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY); -+ if (IS_ERR(realfile)) -+ return PTR_ERR(realfile); -+ -+ rdd->ctx.pos = 0; -+ do { -+ rdd->count = 0; -+ rdd->err = 0; -+ err = iterate_dir(realfile, &rdd->ctx); -+ if (err >= 0) -+ err = rdd->err; -+ } while (!err && rdd->count); -+ fput(realfile); -+ -+ return err; -+} -+ -+static void ovl_dir_reset(struct file *file) -+{ -+ struct ovl_dir_file *od = file->private_data; -+ enum ovl_path_type type = ovl_path_type(file->f_path.dentry); -+ -+ if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) { -+ list_del_init(&od->cursor); -+ ovl_cache_free(&od->cache); -+ od->is_cached = false; -+ } -+ WARN_ON(!od->is_real && type != OVL_PATH_MERGE); -+ if (od->is_real && type == OVL_PATH_MERGE) { -+ fput(od->realfile); -+ od->realfile = NULL; -+ od->is_real = false; -+ } -+} -+ -+static int ovl_dir_mark_whiteouts(struct dentry *dir, -+ struct ovl_readdir_data *rdd) -+{ -+ struct ovl_cache_entry *p; -+ struct dentry *dentry; -+ const struct cred *old_cred; -+ struct cred *override_cred; -+ -+ override_cred = prepare_creds(); -+ if (!override_cred) { -+ ovl_cache_free(rdd->list); -+ return -ENOMEM; -+ } -+ -+ /* -+ * CAP_DAC_OVERRIDE for lookup -+ */ -+ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); -+ old_cred = override_creds(override_cred); -+ -+ mutex_lock(&dir->d_inode->i_mutex); -+ list_for_each_entry(p, rdd->list, l_node) { -+ if (p->type != DT_CHR) -+ continue; -+ -+ dentry = lookup_one_len(p->name, dir, p->len); -+ if (IS_ERR(dentry)) -+ continue; -+ -+ p->is_whiteout = ovl_is_whiteout(dentry); -+ dput(dentry); -+ } -+ mutex_unlock(&dir->d_inode->i_mutex); -+ -+ revert_creds(old_cred); -+ put_cred(override_cred); -+ -+ return 0; -+} -+ -+static inline int ovl_dir_read_merged(struct path *upperpath, -+ struct path *lowerpath, -+ struct list_head *list) -+{ -+ int err; -+ struct rb_root root = RB_ROOT; -+ struct list_head middle; -+ struct ovl_readdir_data rdd = { -+ .ctx.actor = ovl_fill_merge, -+ .list = list, -+ .root = &root, -+ .is_merge = false, -+ }; -+ -+ if (upperpath->dentry) { -+ err = ovl_dir_read(upperpath, &rdd); -+ if (err) -+ goto out; -+ -+ if (lowerpath->dentry) { -+ err = ovl_dir_mark_whiteouts(upperpath->dentry, &rdd); -+ if (err) -+ goto out; -+ } -+ } -+ if (lowerpath->dentry) { -+ /* -+ * Insert lowerpath entries before upperpath ones, this allows -+ * offsets to be reasonably constant -+ */ -+ list_add(&middle, rdd.list); -+ rdd.middle = &middle; -+ rdd.is_merge = true; -+ err = ovl_dir_read(lowerpath, &rdd); -+ list_del(&middle); -+ } -+out: -+ return err; -+ -+} -+ -+static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) -+{ -+ struct list_head *l; -+ loff_t off; -+ -+ l = od->cache.next; -+ for (off = 0; off < pos; off++) { -+ if (l == &od->cache) -+ break; -+ l = l->next; -+ } -+ list_move_tail(&od->cursor, l); -+} -+ 
-+static int ovl_iterate(struct file *file, struct dir_context *ctx) -+{ -+ struct ovl_dir_file *od = file->private_data; -+ int res; -+ -+ if (!ctx->pos) -+ ovl_dir_reset(file); -+ -+ if (od->is_real) { -+ res = iterate_dir(od->realfile, ctx); -+ -+ return res; -+ } -+ -+ if (!od->is_cached) { -+ struct path lowerpath; -+ struct path upperpath; -+ -+ ovl_path_lower(file->f_path.dentry, &lowerpath); -+ ovl_path_upper(file->f_path.dentry, &upperpath); -+ -+ res = ovl_dir_read_merged(&upperpath, &lowerpath, &od->cache); -+ if (res) { -+ ovl_cache_free(&od->cache); -+ return res; -+ } -+ -+ od->cache_version = ovl_dentry_version_get(file->f_path.dentry); -+ od->is_cached = true; -+ -+ ovl_seek_cursor(od, ctx->pos); -+ } -+ -+ while (od->cursor.next != &od->cache) { -+ struct ovl_cache_entry *p; -+ -+ p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node); -+ if (!p->is_whiteout) { -+ if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) -+ break; -+ } -+ ctx->pos++; -+ list_move(&od->cursor, &p->l_node); -+ } -+ -+ return 0; -+} -+ -+static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) -+{ -+ loff_t res; -+ struct ovl_dir_file *od = file->private_data; -+ -+ mutex_lock(&file_inode(file)->i_mutex); -+ if (!file->f_pos) -+ ovl_dir_reset(file); -+ -+ if (od->is_real) { -+ res = vfs_llseek(od->realfile, offset, origin); -+ file->f_pos = od->realfile->f_pos; -+ } else { -+ res = -EINVAL; -+ -+ switch (origin) { -+ case SEEK_CUR: -+ offset += file->f_pos; -+ break; -+ case SEEK_SET: -+ break; -+ default: -+ goto out_unlock; -+ } -+ if (offset < 0) -+ goto out_unlock; -+ -+ if (offset != file->f_pos) { -+ file->f_pos = offset; -+ if (od->is_cached) -+ ovl_seek_cursor(od, offset); -+ } -+ res = offset; -+ } -+out_unlock: -+ mutex_unlock(&file_inode(file)->i_mutex); -+ -+ return res; -+} -+ -+static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, -+ int datasync) -+{ -+ struct ovl_dir_file *od = file->private_data; -+ -+ /* May need to reopen directory if it got copied up */ -+ if (!od->realfile) { -+ struct path upperpath; -+ -+ ovl_path_upper(file->f_path.dentry, &upperpath); -+ od->realfile = ovl_path_open(&upperpath, O_RDONLY); -+ if (IS_ERR(od->realfile)) -+ return PTR_ERR(od->realfile); -+ } -+ -+ return vfs_fsync_range(od->realfile, start, end, datasync); -+} -+ -+static int ovl_dir_release(struct inode *inode, struct file *file) -+{ -+ struct ovl_dir_file *od = file->private_data; -+ -+ list_del(&od->cursor); -+ ovl_cache_free(&od->cache); -+ if (od->realfile) -+ fput(od->realfile); -+ kfree(od); -+ -+ return 0; -+} -+ -+static int ovl_dir_open(struct inode *inode, struct file *file) -+{ -+ struct path realpath; -+ struct file *realfile; -+ struct ovl_dir_file *od; -+ enum ovl_path_type type; -+ -+ od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL); -+ if (!od) -+ return -ENOMEM; -+ -+ type = ovl_path_real(file->f_path.dentry, &realpath); -+ realfile = ovl_path_open(&realpath, file->f_flags); -+ if (IS_ERR(realfile)) { -+ kfree(od); -+ return PTR_ERR(realfile); -+ } -+ INIT_LIST_HEAD(&od->cache); -+ INIT_LIST_HEAD(&od->cursor); -+ od->is_cached = false; -+ od->realfile = realfile; -+ od->is_real = (type != OVL_PATH_MERGE); -+ file->private_data = od; -+ -+ return 0; -+} -+ -+const struct file_operations ovl_dir_operations = { -+ .read = generic_read_dir, -+ .open = ovl_dir_open, -+ .iterate = ovl_iterate, -+ .llseek = ovl_dir_llseek, -+ .fsync = ovl_dir_fsync, -+ .release = ovl_dir_release, -+}; -+ -+int ovl_check_empty_dir(struct dentry 
*dentry, struct list_head *list) -+{ -+ int err; -+ struct path lowerpath; -+ struct path upperpath; -+ struct ovl_cache_entry *p; -+ -+ ovl_path_upper(dentry, &upperpath); -+ ovl_path_lower(dentry, &lowerpath); -+ -+ err = ovl_dir_read_merged(&upperpath, &lowerpath, list); -+ if (err) -+ return err; -+ -+ err = 0; -+ -+ list_for_each_entry(p, list, l_node) { -+ if (p->is_whiteout) -+ continue; -+ -+ if (p->name[0] == '.') { -+ if (p->len == 1) -+ continue; -+ if (p->len == 2 && p->name[1] == '.') -+ continue; -+ } -+ err = -ENOTEMPTY; -+ break; -+ } -+ -+ return err; -+} -+ -+void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list) -+{ -+ struct ovl_cache_entry *p; -+ -+ mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_PARENT); -+ list_for_each_entry(p, list, l_node) { -+ struct dentry *dentry; -+ -+ if (!p->is_whiteout) -+ continue; -+ -+ dentry = lookup_one_len(p->name, upper, p->len); -+ if (IS_ERR(dentry)) { -+ pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n", -+ upper->d_name.name, p->len, p->name, -+ (int) PTR_ERR(dentry)); -+ continue; -+ } -+ ovl_cleanup(upper->d_inode, dentry); -+ dput(dentry); -+ } -+ mutex_unlock(&upper->d_inode->i_mutex); -+} -diff -urNp a/fs/overlayfs/super.c b/fs/overlayfs/super.c ---- a/fs/overlayfs/super.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/fs/overlayfs/super.c 2016-07-22 08:49:33.269290309 -0700 -@@ -0,0 +1,762 @@ -+/* -+ * -+ * Copyright (C) 2011 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published by -+ * the Free Software Foundation. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "overlayfs.h" -+ -+MODULE_AUTHOR("Miklos Szeredi "); -+MODULE_DESCRIPTION("Overlay filesystem"); -+MODULE_LICENSE("GPL"); -+ -+#define OVERLAYFS_SUPER_MAGIC 0x794c764f -+ -+struct ovl_config { -+ char *lowerdir; -+ char *upperdir; -+ char *workdir; -+}; -+ -+/* private information held for overlayfs's superblock */ -+struct ovl_fs { -+ struct vfsmount *upper_mnt; -+ struct vfsmount *lower_mnt; -+ struct dentry *workdir; -+ long lower_namelen; -+ /* pathnames of lower and upper dirs, for show_options */ -+ struct ovl_config config; -+}; -+ -+/* private information held for every overlayfs dentry */ -+struct ovl_entry { -+ /* -+ * Keep "double reference" on upper dentries, so that -+ * d_delete() doesn't think it's OK to reset d_inode to NULL. 
-+ */ -+ struct dentry *__upperdentry; -+ struct dentry *lowerdentry; -+ union { -+ struct { -+ u64 version; -+ bool opaque; -+ }; -+ struct rcu_head rcu; -+ }; -+}; -+ -+const char *ovl_opaque_xattr = "trusted.overlay.opaque"; -+ -+ -+enum ovl_path_type ovl_path_type(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ if (oe->__upperdentry) { -+ if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode)) -+ return OVL_PATH_MERGE; -+ else -+ return OVL_PATH_UPPER; -+ } else { -+ return OVL_PATH_LOWER; -+ } -+} -+ -+static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) -+{ -+ struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry); -+ /* -+ * Make sure to order reads to upperdentry wrt ovl_dentry_update() -+ */ -+ smp_read_barrier_depends(); -+ return upperdentry; -+} -+ -+void ovl_path_upper(struct dentry *dentry, struct path *path) -+{ -+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ path->mnt = ofs->upper_mnt; -+ path->dentry = ovl_upperdentry_dereference(oe); -+} -+ -+enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) -+{ -+ -+ enum ovl_path_type type = ovl_path_type(dentry); -+ -+ if (type == OVL_PATH_LOWER) -+ ovl_path_lower(dentry, path); -+ else -+ ovl_path_upper(dentry, path); -+ -+ return type; -+} -+ -+struct dentry *ovl_dentry_upper(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ return ovl_upperdentry_dereference(oe); -+} -+ -+struct dentry *ovl_dentry_lower(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ return oe->lowerdentry; -+} -+ -+struct dentry *ovl_dentry_real(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ struct dentry *realdentry; -+ -+ realdentry = ovl_upperdentry_dereference(oe); -+ if (!realdentry) -+ realdentry = oe->lowerdentry; -+ -+ return realdentry; -+} -+ -+struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) -+{ -+ struct dentry *realdentry; -+ -+ realdentry = ovl_upperdentry_dereference(oe); -+ if (realdentry) { -+ *is_upper = true; -+ } else { -+ realdentry = oe->lowerdentry; -+ *is_upper = false; -+ } -+ return realdentry; -+} -+ -+void ovl_path_lower(struct dentry *dentry, struct path *path) -+{ -+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ path->mnt = ofs->lower_mnt; -+ path->dentry = oe->lowerdentry; -+} -+ -+int ovl_want_write(struct dentry *dentry) -+{ -+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; -+ return mnt_want_write(ofs->upper_mnt); -+} -+ -+void ovl_drop_write(struct dentry *dentry) -+{ -+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; -+ mnt_drop_write(ofs->upper_mnt); -+} -+ -+struct dentry *ovl_workdir(struct dentry *dentry) -+{ -+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; -+ return ofs->workdir; -+} -+ -+bool ovl_dentry_is_opaque(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ return oe->opaque; -+} -+ -+void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ oe->opaque = opaque; -+} -+ -+void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex)); -+ WARN_ON(oe->__upperdentry); -+ BUG_ON(!upperdentry->d_inode); -+ /* -+ * Make sure upperdentry is consistent before making it visible to -+ * ovl_upperdentry_dereference(). 
-+ */ -+ smp_wmb(); -+ oe->__upperdentry = dget(upperdentry); -+} -+ -+void ovl_dentry_version_inc(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); -+ oe->version++; -+} -+ -+u64 ovl_dentry_version_get(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); -+ return oe->version; -+} -+ -+bool ovl_is_whiteout(struct dentry *dentry) -+{ -+ struct inode *inode = dentry->d_inode; -+ -+ return inode && IS_WHITEOUT(inode); -+} -+ -+static bool ovl_is_opaquedir(struct dentry *dentry) -+{ -+ int res; -+ char val; -+ struct inode *inode = dentry->d_inode; -+ -+ if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) -+ return false; -+ -+ res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); -+ if (res == 1 && val == 'y') -+ return true; -+ -+ return false; -+} -+ -+static void ovl_entry_free(struct rcu_head *head) -+{ -+ struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu); -+ kfree(oe); -+} -+ -+static void ovl_dentry_release(struct dentry *dentry) -+{ -+ struct ovl_entry *oe = dentry->d_fsdata; -+ -+ if (oe) { -+ dput(oe->__upperdentry); -+ dput(oe->__upperdentry); -+ dput(oe->lowerdentry); -+ call_rcu(&oe->rcu, ovl_entry_free); -+ } -+} -+ -+const struct dentry_operations ovl_dentry_operations = { -+ .d_release = ovl_dentry_release, -+}; -+ -+static struct ovl_entry *ovl_alloc_entry(void) -+{ -+ return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); -+} -+ -+static inline struct dentry *ovl_lookup_real(struct dentry *dir, -+ struct qstr *name) -+{ -+ struct dentry *dentry; -+ -+ mutex_lock(&dir->d_inode->i_mutex); -+ dentry = lookup_one_len(name->name, dir, name->len); -+ mutex_unlock(&dir->d_inode->i_mutex); -+ -+ if (IS_ERR(dentry)) { -+ if (PTR_ERR(dentry) == -ENOENT) -+ dentry = NULL; -+ } else if (!dentry->d_inode) { -+ dput(dentry); -+ dentry = NULL; -+ } -+ return dentry; -+} -+ -+static int ovl_do_lookup(struct dentry *dentry) -+{ -+ struct ovl_entry *oe; -+ struct dentry *upperdir; -+ struct dentry *lowerdir; -+ struct dentry *upperdentry = NULL; -+ struct dentry *lowerdentry = NULL; -+ struct inode *inode = NULL; -+ int err; -+ -+ err = -ENOMEM; -+ oe = ovl_alloc_entry(); -+ if (!oe) -+ goto out; -+ -+ upperdir = ovl_dentry_upper(dentry->d_parent); -+ lowerdir = ovl_dentry_lower(dentry->d_parent); -+ -+ if (upperdir) { -+ upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); -+ err = PTR_ERR(upperdentry); -+ if (IS_ERR(upperdentry)) -+ goto out_put_dir; -+ -+ if (lowerdir && upperdentry) { -+ if (ovl_is_whiteout(upperdentry)) { -+ dput(upperdentry); -+ upperdentry = NULL; -+ oe->opaque = true; -+ } else if (ovl_is_opaquedir(upperdentry)) { -+ oe->opaque = true; -+ } -+ } -+ } -+ if (lowerdir && !oe->opaque) { -+ lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); -+ err = PTR_ERR(lowerdentry); -+ if (IS_ERR(lowerdentry)) -+ goto out_dput_upper; -+ } -+ -+ if (lowerdentry && upperdentry && -+ (!S_ISDIR(upperdentry->d_inode->i_mode) || -+ !S_ISDIR(lowerdentry->d_inode->i_mode))) { -+ dput(lowerdentry); -+ lowerdentry = NULL; -+ oe->opaque = true; -+ } -+ -+ if (lowerdentry || upperdentry) { -+ struct dentry *realdentry; -+ -+ realdentry = upperdentry ? 
upperdentry : lowerdentry; -+ err = -ENOMEM; -+ inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, -+ oe); -+ if (!inode) -+ goto out_dput; -+ ovl_copyattr(realdentry->d_inode, inode); -+ } -+ -+ if (upperdentry) -+ oe->__upperdentry = dget(upperdentry); -+ -+ if (lowerdentry) -+ oe->lowerdentry = lowerdentry; -+ -+ dentry->d_fsdata = oe; -+ dentry->d_op = &ovl_dentry_operations; -+ d_add(dentry, inode); -+ -+ return 0; -+ -+out_dput: -+ dput(lowerdentry); -+out_dput_upper: -+ dput(upperdentry); -+out_put_dir: -+ kfree(oe); -+out: -+ return err; -+} -+ -+struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, -+ unsigned int flags) -+{ -+ int err = ovl_do_lookup(dentry); -+ -+ if (err) -+ return ERR_PTR(err); -+ -+ return NULL; -+} -+ -+struct file *ovl_path_open(struct path *path, int flags) -+{ -+ return dentry_open(path, flags, current_cred()); -+} -+ -+static void ovl_put_super(struct super_block *sb) -+{ -+ struct ovl_fs *ufs = sb->s_fs_info; -+ -+ dput(ufs->workdir); -+ mntput(ufs->upper_mnt); -+ mntput(ufs->lower_mnt); -+ -+ kfree(ufs->config.lowerdir); -+ kfree(ufs->config.upperdir); -+ kfree(ufs->config.workdir); -+ kfree(ufs); -+} -+ -+/** -+ * ovl_statfs -+ * @sb: The overlayfs super block -+ * @buf: The struct kstatfs to fill in with stats -+ * -+ * Get the filesystem statistics. As writes always target the upper layer -+ * filesystem pass the statfs to the same filesystem. -+ */ -+static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) -+{ -+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; -+ struct dentry *root_dentry = dentry->d_sb->s_root; -+ struct path path; -+ int err; -+ -+ ovl_path_upper(root_dentry, &path); -+ -+ err = vfs_statfs(&path, buf); -+ if (!err) { -+ buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen); -+ buf->f_type = OVERLAYFS_SUPER_MAGIC; -+ } -+ -+ return err; -+} -+ -+/** -+ * ovl_show_options -+ * -+ * Prints the mount options for a given superblock. -+ * Returns zero; does not fail. 
-+ */ -+static int ovl_show_options(struct seq_file *m, struct dentry *dentry) -+{ -+ struct super_block *sb = dentry->d_sb; -+ struct ovl_fs *ufs = sb->s_fs_info; -+ -+ seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); -+ seq_printf(m, ",upperdir=%s", ufs->config.upperdir); -+ seq_printf(m, ",workdir=%s", ufs->config.workdir); -+ return 0; -+} -+ -+static const struct super_operations ovl_super_operations = { -+ .put_super = ovl_put_super, -+ .statfs = ovl_statfs, -+ .show_options = ovl_show_options, -+}; -+ -+enum { -+ OPT_LOWERDIR, -+ OPT_UPPERDIR, -+ OPT_WORKDIR, -+ OPT_ERR, -+}; -+ -+static const match_table_t ovl_tokens = { -+ {OPT_LOWERDIR, "lowerdir=%s"}, -+ {OPT_UPPERDIR, "upperdir=%s"}, -+ {OPT_WORKDIR, "workdir=%s"}, -+ {OPT_ERR, NULL} -+}; -+ -+static int ovl_parse_opt(char *opt, struct ovl_config *config) -+{ -+ char *p; -+ -+ config->upperdir = NULL; -+ config->lowerdir = NULL; -+ config->workdir = NULL; -+ -+ while ((p = strsep(&opt, ",")) != NULL) { -+ int token; -+ substring_t args[MAX_OPT_ARGS]; -+ -+ if (!*p) -+ continue; -+ -+ token = match_token(p, ovl_tokens, args); -+ switch (token) { -+ case OPT_UPPERDIR: -+ kfree(config->upperdir); -+ config->upperdir = match_strdup(&args[0]); -+ if (!config->upperdir) -+ return -ENOMEM; -+ break; -+ -+ case OPT_LOWERDIR: -+ kfree(config->lowerdir); -+ config->lowerdir = match_strdup(&args[0]); -+ if (!config->lowerdir) -+ return -ENOMEM; -+ break; -+ -+ case OPT_WORKDIR: -+ kfree(config->workdir); -+ config->workdir = match_strdup(&args[0]); -+ if (!config->workdir) -+ return -ENOMEM; -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ } -+ return 0; -+} -+ -+#define OVL_WORKDIR_NAME "work" -+ -+static struct dentry *ovl_workdir_create(struct vfsmount *mnt, -+ struct dentry *dentry) -+{ -+ struct inode *dir = dentry->d_inode; -+ struct dentry *work; -+ int err; -+ bool retried = false; -+ -+ err = mnt_want_write(mnt); -+ if (err) -+ return ERR_PTR(err); -+ -+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); -+retry: -+ work = lookup_one_len(OVL_WORKDIR_NAME, dentry, -+ strlen(OVL_WORKDIR_NAME)); -+ -+ if (!IS_ERR(work)) { -+ struct kstat stat = { -+ .mode = S_IFDIR | 0, -+ }; -+ -+ if (work->d_inode) { -+ err = -EEXIST; -+ if (retried) -+ goto out_dput; -+ -+ retried = true; -+ ovl_cleanup(dir, work); -+ dput(work); -+ goto retry; -+ } -+ -+ err = ovl_create_real(dir, work, &stat, NULL, NULL, true); -+ if (err) -+ goto out_dput; -+ } -+out_unlock: -+ mutex_unlock(&dir->i_mutex); -+ mnt_drop_write(mnt); -+ -+ return work; -+ -+out_dput: -+ dput(work); -+ work = ERR_PTR(err); -+ goto out_unlock; -+} -+ -+static int ovl_mount_dir(const char *name, struct path *path) -+{ -+ int err; -+ -+ err = kern_path(name, LOOKUP_FOLLOW, path); -+ if (err) { -+ pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); -+ err = -EINVAL; -+ } -+ return err; -+} -+ -+static int ovl_fill_super(struct super_block *sb, void *data, int silent) -+{ -+ struct path lowerpath; -+ struct path upperpath; -+ struct path workpath; -+ struct inode *root_inode; -+ struct dentry *root_dentry; -+ struct ovl_entry *oe; -+ struct ovl_fs *ufs; -+ struct kstatfs statfs; -+ int err; -+ -+ err = -ENOMEM; -+ ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL); -+ if (!ufs) -+ goto out; -+ -+ err = ovl_parse_opt((char *) data, &ufs->config); -+ if (err) -+ goto out_free_ufs; -+ -+ /* FIXME: workdir is not needed for a R/O mount */ -+ err = -EINVAL; -+ if (!ufs->config.upperdir || !ufs->config.lowerdir || -+ !ufs->config.workdir) { -+ pr_err("overlayfs: missing upperdir 
or lowerdir or workdir\n"); -+ goto out_free_config; -+ } -+ -+ oe = ovl_alloc_entry(); -+ if (oe == NULL) -+ goto out_free_config; -+ -+ err = ovl_mount_dir(ufs->config.upperdir, &upperpath); -+ if (err) -+ goto out_free_oe; -+ -+ err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); -+ if (err) -+ goto out_put_upperpath; -+ -+ err = ovl_mount_dir(ufs->config.workdir, &workpath); -+ if (err) -+ goto out_put_lowerpath; -+ -+ err = -EINVAL; -+ if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || -+ !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || -+ !S_ISDIR(workpath.dentry->d_inode->i_mode)) { -+ pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); -+ goto out_put_workpath; -+ } -+ -+ if (upperpath.mnt != workpath.mnt) { -+ pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); -+ goto out_put_workpath; -+ } -+ if (upperpath.dentry == workpath.dentry || -+ d_ancestor(upperpath.dentry, workpath.dentry) || -+ d_ancestor(workpath.dentry, upperpath.dentry)) { -+ pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); -+ goto out_put_workpath; -+ } -+ -+ err = vfs_statfs(&lowerpath, &statfs); -+ if (err) { -+ pr_err("overlayfs: statfs failed on lowerpath\n"); -+ goto out_put_workpath; -+ } -+ ufs->lower_namelen = statfs.f_namelen; -+ -+ sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, -+ lowerpath.mnt->mnt_sb->s_stack_depth) + 1; -+ -+ err = -EINVAL; -+ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { -+ pr_err("overlayfs: maximum fs stacking depth exceeded\n"); -+ goto out_put_lowerpath; -+ } -+ -+ -+ ufs->upper_mnt = clone_private_mount(&upperpath); -+ err = PTR_ERR(ufs->upper_mnt); -+ if (IS_ERR(ufs->upper_mnt)) { -+ pr_err("overlayfs: failed to clone upperpath\n"); -+ goto out_put_workpath; -+ } -+ -+ ufs->lower_mnt = clone_private_mount(&lowerpath); -+ err = PTR_ERR(ufs->lower_mnt); -+ if (IS_ERR(ufs->lower_mnt)) { -+ pr_err("overlayfs: failed to clone lowerpath\n"); -+ goto out_put_upper_mnt; -+ } -+ -+ ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); -+ err = PTR_ERR(ufs->workdir); -+ if (IS_ERR(ufs->workdir)) { -+ pr_err("overlayfs: failed to create directory %s/%s\n", -+ ufs->config.workdir, OVL_WORKDIR_NAME); -+ goto out_put_lower_mnt; -+ } -+ -+ /* -+ * Make lower_mnt R/O. That way fchmod/fchown on lower file -+ * will fail instead of modifying lower fs. 
-+ */ -+ ufs->lower_mnt->mnt_flags |= MNT_READONLY; -+ -+ /* If the upper fs is r/o, we mark overlayfs r/o too */ -+ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) -+ sb->s_flags |= MS_RDONLY; -+ -+ err = -ENOMEM; -+ root_inode = ovl_new_inode(sb, S_IFDIR, oe); -+ if (!root_inode) -+ goto out_put_workdir; -+ -+ root_dentry = d_make_root(root_inode); -+ if (!root_dentry) -+ goto out_put_workdir; -+ -+ mntput(upperpath.mnt); -+ mntput(lowerpath.mnt); -+ path_put(&workpath); -+ -+ oe->__upperdentry = dget(upperpath.dentry); -+ oe->lowerdentry = lowerpath.dentry; -+ -+ root_dentry->d_fsdata = oe; -+ root_dentry->d_op = &ovl_dentry_operations; -+ -+ sb->s_magic = OVERLAYFS_SUPER_MAGIC; -+ sb->s_op = &ovl_super_operations; -+ sb->s_root = root_dentry; -+ sb->s_fs_info = ufs; -+ -+ return 0; -+ -+out_put_workdir: -+ dput(ufs->workdir); -+out_put_lower_mnt: -+ mntput(ufs->lower_mnt); -+out_put_upper_mnt: -+ mntput(ufs->upper_mnt); -+out_put_workpath: -+ path_put(&workpath); -+out_put_lowerpath: -+ path_put(&lowerpath); -+out_put_upperpath: -+ path_put(&upperpath); -+out_free_oe: -+ kfree(oe); -+out_free_config: -+ kfree(ufs->config.lowerdir); -+ kfree(ufs->config.upperdir); -+ kfree(ufs->config.workdir); -+out_free_ufs: -+ kfree(ufs); -+out: -+ return err; -+} -+ -+static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, -+ const char *dev_name, void *raw_data) -+{ -+ return mount_nodev(fs_type, flags, raw_data, ovl_fill_super); -+} -+ -+static struct file_system_type ovl_fs_type = { -+ .owner = THIS_MODULE, -+ .name = "overlay", -+ .mount = ovl_mount, -+ .kill_sb = kill_anon_super, -+}; -+MODULE_ALIAS_FS("overlay"); -+ -+static int __init ovl_init(void) -+{ -+ return register_filesystem(&ovl_fs_type); -+} -+ -+static void __exit ovl_exit(void) -+{ -+ unregister_filesystem(&ovl_fs_type); -+} -+ -+module_init(ovl_init); -+module_exit(ovl_exit); -diff -urNp a/fs/splice.c b/fs/splice.c ---- a/fs/splice.c 2016-07-02 04:41:41.000000000 -0700 -+++ b/fs/splice.c 2016-07-22 08:49:33.269290309 -0700 -@@ -1350,6 +1350,7 @@ long do_splice_direct(struct file *in, l - - return ret; - } -+EXPORT_SYMBOL(do_splice_direct); - - static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, - struct pipe_inode_info *opipe, -diff -urNp a/include/linux/fs.h b/include/linux/fs.h ---- a/include/linux/fs.h 2016-07-02 04:41:41.000000000 -0700 -+++ b/include/linux/fs.h 2016-07-22 08:49:33.269290309 -0700 -@@ -225,6 +225,20 @@ typedef void (dio_iodone_t)(struct kiocb - #define ATTR_TIMES_SET (1 << 16) - - /* -+ * Whiteout is represented by a char device. The following constants define the -+ * mode and device number to use. -+ */ -+#define WHITEOUT_MODE 0 -+#define WHITEOUT_DEV 0 -+ -+/* -+ * Whiteout is represented by a char device. The following constants define the -+ * mode and device number to use. -+ */ -+#define WHITEOUT_MODE 0 -+#define WHITEOUT_DEV 0 -+ -+/* - * This is the Inode Attributes structure, used for notify_change(). It - * uses the above definitions as flags, to know which values have changed. - * Also, in this manner, a Filesystem can look at only the values it cares -@@ -256,6 +270,12 @@ struct iattr { - */ - #include - -+/* -+ * Maximum number of layers of fs stack. 
Needs to be limited to -+ * prevent kernel stack overflow -+ */ -+#define FILESYSTEM_MAX_STACK_DEPTH 2 -+ - /** - * enum positive_aop_returns - aop return codes with specific semantics - * -@@ -1283,6 +1303,11 @@ struct super_block { - struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; - struct list_lru s_inode_lru ____cacheline_aligned_in_smp; - struct rcu_head rcu; -+ -+ /* -+ * Indicates how deep in a filesystem stack this SB is -+ */ -+ int s_stack_depth; - }; - - extern struct timespec current_fs_time(struct super_block *sb); -@@ -1415,6 +1440,7 @@ extern int vfs_link(struct dentry *, str - extern int vfs_rmdir(struct inode *, struct dentry *); - extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); - extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); -+extern int vfs_whiteout(struct inode *, struct dentry *); - - /* - * VFS dentry helper functions. -@@ -1545,6 +1571,7 @@ struct inode_operations { - umode_t create_mode, int *opened); - int (*tmpfile) (struct inode *, struct dentry *, umode_t); - int (*set_acl)(struct inode *, struct posix_acl *, int); -+ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); - } ____cacheline_aligned; - - ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, -@@ -1642,6 +1669,9 @@ struct super_operations { - #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) - #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) - -+#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ -+ (inode)->i_rdev == WHITEOUT_DEV) -+ - /* - * Inode state bits. Protected by inode->i_lock - * -@@ -2056,6 +2086,7 @@ extern struct file *file_open_name(struc - extern struct file *filp_open(const char *, int, umode_t); - extern struct file *file_open_root(struct dentry *, struct vfsmount *, - const char *, int); -+extern int vfs_open(const struct path *, struct file *, const struct cred *); - extern struct file * dentry_open(const struct path *, int, const struct cred *); - extern int filp_close(struct file *, fl_owner_t id); - -@@ -2269,7 +2300,9 @@ extern sector_t bmap(struct inode *, sec - #endif - extern int notify_change(struct dentry *, struct iattr *, struct inode **); - extern int inode_permission(struct inode *, int); -+extern int __inode_permission(struct inode *, int); - extern int generic_permission(struct inode *, int); -+extern int __check_sticky(struct inode *dir, struct inode *inode); - - static inline bool execute_ok(struct inode *inode) - { -@@ -2467,6 +2500,9 @@ extern ssize_t iter_file_splice_write(st - struct file *, loff_t *, size_t, unsigned int); - extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, - struct file *out, loff_t *, size_t len, unsigned int flags); -+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, -+ loff_t *opos, size_t len, unsigned int flags); -+ - - extern void - file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); -@@ -2754,6 +2790,14 @@ static inline int is_sxid(umode_t mode) - return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); - } - -+static inline int check_sticky(struct inode *dir, struct inode *inode) -+{ -+ if (!(dir->i_mode & S_ISVTX)) -+ return 0; -+ -+ return __check_sticky(dir, inode); -+} -+ - static inline void inode_has_no_xattr(struct inode *inode) - { - if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) -diff -urNp a/include/linux/mount.h b/include/linux/mount.h ---- 
a/include/linux/mount.h 2016-03-02 02:31:21.000000000 -0800 -+++ b/include/linux/mount.h 2016-07-22 08:49:33.269290309 -0700 -@@ -81,6 +81,9 @@ extern void mnt_pin(struct vfsmount *mnt - extern void mnt_unpin(struct vfsmount *mnt); - extern int __mnt_is_readonly(struct vfsmount *mnt); - -+struct path; -+extern struct vfsmount *clone_private_mount(struct path *path); -+ - struct file_system_type; - extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, - int flags, const char *name, -diff -urNp a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h ---- a/include/uapi/linux/fs.h 2016-03-02 02:31:21.000000000 -0800 -+++ b/include/uapi/linux/fs.h 2016-07-22 08:49:33.269290309 -0700 -@@ -37,6 +37,7 @@ - - #define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */ - #define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ -+#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */ - - struct fstrim_range { - __u64 start; -diff -urNp a/MAINTAINERS b/MAINTAINERS ---- a/MAINTAINERS 2016-07-02 04:41:41.000000000 -0700 -+++ b/MAINTAINERS 2016-07-22 08:49:33.265290309 -0700 -@@ -6668,6 +6668,13 @@ F: drivers/scsi/osd/ - F: include/scsi/osd_* - F: fs/exofs/ - -+OVERLAYFS FILESYSTEM -+M: Miklos Szeredi -+L: linux-fsdevel@vger.kernel.org -+S: Supported -+F: fs/overlayfs/* -+F: Documentation/filesystems/overlayfs.txt -+ - P54 WIRELESS DRIVER - M: Christian Lamparter - L: linux-wireless@vger.kernel.org -diff -urNp a/mm/shmem.c b/mm/shmem.c ---- a/mm/shmem.c 2016-07-02 04:41:41.000000000 -0700 -+++ b/mm/shmem.c 2016-07-22 08:49:33.269290309 -0700 -@@ -2301,20 +2301,82 @@ static int shmem_rmdir(struct inode *dir - return shmem_unlink(dir, dentry); - } - -+static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) -+{ -+ bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); -+ bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); -+ -+ if (old_dir != new_dir && old_is_dir != new_is_dir) { -+ if (old_is_dir) { -+ drop_nlink(old_dir); -+ inc_nlink(new_dir); -+ } else { -+ drop_nlink(new_dir); -+ inc_nlink(old_dir); -+ } -+ } -+ old_dir->i_ctime = old_dir->i_mtime = -+ new_dir->i_ctime = new_dir->i_mtime = -+ old_dentry->d_inode->i_ctime = -+ new_dentry->d_inode->i_ctime = CURRENT_TIME; -+ -+ return 0; -+} -+ -+static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) -+{ -+ struct dentry *whiteout; -+ int error; -+ -+ whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); -+ if (!whiteout) -+ return -ENOMEM; -+ -+ error = shmem_mknod(old_dir, whiteout, -+ S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); -+ dput(whiteout); -+ if (error) -+ return error; -+ -+ /* -+ * Cheat and hash the whiteout while the old dentry is still in -+ * place, instead of playing games with FS_RENAME_DOES_D_MOVE. -+ * -+ * d_lookup() will consistently find one of them at this point, -+ * not sure which one, but that isn't even important. -+ */ -+ d_rehash(whiteout); -+ return 0; -+} -+ - /* - * The VFS layer already does all the dentry stuff for rename, - * we just have to decrement the usage count for the target if - * it exists so that the VFS layer correctly free's it when it - * gets overwritten. 
- */ --static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) -+static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) - { - struct inode *inode = old_dentry->d_inode; - int they_are_dirs = S_ISDIR(inode->i_mode); - -+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) -+ return -EINVAL; -+ -+ if (flags & RENAME_EXCHANGE) -+ return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); -+ - if (!simple_empty(new_dentry)) - return -ENOTEMPTY; - -+ if (flags & RENAME_WHITEOUT) { -+ int error; -+ -+ error = shmem_whiteout(old_dir, old_dentry); -+ if (error) -+ return error; -+ } -+ - if (new_dentry->d_inode) { - (void) shmem_unlink(new_dir, new_dentry); - if (they_are_dirs) { -@@ -2334,6 +2396,11 @@ static int shmem_rename(struct inode *ol - return 0; - } - -+static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) -+{ -+ return shmem_rename2(old_dir, old_dentry, new_dir, new_dentry, 0); -+} -+ - static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) - { - int error; -@@ -3068,6 +3135,7 @@ static const struct inode_operations shm - .rmdir = shmem_rmdir, - .mknod = shmem_mknod, - .rename = shmem_rename, -+ .rename2 = shmem_rename2, - .tmpfile = shmem_tmpfile, - #endif - #ifdef CONFIG_TMPFS_XATTR diff --git a/packages/base/any/kernels/3.16+deb8/patches/changelog.patch b/packages/base/any/kernels/3.16+deb8/patches/changelog.patch deleted file mode 100644 index 99ef7c6f..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/changelog.patch +++ /dev/null @@ -1,18 +0,0 @@ ---- debian/changelog 2015-08-04 00:50:04.000000000 +0000 -+++ changelog 2015-12-20 04:20:25.032779900 +0000 -@@ -1,3 +1,15 @@ -+linux (3.16.7-ckt11-2+acs8u2) acs; urgency=high -+ -+ * add driver patches for MLNX SN2700 -+ -+ -- Guohan Lu Sun, 19 Dec 2015 01:50:04 +0100 -+ -+linux (3.16.7-ckt11-2+acs8u1) acs; urgency=high -+ -+ * add support for S6000 -+ -+ -- Shuotian Cheng Sun, 19 Dec 2015 01:50:04 +0100 -+ - linux (3.16.7-ckt11-1+deb8u3) jessie-security; urgency=high - - * path_openat(): fix double fput() (CVE-2015-5706) diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-arista-piix4-mux-patch.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-arista-piix4-mux-patch.patch deleted file mode 100644 index 040d6b88..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-arista-piix4-mux-patch.patch +++ /dev/null @@ -1,146 +0,0 @@ -From f75a16bc0dfc83cf3df1db7ede4d7357e7be5952 Mon Sep 17 00:00:00 2001 -From: Chulei Wu -Date: Wed, 2 Mar 2016 04:09:53 +0000 -Subject: [PATCH] arista piix4 mux patch - ---- - drivers/i2c/busses/i2c-piix4.c | 63 +++++++++++++++++++++++++++++++++++++----- - 1 file changed, 56 insertions(+), 7 deletions(-) - -diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c -index a6f54ba..eafc035 100644 ---- a/drivers/i2c/busses/i2c-piix4.c -+++ b/drivers/i2c/busses/i2c-piix4.c -@@ -128,6 +128,7 @@ static const struct dmi_system_id piix4_dmi_ibm[] = { - - struct i2c_piix4_adapdata { - unsigned short smba; -+ int mux; - }; - - static int piix4_setup(struct pci_dev *PIIX4_dev, -@@ -528,6 +529,43 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, - return 0; - } - -+static s32 piix4_access_mux(struct i2c_adapter * adap, u16 addr, -+ unsigned short flags, char read_write, -+ 
u8 command, int size, union i2c_smbus_data * data) -+{ -+ static DEFINE_MUTEX(mux_mutex); -+ struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); -+ int piix4_mux = adapdata->mux; -+ static int last_mux = -1; -+ s32 ret; -+ unsigned short smba_idx = 0xcd6; -+ u8 smb_en = 0x2c; -+ u8 val; -+ -+ if ( piix4_mux == -1 ) { -+ return piix4_access(adap, addr, flags, read_write, command, size, data); -+ } -+ -+ mutex_lock(&mux_mutex); -+ -+ if ( last_mux != piix4_mux ) { -+ /* Select the correct bus mux*/ -+ outb_p(smb_en, smba_idx); -+ val = inb_p(smba_idx + 1); -+ val = (val & 0xf9) | (piix4_mux << 1); -+ outb_p(val, smba_idx + 1); -+ -+ last_mux = piix4_mux; -+ dev_dbg(&adap->dev, "set mux to 0x%02x\n", piix4_mux); -+ } -+ -+ ret = piix4_access(adap, addr, flags, read_write, command, size, data); -+ -+ mutex_unlock(&mux_mutex); -+ -+ return ret; -+} -+ - static u32 piix4_func(struct i2c_adapter *adapter) - { - return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | -@@ -536,7 +574,7 @@ static u32 piix4_func(struct i2c_adapter *adapter) - } - - static const struct i2c_algorithm smbus_algorithm = { -- .smbus_xfer = piix4_access, -+ .smbus_xfer = piix4_access_mux, - .functionality = piix4_func, - }; - -@@ -569,7 +607,7 @@ static struct i2c_adapter *piix4_main_adapter; - static struct i2c_adapter *piix4_aux_adapter; - - static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, -- struct i2c_adapter **padap) -+ struct i2c_adapter **padap, int mux) - { - struct i2c_adapter *adap; - struct i2c_piix4_adapdata *adapdata; -@@ -593,6 +631,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, - } - - adapdata->smba = smba; -+ adapdata->mux = mux; - - /* set up the sysfs linkage to our parent device */ - adap->dev.parent = &dev->dev; -@@ -618,6 +657,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, - static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) - { - int retval; -+ int mux = -1; -+ int aux_smba; - - if ((dev->vendor == PCI_VENDOR_ID_ATI && - dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && -@@ -633,7 +674,14 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) - return retval; - - /* Try to register main SMBus adapter, give up if we can't */ -- retval = piix4_add_adapter(dev, retval, &piix4_main_adapter); -+ aux_smba = retval; -+ if (dev->vendor == PCI_VENDOR_ID_AMD && -+ dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { -+ mux = -1; -+ } else { -+ mux = 0; -+ } -+ retval = piix4_add_adapter(dev, retval, &piix4_main_adapter, mux); - if (retval < 0) - return retval; - -@@ -644,21 +692,22 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) - dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) { - if (dev->revision < 0x40) { - retval = piix4_setup_aux(dev, id, 0x58); -+ mux = -1; - } else { -- /* SB800 added aux bus too */ -- retval = piix4_setup_sb800(dev, id, 1); -+ retval = aux_smba; -+ mux = 1; - } - } - - if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { - retval = piix4_setup_sb800(dev, id, 1); -+ mux = -1; - } -- - if (retval > 0) { - /* Try to add the aux adapter if it exists, - * piix4_add_adapter will clean up if this fails */ -- piix4_add_adapter(dev, retval, &piix4_aux_adapter); -+ piix4_add_adapter(dev, retval, &piix4_aux_adapter, mux); - } - - return 0; --- -2.1.4 - diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch 
b/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch deleted file mode 100644 index 6060b15e..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-at24-fix-odd-length-two-byte-access.patch +++ /dev/null @@ -1,34 +0,0 @@ ---- a/drivers/misc/eeprom/at24.c 2016-10-06 12:45:49.290365545 +0000 -+++ b/drivers/misc/eeprom/at24.c 2016-10-06 12:47:08.630368526 +0000 -@@ -84,9 +84,9 @@ - * - * This value is forced to be a power of two so that writes align on pages. - */ --static unsigned io_limit = 128; -+static unsigned io_limit = 32; - module_param(io_limit, uint, 0); --MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)"); -+MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 32)"); - - /* - * Specs often allow 5 msec for a page write, sometimes 20 msec; -@@ -192,7 +192,8 @@ - count = I2C_SMBUS_BLOCK_MAX; - break; - case I2C_SMBUS_WORD_DATA: -- count = 2; -+ /* Check for odd length transaction */ -+ count = (count == 1) ? 1 : 2; - break; - case I2C_SMBUS_BYTE_DATA: - count = 1; -@@ -237,7 +238,8 @@ - status = i2c_smbus_read_word_data(client, offset); - if (status >= 0) { - buf[0] = status & 0xff; -- buf[1] = status >> 8; -+ if (count == 2) -+ buf[1] = status >> 8; - status = count; - } - break; diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-fix-rpm-calc.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-fix-rpm-calc.patch deleted file mode 100644 index e5401626..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-fix-rpm-calc.patch +++ /dev/null @@ -1,196 +0,0 @@ -MAX6620 fix rpm calculation accuracy - -From: Cumulus Networks - -The driver only fills the most significant 8 bits of the fan tach -count (11 bit value). Fixing the driver to use all of 11 bits for -more accuracy. ---- - drivers/hwmon/max6620.c | 105 +++++++++++++++++++++-------------------------- - 1 file changed, 46 insertions(+), 59 deletions(-) - -diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c -index 3c337c7..76c1f7f 100644 ---- a/drivers/hwmon/max6620.c -+++ b/drivers/hwmon/max6620.c -@@ -46,6 +46,8 @@ - - /* clock: The clock frequency of the chip the driver should assume */ - static int clock = 8192; -+static u32 sr = 2; -+static u32 np = 2; - - module_param(clock, int, S_IRUGO); - -@@ -213,22 +215,22 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, cha - - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct max6620_data *data = max6620_update_device(dev); -- int rpm; -- -- /* -- * Calculation details: -- * -- * Each tachometer counts over an interval given by the "count" -- * register (0.25, 0.5, 1 or 2 seconds). This module assumes -- * that the fans produce two pulses per revolution (this seems -- * to be the most common). 
-- */ -- if(data->tach[attr->index] == 0 || data->tach[attr->index] == 255) { -+ struct i2c_client *client = to_i2c_client(dev); -+ u32 rpm = 0; -+ u32 tach = 0; -+ u32 tach1 = 0; -+ u32 tach2 = 0; -+ -+ tach1 = i2c_smbus_read_byte_data(client, tach_reg[attr->index]); -+ tach1 = (tach1 << 3) & 0x7f8; -+ tach2 = i2c_smbus_read_byte_data(client, tach_reg[attr->index] + 1); -+ tach2 = (tach2 >> 5) & 0x7; -+ tach = tach1 | tach2; -+ if (tach == 0) { - rpm = 0; - } else { -- rpm = ((clock / (data->tach[attr->index] << 3)) * 30 * DIV_FROM_REG(data->fandyn[attr->index])); -+ rpm = (60 * sr * clock)/(tach * np); - } -- - return sprintf(buf, "%d\n", rpm); - } - -@@ -236,22 +238,21 @@ static ssize_t get_target(struct device *dev, struct device_attribute *devattr, - - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct max6620_data *data = max6620_update_device(dev); -- int kscale, ktach, rpm; -- -- /* -- * Use the datasheet equation: -- * -- * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)] -- * -- * then multiply by 60 to give rpm. -- */ -- -- kscale = DIV_FROM_REG(data->fandyn[attr->index]); -- ktach = data->target[attr->index]; -- if(ktach == 0) { -+ struct i2c_client *client = to_i2c_client(dev); -+ u32 rpm; -+ u32 target; -+ u32 target1; -+ u32 target2; -+ -+ target1 = i2c_smbus_read_byte_data(client, target_reg[attr->index]); -+ target1 = (target1 << 3) & 0x7f8; -+ target2 = i2c_smbus_read_byte_data(client, target_reg[attr->index] + 1); -+ target2 = (target2 >> 5) & 0x7; -+ target = target1 | target2; -+ if (target == 0) { - rpm = 0; - } else { -- rpm = ((60 * kscale * clock) / (ktach << 3)); -+ rpm = (60 * sr * clock)/(target * np); - } - return sprintf(buf, "%d\n", rpm); - } -@@ -261,9 +262,11 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, - struct i2c_client *client = to_i2c_client(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct max6620_data *data = i2c_get_clientdata(client); -- int kscale, ktach; -- unsigned long rpm; -+ u32 rpm; - int err; -+ u32 target; -+ u32 target1; -+ u32 target2; - - err = kstrtoul(buf, 10, &rpm); - if (err) -@@ -271,25 +274,13 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, - - rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); - -- /* -- * Divide the required speed by 60 to get from rpm to rps, then -- * use the datasheet equation: -- * -- * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1 -- */ -- - mutex_lock(&data->update_lock); - -- kscale = DIV_FROM_REG(data->fandyn[attr->index]); -- ktach = ((60 * kscale * clock) / rpm); -- if (ktach < 0) -- ktach = 0; -- if (ktach > 255) -- ktach = 255; -- data->target[attr->index] = ktach; -- -- i2c_smbus_write_byte_data(client, target_reg[attr->index], data->target[attr->index]); -- i2c_smbus_write_byte_data(client, target_reg[attr->index]+0x01, 0x00); -+ target = (60 * sr * 8192)/(rpm * np); -+ target1 = (target >> 3) & 0xff; -+ target2 = (target << 5) & 0xe0; -+ i2c_smbus_write_byte_data(client, target_reg[attr->index], target1); -+ i2c_smbus_write_byte_data(client, target_reg[attr->index] + 1, target2); - - mutex_unlock(&data->update_lock); - -@@ -609,8 +600,11 @@ static int max6620_init_client(struct i2c_client *client) { - } - - -- -- if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config)) { -+ /* -+ * Set bit 4, disable other fans from going full speed on a fail -+ * failure. 
-+ */ -+ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config | 0x10)) { - dev_err(&client->dev, "Config write error, aborting.\n"); - return err; - } -@@ -618,28 +612,21 @@ static int max6620_init_client(struct i2c_client *client) { - data->config = config; - for (i = 0; i < 4; i++) { - data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); -- data->fancfg[i] |= 0x80; // enable TACH monitoring -+ data->fancfg[i] |= 0xa8; // enable TACH monitoring - i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); - data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); -- data-> fandyn[i] |= 0x1C; -+ /* 2 counts (001) and Rate change 100 (0.125 secs) */ -+ data-> fandyn[i] = 0x30; - i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); - data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); - data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); - data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); - data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); - -- -- - } -- -- -- - return 0; - } - -- -- -- - static struct max6620_data *max6620_update_device(struct device *dev) - { - int i; -@@ -678,7 +665,7 @@ static struct max6620_data *max6620_update_device(struct device *dev) - return data; - } - --module_i2c_driver(max6620_driver); -+// module_i2c_driver(max6620_driver); - - static int __init max6620_init(void) - { diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-update.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-update.patch deleted file mode 100644 index b4cfe0cf..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620-update.patch +++ /dev/null @@ -1,113 +0,0 @@ -Update MAX6620 driver to support newer kernel version - -From: Shuotian Cheng - - ---- - drivers/hwmon/max6620.c | 25 +++++++++++-------------- - 1 file changed, 11 insertions(+), 14 deletions(-) - -diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c -index 76c1f7f..fb49195 100644 ---- a/drivers/hwmon/max6620.c -+++ b/drivers/hwmon/max6620.c -@@ -183,7 +183,7 @@ static struct i2c_driver max6620_driver = { - .name = "max6620", - }, - .probe = max6620_probe, -- .remove = __devexit_p(max6620_remove), -+ .remove = max6620_remove, - .id_table = max6620_id, - .address_list = normal_i2c, - }; -@@ -231,6 +231,7 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, cha - } else { - rpm = (60 * sr * clock)/(tach * np); - } -+ - return sprintf(buf, "%d\n", rpm); - } - -@@ -262,17 +263,17 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, - struct i2c_client *client = to_i2c_client(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct max6620_data *data = i2c_get_clientdata(client); -- u32 rpm; -+ unsigned long rpm; - int err; -- u32 target; -- u32 target1; -- u32 target2; -+ unsigned long target; -+ unsigned long target1; -+ unsigned long target2; - - err = kstrtoul(buf, 10, &rpm); - if (err) - return err; - -- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); -+ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX); - - mutex_lock(&data->update_lock); - -@@ -326,7 +327,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, con - if (err) - return err; - -- pwm = SENSORS_LIMIT(pwm, 0, 255); -+ pwm = clamp_val(pwm, 0, 255); - - mutex_lock(&data->update_lock); - -@@ -534,7 +535,7 @@ static struct attribute_group max6620_attr_grp = { - * Real code - */ - 
--static int __devinit max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { -+static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { - - struct max6620_data *data; - int err; -@@ -575,7 +576,7 @@ dev_info(&client->dev, "Sysfs entries created\n"); - return err; - } - --static int __devexit max6620_remove(struct i2c_client *client) { -+static int max6620_remove(struct i2c_client *client) { - - struct max6620_data *data = i2c_get_clientdata(client); - -@@ -599,7 +600,6 @@ static int max6620_init_client(struct i2c_client *client) { - return err; - } - -- - /* - * Set bit 4, disable other fans from going full speed on a fail - * failure. -@@ -615,14 +615,13 @@ static int max6620_init_client(struct i2c_client *client) { - data->fancfg[i] |= 0xa8; // enable TACH monitoring - i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); - data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); -- /* 2 counts (001) and Rate change 100 (0.125 secs) */ -+ /* 2 counts (001) and Rate change 100 (0.125 secs) */ - data-> fandyn[i] = 0x30; - i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); - data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); - data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); - data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); - data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); -- - } - return 0; - } -@@ -665,8 +664,6 @@ static struct max6620_data *max6620_update_device(struct device *dev) - return data; - } - --// module_i2c_driver(max6620_driver); -- - static int __init max6620_init(void) - { - return i2c_add_driver(&max6620_driver); diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620.patch deleted file mode 100644 index 119c12ee..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-max6620.patch +++ /dev/null @@ -1,753 +0,0 @@ -Driver for MAX6620 Fan sensor - -From: Cumulus Networks - - ---- - drivers/hwmon/Kconfig | 10 + - drivers/hwmon/Makefile | 1 - drivers/hwmon/max6620.c | 702 +++++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 713 insertions(+) - create mode 100644 drivers/hwmon/max6620.c - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 02d3d85..ca38e05 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -784,6 +784,16 @@ config SENSORS_MAX6650 - This driver can also be built as a module. If so, the module - will be called max6650. - -+config SENSORS_MAX6620 -+ tristate "Maxim MAX6620 sensor chip" -+ depends on I2C -+ help -+ If you say yes here you get support for the MAX6620 -+ sensor chips. -+ -+ This driver can also be built as a module. If so, the module -+ will be called max6620. 
-+ - config SENSORS_MAX6697 - tristate "Maxim MAX6697 and compatibles" - depends on I2C -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index 3dc0f02..8837a7b 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -111,6 +111,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o - obj-$(CONFIG_SENSORS_MAX6639) += max6639.o - obj-$(CONFIG_SENSORS_MAX6642) += max6642.o - obj-$(CONFIG_SENSORS_MAX6650) += max6650.o -+obj-$(CONFIG_SENSORS_MAX6620) += max6620.o - obj-$(CONFIG_SENSORS_MAX6697) += max6697.o - obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o - obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o -diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c -new file mode 100644 -index 0000000..3c337c7 ---- /dev/null -+++ b/drivers/hwmon/max6620.c -@@ -0,0 +1,702 @@ -+/* -+ * max6620.c - Linux Kernel module for hardware monitoring. -+ * -+ * (C) 2012 by L. Grunenberg -+ * -+ * based on code written by : -+ * 2007 by Hans J. Koch -+ * John Morris -+ * Copyright (c) 2003 Spirent Communications -+ * and Claus Gindhart -+ * -+ * This module has only been tested with the MAX6620 chip. -+ * -+ * The datasheet was last seen at: -+ * -+ * http://pdfserv.maxim-ic.com/en/ds/MAX6620.pdf -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * Insmod parameters -+ */ -+ -+ -+/* clock: The clock frequency of the chip the driver should assume */ -+static int clock = 8192; -+ -+module_param(clock, int, S_IRUGO); -+ -+static const unsigned short normal_i2c[] = {0x0a, 0x1a, 0x2a, I2C_CLIENT_END}; -+ -+/* -+ * MAX 6620 registers -+ */ -+ -+#define MAX6620_REG_CONFIG 0x00 -+#define MAX6620_REG_FAULT 0x01 -+#define MAX6620_REG_CONF_FAN0 0x02 -+#define MAX6620_REG_CONF_FAN1 0x03 -+#define MAX6620_REG_CONF_FAN2 0x04 -+#define MAX6620_REG_CONF_FAN3 0x05 -+#define MAX6620_REG_DYN_FAN0 0x06 -+#define MAX6620_REG_DYN_FAN1 0x07 -+#define MAX6620_REG_DYN_FAN2 0x08 -+#define MAX6620_REG_DYN_FAN3 0x09 -+#define MAX6620_REG_TACH0 0x10 -+#define MAX6620_REG_TACH1 0x12 -+#define MAX6620_REG_TACH2 0x14 -+#define MAX6620_REG_TACH3 0x16 -+#define MAX6620_REG_VOLT0 0x18 -+#define MAX6620_REG_VOLT1 0x1A -+#define MAX6620_REG_VOLT2 0x1C -+#define MAX6620_REG_VOLT3 0x1E -+#define MAX6620_REG_TAR0 0x20 -+#define MAX6620_REG_TAR1 0x22 -+#define MAX6620_REG_TAR2 0x24 -+#define MAX6620_REG_TAR3 0x26 -+#define MAX6620_REG_DAC0 0x28 -+#define MAX6620_REG_DAC1 0x2A -+#define MAX6620_REG_DAC2 0x2C -+#define MAX6620_REG_DAC3 0x2E -+ -+/* -+ * Config register bits -+ */ -+ -+#define MAX6620_CFG_RUN 0x80 -+#define MAX6620_CFG_POR 0x40 -+#define MAX6620_CFG_TIMEOUT 0x20 -+#define MAX6620_CFG_FULLFAN 0x10 -+#define MAX6620_CFG_OSC 0x08 -+#define MAX6620_CFG_WD_MASK 0x06 -+#define MAX6620_CFG_WD_2 0x02 -+#define MAX6620_CFG_WD_6 0x04 -+#define MAX6620_CFG_WD10 0x06 -+#define MAX6620_CFG_WD 0x01 -+ -+ -+/* -+ * Failure status register bits -+ */ -+ -+#define MAX6620_FAIL_TACH0 0x10 -+#define MAX6620_FAIL_TACH1 0x20 -+#define MAX6620_FAIL_TACH2 0x40 -+#define MAX6620_FAIL_TACH3 0x80 -+#define MAX6620_FAIL_MASK0 0x01 -+#define MAX6620_FAIL_MASK1 0x02 -+#define MAX6620_FAIL_MASK2 0x04 -+#define MAX6620_FAIL_MASK3 0x08 -+ -+ -+/* Minimum and maximum values of the FAN-RPM */ -+#define FAN_RPM_MIN 240 -+#define FAN_RPM_MAX 30000 -+ -+#define DIV_FROM_REG(reg) (1 << ((reg & 0xE0) >> 5)) -+ -+static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id); -+static int max6620_init_client(struct i2c_client *client); -+static int max6620_remove(struct i2c_client *client); -+static struct max6620_data *max6620_update_device(struct device *dev); -+ -+static const u8 config_reg[] = { -+ MAX6620_REG_CONF_FAN0, -+ MAX6620_REG_CONF_FAN1, -+ MAX6620_REG_CONF_FAN2, -+ MAX6620_REG_CONF_FAN3, -+}; -+ -+static const u8 dyn_reg[] = { -+ MAX6620_REG_DYN_FAN0, -+ MAX6620_REG_DYN_FAN1, -+ MAX6620_REG_DYN_FAN2, -+ MAX6620_REG_DYN_FAN3, -+}; -+ -+static const u8 tach_reg[] = { -+ MAX6620_REG_TACH0, -+ MAX6620_REG_TACH1, -+ MAX6620_REG_TACH2, -+ MAX6620_REG_TACH3, -+}; -+ -+static const u8 volt_reg[] = { -+ MAX6620_REG_VOLT0, -+ MAX6620_REG_VOLT1, -+ MAX6620_REG_VOLT2, -+ MAX6620_REG_VOLT3, -+}; -+ -+static const u8 target_reg[] = { -+ MAX6620_REG_TAR0, -+ MAX6620_REG_TAR1, -+ MAX6620_REG_TAR2, -+ MAX6620_REG_TAR3, -+}; -+ -+static const u8 dac_reg[] = { -+ MAX6620_REG_DAC0, -+ MAX6620_REG_DAC1, -+ MAX6620_REG_DAC2, -+ MAX6620_REG_DAC3, -+}; -+ -+/* -+ * Driver data (common to all clients) -+ */ -+ -+static const struct i2c_device_id max6620_id[] = { -+ { "max6620", 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, max6620_id); -+ -+static struct i2c_driver max6620_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "max6620", -+ }, -+ .probe = max6620_probe, -+ 
.remove = __devexit_p(max6620_remove), -+ .id_table = max6620_id, -+ .address_list = normal_i2c, -+}; -+ -+/* -+ * Client data (each client gets its own) -+ */ -+ -+struct max6620_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ int nr_fans; -+ char valid; /* zero until following fields are valid */ -+ unsigned long last_updated; /* in jiffies */ -+ -+ /* register values */ -+ u8 speed[4]; -+ u8 config; -+ u8 fancfg[4]; -+ u8 fandyn[4]; -+ u8 tach[4]; -+ u8 volt[4]; -+ u8 target[4]; -+ u8 dac[4]; -+ u8 fault; -+}; -+ -+static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, char *buf) { -+ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct max6620_data *data = max6620_update_device(dev); -+ int rpm; -+ -+ /* -+ * Calculation details: -+ * -+ * Each tachometer counts over an interval given by the "count" -+ * register (0.25, 0.5, 1 or 2 seconds). This module assumes -+ * that the fans produce two pulses per revolution (this seems -+ * to be the most common). -+ */ -+ if(data->tach[attr->index] == 0 || data->tach[attr->index] == 255) { -+ rpm = 0; -+ } else { -+ rpm = ((clock / (data->tach[attr->index] << 3)) * 30 * DIV_FROM_REG(data->fandyn[attr->index])); -+ } -+ -+ return sprintf(buf, "%d\n", rpm); -+} -+ -+static ssize_t get_target(struct device *dev, struct device_attribute *devattr, char *buf) { -+ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct max6620_data *data = max6620_update_device(dev); -+ int kscale, ktach, rpm; -+ -+ /* -+ * Use the datasheet equation: -+ * -+ * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)] -+ * -+ * then multiply by 60 to give rpm. -+ */ -+ -+ kscale = DIV_FROM_REG(data->fandyn[attr->index]); -+ ktach = data->target[attr->index]; -+ if(ktach == 0) { -+ rpm = 0; -+ } else { -+ rpm = ((60 * kscale * clock) / (ktach << 3)); -+ } -+ return sprintf(buf, "%d\n", rpm); -+} -+ -+static ssize_t set_target(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { -+ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct max6620_data *data = i2c_get_clientdata(client); -+ int kscale, ktach; -+ unsigned long rpm; -+ int err; -+ -+ err = kstrtoul(buf, 10, &rpm); -+ if (err) -+ return err; -+ -+ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); -+ -+ /* -+ * Divide the required speed by 60 to get from rpm to rps, then -+ * use the datasheet equation: -+ * -+ * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1 -+ */ -+ -+ mutex_lock(&data->update_lock); -+ -+ kscale = DIV_FROM_REG(data->fandyn[attr->index]); -+ ktach = ((60 * kscale * clock) / rpm); -+ if (ktach < 0) -+ ktach = 0; -+ if (ktach > 255) -+ ktach = 255; -+ data->target[attr->index] = ktach; -+ -+ i2c_smbus_write_byte_data(client, target_reg[attr->index], data->target[attr->index]); -+ i2c_smbus_write_byte_data(client, target_reg[attr->index]+0x01, 0x00); -+ -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+/* -+ * Get/set the fan speed in open loop mode using pwm1 sysfs file. -+ * Speed is given as a relative value from 0 to 255, where 255 is maximum -+ * speed. Note that this is done by writing directly to the chip's DAC, -+ * it won't change the closed loop speed set by fan1_target. -+ * Also note that due to rounding errors it is possible that you don't read -+ * back exactly the value you have set. 
-+ */ -+ -+static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { -+ -+ int pwm; -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct max6620_data *data = max6620_update_device(dev); -+ -+ /* -+ * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans. -+ * Lower DAC values mean higher speeds. -+ */ -+ pwm = ((int)data->volt[attr->index]); -+ -+ if (pwm < 0) -+ pwm = 0; -+ -+ return sprintf(buf, "%d\n", pwm); -+} -+ -+static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { -+ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct max6620_data *data = i2c_get_clientdata(client); -+ unsigned long pwm; -+ int err; -+ -+ err = kstrtoul(buf, 10, &pwm); -+ if (err) -+ return err; -+ -+ pwm = SENSORS_LIMIT(pwm, 0, 255); -+ -+ mutex_lock(&data->update_lock); -+ -+ data->dac[attr->index] = pwm; -+ -+ -+ i2c_smbus_write_byte_data(client, dac_reg[attr->index], data->dac[attr->index]); -+ i2c_smbus_write_byte_data(client, dac_reg[attr->index]+1, 0x00); -+ -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+/* -+ * Get/Set controller mode: -+ * Possible values: -+ * 0 = Fan always on -+ * 1 = Open loop, Voltage is set according to speed, not regulated. -+ * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer -+ */ -+ -+static ssize_t get_enable(struct device *dev, struct device_attribute *devattr, char *buf) { -+ -+ struct max6620_data *data = max6620_update_device(dev); -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ int mode = (data->fancfg[attr->index] & 0x80 ) >> 7; -+ int sysfs_modes[2] = {1, 2}; -+ -+ return sprintf(buf, "%d\n", sysfs_modes[mode]); -+} -+ -+static ssize_t set_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { -+ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct max6620_data *data = i2c_get_clientdata(client); -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ int max6620_modes[3] = {0, 1, 0}; -+ unsigned long mode; -+ int err; -+ -+ err = kstrtoul(buf, 10, &mode); -+ if (err) -+ return err; -+ -+ if (mode > 2) -+ return -EINVAL; -+ -+ mutex_lock(&data->update_lock); -+ -+ data->fancfg[attr->index] = i2c_smbus_read_byte_data(client, config_reg[attr->index]); -+ data->fancfg[attr->index] = (data->fancfg[attr->index] & ~0x80) -+ | (max6620_modes[mode] << 7); -+ -+ i2c_smbus_write_byte_data(client, config_reg[attr->index], data->fancfg[attr->index]); -+ -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+/* -+ * Read/write functions for fan1_div sysfs file. The MAX6620 has no such -+ * divider. We handle this by converting between divider and counttime: -+ * -+ * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, 3, 4 or 5 -+ * -+ * Lower values of k allow to connect a faster fan without the risk of -+ * counter overflow. The price is lower resolution. You can also set counttime -+ * using the module parameter. Note that the module parameter "prescaler" also -+ * influences the behaviour. Unfortunately, there's no sysfs attribute -+ * defined for that. See the data sheet for details. 
-+ */ -+ -+static ssize_t get_div(struct device *dev, struct device_attribute *devattr, char *buf) { -+ -+ struct max6620_data *data = max6620_update_device(dev); -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ -+ return sprintf(buf, "%d\n", DIV_FROM_REG(data->fandyn[attr->index])); -+} -+ -+static ssize_t set_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { -+ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct max6620_data *data = i2c_get_clientdata(client); -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ unsigned long div; -+ int err; -+ u8 div_bin; -+ -+ err = kstrtoul(buf, 10, &div); -+ if (err) -+ return err; -+ -+ mutex_lock(&data->update_lock); -+ switch (div) { -+ case 1: -+ div_bin = 0; -+ break; -+ case 2: -+ div_bin = 1; -+ break; -+ case 4: -+ div_bin = 2; -+ break; -+ case 8: -+ div_bin = 3; -+ break; -+ case 16: -+ div_bin = 4; -+ break; -+ case 32: -+ div_bin = 5; -+ break; -+ default: -+ mutex_unlock(&data->update_lock); -+ return -EINVAL; -+ } -+ data->fandyn[attr->index] &= 0x1F; -+ data->fandyn[attr->index] |= div_bin << 5; -+ i2c_smbus_write_byte_data(client, dyn_reg[attr->index], data->fandyn[attr->index]); -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+/* -+ * Get alarm stati: -+ * Possible values: -+ * 0 = no alarm -+ * 1 = alarm -+ */ -+ -+static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { -+ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct max6620_data *data = max6620_update_device(dev); -+ struct i2c_client *client = to_i2c_client(dev); -+ int alarm = 0; -+ -+ if (data->fault & (1 << attr->index)) { -+ mutex_lock(&data->update_lock); -+ alarm = 1; -+ data->fault &= ~(1 << attr->index); -+ data->fault |= i2c_smbus_read_byte_data(client, -+ MAX6620_REG_FAULT); -+ mutex_unlock(&data->update_lock); -+ } -+ -+ return sprintf(buf, "%d\n", alarm); -+} -+ -+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0); -+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1); -+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2); -+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3); -+static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target, 0); -+static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div, 0); -+// static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 0); -+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0); -+static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO, get_target, set_target, 1); -+static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, get_div, set_div, 1); -+// static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 1); -+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1); -+static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO, get_target, set_target, 2); -+static SENSOR_DEVICE_ATTR(fan3_div, S_IWUSR | S_IRUGO, get_div, set_div, 2); -+// static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 2); -+static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2); -+static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO, get_target, set_target, 3); -+static SENSOR_DEVICE_ATTR(fan4_div, S_IWUSR | S_IRUGO, get_div, set_div, 3); -+// static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 3); -+static 
SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3); -+ -+static struct attribute *max6620_attrs[] = { -+ &sensor_dev_attr_fan1_input.dev_attr.attr, -+ &sensor_dev_attr_fan2_input.dev_attr.attr, -+ &sensor_dev_attr_fan3_input.dev_attr.attr, -+ &sensor_dev_attr_fan4_input.dev_attr.attr, -+ &sensor_dev_attr_fan1_target.dev_attr.attr, -+ &sensor_dev_attr_fan1_div.dev_attr.attr, -+// &sensor_dev_attr_pwm1_enable.dev_attr.attr, -+ &sensor_dev_attr_pwm1.dev_attr.attr, -+ &sensor_dev_attr_fan2_target.dev_attr.attr, -+ &sensor_dev_attr_fan2_div.dev_attr.attr, -+// &sensor_dev_attr_pwm2_enable.dev_attr.attr, -+ &sensor_dev_attr_pwm2.dev_attr.attr, -+ &sensor_dev_attr_fan3_target.dev_attr.attr, -+ &sensor_dev_attr_fan3_div.dev_attr.attr, -+// &sensor_dev_attr_pwm3_enable.dev_attr.attr, -+ &sensor_dev_attr_pwm3.dev_attr.attr, -+ &sensor_dev_attr_fan4_target.dev_attr.attr, -+ &sensor_dev_attr_fan4_div.dev_attr.attr, -+// &sensor_dev_attr_pwm4_enable.dev_attr.attr, -+ &sensor_dev_attr_pwm4.dev_attr.attr, -+ NULL -+}; -+ -+static struct attribute_group max6620_attr_grp = { -+ .attrs = max6620_attrs, -+}; -+ -+ -+/* -+ * Real code -+ */ -+ -+static int __devinit max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { -+ -+ struct max6620_data *data; -+ int err; -+ -+ data = devm_kzalloc(&client->dev, sizeof(struct max6620_data), GFP_KERNEL); -+ if (!data) { -+ dev_err(&client->dev, "out of memory.\n"); -+ return -ENOMEM; -+ } -+ -+ i2c_set_clientdata(client, data); -+ mutex_init(&data->update_lock); -+ data->nr_fans = id->driver_data; -+ -+ /* -+ * Initialize the max6620 chip -+ */ -+ dev_info(&client->dev, "About to initialize module\n"); -+ -+ err = max6620_init_client(client); -+ if (err) -+ return err; -+ dev_info(&client->dev, "Module initialized\n"); -+ -+ err = sysfs_create_group(&client->dev.kobj, &max6620_attr_grp); -+ if (err) -+ return err; -+dev_info(&client->dev, "Sysfs entries created\n"); -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (!IS_ERR(data->hwmon_dev)) -+ return 0; -+ -+ err = PTR_ERR(data->hwmon_dev); -+ dev_err(&client->dev, "error registering hwmon device.\n"); -+ -+ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp); -+ return err; -+} -+ -+static int __devexit max6620_remove(struct i2c_client *client) { -+ -+ struct max6620_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ -+ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp); -+ return 0; -+} -+ -+static int max6620_init_client(struct i2c_client *client) { -+ -+ struct max6620_data *data = i2c_get_clientdata(client); -+ int config; -+ int err = -EIO; -+ int i; -+ -+ config = i2c_smbus_read_byte_data(client, MAX6620_REG_CONFIG); -+ -+ if (config < 0) { -+ dev_err(&client->dev, "Error reading config, aborting.\n"); -+ return err; -+ } -+ -+ -+ -+ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config)) { -+ dev_err(&client->dev, "Config write error, aborting.\n"); -+ return err; -+ } -+ -+ data->config = config; -+ for (i = 0; i < 4; i++) { -+ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); -+ data->fancfg[i] |= 0x80; // enable TACH monitoring -+ i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); -+ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); -+ data-> fandyn[i] |= 0x1C; -+ i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); -+ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); -+ data->volt[i] = i2c_smbus_read_byte_data(client, 
volt_reg[i]); -+ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); -+ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); -+ -+ -+ -+ } -+ -+ -+ -+ return 0; -+} -+ -+ -+ -+ -+static struct max6620_data *max6620_update_device(struct device *dev) -+{ -+ int i; -+ struct i2c_client *client = to_i2c_client(dev); -+ struct max6620_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { -+ -+ for (i = 0; i < 4; i++) { -+ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); -+ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); -+ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); -+ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); -+ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); -+ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); -+ } -+ -+ -+ /* -+ * Alarms are cleared on read in case the condition that -+ * caused the alarm is removed. Keep the value latched here -+ * for providing the register through different alarm files. -+ */ -+ u8 fault_reg; -+ fault_reg = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT); -+ data->fault |= (fault_reg >> 4) & (fault_reg & 0x0F); -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+module_i2c_driver(max6620_driver); -+ -+static int __init max6620_init(void) -+{ -+ return i2c_add_driver(&max6620_driver); -+} -+module_init(max6620_init); -+ -+/** -+ * sht21_init() - clean up driver -+ * -+ * Called when module is removed. -+ */ -+static void __exit max6620_exit(void) -+{ -+ i2c_del_driver(&max6620_driver); -+} -+module_exit(max6620_exit); -+ -+MODULE_AUTHOR("Lucas Grunenberg"); -+MODULE_DESCRIPTION("MAX6620 sensor driver"); -+MODULE_LICENSE("GPL"); diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-add-dps460-support.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-add-dps460-support.patch deleted file mode 100644 index 812f619a..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-add-dps460-support.patch +++ /dev/null @@ -1,78 +0,0 @@ -enable PMBUS_SKIP_STATUS_CHECK for dps460 - -From: Vadim Pasternak - -Patch for pmbus - includes disabling of PMBus status check through platform data structure. -This is due to some PMBus don't support the STATUS_CML register, or report communication errors -for no explicable reason. For such chips, checking the status register must be disabled. 
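The mechanism the hunks below rely on is that the pmbus core honours flags handed over in the client's platform data at probe time. A minimal sketch of that pattern, with an illustrative probe function name (the real hunk matches on the "dps460" id string, uses plain kzalloc and frees the data again in pmbus_do_remove):

	#include <linux/i2c.h>
	#include <linux/pmbus.h>
	#include <linux/slab.h>

	/* Sketch only: hand PMBUS_SKIP_STATUS_CHECK to the pmbus core so it
	 * stops polling STATUS_CML for devices that misreport errors. */
	static int example_pmbus_probe(struct i2c_client *client,
				       const struct i2c_device_id *id)
	{
		struct pmbus_platform_data *pdata;

		pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		pdata->flags = PMBUS_SKIP_STATUS_CHECK;
		client->dev.platform_data = pdata;

		/* pmbus_do_probe() picks the flags up via dev_get_platdata() */
		return 0;
	}
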
---- - drivers/hwmon/pmbus/pmbus.c | 14 ++++++++++++++ - drivers/hwmon/pmbus/pmbus_core.c | 3 +++ - 2 files changed, 17 insertions(+) - -diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c -index 7e91700..6dd75fb 100644 ---- a/drivers/hwmon/pmbus/pmbus.c -+++ b/drivers/hwmon/pmbus/pmbus.c -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include - #include "pmbus.h" - - /* -@@ -166,14 +167,26 @@ static int pmbus_probe(struct i2c_client *client, - const struct i2c_device_id *id) - { - struct pmbus_driver_info *info; -+ struct pmbus_platform_data *pdata = NULL; -+ struct device *dev = &client->dev; - - info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info), - GFP_KERNEL); - if (!info) - return -ENOMEM; - -+ if (!strncmp(id->name, "dps460", sizeof("dps460"))) { -+ pdata = kzalloc(sizeof(struct pmbus_platform_data), GFP_KERNEL); -+ if (!pdata) { -+ kfree(info); -+ return -ENOMEM; -+ } -+ pdata->flags = PMBUS_SKIP_STATUS_CHECK; -+ } -+ - info->pages = id->driver_data; - info->identify = pmbus_identify; -+ dev->platform_data = pdata; - - return pmbus_do_probe(client, id, info); - } -@@ -195,6 +208,7 @@ static const struct i2c_device_id pmbus_id[] = { - {"tps40400", 1}, - {"tps40422", 2}, - {"udt020", 1}, -+ {"dps460", 1}, - {} - }; - -diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c -index 291d11f..09b123f 100644 ---- a/drivers/hwmon/pmbus/pmbus_core.c -+++ b/drivers/hwmon/pmbus/pmbus_core.c -@@ -1792,8 +1792,11 @@ EXPORT_SYMBOL_GPL(pmbus_do_probe); - int pmbus_do_remove(struct i2c_client *client) - { - struct pmbus_data *data = i2c_get_clientdata(client); -+ const struct pmbus_platform_data *pdata = dev_get_platdata(&client->dev); - hwmon_device_unregister(data->hwmon_dev); - kfree(data->group.attrs); -+ if (pdata) -+ kfree(pdata); - return 0; - } - EXPORT_SYMBOL_GPL(pmbus_do_remove); --- -2.1.4 - diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch deleted file mode 100644 index 38550707..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch +++ /dev/null @@ -1,96 +0,0 @@ -Update pmbus_data data structure to meet kernel implementation - -From: Shuotian Cheng - -The pmbus_data data structure is pasted in the driver. -Cumulus patch is for kernel 3.2.x. -Update this data structure to meet current kernel (3.16.x) implementation. ---- - drivers/hwmon/pmbus/dni_dps460.c | 42 +++++++++++++++----------------------- - 1 file changed, 17 insertions(+), 25 deletions(-) - -diff --git a/drivers/hwmon/pmbus/dni_dps460.c b/drivers/hwmon/pmbus/dni_dps460.c -index c687217..1607b65 100644 ---- a/drivers/hwmon/pmbus/dni_dps460.c -+++ b/drivers/hwmon/pmbus/dni_dps460.c -@@ -39,41 +39,32 @@ enum chips { dni_dps460 }; - #define FAN_VALUE_MAX 0x64 - - /* Needed to access the mutex. 
Copied from pmbus_core.c */ --#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) -+#define PB_STATUS_BASE 0 -+#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES) -+#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES) -+#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES) -+#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES) -+#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) -+#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES) -+#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1) -+#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1) - struct pmbus_data { -+ struct device *dev; - struct device *hwmon_dev; - - u32 flags; /* from platform data */ - -- int exponent; /* linear mode: exponent for output voltages */ -+ int exponent[PMBUS_PAGES]; -+ /* linear mode: exponent for output voltages */ - - const struct pmbus_driver_info *info; - - int max_attributes; - int num_attributes; -- struct attribute **attributes; - struct attribute_group group; -+ const struct attribute_group *groups[2]; - -- /* -- * Sensors cover both sensor and limit registers. -- */ -- int max_sensors; -- int num_sensors; - struct pmbus_sensor *sensors; -- /* -- * Booleans are used for alarms. -- * Values are determined from status registers. -- */ -- int max_booleans; -- int num_booleans; -- struct pmbus_boolean *booleans; -- /* -- * Labels are used to map generic names (e.g., "in1") -- * to PMBus specific names (e.g., "vin" or "vout1"). -- */ -- int max_labels; -- int num_labels; -- struct pmbus_label *labels; - - struct mutex update_lock; - bool valid; -@@ -84,6 +75,7 @@ struct pmbus_data { - * so we keep them all together. - */ - u8 status[PB_NUM_STATUS_REG]; -+ u8 status_register; - - u8 currpage; - }; -@@ -123,14 +115,14 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr, - struct i2c_client *client = to_i2c_client(dev); - struct pmbus_data *data = i2c_get_clientdata(client); - int err; -- unsigned int val; -- unsigned int rpm; -+ unsigned long val; -+ unsigned long rpm; - - err = kstrtol(buf, 10, &rpm); - if (err) - return err; - -- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); -+ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX); - - mutex_lock(&data->update_lock); - diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460.patch deleted file mode 100644 index 8d93c157..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-dni_dps460.patch +++ /dev/null @@ -1,304 +0,0 @@ -Add PMBUS driver for DNI DPS460 Power Supply - -From: Cumulus Networks - - ---- - drivers/hwmon/pmbus/Kconfig | 10 ++ - drivers/hwmon/pmbus/Makefile | 1 - drivers/hwmon/pmbus/dni_dps460.c | 253 ++++++++++++++++++++++++++++++++++++++ - 3 files changed, 264 insertions(+) - create mode 100644 drivers/hwmon/pmbus/dni_dps460.c - -diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig -index ec48945..7d3b1aa 100644 ---- a/drivers/hwmon/pmbus/Kconfig -+++ b/drivers/hwmon/pmbus/Kconfig -@@ -77,6 +77,16 @@ config SENSORS_MAX34440 - This driver can also be built as a module. If so, the module will - be called max34440. - -+config SENSORS_DNI_DPS460 -+ tristate "Delta DPS460" -+ default n -+ help -+ If you say yes here you get hardware monitoring support for Delta -+ DPS460. -+ -+ This driver can also be built as a module. If so, the module will -+ be called dni_dps460. 
-+ - config SENSORS_MAX8688 - tristate "Maxim MAX8688" - default n -diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile -index 5e6c316..767d086 100644 ---- a/drivers/hwmon/pmbus/Makefile -+++ b/drivers/hwmon/pmbus/Makefile -@@ -9,6 +9,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o - obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o - obj-$(CONFIG_SENSORS_MAX16064) += max16064.o - obj-$(CONFIG_SENSORS_MAX34440) += max34440.o -+obj-$(CONFIG_SENSORS_DNI_DPS460) += dni_dps460.o - obj-$(CONFIG_SENSORS_MAX8688) += max8688.o - obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o - obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o -diff --git a/drivers/hwmon/pmbus/dni_dps460.c b/drivers/hwmon/pmbus/dni_dps460.c -new file mode 100644 -index 0000000..c687217 ---- /dev/null -+++ b/drivers/hwmon/pmbus/dni_dps460.c -@@ -0,0 +1,253 @@ -+/* -+ * Hardware monitoring driver for Delta DPS460 -+ * -+ * Copyright (C) 2014 Cumulus Networks, LLC -+ * Author: Puneet Shenoy -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "pmbus.h" -+ -+enum chips { dni_dps460 }; -+ -+/* Data provided by DELL Inc */ -+#define FAN_RPM_MIN 7200 -+#define FAN_RPM_MAX 18000 -+#define FAN_VALUE_MIN 0x28 -+#define FAN_VALUE_MAX 0x64 -+ -+/* Needed to access the mutex. Copied from pmbus_core.c */ -+#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) -+struct pmbus_data { -+ struct device *hwmon_dev; -+ -+ u32 flags; /* from platform data */ -+ -+ int exponent; /* linear mode: exponent for output voltages */ -+ -+ const struct pmbus_driver_info *info; -+ -+ int max_attributes; -+ int num_attributes; -+ struct attribute **attributes; -+ struct attribute_group group; -+ -+ /* -+ * Sensors cover both sensor and limit registers. -+ */ -+ int max_sensors; -+ int num_sensors; -+ struct pmbus_sensor *sensors; -+ /* -+ * Booleans are used for alarms. -+ * Values are determined from status registers. -+ */ -+ int max_booleans; -+ int num_booleans; -+ struct pmbus_boolean *booleans; -+ /* -+ * Labels are used to map generic names (e.g., "in1") -+ * to PMBus specific names (e.g., "vin" or "vout1"). -+ */ -+ int max_labels; -+ int num_labels; -+ struct pmbus_label *labels; -+ -+ struct mutex update_lock; -+ bool valid; -+ unsigned long last_updated; /* in jiffies */ -+ -+ /* -+ * A single status register covers multiple attributes, -+ * so we keep them all together. -+ */ -+ u8 status[PB_NUM_STATUS_REG]; -+ -+ u8 currpage; -+}; -+ -+/* -+ * We are only concerned with the first fan. The get_target and set_target are -+ * are written accordingly. 
-+ */ -+static ssize_t get_target(struct device *dev, struct device_attribute *devattr, -+ char *buf) { -+ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct pmbus_data *data = i2c_get_clientdata(client); -+ int val; -+ u32 rpm; -+ -+ /* -+ * The FAN_COMMAND_n takes a value which is not the RPM. -+ * The value and RPM have a liner relation. -+ * rpm = (FAN_RPM_MIN/FAN_VALUE_MIN) * val -+ * The slope is (FAN_RPM_MIN/FAN_VALUE_MIN) = 180 -+ */ -+ mutex_lock(&data->update_lock); -+ val = pmbus_read_word_data(client, 0, PMBUS_FAN_COMMAND_1); -+ pmbus_clear_faults(client); -+ mutex_unlock(&data->update_lock); -+ if (val < 0) { -+ return val; -+ } -+ rpm = val * (FAN_RPM_MIN/FAN_VALUE_MIN); -+ return sprintf(buf, "%d\n", rpm); -+} -+ -+static ssize_t set_target(struct device *dev, struct device_attribute *devattr, -+ const char *buf, size_t count) { -+ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct pmbus_data *data = i2c_get_clientdata(client); -+ int err; -+ unsigned int val; -+ unsigned int rpm; -+ -+ err = kstrtol(buf, 10, &rpm); -+ if (err) -+ return err; -+ -+ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX); -+ -+ mutex_lock(&data->update_lock); -+ -+ val = FAN_VALUE_MIN * rpm; -+ val /= FAN_RPM_MIN; -+ pmbus_write_word_data(client, 0, PMBUS_FAN_COMMAND_1, (u16)val); -+ pmbus_clear_faults(client); -+ -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+static ssize_t show_pec(struct device *dev, struct device_attribute *dummy, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC)); -+} -+ -+static ssize_t set_pec(struct device *dev, struct device_attribute *dummy, -+ const char *buf, size_t count) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ long val; -+ int err; -+ -+ err = strict_strtol(buf, 10, &val); -+ if (err < 0) -+ return err; -+ -+ if (val != 0) -+ client->flags |= I2C_CLIENT_PEC; -+ else -+ client->flags &= ~I2C_CLIENT_PEC; -+ -+ return count; -+} -+ -+static SENSOR_DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec, 0); -+static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, -+ set_target, 0); -+ -+static struct attribute *dni_dps460_attrs[] = { -+ &sensor_dev_attr_fan1_target.dev_attr.attr, -+ &sensor_dev_attr_pec.dev_attr.attr, -+ NULL -+}; -+static struct attribute_group dni_dps460_attr_grp = { -+ .attrs = dni_dps460_attrs, -+}; -+ -+static int dni_dps460_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct pmbus_driver_info *info; -+ int ret; -+ -+ if (!i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_BYTE_DATA | -+ I2C_FUNC_SMBUS_WORD_DATA | -+ I2C_FUNC_SMBUS_PEC)) -+ return -ENODEV; -+ -+ /* Needs PEC(PACKET ERROR CODE). Writes wont work without this. */ -+ client->flags = I2C_CLIENT_PEC; -+ -+ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL); -+ if (!info) -+ return -ENOMEM; -+ -+ /* Use only 1 page with 1 Fan, 2 Temps. 
*/ -+ info->pages = 1; -+ info->func[0] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | -+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP; -+ -+ ret = pmbus_do_probe(client, id, info); -+ if (ret < 0) -+ goto out; -+ -+ ret = sysfs_create_group(&client->dev.kobj, &dni_dps460_attr_grp); -+ if (ret) -+ goto out; -+ return 0; -+out: -+ kfree(info); -+ return ret; -+} -+ -+static int dni_dps460_remove(struct i2c_client *client) -+{ -+ struct pmbus_data *data = i2c_get_clientdata(client); -+ -+ sysfs_remove_group(&client->dev.kobj, &dni_dps460_attr_grp); -+ if (data->info) -+ kfree(data->info); -+ pmbus_do_remove(client); -+ return 0; -+} -+ -+static const struct i2c_device_id dni_dps460_id[] = { -+ {"dni_dps460", dni_dps460}, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, dni_dps460_id); -+ -+static struct i2c_driver dni_dps460_driver = { -+ .driver = { -+ .name = "dni_dps460", -+ }, -+ .probe = dni_dps460_probe, -+ .remove = dni_dps460_remove, -+ .id_table = dni_dps460_id, -+}; -+ -+module_i2c_driver(dni_dps460_driver); -+ -+MODULE_AUTHOR("Puneet Shenoy"); -+MODULE_DESCRIPTION("PMBus driver for Delta DPS460"); -+MODULE_LICENSE("GPL"); diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch deleted file mode 100644 index 5d948675..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch +++ /dev/null @@ -1,89 +0,0 @@ -mlnx patch for UCD9200 - -From: Vadim Pasternak - -Patch replaces in device probing routine (ucd9000_probe) call -i2c_smbus_read_block_data with i2c_smbus_read_i2c_block_data. - -The first call executes the SMBus "block read" protocol. -Using this function requires that the client's adapter support -the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter -drivers support this. In particular Mellanox i2c controller doesn't -support it. API i2c_smbus_read_i2c_block_data is supposed to be -more generic and be supported by all i2c client adapters. 
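In short, i2c_smbus_read_block_data() runs the SMBus Block Read protocol, where the slave announces the byte count and the adapter must advertise I2C_FUNC_SMBUS_READ_BLOCK_DATA, while i2c_smbus_read_i2c_block_data() lets the caller fix the length and is available on nearly all adapters. A small illustrative helper contrasting the two (not part of the patch, which simply switches to the fixed-length call; "reg" and the 8-byte length stand in for UCD9200_DEVICE_ID and its ID size):

	#include <linux/i2c.h>

	static int read_block_portable(struct i2c_client *client, u8 reg, u8 *buf)
	{
		/* SMBus Block Read: slave reports the length first; needs
		 * I2C_FUNC_SMBUS_READ_BLOCK_DATA in the adapter. */
		if (i2c_check_functionality(client->adapter,
					    I2C_FUNC_SMBUS_READ_BLOCK_DATA))
			return i2c_smbus_read_block_data(client, reg, buf);

		/* I2C block read: caller supplies the length; works on
		 * controllers (such as the Mellanox one mentioned above)
		 * that lack block-read support. */
		return i2c_smbus_read_i2c_block_data(client, reg, 8, buf);
	}
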
---- - drivers/hwmon/pmbus/ucd9200.c | 26 +++++++++++++++++++++----- - 1 files changed, 21 insertions(+), 5 deletions(-) - -diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c -index 033d6ac..119130c 100644 ---- a/drivers/hwmon/pmbus/ucd9200.c -+++ b/drivers/hwmon/pmbus/ucd9200.c -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include - #include "pmbus.h" - - #define UCD9200_PHASE_INFO 0xd2 -@@ -52,14 +53,15 @@ static int ucd9200_probe(struct i2c_client *client, - u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; - struct pmbus_driver_info *info; - const struct i2c_device_id *mid; -- int i, j, ret; -+ int i, j, ret, n, len; -+ u8* buff; - - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_BLOCK_DATA)) - return -ENODEV; - -- ret = i2c_smbus_read_block_data(client, UCD9200_DEVICE_ID, -+ ret = i2c_smbus_read_i2c_block_data(client, UCD9200_DEVICE_ID, 8, - block_buffer); - if (ret < 0) { - dev_err(&client->dev, "Failed to read device ID\n"); -@@ -68,8 +70,22 @@ static int ucd9200_probe(struct i2c_client *client, - block_buffer[ret] = '\0'; - dev_info(&client->dev, "Device ID %s\n", block_buffer); - -+ len = strlen(block_buffer); -+ for (n=0; n < len; n++) { -+ if (isalnum(block_buffer[n])) -+ break; -+ } -+ if (n >= len) { -+ dev_err(&client->dev, "Incorrect device name\n"); -+ return -ENODEV; -+ } -+ buff = &block_buffer[n]; -+ len = strlen(buff); -+ - for (mid = ucd9200_id; mid->name[0]; mid++) { -- if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) -+ if (len != strlen(mid->name)) -+ continue; -+ if (!strncasecmp(mid->name, buff, strlen(mid->name))) - break; - } - if (!mid->name[0]) { -@@ -86,7 +102,7 @@ static int ucd9200_probe(struct i2c_client *client, - if (!info) - return -ENOMEM; - -- ret = i2c_smbus_read_block_data(client, UCD9200_PHASE_INFO, -+ ret = i2c_smbus_read_i2c_block_data(client, UCD9200_PHASE_INFO, 4, - block_buffer); - if (ret < 0) { - dev_err(&client->dev, "Failed to read phase information\n"); -@@ -100,7 +116,7 @@ static int ucd9200_probe(struct i2c_client *client, - * the first unconfigured rail. - */ - info->pages = 0; -- for (i = 0; i < ret; i++) { -+ for (i = 1; i < ret; i++) { - if (!block_buffer[i]) - break; - info->pages++; diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch deleted file mode 100644 index bf6c4fc7..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch +++ /dev/null @@ -1,57 +0,0 @@ -Add 'delay' module param to the driver. - -From: Cumulus Networks - -This is needed on S6000 for safe PMBUS access. -Without setting the 'delay', the ismt driver throws 'completion wait -timed out' error message. 
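The shape of the change is a read-only module parameter consumed before every descriptor access; a condensed sketch, where the helper function is illustrative (the hunk below inlines the check at the top of ismt_access()) and the parameter names match the hunk:

	#include <linux/delay.h>
	#include <linux/module.h>

	static unsigned int delay = 1000;	/* microseconds, 0 disables it */
	module_param(delay, uint, S_IRUGO);
	MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)");

	/* Illustrative helper: give slow PMBus devices time to settle. */
	static void ismt_throttle(void)
	{
		if (delay > 0)
			udelay(delay);
	}

The wait can be tuned at load time with something like "modprobe i2c-ismt delay=2000".
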
---- - drivers/i2c/busses/i2c-ismt.c | 13 ++++++++++--- - 1 file changed, 10 insertions(+), 3 deletions(-) - -diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c -index d9ee43c..b2b3856 100644 ---- a/drivers/i2c/busses/i2c-ismt.c -+++ b/drivers/i2c/busses/i2c-ismt.c -@@ -70,6 +70,7 @@ - #include - #include - #include -+#include - - #include - -@@ -192,9 +193,12 @@ static const struct pci_device_id ismt_ids[] = { - MODULE_DEVICE_TABLE(pci, ismt_ids); - - /* Bus speed control bits for slow debuggers - refer to the docs for usage */ --static unsigned int bus_speed; -+static unsigned int bus_speed = 100; -+static unsigned int delay = 1000; - module_param(bus_speed, uint, S_IRUGO); --MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)"); -+MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (1000 by default)"); -+module_param(delay, uint, S_IRUGO); -+MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)"); - - /** - * __ismt_desc_dump() - dump the contents of a specific descriptor -@@ -391,6 +395,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, - struct ismt_priv *priv = i2c_get_adapdata(adap); - struct device *dev = &priv->pci_dev->dev; - -+ if (delay > 0) -+ udelay(delay); -+ - desc = &priv->hw[priv->head]; - - /* Initialize the DMA buffer */ -@@ -756,7 +763,7 @@ static void ismt_hw_init(struct ismt_priv *priv) - bus_speed = 1000; - break; - } -- dev_dbg(dev, "SMBus clock is running at %d kHz\n", bus_speed); -+ dev_info(dev, "SMBus clock is running at %d kHz with delay %d us\n", bus_speed, delay); - } - - /** diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch deleted file mode 100644 index 612b02db..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-i2c-bus-intel-ismt-enable-param.patch +++ /dev/null @@ -1,27 +0,0 @@ -diff -urpN a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c ---- a/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:12:49.589201206 +0000 -+++ b/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:15:03.973204122 +0000 -@@ -200,6 +200,11 @@ MODULE_PARM_DESC(bus_speed, "Bus Speed i - module_param(delay, uint, S_IRUGO); - MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)"); - -+/* Enable/Disable driver */ -+static unsigned int enable = 1; -+module_param(enable, uint, S_IRUGO); -+MODULE_PARM_DESC(enable, "Enable or disable the ISMT driver (enabled by default)"); -+ - /** - * __ismt_desc_dump() - dump the contents of a specific descriptor - */ -@@ -852,6 +857,11 @@ ismt_probe(struct pci_dev *pdev, const s - struct ismt_priv *priv; - unsigned long start, len; - -+ if(!enable) { -+ dev_warn(&pdev->dev, "module is disabled.\n"); -+ return -ENODEV; -+ } -+ - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch deleted file mode 100644 index a3134c43..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-igb-version-5.3.54.patch +++ /dev/null @@ -1,48795 +0,0 @@ -diff -Nu a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile ---- a/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 09:20:24.786171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 10:43:55.318238134 +0000 -@@ -32,5 
+32,7 @@ - obj-$(CONFIG_IGB) += igb.o - - igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ -- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ -- e1000_i210.o igb_ptp.o igb_hwmon.o -+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ -+ e1000_i210.o igb_ptp.o igb_hwmon.o \ -+ e1000_manage.o igb_param.o kcompat.o e1000_api.o \ -+ igb_vmdq.o igb_procfs.o igb_debugfs.o -diff -Nu a/drivers/net/ethernet/intel/igb/Module.supported b/drivers/net/ethernet/intel/igb/Module.supported ---- a/drivers/net/ethernet/intel/igb/Module.supported 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/Module.supported 2016-11-13 10:27:24.246224975 +0000 -@@ -0,0 +1 @@ -+igb.ko external -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c ---- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 14:32:08.575567168 +0000 -@@ -1,94 +1,134 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -- --/* e1000_82575 -- * e1000_82576 -- */ -+/******************************************************************************* - --#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. - --#include --#include --#include -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+/* -+ * 82575EB Gigabit Network Connection -+ * 82575EB Gigabit Backplane Connection -+ * 82575GB Gigabit Network Connection -+ * 82576 Gigabit Network Connection -+ * 82576 Quad Port Gigabit Mezzanine Adapter -+ * 82580 Gigabit Network Connection -+ * I350 Gigabit Network Connection -+ */ - --#include "e1000_mac.h" --#include "e1000_82575.h" -+#include "e1000_api.h" - #include "e1000_i210.h" - --static s32 igb_get_invariants_82575(struct e1000_hw *); --static s32 igb_acquire_phy_82575(struct e1000_hw *); --static void igb_release_phy_82575(struct e1000_hw *); --static s32 igb_acquire_nvm_82575(struct e1000_hw *); --static void igb_release_nvm_82575(struct e1000_hw *); --static s32 igb_check_for_link_82575(struct e1000_hw *); --static s32 igb_get_cfg_done_82575(struct e1000_hw *); --static s32 igb_init_hw_82575(struct e1000_hw *); --static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); --static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); --static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); --static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); --static s32 igb_reset_hw_82575(struct e1000_hw *); --static s32 igb_reset_hw_82580(struct e1000_hw *); --static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); --static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); --static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); --static s32 igb_setup_copper_link_82575(struct e1000_hw *); --static s32 igb_setup_serdes_link_82575(struct e1000_hw *); --static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); --static void igb_clear_hw_cntrs_82575(struct e1000_hw *); --static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); --static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, -- u16 *); --static s32 igb_get_phy_id_82575(struct e1000_hw *); --static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); --static bool igb_sgmii_active_82575(struct e1000_hw *); --static s32 igb_reset_init_script_82575(struct e1000_hw *); --static s32 igb_read_mac_addr_82575(struct e1000_hw *); --static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); --static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); --static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); --static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); --static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); --static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); -+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); -+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); -+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); -+static void e1000_release_phy_82575(struct e1000_hw *hw); -+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); -+static void e1000_release_nvm_82575(struct e1000_hw *hw); -+static s32 e1000_check_for_link_82575(struct e1000_hw *hw); -+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); -+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); -+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, -+ u16 *duplex); -+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); -+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, -+ u16 *data); -+static s32 e1000_reset_hw_82575(struct e1000_hw *hw); -+static s32 e1000_reset_hw_82580(struct e1000_hw 
*hw); -+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, -+ u32 offset, u16 *data); -+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, -+ u32 offset, u16 data); -+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, -+ bool active); -+static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, -+ bool active); -+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, -+ bool active); -+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); -+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); -+static s32 e1000_get_media_type_82575(struct e1000_hw *hw); -+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); -+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); -+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, -+ u32 offset, u16 data); -+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); -+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); -+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, -+ u16 *speed, u16 *duplex); -+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); -+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); -+static bool e1000_sgmii_active_82575(struct e1000_hw *hw); -+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); -+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); -+static void e1000_config_collision_dist_82575(struct e1000_hw *hw); -+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); -+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); -+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); -+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); -+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); -+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); -+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); -+static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, -+ u16 offset); -+static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, -+ u16 offset); -+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); -+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); -+static void e1000_clear_vfta_i350(struct e1000_hw *hw); -+ -+static void e1000_i2c_start(struct e1000_hw *hw); -+static void e1000_i2c_stop(struct e1000_hw *hw); -+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); -+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); -+static s32 e1000_get_i2c_ack(struct e1000_hw *hw); -+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); -+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); -+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); -+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); -+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); -+static bool e1000_get_i2c_data(u32 *i2cctl); -+ - static const u16 e1000_82580_rxpbs_table[] = { - 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; -+#define E1000_82580_RXPBS_TABLE_SIZE \ -+ (sizeof(e1000_82580_rxpbs_table) / \ -+ sizeof(e1000_82580_rxpbs_table[0])) - - /** -- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO -+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO - * @hw: pointer to the HW structure - * - * Called to determine if the I2C pins are being used for I2C or as an - * external MDIO interface since the 
two options are mutually exclusive. - **/ --static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) -+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) - { - u32 reg = 0; - bool ext_mdio = false; - -+ DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); -+ - switch (hw->mac.type) { - case e1000_82575: - case e1000_82576: -- reg = rd32(E1000_MDIC); -+ reg = E1000_READ_REG(hw, E1000_MDIC); - ext_mdio = !!(reg & E1000_MDIC_DEST); - break; - case e1000_82580: -@@ -96,7 +136,7 @@ - case e1000_i354: - case e1000_i210: - case e1000_i211: -- reg = rd32(E1000_MDICNFG); -+ reg = E1000_READ_REG(hw, E1000_MDICNFG); - ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); - break; - default: -@@ -106,135 +146,98 @@ - } - - /** -- * igb_check_for_link_media_swap - Check which M88E1112 interface linked -- * @hw: pointer to the HW structure -- * -- * Poll the M88E1112 interfaces to see which interface achieved link. -- */ --static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) --{ -- struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val; -- u16 data; -- u8 port = 0; -- -- /* Check the copper medium. */ -- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); -- if (ret_val) -- return ret_val; -- -- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); -- if (ret_val) -- return ret_val; -- -- if (data & E1000_M88E1112_STATUS_LINK) -- port = E1000_MEDIA_PORT_COPPER; -- -- /* Check the other medium. */ -- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); -- if (ret_val) -- return ret_val; -- -- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); -- if (ret_val) -- return ret_val; -- -- /* reset page to 0 */ -- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); -- if (ret_val) -- return ret_val; -- -- if (data & E1000_M88E1112_STATUS_LINK) -- port = E1000_MEDIA_PORT_OTHER; -- -- /* Determine if a swap needs to happen. */ -- if (port && (hw->dev_spec._82575.media_port != port)) { -- hw->dev_spec._82575.media_port = port; -- hw->dev_spec._82575.media_changed = true; -- } else { -- ret_val = igb_check_for_link_82575(hw); -- } -- -- return 0; --} -- --/** -- * igb_init_phy_params_82575 - Init PHY func ptrs. -+ * e1000_init_phy_params_82575 - Init PHY func ptrs. 
- * @hw: pointer to the HW structure - **/ --static s32 igb_init_phy_params_82575(struct e1000_hw *hw) -+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u32 ctrl_ext; - -+ DEBUGFUNC("e1000_init_phy_params_82575"); -+ -+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; -+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; -+ - if (hw->phy.media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; - goto out; - } - -+ phy->ops.power_up = igb_e1000_power_up_phy_copper; -+ phy->ops.power_down = e1000_power_down_phy_copper_82575; -+ - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - -- ctrl_ext = rd32(E1000_CTRL_EXT); -+ phy->ops.acquire = e1000_acquire_phy_82575; -+ phy->ops.check_reset_block = e1000_check_reset_block_generic; -+ phy->ops.commit = e1000_phy_sw_reset_generic; -+ phy->ops.get_cfg_done = e1000_get_cfg_done_82575; -+ phy->ops.release = e1000_release_phy_82575; - -- if (igb_sgmii_active_82575(hw)) { -- phy->ops.reset = igb_phy_hw_reset_sgmii_82575; -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ -+ if (e1000_sgmii_active_82575(hw)) { -+ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; - ctrl_ext |= E1000_CTRL_I2C_ENA; - } else { -- phy->ops.reset = igb_phy_hw_reset; -+ phy->ops.reset = e1000_phy_hw_reset_generic; - ctrl_ext &= ~E1000_CTRL_I2C_ENA; - } - -- wr32(E1000_CTRL_EXT, ctrl_ext); -- igb_reset_mdicnfg_82580(hw); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); -+ e1000_reset_mdicnfg_82580(hw); - -- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { -- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; -- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; -+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { -+ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; -+ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; - } else { - switch (hw->mac.type) { - case e1000_82580: - case e1000_i350: - case e1000_i354: -- phy->ops.read_reg = igb_read_phy_reg_82580; -- phy->ops.write_reg = igb_write_phy_reg_82580; -+ phy->ops.read_reg = e1000_read_phy_reg_82580; -+ phy->ops.write_reg = e1000_write_phy_reg_82580; - break; - case e1000_i210: - case e1000_i211: -- phy->ops.read_reg = igb_read_phy_reg_gs40g; -- phy->ops.write_reg = igb_write_phy_reg_gs40g; -+ phy->ops.read_reg = e1000_read_phy_reg_gs40g; -+ phy->ops.write_reg = e1000_write_phy_reg_gs40g; - break; - default: -- phy->ops.read_reg = igb_read_phy_reg_igp; -- phy->ops.write_reg = igb_write_phy_reg_igp; -+ phy->ops.read_reg = e1000_read_phy_reg_igp; -+ phy->ops.write_reg = e1000_write_phy_reg_igp; - } - } - -- /* set lan id */ -- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> -- E1000_STATUS_FUNC_SHIFT; -- - /* Set phy->phy_addr and phy->id. 
*/ -- ret_val = igb_get_phy_id_82575(hw); -- if (ret_val) -- return ret_val; -+ ret_val = e1000_get_phy_id_82575(hw); - - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case M88E1543_E_PHY_ID: -+ case M88E1512_E_PHY_ID: - case I347AT4_E_PHY_ID: - case M88E1112_E_PHY_ID: -+ case M88E1340M_E_PHY_ID: - case M88E1111_I_PHY_ID: - phy->type = e1000_phy_m88; -- phy->ops.check_polarity = igb_check_polarity_m88; -- phy->ops.get_phy_info = igb_get_phy_info_m88; -- if (phy->id != M88E1111_I_PHY_ID) -+ phy->ops.check_polarity = igb_e1000_check_polarity_m88; -+ phy->ops.get_info = e1000_get_phy_info_m88; -+ if (phy->id == I347AT4_E_PHY_ID || -+ phy->id == M88E1112_E_PHY_ID || -+ phy->id == M88E1340M_E_PHY_ID) - phy->ops.get_cable_length = -- igb_get_cable_length_m88_gen2; -+ e1000_get_cable_length_m88_gen2; -+ else if (phy->id == M88E1543_E_PHY_ID || -+ phy->id == M88E1512_E_PHY_ID) -+ phy->ops.get_cable_length = -+ e1000_get_cable_length_m88_gen2; - else -- phy->ops.get_cable_length = igb_get_cable_length_m88; -- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; -+ phy->ops.get_cable_length = e1000_get_cable_length_m88; -+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; - /* Check if this PHY is confgured for media swap. */ - if (phy->id == M88E1112_E_PHY_ID) { - u16 data; -@@ -256,35 +259,48 @@ - if (data == E1000_M88E1112_AUTO_COPPER_SGMII || - data == E1000_M88E1112_AUTO_COPPER_BASEX) - hw->mac.ops.check_for_link = -- igb_check_for_link_media_swap; -+ e1000_check_for_link_media_swap; -+ } -+ if (phy->id == M88E1512_E_PHY_ID) { -+ ret_val = e1000_initialize_M88E1512_phy(hw); -+ if (ret_val) -+ goto out; -+ } -+ if (phy->id == M88E1543_E_PHY_ID) { -+ ret_val = e1000_initialize_M88E1543_phy(hw); -+ if (ret_val) -+ goto out; - } - break; - case IGP03E1000_E_PHY_ID: -+ case IGP04E1000_E_PHY_ID: - phy->type = e1000_phy_igp_3; -- phy->ops.get_phy_info = igb_get_phy_info_igp; -- phy->ops.get_cable_length = igb_get_cable_length_igp_2; -- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; -- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; -- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; -+ phy->ops.check_polarity = igb_e1000_check_polarity_igp; -+ phy->ops.get_info = e1000_get_phy_info_igp; -+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2; -+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; -+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; -+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; - break; - case I82580_I_PHY_ID: - case I350_I_PHY_ID: - phy->type = e1000_phy_82580; -+ phy->ops.check_polarity = igb_e1000_check_polarity_82577; - phy->ops.force_speed_duplex = -- igb_phy_force_speed_duplex_82580; -- phy->ops.get_cable_length = igb_get_cable_length_82580; -- phy->ops.get_phy_info = igb_get_phy_info_82580; -- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; -- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; -+ igb_e1000_phy_force_speed_duplex_82577; -+ phy->ops.get_cable_length = igb_e1000_get_cable_length_82577; -+ phy->ops.get_info = igb_e1000_get_phy_info_82577; -+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; -+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; - break; - case I210_I_PHY_ID: - phy->type = e1000_phy_i210; -- phy->ops.check_polarity = igb_check_polarity_m88; -- phy->ops.get_phy_info = igb_get_phy_info_m88; -- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; -- phy->ops.set_d0_lplu_state = 
igb_set_d0_lplu_state_82580; -- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; -- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; -+ phy->ops.check_polarity = igb_e1000_check_polarity_m88; -+ phy->ops.get_info = e1000_get_phy_info_m88; -+ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; -+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; -+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; -+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; - break; - default: - ret_val = -E1000_ERR_PHY; -@@ -296,19 +312,21 @@ - } - - /** -- * igb_init_nvm_params_82575 - Init NVM func ptrs. -+ * e1000_init_nvm_params_82575 - Init NVM func ptrs. - * @hw: pointer to the HW structure - **/ --static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) -+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) - { - struct e1000_nvm_info *nvm = &hw->nvm; -- u32 eecd = rd32(E1000_EECD); -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); - u16 size; - -+ DEBUGFUNC("e1000_init_nvm_params_82575"); -+ - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); -- -- /* Added to a constant, "size" becomes the left-shift value -+ /* -+ * Added to a constant, "size" becomes the left-shift value - * for setting word_size. - */ - size += NVM_WORD_SIZE_BASE_SHIFT; -@@ -320,433 +338,272 @@ - size = 15; - - nvm->word_size = 1 << size; -- nvm->opcode_bits = 8; -- nvm->delay_usec = 1; -+ if (hw->mac.type < e1000_i210) { -+ nvm->opcode_bits = 8; -+ nvm->delay_usec = 1; -+ -+ switch (nvm->override) { -+ case e1000_nvm_override_spi_large: -+ nvm->page_size = 32; -+ nvm->address_bits = 16; -+ break; -+ case e1000_nvm_override_spi_small: -+ nvm->page_size = 8; -+ nvm->address_bits = 8; -+ break; -+ default: -+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; -+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? -+ 16 : 8; -+ break; -+ } -+ if (nvm->word_size == (1 << 15)) -+ nvm->page_size = 128; - -- switch (nvm->override) { -- case e1000_nvm_override_spi_large: -- nvm->page_size = 32; -- nvm->address_bits = 16; -- break; -- case e1000_nvm_override_spi_small: -- nvm->page_size = 8; -- nvm->address_bits = 8; -- break; -- default: -- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; -- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
-- 16 : 8; -- break; -- } -- if (nvm->word_size == (1 << 15)) -- nvm->page_size = 128; -- -- nvm->type = e1000_nvm_eeprom_spi; -- -- /* NVM Function Pointers */ -- nvm->ops.acquire = igb_acquire_nvm_82575; -- nvm->ops.release = igb_release_nvm_82575; -- nvm->ops.write = igb_write_nvm_spi; -- nvm->ops.validate = igb_validate_nvm_checksum; -- nvm->ops.update = igb_update_nvm_checksum; -+ nvm->type = e1000_nvm_eeprom_spi; -+ } else { -+ nvm->type = e1000_nvm_flash_hw; -+ } -+ -+ /* Function Pointers */ -+ nvm->ops.acquire = e1000_acquire_nvm_82575; -+ nvm->ops.release = e1000_release_nvm_82575; - if (nvm->word_size < (1 << 15)) -- nvm->ops.read = igb_read_nvm_eerd; -+ nvm->ops.read = e1000_read_nvm_eerd; - else -- nvm->ops.read = igb_read_nvm_spi; -+ nvm->ops.read = e1000_read_nvm_spi; -+ -+ nvm->ops.write = e1000_write_nvm_spi; -+ nvm->ops.validate = e1000_validate_nvm_checksum_generic; -+ nvm->ops.update = e1000_update_nvm_checksum_generic; -+ nvm->ops.valid_led_default = e1000_valid_led_default_82575; - - /* override generic family function pointers for specific descendants */ - switch (hw->mac.type) { - case e1000_82580: -- nvm->ops.validate = igb_validate_nvm_checksum_82580; -- nvm->ops.update = igb_update_nvm_checksum_82580; -+ nvm->ops.validate = e1000_validate_nvm_checksum_82580; -+ nvm->ops.update = e1000_update_nvm_checksum_82580; - break; -- case e1000_i354: - case e1000_i350: -- nvm->ops.validate = igb_validate_nvm_checksum_i350; -- nvm->ops.update = igb_update_nvm_checksum_i350; -+ case e1000_i354: -+ nvm->ops.validate = e1000_validate_nvm_checksum_i350; -+ nvm->ops.update = e1000_update_nvm_checksum_i350; - break; - default: - break; - } - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_init_mac_params_82575 - Init MAC func ptrs. -+ * e1000_init_mac_params_82575 - Init MAC func ptrs. - * @hw: pointer to the HW structure - **/ --static s32 igb_init_mac_params_82575(struct e1000_hw *hw) -+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) - { - struct e1000_mac_info *mac = &hw->mac; - struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; - -+ DEBUGFUNC("e1000_init_mac_params_82575"); -+ -+ /* Derives media type */ -+ e1000_get_media_type_82575(hw); - /* Set mta register count */ - mac->mta_reg_count = 128; -+ /* Set uta register count */ -+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 
0 : 128; - /* Set rar entry count */ -- switch (mac->type) { -- case e1000_82576: -+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; -+ if (mac->type == e1000_82576) - mac->rar_entry_count = E1000_RAR_ENTRIES_82576; -- break; -- case e1000_82580: -+ if (mac->type == e1000_82580) - mac->rar_entry_count = E1000_RAR_ENTRIES_82580; -- break; -- case e1000_i350: -- case e1000_i354: -+ if (mac->type == e1000_i350 || mac->type == e1000_i354) - mac->rar_entry_count = E1000_RAR_ENTRIES_I350; -- break; -- default: -- mac->rar_entry_count = E1000_RAR_ENTRIES_82575; -- break; -- } -- /* reset */ -- if (mac->type >= e1000_82580) -- mac->ops.reset_hw = igb_reset_hw_82580; -- else -- mac->ops.reset_hw = igb_reset_hw_82575; - -- if (mac->type >= e1000_i210) { -- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; -- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; -+ /* Enable EEE default settings for EEE supported devices */ -+ if (mac->type >= e1000_i350) -+ dev_spec->eee_disable = false; - -- } else { -- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; -- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; -- } -+ /* Allow a single clear of the SW semaphore on I210 and newer */ -+ if (mac->type >= e1000_i210) -+ dev_spec->clear_semaphore_once = true; - - /* Set if part includes ASF firmware */ - mac->asf_firmware_present = true; -- /* Set if manageability features are enabled. */ -+ /* FWSM register */ -+ mac->has_fwsm = true; -+ /* ARC supported; valid only if manageability features are enabled. */ - mac->arc_subsystem_valid = -- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) -- ? true : false; -- /* enable EEE on i350 parts and later parts */ -- if (mac->type >= e1000_i350) -- dev_spec->eee_disable = false; -+ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); -+ -+ /* Function pointers */ -+ -+ /* bus type/speed/width */ -+ mac->ops.get_bus_info = igb_e1000_get_bus_info_pcie_generic; -+ /* reset */ -+ if (mac->type >= e1000_82580) -+ mac->ops.reset_hw = e1000_reset_hw_82580; - else -- dev_spec->eee_disable = true; -- /* Allow a single clear of the SW semaphore on I210 and newer */ -- if (mac->type >= e1000_i210) -- dev_spec->clear_semaphore_once = true; -+ mac->ops.reset_hw = e1000_reset_hw_82575; -+ /* hw initialization */ -+ if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) -+ mac->ops.init_hw = e1000_init_hw_i210; -+ else -+ mac->ops.init_hw = e1000_init_hw_82575; -+ /* link setup */ -+ mac->ops.setup_link = e1000_setup_link_generic; - /* physical interface link setup */ - mac->ops.setup_physical_interface = - (hw->phy.media_type == e1000_media_type_copper) -- ? igb_setup_copper_link_82575 -- : igb_setup_serdes_link_82575; -- -- if (mac->type == e1000_82580) { -- switch (hw->device_id) { -- /* feature not supported on these id's */ -- case E1000_DEV_ID_DH89XXCC_SGMII: -- case E1000_DEV_ID_DH89XXCC_SERDES: -- case E1000_DEV_ID_DH89XXCC_BACKPLANE: -- case E1000_DEV_ID_DH89XXCC_SFP: -- break; -- default: -- hw->dev_spec._82575.mas_capable = true; -- break; -- } -+ ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; -+ /* physical interface shutdown */ -+ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; -+ /* physical interface power up */ -+ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; -+ /* check for link */ -+ mac->ops.check_for_link = e1000_check_for_link_82575; -+ /* read mac address */ -+ mac->ops.read_mac_addr = e1000_read_mac_addr_82575; -+ /* configure collision distance */ -+ mac->ops.config_collision_dist = e1000_config_collision_dist_82575; -+ /* multicast address update */ -+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; -+ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { -+ /* writing VFTA */ -+ mac->ops.write_vfta = e1000_write_vfta_i350; -+ /* clearing VFTA */ -+ mac->ops.clear_vfta = e1000_clear_vfta_i350; -+ } else { -+ /* writing VFTA */ -+ mac->ops.write_vfta = igb_e1000_write_vfta_generic; -+ /* clearing VFTA */ -+ mac->ops.clear_vfta = igb_e1000_clear_vfta_generic; -+ } -+ if (hw->mac.type >= e1000_82580) -+ mac->ops.validate_mdi_setting = -+ e1000_validate_mdi_setting_crossover_generic; -+ /* ID LED init */ -+ mac->ops.id_led_init = e1000_id_led_init_generic; -+ /* blink LED */ -+ mac->ops.blink_led = e1000_blink_led_generic; -+ /* setup LED */ -+ mac->ops.setup_led = e1000_setup_led_generic; -+ /* cleanup LED */ -+ mac->ops.cleanup_led = e1000_cleanup_led_generic; -+ /* turn on/off LED */ -+ mac->ops.led_on = e1000_led_on_generic; -+ mac->ops.led_off = e1000_led_off_generic; -+ /* clear hardware counters */ -+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; -+ /* link info */ -+ mac->ops.get_link_up_info = e1000_get_link_up_info_82575; -+ /* get thermal sensor data */ -+ mac->ops.get_thermal_sensor_data = -+ e1000_get_thermal_sensor_data_generic; -+ mac->ops.init_thermal_sensor_thresh = -+ e1000_init_thermal_sensor_thresh_generic; -+ /* acquire SW_FW sync */ -+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; -+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; -+ if (mac->type >= e1000_i210) { -+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; -+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; - } -- return 0; -+ -+ /* set lan id for port to determine which phy lock to use */ -+ hw->mac.ops.set_lan_id(hw); -+ -+ return E1000_SUCCESS; - } - - /** -- * igb_set_sfp_media_type_82575 - derives SFP module media type. -+ * e1000_init_function_pointers_82575 - Init func ptrs. - * @hw: pointer to the HW structure - * -- * The media type is chosen based on SFP module. -- * compatibility flags retrieved from SFP ID EEPROM. -+ * Called to initialize all function pointers and parameters. 
- **/ --static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) -+void e1000_init_function_pointers_82575(struct e1000_hw *hw) - { -- s32 ret_val = E1000_ERR_CONFIG; -- u32 ctrl_ext = 0; -- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; -- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; -- u8 tranceiver_type = 0; -- s32 timeout = 3; -+ DEBUGFUNC("e1000_init_function_pointers_82575"); - -- /* Turn I2C interface ON and power on sfp cage */ -- ctrl_ext = rd32(E1000_CTRL_EXT); -- ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; -- wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); -+ hw->mac.ops.init_params = e1000_init_mac_params_82575; -+ hw->nvm.ops.init_params = e1000_init_nvm_params_82575; -+ hw->phy.ops.init_params = e1000_init_phy_params_82575; -+ hw->mbx.ops.init_params = e1000_init_mbx_params_pf; -+} - -- wrfl(); -+/** -+ * e1000_acquire_phy_82575 - Acquire rights to access PHY -+ * @hw: pointer to the HW structure -+ * -+ * Acquire access rights to the correct PHY. -+ **/ -+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) -+{ -+ u16 mask = E1000_SWFW_PHY0_SM; - -- /* Read SFP module data */ -- while (timeout) { -- ret_val = igb_read_sfp_data_byte(hw, -- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), -- &tranceiver_type); -- if (ret_val == 0) -- break; -- msleep(100); -- timeout--; -- } -- if (ret_val != 0) -- goto out; -+ DEBUGFUNC("e1000_acquire_phy_82575"); - -- ret_val = igb_read_sfp_data_byte(hw, -- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), -- (u8 *)eth_flags); -- if (ret_val != 0) -- goto out; -+ if (hw->bus.func == E1000_FUNC_1) -+ mask = E1000_SWFW_PHY1_SM; -+ else if (hw->bus.func == E1000_FUNC_2) -+ mask = E1000_SWFW_PHY2_SM; -+ else if (hw->bus.func == E1000_FUNC_3) -+ mask = E1000_SWFW_PHY3_SM; - -- /* Check if there is some SFP module plugged and powered */ -- if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || -- (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { -- dev_spec->module_plugged = true; -- if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { -- hw->phy.media_type = e1000_media_type_internal_serdes; -- } else if (eth_flags->e100_base_fx) { -- dev_spec->sgmii_active = true; -- hw->phy.media_type = e1000_media_type_internal_serdes; -- } else if (eth_flags->e1000_base_t) { -- dev_spec->sgmii_active = true; -- hw->phy.media_type = e1000_media_type_copper; -- } else { -- hw->phy.media_type = e1000_media_type_unknown; -- hw_dbg("PHY module has not been recognized\n"); -- goto out; -- } -- } else { -- hw->phy.media_type = e1000_media_type_unknown; -- } -- ret_val = 0; --out: -- /* Restore I2C interface setting */ -- wr32(E1000_CTRL_EXT, ctrl_ext); -- return ret_val; -+ return hw->mac.ops.acquire_swfw_sync(hw, mask); - } - --static s32 igb_get_invariants_82575(struct e1000_hw *hw) -+/** -+ * e1000_release_phy_82575 - Release rights to access PHY -+ * @hw: pointer to the HW structure -+ * -+ * A wrapper to release access rights to the correct PHY. 
-+ **/ -+static void e1000_release_phy_82575(struct e1000_hw *hw) - { -- struct e1000_mac_info *mac = &hw->mac; -- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; -- s32 ret_val; -- u32 ctrl_ext = 0; -- u32 link_mode = 0; -+ u16 mask = E1000_SWFW_PHY0_SM; - -- switch (hw->device_id) { -- case E1000_DEV_ID_82575EB_COPPER: -- case E1000_DEV_ID_82575EB_FIBER_SERDES: -- case E1000_DEV_ID_82575GB_QUAD_COPPER: -- mac->type = e1000_82575; -- break; -- case E1000_DEV_ID_82576: -- case E1000_DEV_ID_82576_NS: -- case E1000_DEV_ID_82576_NS_SERDES: -- case E1000_DEV_ID_82576_FIBER: -- case E1000_DEV_ID_82576_SERDES: -- case E1000_DEV_ID_82576_QUAD_COPPER: -- case E1000_DEV_ID_82576_QUAD_COPPER_ET2: -- case E1000_DEV_ID_82576_SERDES_QUAD: -- mac->type = e1000_82576; -- break; -- case E1000_DEV_ID_82580_COPPER: -- case E1000_DEV_ID_82580_FIBER: -- case E1000_DEV_ID_82580_QUAD_FIBER: -- case E1000_DEV_ID_82580_SERDES: -- case E1000_DEV_ID_82580_SGMII: -- case E1000_DEV_ID_82580_COPPER_DUAL: -- case E1000_DEV_ID_DH89XXCC_SGMII: -- case E1000_DEV_ID_DH89XXCC_SERDES: -- case E1000_DEV_ID_DH89XXCC_BACKPLANE: -- case E1000_DEV_ID_DH89XXCC_SFP: -- mac->type = e1000_82580; -- break; -- case E1000_DEV_ID_I350_COPPER: -- case E1000_DEV_ID_I350_FIBER: -- case E1000_DEV_ID_I350_SERDES: -- case E1000_DEV_ID_I350_SGMII: -- mac->type = e1000_i350; -- break; -- case E1000_DEV_ID_I210_COPPER: -- case E1000_DEV_ID_I210_FIBER: -- case E1000_DEV_ID_I210_SERDES: -- case E1000_DEV_ID_I210_SGMII: -- case E1000_DEV_ID_I210_COPPER_FLASHLESS: -- case E1000_DEV_ID_I210_SERDES_FLASHLESS: -- mac->type = e1000_i210; -- break; -- case E1000_DEV_ID_I211_COPPER: -- mac->type = e1000_i211; -- break; -- case E1000_DEV_ID_I354_BACKPLANE_1GBPS: -- case E1000_DEV_ID_I354_SGMII: -- case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: -- mac->type = e1000_i354; -- break; -- default: -- return -E1000_ERR_MAC_INIT; -- break; -- } -+ DEBUGFUNC("e1000_release_phy_82575"); - -- /* Set media type */ -- /* The 82575 uses bits 22:23 for link mode. The mode can be changed -- * based on the EEPROM. We cannot rely upon device ID. There -- * is no distinguishable difference between fiber and internal -- * SerDes mode on the 82575. There can be an external PHY attached -- * on the SGMII interface. For this, we'll set sgmii_active to true. -- */ -- hw->phy.media_type = e1000_media_type_copper; -- dev_spec->sgmii_active = false; -- dev_spec->module_plugged = false; -+ if (hw->bus.func == E1000_FUNC_1) -+ mask = E1000_SWFW_PHY1_SM; -+ else if (hw->bus.func == E1000_FUNC_2) -+ mask = E1000_SWFW_PHY2_SM; -+ else if (hw->bus.func == E1000_FUNC_3) -+ mask = E1000_SWFW_PHY3_SM; - -- ctrl_ext = rd32(E1000_CTRL_EXT); -+ hw->mac.ops.release_swfw_sync(hw, mask); -+} - -- link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; -- switch (link_mode) { -- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: -- hw->phy.media_type = e1000_media_type_internal_serdes; -- break; -- case E1000_CTRL_EXT_LINK_MODE_SGMII: -- /* Get phy control interface type set (MDIO vs. I2C)*/ -- if (igb_sgmii_uses_mdio_82575(hw)) { -- hw->phy.media_type = e1000_media_type_copper; -- dev_spec->sgmii_active = true; -- break; -- } -- /* fall through for I2C based SGMII */ -- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: -- /* read media type from SFP EEPROM */ -- ret_val = igb_set_sfp_media_type_82575(hw); -- if ((ret_val != 0) || -- (hw->phy.media_type == e1000_media_type_unknown)) { -- /* If media type was not identified then return media -- * type defined by the CTRL_EXT settings. 
-- */ -- hw->phy.media_type = e1000_media_type_internal_serdes; -+/** -+ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * -+ * Reads the PHY register at offset using the serial gigabit media independent -+ * interface and stores the retrieved information in data. -+ **/ -+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, -+ u16 *data) -+{ -+ s32 ret_val = -E1000_ERR_PARAM; - -- if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { -- hw->phy.media_type = e1000_media_type_copper; -- dev_spec->sgmii_active = true; -- } -+ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); - -- break; -- } -+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { -+ DEBUGOUT1("PHY Address %u is out of range\n", offset); -+ goto out; -+ } - -- /* do not change link mode for 100BaseFX */ -- if (dev_spec->eth_flags.e100_base_fx) -- break; -+ ret_val = hw->phy.ops.acquire(hw); -+ if (ret_val) -+ goto out; - -- /* change current link mode setting */ -- ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; -- -- if (hw->phy.media_type == e1000_media_type_copper) -- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; -- else -- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; -- -- wr32(E1000_CTRL_EXT, ctrl_ext); -- -- break; -- default: -- break; -- } -- -- /* mac initialization and operations */ -- ret_val = igb_init_mac_params_82575(hw); -- if (ret_val) -- goto out; -- -- /* NVM initialization */ -- ret_val = igb_init_nvm_params_82575(hw); -- switch (hw->mac.type) { -- case e1000_i210: -- case e1000_i211: -- ret_val = igb_init_nvm_params_i210(hw); -- break; -- default: -- break; -- } -- -- if (ret_val) -- goto out; -- -- /* if part supports SR-IOV then initialize mailbox parameters */ -- switch (mac->type) { -- case e1000_82576: -- case e1000_i350: -- igb_init_mbx_params_pf(hw); -- break; -- default: -- break; -- } -- -- /* setup PHY parameters */ -- ret_val = igb_init_phy_params_82575(hw); -- --out: -- return ret_val; --} -- --/** -- * igb_acquire_phy_82575 - Acquire rights to access PHY -- * @hw: pointer to the HW structure -- * -- * Acquire access rights to the correct PHY. This is a -- * function pointer entry point called by the api module. -- **/ --static s32 igb_acquire_phy_82575(struct e1000_hw *hw) --{ -- u16 mask = E1000_SWFW_PHY0_SM; -- -- if (hw->bus.func == E1000_FUNC_1) -- mask = E1000_SWFW_PHY1_SM; -- else if (hw->bus.func == E1000_FUNC_2) -- mask = E1000_SWFW_PHY2_SM; -- else if (hw->bus.func == E1000_FUNC_3) -- mask = E1000_SWFW_PHY3_SM; -- -- return hw->mac.ops.acquire_swfw_sync(hw, mask); --} -- --/** -- * igb_release_phy_82575 - Release rights to access PHY -- * @hw: pointer to the HW structure -- * -- * A wrapper to release access rights to the correct PHY. This is a -- * function pointer entry point called by the api module. 
-- **/ --static void igb_release_phy_82575(struct e1000_hw *hw) --{ -- u16 mask = E1000_SWFW_PHY0_SM; -- -- if (hw->bus.func == E1000_FUNC_1) -- mask = E1000_SWFW_PHY1_SM; -- else if (hw->bus.func == E1000_FUNC_2) -- mask = E1000_SWFW_PHY2_SM; -- else if (hw->bus.func == E1000_FUNC_3) -- mask = E1000_SWFW_PHY3_SM; -- -- hw->mac.ops.release_swfw_sync(hw, mask); --} -- --/** -- * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii -- * @hw: pointer to the HW structure -- * @offset: register offset to be read -- * @data: pointer to the read data -- * -- * Reads the PHY register at offset using the serial gigabit media independent -- * interface and stores the retrieved information in data. -- **/ --static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, -- u16 *data) --{ -- s32 ret_val = -E1000_ERR_PARAM; -- -- if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { -- hw_dbg("PHY Address %u is out of range\n", offset); -- goto out; -- } -- -- ret_val = hw->phy.ops.acquire(hw); -- if (ret_val) -- goto out; -- -- ret_val = igb_read_phy_reg_i2c(hw, offset, data); -+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data); - - hw->phy.ops.release(hw); - -@@ -755,7 +612,7 @@ - } - - /** -- * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii -+ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset -@@ -763,14 +620,15 @@ - * Writes the data to PHY register at the offset using the serial gigabit - * media independent interface. - **/ --static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, -+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, - u16 data) - { - s32 ret_val = -E1000_ERR_PARAM; - -+ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); - - if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { -- hw_dbg("PHY Address %d is out of range\n", offset); -+ DEBUGOUT1("PHY Address %d is out of range\n", offset); - goto out; - } - -@@ -778,7 +636,7 @@ - if (ret_val) - goto out; - -- ret_val = igb_write_phy_reg_i2c(hw, offset, data); -+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data); - - hw->phy.ops.release(hw); - -@@ -787,41 +645,44 @@ - } - - /** -- * igb_get_phy_id_82575 - Retrieve PHY addr and id -+ * e1000_get_phy_id_82575 - Retrieve PHY addr and id - * @hw: pointer to the HW structure - * - * Retrieves the PHY address and ID for both PHY's which do and do not use - * sgmi interface. - **/ --static s32 igb_get_phy_id_82575(struct e1000_hw *hw) -+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u16 phy_id; - u32 ctrl_ext; - u32 mdic; - -- /* Extra read required for some PHY's on i354 */ -+ DEBUGFUNC("e1000_get_phy_id_82575"); -+ -+ /* some i354 devices need an extra read for phy id */ - if (hw->mac.type == e1000_i354) -- igb_get_phy_id(hw); -+ e1000_get_phy_id(hw); - -- /* For SGMII PHYs, we try the list of possible addresses until -+ /* -+ * For SGMII PHYs, we try the list of possible addresses until - * we find one that works. For non-SGMII PHYs - * (e.g. integrated copper PHYs), an address of 1 should - * work. The result of this function should mean phy->phy_addr - * and phy->id are set correctly. 
- */ -- if (!(igb_sgmii_active_82575(hw))) { -+ if (!e1000_sgmii_active_82575(hw)) { - phy->addr = 1; -- ret_val = igb_get_phy_id(hw); -+ ret_val = e1000_get_phy_id(hw); - goto out; - } - -- if (igb_sgmii_uses_mdio_82575(hw)) { -+ if (e1000_sgmii_uses_mdio_82575(hw)) { - switch (hw->mac.type) { - case e1000_82575: - case e1000_82576: -- mdic = rd32(E1000_MDIC); -+ mdic = E1000_READ_REG(hw, E1000_MDIC); - mdic &= E1000_MDIC_PHY_MASK; - phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; - break; -@@ -830,7 +691,7 @@ - case e1000_i354: - case e1000_i210: - case e1000_i211: -- mdic = rd32(E1000_MDICNFG); -+ mdic = E1000_READ_REG(hw, E1000_MDICNFG); - mdic &= E1000_MDICNFG_PHY_MASK; - phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; - break; -@@ -839,31 +700,35 @@ - goto out; - break; - } -- ret_val = igb_get_phy_id(hw); -+ ret_val = e1000_get_phy_id(hw); - goto out; - } - - /* Power on sgmii phy if it is disabled */ -- ctrl_ext = rd32(E1000_CTRL_EXT); -- wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); -- wrfl(); -- msleep(300); -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, -+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); -+ E1000_WRITE_FLUSH(hw); -+ msec_delay(300); - -- /* The address field in the I2CCMD register is 3 bits and 0 is invalid. -+ /* -+ * The address field in the I2CCMD register is 3 bits and 0 is invalid. - * Therefore, we need to test 1-7 - */ - for (phy->addr = 1; phy->addr < 8; phy->addr++) { -- ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); -- if (ret_val == 0) { -- hw_dbg("Vendor ID 0x%08X read at address %u\n", -- phy_id, phy->addr); -- /* At the time of this writing, The M88 part is -+ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); -+ if (ret_val == E1000_SUCCESS) { -+ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", -+ phy_id, phy->addr); -+ /* -+ * At the time of this writing, The M88 part is - * the only supported SGMII PHY product. - */ - if (phy_id == M88_VENDOR) - break; - } else { -- hw_dbg("PHY address %u was unreadable\n", phy->addr); -+ DEBUGOUT1("PHY address %u was unreadable\n", -+ phy->addr); - } - } - -@@ -871,49 +736,60 @@ - if (phy->addr == 8) { - phy->addr = 0; - ret_val = -E1000_ERR_PHY; -- goto out; - } else { -- ret_val = igb_get_phy_id(hw); -+ ret_val = e1000_get_phy_id(hw); - } - - /* restore previous sfp cage power state */ -- wr32(E1000_CTRL_EXT, ctrl_ext); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); - - out: - return ret_val; - } - - /** -- * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset -+ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset - * @hw: pointer to the HW structure - * - * Resets the PHY using the serial gigabit media independent interface. - **/ --static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) -+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) - { -- s32 ret_val; -+ s32 ret_val = E1000_SUCCESS; -+ struct e1000_phy_info *phy = &hw->phy; - -- /* This isn't a true "hard" reset, but is the only reset -+ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); -+ -+ /* -+ * This isn't a true "hard" reset, but is the only reset - * available to us at this time. - */ - -- hw_dbg("Soft resetting SGMII attached PHY...\n"); -+ DEBUGOUT("Soft resetting SGMII attached PHY...\n"); -+ -+ if (!(hw->phy.ops.write_reg)) -+ goto out; - -- /* SFP documentation requires the following to configure the SPF module -+ /* -+ * SFP documentation requires the following to configure the SPF module - * to work on SGMII. No further documentation is given. 
- */ - ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); - if (ret_val) - goto out; - -- ret_val = igb_phy_sw_reset(hw); -+ ret_val = hw->phy.ops.commit(hw); -+ if (ret_val) -+ goto out; - -+ if (phy->id == M88E1512_E_PHY_ID) -+ ret_val = e1000_initialize_M88E1512_phy(hw); - out: - return ret_val; - } - - /** -- * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state -+ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * -@@ -925,12 +801,17 @@ - * This is a function pointer entry point only called by - * PHY setup routines. - **/ --static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) -+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val; -+ s32 ret_val = E1000_SUCCESS; - u16 data; - -+ DEBUGFUNC("e1000_set_d0_lplu_state_82575"); -+ -+ if (!(hw->phy.ops.read_reg)) -+ goto out; -+ - ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); - if (ret_val) - goto out; -@@ -938,47 +819,52 @@ - if (active) { - data |= IGP02E1000_PM_D0_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, -- data); -+ data); - if (ret_val) - goto out; - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, -- &data); -+ &data); - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, -- data); -+ data); - if (ret_val) - goto out; - } else { - data &= ~IGP02E1000_PM_D0_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, -- data); -- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used -+ data); -+ /* -+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. - */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = phy->ops.read_reg(hw, -- IGP01E1000_PHY_PORT_CONFIG, &data); -+ IGP01E1000_PHY_PORT_CONFIG, -+ &data); - if (ret_val) - goto out; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, -- IGP01E1000_PHY_PORT_CONFIG, data); -+ IGP01E1000_PHY_PORT_CONFIG, -+ data); - if (ret_val) - goto out; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = phy->ops.read_reg(hw, -- IGP01E1000_PHY_PORT_CONFIG, &data); -+ IGP01E1000_PHY_PORT_CONFIG, -+ &data); - if (ret_val) - goto out; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, -- IGP01E1000_PHY_PORT_CONFIG, data); -+ IGP01E1000_PHY_PORT_CONFIG, -+ data); - if (ret_val) - goto out; - } -@@ -989,7 +875,7 @@ - } - - /** -- * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state -+ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * -@@ -1001,12 +887,14 @@ - * This is a function pointer entry point only called by - * PHY setup routines. 
- **/ --static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) -+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) - { - struct e1000_phy_info *phy = &hw->phy; -- u16 data; -+ u32 data; - -- data = rd32(E1000_82580_PHY_POWER_MGMT); -+ DEBUGFUNC("e1000_set_d0_lplu_state_82580"); -+ -+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); - - if (active) { - data |= E1000_82580_PM_D0_LPLU; -@@ -1016,7 +904,8 @@ - } else { - data &= ~E1000_82580_PM_D0_LPLU; - -- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used -+ /* -+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. -@@ -1024,14 +913,15 @@ - if (phy->smart_speed == e1000_smart_speed_on) - data |= E1000_82580_PM_SPD; - else if (phy->smart_speed == e1000_smart_speed_off) -- data &= ~E1000_82580_PM_SPD; } -+ data &= ~E1000_82580_PM_SPD; -+ } - -- wr32(E1000_82580_PHY_POWER_MGMT, data); -- return 0; -+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); -+ return E1000_SUCCESS; - } - - /** -- * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 -+ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * -@@ -1044,16 +934,19 @@ - * During driver activity, SmartSpeed should be enabled so performance is - * maintained. - **/ --static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) -+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) - { - struct e1000_phy_info *phy = &hw->phy; -- u16 data; -+ u32 data; - -- data = rd32(E1000_82580_PHY_POWER_MGMT); -+ DEBUGFUNC("e1000_set_d3_lplu_state_82580"); -+ -+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); - - if (!active) { - data &= ~E1000_82580_PM_D3_LPLU; -- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used -+ /* -+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. -@@ -1070,12 +963,12 @@ - data &= ~E1000_82580_PM_SPD; - } - -- wr32(E1000_82580_PHY_POWER_MGMT, data); -- return 0; -+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); -+ return E1000_SUCCESS; - } - - /** -- * igb_acquire_nvm_82575 - Request for access to EEPROM -+ * e1000_acquire_nvm_82575 - Request for access to EEPROM - * @hw: pointer to the HW structure - * - * Acquire the necessary semaphores for exclusive access to the EEPROM. -@@ -1083,148 +976,183 @@ - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). 
- **/ --static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) -+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) - { -- s32 ret_val; -+ s32 ret_val = E1000_SUCCESS; - -- ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); -+ DEBUGFUNC("e1000_acquire_nvm_82575"); -+ -+ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); - if (ret_val) - goto out; - -- ret_val = igb_acquire_nvm(hw); -+ /* -+ * Check if there is some access -+ * error this access may hook on -+ */ -+ if (hw->mac.type == e1000_i350) { -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); -+ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | -+ E1000_EECD_TIMEOUT)) { -+ /* Clear all access error flags */ -+ E1000_WRITE_REG(hw, E1000_EECD, eecd | -+ E1000_EECD_ERROR_CLR); -+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); -+ } -+ } -+ -+ if (hw->mac.type == e1000_82580) { -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); -+ if (eecd & E1000_EECD_BLOCKED) { -+ /* Clear access error flag */ -+ E1000_WRITE_REG(hw, E1000_EECD, eecd | -+ E1000_EECD_BLOCKED); -+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); -+ } -+ } - -+ ret_val = e1000_acquire_nvm_generic(hw); - if (ret_val) -- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); -+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); - - out: - return ret_val; - } - - /** -- * igb_release_nvm_82575 - Release exclusive access to EEPROM -+ * e1000_release_nvm_82575 - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit, - * then release the semaphores acquired. - **/ --static void igb_release_nvm_82575(struct e1000_hw *hw) -+static void e1000_release_nvm_82575(struct e1000_hw *hw) - { -- igb_release_nvm(hw); -- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); -+ DEBUGFUNC("e1000_release_nvm_82575"); -+ -+ e1000_release_nvm_generic(hw); -+ -+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); - } - - /** -- * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore -+ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Acquire the SW/FW semaphore to access the PHY or NVM. The mask - * will also specify which port we're acquiring the lock for. 
- **/ --static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) -+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) - { - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 16; -- s32 ret_val = 0; -- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ -+ s32 ret_val = E1000_SUCCESS; -+ s32 i = 0, timeout = 200; -+ -+ DEBUGFUNC("e1000_acquire_swfw_sync_82575"); - - while (i < timeout) { -- if (igb_get_hw_semaphore(hw)) { -+ if (e1000_get_hw_semaphore_generic(hw)) { - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } - -- swfw_sync = rd32(E1000_SW_FW_SYNC); -+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); - if (!(swfw_sync & (fwmask | swmask))) - break; - -- /* Firmware currently using resource (fwmask) -+ /* -+ * Firmware currently using resource (fwmask) - * or other software thread using resource (swmask) - */ -- igb_put_hw_semaphore(hw); -- mdelay(5); -+ e1000_put_hw_semaphore_generic(hw); -+ msec_delay_irq(5); - i++; - } - - if (i == timeout) { -- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); -+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } - - swfw_sync |= swmask; -- wr32(E1000_SW_FW_SYNC, swfw_sync); -+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); - -- igb_put_hw_semaphore(hw); -+ e1000_put_hw_semaphore_generic(hw); - - out: - return ret_val; - } - - /** -- * igb_release_swfw_sync_82575 - Release SW/FW semaphore -+ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Release the SW/FW semaphore used to access the PHY or NVM. The mask - * will also specify which port we're releasing the lock for. - **/ --static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) -+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) - { - u32 swfw_sync; - -- while (igb_get_hw_semaphore(hw) != 0) -+ DEBUGFUNC("e1000_release_swfw_sync_82575"); -+ -+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) - ; /* Empty */ - -- swfw_sync = rd32(E1000_SW_FW_SYNC); -+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); - swfw_sync &= ~mask; -- wr32(E1000_SW_FW_SYNC, swfw_sync); -+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); - -- igb_put_hw_semaphore(hw); -+ e1000_put_hw_semaphore_generic(hw); - } - - /** -- * igb_get_cfg_done_82575 - Read config done bit -+ * e1000_get_cfg_done_82575 - Read config done bit - * @hw: pointer to the HW structure - * - * Read the management control register for the config done bit for - * completion status. NOTE: silicon which is EEPROM-less will fail trying - * to read the config done bit, so an error is *ONLY* logged and returns -- * 0. If we were to return with error, EEPROM-less silicon -+ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon - * would not be able to be reset or change link. 
- **/ --static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) -+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) - { - s32 timeout = PHY_CFG_TIMEOUT; - u32 mask = E1000_NVM_CFG_DONE_PORT_0; - -- if (hw->bus.func == 1) -+ DEBUGFUNC("e1000_get_cfg_done_82575"); -+ -+ if (hw->bus.func == E1000_FUNC_1) - mask = E1000_NVM_CFG_DONE_PORT_1; - else if (hw->bus.func == E1000_FUNC_2) - mask = E1000_NVM_CFG_DONE_PORT_2; - else if (hw->bus.func == E1000_FUNC_3) - mask = E1000_NVM_CFG_DONE_PORT_3; -- - while (timeout) { -- if (rd32(E1000_EEMNGCTL) & mask) -+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) - break; -- usleep_range(1000, 2000); -+ msec_delay(1); - timeout--; - } - if (!timeout) -- hw_dbg("MNG configuration cycle has not completed.\n"); -+ DEBUGOUT("MNG configuration cycle has not completed.\n"); - - /* If EEPROM is not marked present, init the PHY manually */ -- if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && -+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && - (hw->phy.type == e1000_phy_igp_3)) -- igb_phy_init_script_igp3(hw); -+ e1000_phy_init_script_igp3(hw); - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_get_link_up_info_82575 - Get link speed/duplex info -+ * e1000_get_link_up_info_82575 - Get link speed/duplex info - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex -@@ -1233,87 +1161,156 @@ - * interface, use PCS to retrieve the link speed and duplex information. - * Otherwise, use the generic function to get the link speed and duplex info. - **/ --static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, -+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, - u16 *duplex) - { - s32 ret_val; - -+ DEBUGFUNC("e1000_get_link_up_info_82575"); -+ - if (hw->phy.media_type != e1000_media_type_copper) -- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, -+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, - duplex); - else -- ret_val = igb_get_speed_and_duplex_copper(hw, speed, -+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, - duplex); - - return ret_val; - } - - /** -- * igb_check_for_link_82575 - Check for link -+ * e1000_check_for_link_82575 - Check for link - * @hw: pointer to the HW structure - * - * If sgmii is enabled, then use the pcs register to determine link, otherwise - * use the generic interface for determining link. - **/ --static s32 igb_check_for_link_82575(struct e1000_hw *hw) -+static s32 e1000_check_for_link_82575(struct e1000_hw *hw) - { - s32 ret_val; - u16 speed, duplex; - -+ DEBUGFUNC("e1000_check_for_link_82575"); -+ - if (hw->phy.media_type != e1000_media_type_copper) { -- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, -- &duplex); -- /* Use this flag to determine if link needs to be checked or -- * not. If we have link clear the flag so that we do not -+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, -+ &duplex); -+ /* -+ * Use this flag to determine if link needs to be checked or -+ * not. If we have link clear the flag so that we do not - * continue to check for link. - */ - hw->mac.get_link_status = !hw->mac.serdes_has_link; - -- /* Configure Flow Control now that Auto-Neg has completed. -+ /* -+ * Configure Flow Control now that Auto-Neg has completed. - * First, we need to restore the desired flow control - * settings because we may have had to re-autoneg with a - * different link partner. 
- */ -- ret_val = igb_config_fc_after_link_up(hw); -+ ret_val = e1000_config_fc_after_link_up_generic(hw); - if (ret_val) -- hw_dbg("Error configuring flow control\n"); -+ DEBUGOUT("Error configuring flow control\n"); - } else { -- ret_val = igb_check_for_copper_link(hw); -+ ret_val = e1000_check_for_copper_link_generic(hw); - } - - return ret_val; - } - - /** -- * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown -+ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked -+ * @hw: pointer to the HW structure -+ * -+ * Poll the M88E1112 interfaces to see which interface achieved link. -+ */ -+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val; -+ u16 data; -+ u8 port = 0; -+ -+ DEBUGFUNC("e1000_check_for_link_media_swap"); -+ -+ /* Check for copper. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); -+ if (ret_val) -+ return ret_val; -+ -+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); -+ if (ret_val) -+ return ret_val; -+ -+ if (data & E1000_M88E1112_STATUS_LINK) -+ port = E1000_MEDIA_PORT_COPPER; -+ -+ /* Check for other. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); -+ if (ret_val) -+ return ret_val; -+ -+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); -+ if (ret_val) -+ return ret_val; -+ -+ if (data & E1000_M88E1112_STATUS_LINK) -+ port = E1000_MEDIA_PORT_OTHER; -+ -+ /* Determine if a swap needs to happen. */ -+ if (port && (hw->dev_spec._82575.media_port != port)) { -+ hw->dev_spec._82575.media_port = port; -+ hw->dev_spec._82575.media_changed = true; -+ } -+ -+ if (port == E1000_MEDIA_PORT_COPPER) { -+ /* reset page to 0 */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); -+ if (ret_val) -+ return ret_val; -+ e1000_check_for_link_82575(hw); -+ } else { -+ e1000_check_for_link_82575(hw); -+ /* reset page to 0 */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); -+ if (ret_val) -+ return ret_val; -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown - * @hw: pointer to the HW structure - **/ --void igb_power_up_serdes_link_82575(struct e1000_hw *hw) -+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) - { - u32 reg; - -+ DEBUGFUNC("e1000_power_up_serdes_link_82575"); - - if ((hw->phy.media_type != e1000_media_type_internal_serdes) && -- !igb_sgmii_active_82575(hw)) -+ !e1000_sgmii_active_82575(hw)) - return; - - /* Enable PCS to turn on link */ -- reg = rd32(E1000_PCS_CFG0); -+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); - reg |= E1000_PCS_CFG_PCS_EN; -- wr32(E1000_PCS_CFG0, reg); -+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); - - /* Power up the laser */ -- reg = rd32(E1000_CTRL_EXT); -+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); - reg &= ~E1000_CTRL_EXT_SDP3_DATA; -- wr32(E1000_CTRL_EXT, reg); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); - - /* flush the write to verify completion */ -- wrfl(); -- usleep_range(1000, 2000); -+ E1000_WRITE_FLUSH(hw); -+ msec_delay(1); - } - - /** -- * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex -+ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex -@@ -1321,28 +1318,26 @@ - * Using the physical coding sub-layer (PCS), retrieve the current speed and - * duplex, then store the values in the pointers 
provided. - **/ --static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, -- u16 *duplex) -+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, -+ u16 *speed, u16 *duplex) - { - struct e1000_mac_info *mac = &hw->mac; -- u32 pcs, status; -+ u32 pcs; -+ u32 status; - -- /* Set up defaults for the return values of this function */ -- mac->serdes_has_link = false; -- *speed = 0; -- *duplex = 0; -+ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); - -- /* Read the PCS Status register for link state. For non-copper mode, -+ /* -+ * Read the PCS Status register for link state. For non-copper mode, - * the status register is not accurate. The PCS status register is - * used instead. - */ -- pcs = rd32(E1000_PCS_LSTAT); -+ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); - -- /* The link up bit determines when link is up on autoneg. The sync ok -- * gets set once both sides sync up and agree upon link. Stable link -- * can be determined by checking for both link up and link sync ok -+ /* -+ * The link up bit determines when link is up on autoneg. - */ -- if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { -+ if (pcs & E1000_PCS_LSTS_LINK_OK) { - mac->serdes_has_link = true; - - /* Detect and store PCS speed */ -@@ -1359,192 +1354,202 @@ - else - *duplex = HALF_DUPLEX; - -- /* Check if it is an I354 2.5Gb backplane connection. */ -+ /* Check if it is an I354 2.5Gb backplane connection. */ - if (mac->type == e1000_i354) { -- status = rd32(E1000_STATUS); -+ status = E1000_READ_REG(hw, E1000_STATUS); - if ((status & E1000_STATUS_2P5_SKU) && - !(status & E1000_STATUS_2P5_SKU_OVER)) { - *speed = SPEED_2500; - *duplex = FULL_DUPLEX; -- hw_dbg("2500 Mbs, "); -- hw_dbg("Full Duplex\n"); -+ DEBUGOUT("2500 Mbs, "); -+ DEBUGOUT("Full Duplex\n"); - } - } - -+ } else { -+ mac->serdes_has_link = false; -+ *speed = 0; -+ *duplex = 0; - } - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_shutdown_serdes_link_82575 - Remove link during power down -+ * e1000_shutdown_serdes_link_82575 - Remove link during power down - * @hw: pointer to the HW structure - * -- * In the case of fiber serdes, shut down optics and PCS on driver unload -+ * In the case of serdes shut down sfp and PCS on driver unload - * when management pass thru is not enabled. 
- **/ --void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) -+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) - { - u32 reg; - -- if (hw->phy.media_type != e1000_media_type_internal_serdes && -- igb_sgmii_active_82575(hw)) -+ DEBUGFUNC("e1000_shutdown_serdes_link_82575"); -+ -+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) && -+ !e1000_sgmii_active_82575(hw)) - return; - -- if (!igb_enable_mng_pass_thru(hw)) { -+ if (!igb_e1000_enable_mng_pass_thru(hw)) { - /* Disable PCS to turn off link */ -- reg = rd32(E1000_PCS_CFG0); -+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); - reg &= ~E1000_PCS_CFG_PCS_EN; -- wr32(E1000_PCS_CFG0, reg); -+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); - - /* shutdown the laser */ -- reg = rd32(E1000_CTRL_EXT); -+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); - reg |= E1000_CTRL_EXT_SDP3_DATA; -- wr32(E1000_CTRL_EXT, reg); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); - - /* flush the write to verify completion */ -- wrfl(); -- usleep_range(1000, 2000); -+ E1000_WRITE_FLUSH(hw); -+ msec_delay(1); - } -+ -+ return; - } - - /** -- * igb_reset_hw_82575 - Reset hardware -+ * e1000_reset_hw_82575 - Reset hardware - * @hw: pointer to the HW structure - * -- * This resets the hardware into a known state. This is a -- * function pointer entry point called by the api module. -+ * This resets the hardware into a known state. - **/ --static s32 igb_reset_hw_82575(struct e1000_hw *hw) -+static s32 e1000_reset_hw_82575(struct e1000_hw *hw) - { - u32 ctrl; - s32 ret_val; - -- /* Prevent the PCI-E bus from sticking if there is no TLP connection -+ DEBUGFUNC("e1000_reset_hw_82575"); -+ -+ /* -+ * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. - */ -- ret_val = igb_disable_pcie_master(hw); -+ ret_val = e1000_disable_pcie_master_generic(hw); - if (ret_val) -- hw_dbg("PCI-E Master disable polling has failed.\n"); -+ DEBUGOUT("PCI-E Master disable polling has failed.\n"); - - /* set the completion timeout for interface */ -- ret_val = igb_set_pcie_completion_timeout(hw); -+ ret_val = e1000_set_pcie_completion_timeout(hw); - if (ret_val) -- hw_dbg("PCI-E Set completion timeout has failed.\n"); -+ DEBUGOUT("PCI-E Set completion timeout has failed.\n"); - -- hw_dbg("Masking off all interrupts\n"); -- wr32(E1000_IMC, 0xffffffff); -+ DEBUGOUT("Masking off all interrupts\n"); -+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); - -- wr32(E1000_RCTL, 0); -- wr32(E1000_TCTL, E1000_TCTL_PSP); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_RCTL, 0); -+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); -+ E1000_WRITE_FLUSH(hw); - -- usleep_range(10000, 20000); -+ msec_delay(10); - -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - -- hw_dbg("Issuing a global reset to MAC\n"); -- wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); -+ DEBUGOUT("Issuing a global reset to MAC\n"); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); - -- ret_val = igb_get_auto_rd_done(hw); -+ ret_val = e1000_get_auto_rd_done_generic(hw); - if (ret_val) { -- /* When auto config read does not complete, do not -+ /* -+ * When auto config read does not complete, do not - * return with an error. This can happen in situations - * where there is no eeprom and prevents getting link. 
- */ -- hw_dbg("Auto Read Done did not complete\n"); -+ DEBUGOUT("Auto Read Done did not complete\n"); - } - - /* If EEPROM is not present, run manual init scripts */ -- if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) -- igb_reset_init_script_82575(hw); -+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) -+ e1000_reset_init_script_82575(hw); - - /* Clear any pending interrupt events. */ -- wr32(E1000_IMC, 0xffffffff); -- rd32(E1000_ICR); -+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); -+ E1000_READ_REG(hw, E1000_ICR); - - /* Install any alternate MAC address into RAR0 */ -- ret_val = igb_check_alt_mac_addr(hw); -+ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); - - return ret_val; - } - - /** -- * igb_init_hw_82575 - Initialize hardware -+ * e1000_init_hw_82575 - Initialize hardware - * @hw: pointer to the HW structure - * - * This inits the hardware readying it for operation. - **/ --static s32 igb_init_hw_82575(struct e1000_hw *hw) -+s32 e1000_init_hw_82575(struct e1000_hw *hw) - { - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - u16 i, rar_count = mac->rar_entry_count; - -- if ((hw->mac.type >= e1000_i210) && -- !(igb_get_flash_presence_i210(hw))) { -- ret_val = igb_pll_workaround_i210(hw); -- if (ret_val) -- return ret_val; -- } -+ DEBUGFUNC("e1000_init_hw_82575"); - - /* Initialize identification LED */ -- ret_val = igb_id_led_init(hw); -+ ret_val = mac->ops.id_led_init(hw); - if (ret_val) { -- hw_dbg("Error initializing identification LED\n"); -+ DEBUGOUT("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ - } - - /* Disabling VLAN filtering */ -- hw_dbg("Initializing the IEEE VLAN\n"); -- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) -- igb_clear_vfta_i350(hw); -- else -- igb_clear_vfta(hw); -+ DEBUGOUT("Initializing the IEEE VLAN\n"); -+ mac->ops.clear_vfta(hw); - - /* Setup the receive address */ -- igb_init_rx_addrs(hw, rar_count); -+ e1000_init_rx_addrs_generic(hw, rar_count); - - /* Zero out the Multicast HASH table */ -- hw_dbg("Zeroing the MTA\n"); -+ DEBUGOUT("Zeroing the MTA\n"); - for (i = 0; i < mac->mta_reg_count; i++) -- array_wr32(E1000_MTA, i, 0); -+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); - - /* Zero out the Unicast HASH table */ -- hw_dbg("Zeroing the UTA\n"); -+ DEBUGOUT("Zeroing the UTA\n"); - for (i = 0; i < mac->uta_reg_count; i++) -- array_wr32(E1000_UTA, i, 0); -+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); - - /* Setup link and flow control */ -- ret_val = igb_setup_link(hw); -+ ret_val = mac->ops.setup_link(hw); - -- /* Clear all of the statistics registers (clear on read). It is -+ /* Set the default MTU size */ -+ hw->dev_spec._82575.mtu = 1500; -+ -+ /* -+ * Clear all of the statistics registers (clear on read). It is - * important that we do this after we have tried to establish link - * because the symbol error count will increment wildly if there - * is no link. - */ -- igb_clear_hw_cntrs_82575(hw); -+ e1000_clear_hw_cntrs_82575(hw); -+ - return ret_val; - } - - /** -- * igb_setup_copper_link_82575 - Configure copper link settings -+ * e1000_setup_copper_link_82575 - Configure copper link settings - * @hw: pointer to the HW structure - * - * Configures the link for auto-neg or forced speed and duplex. Then we check - * for link, once link is established calls to configure collision distance - * and flow control are called. 
- **/ --static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) -+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) - { - u32 ctrl; -- s32 ret_val; -+ s32 ret_val; - u32 phpm_reg; - -- ctrl = rd32(E1000_CTRL); -+ DEBUGFUNC("e1000_setup_copper_link_82575"); -+ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - ctrl |= E1000_CTRL_SLU; - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - - /* Clear Go Link Disconnect bit on supported devices */ - switch (hw->mac.type) { -@@ -1552,25 +1557,25 @@ - case e1000_i350: - case e1000_i210: - case e1000_i211: -- phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); -+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); - phpm_reg &= ~E1000_82580_PM_GO_LINKD; -- wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); -+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); - break; - default: - break; - } - -- ret_val = igb_setup_serdes_link_82575(hw); -+ ret_val = e1000_setup_serdes_link_82575(hw); - if (ret_val) - goto out; - -- if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { -+ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { - /* allow time for SFP cage time to power up phy */ -- msleep(300); -+ msec_delay(300); - - ret_val = hw->phy.ops.reset(hw); - if (ret_val) { -- hw_dbg("Error resetting the PHY.\n"); -+ DEBUGOUT("Error resetting the PHY.\n"); - goto out; - } - } -@@ -1580,20 +1585,22 @@ - switch (hw->phy.id) { - case I347AT4_E_PHY_ID: - case M88E1112_E_PHY_ID: -+ case M88E1340M_E_PHY_ID: - case M88E1543_E_PHY_ID: -+ case M88E1512_E_PHY_ID: - case I210_I_PHY_ID: -- ret_val = igb_copper_link_setup_m88_gen2(hw); -+ ret_val = e1000_copper_link_setup_m88_gen2(hw); - break; - default: -- ret_val = igb_copper_link_setup_m88(hw); -+ ret_val = e1000_copper_link_setup_m88(hw); - break; - } - break; - case e1000_phy_igp_3: -- ret_val = igb_copper_link_setup_igp(hw); -+ ret_val = e1000_copper_link_setup_igp(hw); - break; - case e1000_phy_82580: -- ret_val = igb_copper_link_setup_82580(hw); -+ ret_val = igb_e1000_copper_link_setup_82577(hw); - break; - default: - ret_val = -E1000_ERR_PHY; -@@ -1603,13 +1610,13 @@ - if (ret_val) - goto out; - -- ret_val = igb_setup_copper_link(hw); -+ ret_val = e1000_setup_copper_link_generic(hw); - out: - return ret_val; - } - - /** -- * igb_setup_serdes_link_82575 - Setup link for serdes -+ * e1000_setup_serdes_link_82575 - Setup link for serdes - * @hw: pointer to the HW structure - * - * Configure the physical coding sub-layer (PCS) link. The PCS link is -@@ -1617,45 +1624,40 @@ - * interface (sgmii), or serdes fiber is being used. Configures the link - * for auto-negotiation or forces speed/duplex. - **/ --static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) -+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) - { - u32 ctrl_ext, ctrl_reg, reg, anadv_reg; - bool pcs_autoneg; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u16 data; - -+ DEBUGFUNC("e1000_setup_serdes_link_82575"); -+ - if ((hw->phy.media_type != e1000_media_type_internal_serdes) && -- !igb_sgmii_active_82575(hw)) -+ !e1000_sgmii_active_82575(hw)) - return ret_val; - -- -- /* On the 82575, SerDes loopback mode persists until it is -+ /* -+ * On the 82575, SerDes loopback mode persists until it is - * explicitly turned off or a power cycle is performed. A read to - * the register does not indicate its status. Therefore, we ensure - * loopback mode is disabled during initialization. 
- */ -- wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); -+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); - -- /* power on the sfp cage if present and turn on I2C */ -- ctrl_ext = rd32(E1000_CTRL_EXT); -+ /* power on the sfp cage if present */ -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); - ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; -- ctrl_ext |= E1000_CTRL_I2C_ENA; -- wr32(E1000_CTRL_EXT, ctrl_ext); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); - -- ctrl_reg = rd32(E1000_CTRL); -+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); - ctrl_reg |= E1000_CTRL_SLU; - -- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { -- /* set both sw defined pins */ -+ /* set both sw defined pins on 82575/82576*/ -+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) - ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; - -- /* Set switch control to serdes energy detect */ -- reg = rd32(E1000_CONNSW); -- reg |= E1000_CONNSW_ENRGSRC; -- wr32(E1000_CONNSW, reg); -- } -- -- reg = rd32(E1000_PCS_LCTL); -+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL); - - /* default pcs_autoneg to the same setting as mac autoneg */ - pcs_autoneg = hw->mac.autoneg; -@@ -1670,12 +1672,13 @@ - case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - /* disable PCS autoneg and support parallel detect only */ - pcs_autoneg = false; -+ /* fall through to default case */ - default: - if (hw->mac.type == e1000_82575 || - hw->mac.type == e1000_82576) { - ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); - if (ret_val) { -- hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); -+ DEBUGOUT("NVM Read Error\n"); - return ret_val; - } - -@@ -1683,27 +1686,29 @@ - pcs_autoneg = false; - } - -- /* non-SGMII modes only supports a speed of 1000/Full for the -+ /* -+ * non-SGMII modes only supports a speed of 1000/Full for the - * link so it is best to just force the MAC and let the pcs - * link either autoneg or be forced to 1000/Full - */ - ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | -- E1000_CTRL_FD | E1000_CTRL_FRCDPX; -+ E1000_CTRL_FD | E1000_CTRL_FRCDPX; - - /* set speed of 1000/Full if speed/duplex is forced */ - reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; - break; - } - -- wr32(E1000_CTRL, ctrl_reg); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); - -- /* New SerDes mode allows for forcing speed or autonegotiating speed -+ /* -+ * New SerDes mode allows for forcing speed or autonegotiating speed - * at 1gb. Autoneg should be default set by most drivers. This is the - * mode that will be compatible with older link partners and switches. - * However, both are supported by the hardware and some drivers/tools. 
- */ - reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | -- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); -+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); - - if (pcs_autoneg) { - /* Set PCS register for autoneg */ -@@ -1714,8 +1719,9 @@ - reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; - - /* Configure flow control advertisement for autoneg */ -- anadv_reg = rd32(E1000_PCS_ANADV); -+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); - anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); -+ - switch (hw->fc.requested_mode) { - case e1000_fc_full: - case e1000_fc_rx_pause: -@@ -1728,251 +1734,480 @@ - default: - break; - } -- wr32(E1000_PCS_ANADV, anadv_reg); - -- hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); -+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); -+ -+ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); - } else { - /* Set PCS register for forced link */ -- reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ -+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ - - /* Force flow control for forced link */ - reg |= E1000_PCS_LCTL_FORCE_FCTRL; - -- hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); -+ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); - } - -- wr32(E1000_PCS_LCTL, reg); -+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); - -- if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) -- igb_force_mac_fc(hw); -+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) -+ e1000_force_mac_fc_generic(hw); - - return ret_val; - } - - /** -- * igb_sgmii_active_82575 - Return sgmii state -+ * e1000_get_media_type_82575 - derives current media type. - * @hw: pointer to the HW structure - * -- * 82575 silicon has a serialized gigabit media independent interface (sgmii) -- * which can be enabled for use in the embedded applications. Simply -- * return the current state of the sgmii interface. -+ * The media type is chosen reflecting few settings. -+ * The following are taken into account: -+ * - link mode set in the current port Init Control Word #3 -+ * - current link mode settings in CSR register -+ * - MDIO vs. I2C PHY control interface chosen -+ * - SFP module media type - **/ --static bool igb_sgmii_active_82575(struct e1000_hw *hw) -+static s32 e1000_get_media_type_82575(struct e1000_hw *hw) - { - struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; -- return dev_spec->sgmii_active; --} -- --/** -- * igb_reset_init_script_82575 - Inits HW defaults after reset -- * @hw: pointer to the HW structure -- * -- * Inits recommended HW defaults after a reset when there is no EEPROM -- * detected. This is only for the 82575. 
-- **/ --static s32 igb_reset_init_script_82575(struct e1000_hw *hw) --{ -- if (hw->mac.type == e1000_82575) { -- hw_dbg("Running reset init script for 82575\n"); -- /* SerDes configuration via SERDESCTRL */ -- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); -- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); -- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); -- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); -+ s32 ret_val = E1000_SUCCESS; -+ u32 ctrl_ext = 0; -+ u32 link_mode = 0; - -- /* CCM configuration via CCMCTL register */ -- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); -- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); -+ /* Set internal phy as default */ -+ dev_spec->sgmii_active = false; -+ dev_spec->module_plugged = false; - -- /* PCIe lanes configuration */ -- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); -- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); -- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); -- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); -+ /* Get CSR setting */ -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); - -- /* PCIe PLL Configuration */ -- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); -- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); -- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); -- } -+ /* extract link mode setting */ -+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; - -- return 0; --} -+ switch (link_mode) { -+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: -+ hw->phy.media_type = e1000_media_type_internal_serdes; -+ break; -+ case E1000_CTRL_EXT_LINK_MODE_GMII: -+ hw->phy.media_type = e1000_media_type_copper; -+ break; -+ case E1000_CTRL_EXT_LINK_MODE_SGMII: -+ /* Get phy control interface type set (MDIO vs. I2C)*/ -+ if (e1000_sgmii_uses_mdio_82575(hw)) { -+ hw->phy.media_type = e1000_media_type_copper; -+ dev_spec->sgmii_active = true; -+ break; -+ } -+ /* fall through for I2C based SGMII */ -+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: -+ /* read media type from SFP EEPROM */ -+ ret_val = e1000_set_sfp_media_type_82575(hw); -+ if ((ret_val != E1000_SUCCESS) || -+ (hw->phy.media_type == e1000_media_type_unknown)) { -+ /* -+ * If media type was not identified then return media -+ * type defined by the CTRL_EXT settings. -+ */ -+ hw->phy.media_type = e1000_media_type_internal_serdes; - --/** -- * igb_read_mac_addr_82575 - Read device MAC address -- * @hw: pointer to the HW structure -- **/ --static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) --{ -- s32 ret_val = 0; -+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { -+ hw->phy.media_type = e1000_media_type_copper; -+ dev_spec->sgmii_active = true; -+ } - -- /* If there's an alternate MAC address place it in RAR0 -- * so that it will override the Si installed default perm -- * address. -- */ -- ret_val = igb_check_alt_mac_addr(hw); -- if (ret_val) -- goto out; -+ break; -+ } - -- ret_val = igb_read_mac_addr(hw); -+ /* do not change link mode for 100BaseFX */ -+ if (dev_spec->eth_flags.e100_base_fx) -+ break; -+ -+ /* change current link mode setting */ -+ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; -+ -+ if (hw->phy.media_type == e1000_media_type_copper) -+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; -+ else -+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; -+ -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); -+ -+ break; -+ } -+ -+ return ret_val; -+} -+ -+/** -+ * e1000_set_sfp_media_type_82575 - derives SFP module media type. 
-+ * @hw: pointer to the HW structure -+ * -+ * The media type is chosen based on SFP module. -+ * compatibility flags retrieved from SFP ID EEPROM. -+ **/ -+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_ERR_CONFIG; -+ u32 ctrl_ext = 0; -+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; -+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; -+ u8 tranceiver_type = 0; -+ s32 timeout = 3; -+ -+ /* Turn I2C interface ON and power on sfp cage */ -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); -+ -+ E1000_WRITE_FLUSH(hw); -+ -+ /* Read SFP module data */ -+ while (timeout) { -+ ret_val = e1000_read_sfp_data_byte(hw, -+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), -+ &tranceiver_type); -+ if (ret_val == E1000_SUCCESS) -+ break; -+ msec_delay(100); -+ timeout--; -+ } -+ if (ret_val != E1000_SUCCESS) -+ goto out; -+ -+ ret_val = e1000_read_sfp_data_byte(hw, -+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), -+ (u8 *)eth_flags); -+ if (ret_val != E1000_SUCCESS) -+ goto out; -+ -+ /* Check if there is some SFP module plugged and powered */ -+ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || -+ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { -+ dev_spec->module_plugged = true; -+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { -+ hw->phy.media_type = e1000_media_type_internal_serdes; -+ } else if (eth_flags->e100_base_fx) { -+ dev_spec->sgmii_active = true; -+ hw->phy.media_type = e1000_media_type_internal_serdes; -+ } else if (eth_flags->e1000_base_t) { -+ dev_spec->sgmii_active = true; -+ hw->phy.media_type = e1000_media_type_copper; -+ } else { -+ hw->phy.media_type = e1000_media_type_unknown; -+ DEBUGOUT("PHY module has not been recognized\n"); -+ goto out; -+ } -+ } else { -+ hw->phy.media_type = e1000_media_type_unknown; -+ } -+ ret_val = E1000_SUCCESS; -+out: -+ /* Restore I2C interface setting */ -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); -+ return ret_val; -+} -+ -+/** -+ * e1000_valid_led_default_82575 - Verify a valid default LED config -+ * @hw: pointer to the HW structure -+ * @data: pointer to the NVM (EEPROM) -+ * -+ * Read the EEPROM for the current default LED configuration. If the -+ * LED configuration is not valid, set to a valid LED configuration. -+ **/ -+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) -+{ -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_valid_led_default_82575"); -+ -+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ goto out; -+ } -+ -+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { -+ switch (hw->phy.media_type) { -+ case e1000_media_type_internal_serdes: -+ *data = ID_LED_DEFAULT_82575_SERDES; -+ break; -+ case e1000_media_type_copper: -+ default: -+ *data = ID_LED_DEFAULT; -+ break; -+ } -+ } -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_sgmii_active_82575 - Return sgmii state -+ * @hw: pointer to the HW structure -+ * -+ * 82575 silicon has a serialized gigabit media independent interface (sgmii) -+ * which can be enabled for use in the embedded applications. Simply -+ * return the current state of the sgmii interface. 
-+ **/ -+static bool e1000_sgmii_active_82575(struct e1000_hw *hw) -+{ -+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; -+ return dev_spec->sgmii_active; -+} -+ -+/** -+ * e1000_reset_init_script_82575 - Inits HW defaults after reset -+ * @hw: pointer to the HW structure -+ * -+ * Inits recommended HW defaults after a reset when there is no EEPROM -+ * detected. This is only for the 82575. -+ **/ -+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) -+{ -+ DEBUGFUNC("e1000_reset_init_script_82575"); -+ -+ if (hw->mac.type == e1000_82575) { -+ DEBUGOUT("Running reset init script for 82575\n"); -+ /* SerDes configuration via SERDESCTRL */ -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); -+ -+ /* CCM configuration via CCMCTL register */ -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); -+ -+ /* PCIe lanes configuration */ -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); -+ -+ /* PCIe PLL Configuration */ -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); -+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_mac_addr_82575 - Read device MAC address -+ * @hw: pointer to the HW structure -+ **/ -+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_read_mac_addr_82575"); -+ -+ /* -+ * If there's an alternate MAC address place it in RAR0 -+ * so that it will override the Si installed default perm -+ * address. -+ */ -+ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); -+ if (ret_val) -+ goto out; -+ -+ ret_val = igb_e1000_read_mac_addr_generic(hw); - - out: - return ret_val; - } - - /** -- * igb_power_down_phy_copper_82575 - Remove link during PHY power down -+ * e1000_config_collision_dist_82575 - Configure collision distance -+ * @hw: pointer to the HW structure -+ * -+ * Configures the collision distance to the default value and is used -+ * during link setup. -+ **/ -+static void e1000_config_collision_dist_82575(struct e1000_hw *hw) -+{ -+ u32 tctl_ext; -+ -+ DEBUGFUNC("e1000_config_collision_dist_82575"); -+ -+ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); -+ -+ tctl_ext &= ~E1000_TCTL_EXT_COLD; -+ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; -+ -+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); -+ E1000_WRITE_FLUSH(hw); -+} -+ -+/** -+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, or wake on lan is not enabled, remove the link. 
- **/ --void igb_power_down_phy_copper_82575(struct e1000_hw *hw) -+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) - { -+ struct e1000_phy_info *phy = &hw->phy; -+ -+ if (!(phy->ops.check_reset_block)) -+ return; -+ - /* If the management interface is not enabled, then power down */ -- if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) -- igb_power_down_phy_copper(hw); -+ if (!(igb_e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) -+ igb_e1000_power_down_phy_copper(hw); -+ -+ return; - } - - /** -- * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters -+ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters - * @hw: pointer to the HW structure - * - * Clears the hardware counters by reading the counter registers. - **/ --static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) -+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) - { -- igb_clear_hw_cntrs_base(hw); -+ DEBUGFUNC("e1000_clear_hw_cntrs_82575"); - -- rd32(E1000_PRC64); -- rd32(E1000_PRC127); -- rd32(E1000_PRC255); -- rd32(E1000_PRC511); -- rd32(E1000_PRC1023); -- rd32(E1000_PRC1522); -- rd32(E1000_PTC64); -- rd32(E1000_PTC127); -- rd32(E1000_PTC255); -- rd32(E1000_PTC511); -- rd32(E1000_PTC1023); -- rd32(E1000_PTC1522); -- -- rd32(E1000_ALGNERRC); -- rd32(E1000_RXERRC); -- rd32(E1000_TNCRS); -- rd32(E1000_CEXTERR); -- rd32(E1000_TSCTC); -- rd32(E1000_TSCTFC); -- -- rd32(E1000_MGTPRC); -- rd32(E1000_MGTPDC); -- rd32(E1000_MGTPTC); -- -- rd32(E1000_IAC); -- rd32(E1000_ICRXOC); -- -- rd32(E1000_ICRXPTC); -- rd32(E1000_ICRXATC); -- rd32(E1000_ICTXPTC); -- rd32(E1000_ICTXATC); -- rd32(E1000_ICTXQEC); -- rd32(E1000_ICTXQMTC); -- rd32(E1000_ICRXDMTC); -- -- rd32(E1000_CBTMPC); -- rd32(E1000_HTDPMC); -- rd32(E1000_CBRMPC); -- rd32(E1000_RPTHC); -- rd32(E1000_HGPTC); -- rd32(E1000_HTCBDPC); -- rd32(E1000_HGORCL); -- rd32(E1000_HGORCH); -- rd32(E1000_HGOTCL); -- rd32(E1000_HGOTCH); -- rd32(E1000_LENERRS); -+ e1000_clear_hw_cntrs_base_generic(hw); -+ -+ E1000_READ_REG(hw, E1000_PRC64); -+ E1000_READ_REG(hw, E1000_PRC127); -+ E1000_READ_REG(hw, E1000_PRC255); -+ E1000_READ_REG(hw, E1000_PRC511); -+ E1000_READ_REG(hw, E1000_PRC1023); -+ E1000_READ_REG(hw, E1000_PRC1522); -+ E1000_READ_REG(hw, E1000_PTC64); -+ E1000_READ_REG(hw, E1000_PTC127); -+ E1000_READ_REG(hw, E1000_PTC255); -+ E1000_READ_REG(hw, E1000_PTC511); -+ E1000_READ_REG(hw, E1000_PTC1023); -+ E1000_READ_REG(hw, E1000_PTC1522); -+ -+ E1000_READ_REG(hw, E1000_ALGNERRC); -+ E1000_READ_REG(hw, E1000_RXERRC); -+ E1000_READ_REG(hw, E1000_TNCRS); -+ E1000_READ_REG(hw, E1000_CEXTERR); -+ E1000_READ_REG(hw, E1000_TSCTC); -+ E1000_READ_REG(hw, E1000_TSCTFC); -+ -+ E1000_READ_REG(hw, E1000_MGTPRC); -+ E1000_READ_REG(hw, E1000_MGTPDC); -+ E1000_READ_REG(hw, E1000_MGTPTC); -+ -+ E1000_READ_REG(hw, E1000_IAC); -+ E1000_READ_REG(hw, E1000_ICRXOC); -+ -+ E1000_READ_REG(hw, E1000_ICRXPTC); -+ E1000_READ_REG(hw, E1000_ICRXATC); -+ E1000_READ_REG(hw, E1000_ICTXPTC); -+ E1000_READ_REG(hw, E1000_ICTXATC); -+ E1000_READ_REG(hw, E1000_ICTXQEC); -+ E1000_READ_REG(hw, E1000_ICTXQMTC); -+ E1000_READ_REG(hw, E1000_ICRXDMTC); -+ -+ E1000_READ_REG(hw, E1000_CBTMPC); -+ E1000_READ_REG(hw, E1000_HTDPMC); -+ E1000_READ_REG(hw, E1000_CBRMPC); -+ E1000_READ_REG(hw, E1000_RPTHC); -+ E1000_READ_REG(hw, E1000_HGPTC); -+ E1000_READ_REG(hw, E1000_HTCBDPC); -+ E1000_READ_REG(hw, E1000_HGORCL); -+ E1000_READ_REG(hw, E1000_HGORCH); -+ E1000_READ_REG(hw, E1000_HGOTCL); -+ E1000_READ_REG(hw, E1000_HGOTCH); -+ E1000_READ_REG(hw, 
E1000_LENERRS); - - /* This register should not be read in copper configurations */ -- if (hw->phy.media_type == e1000_media_type_internal_serdes || -- igb_sgmii_active_82575(hw)) -- rd32(E1000_SCVPC); -+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) || -+ e1000_sgmii_active_82575(hw)) -+ E1000_READ_REG(hw, E1000_SCVPC); - } - - /** -- * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable -+ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable - * @hw: pointer to the HW structure - * -- * After rx enable if managability is enabled then there is likely some -+ * After Rx enable, if manageability is enabled then there is likely some - * bad data at the start of the fifo and possibly in the DMA fifo. This - * function clears the fifos and flushes any packets that came in as rx was - * being enabled. - **/ --void igb_rx_fifo_flush_82575(struct e1000_hw *hw) -+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) - { - u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; - int i, ms_wait; - -+ DEBUGFUNC("e1000_rx_fifo_flush_82575"); -+ -+ /* disable IPv6 options as per hardware errata */ -+ rfctl = E1000_READ_REG(hw, E1000_RFCTL); -+ rfctl |= E1000_RFCTL_IPV6_EX_DIS; -+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); -+ - if (hw->mac.type != e1000_82575 || -- !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) -+ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) - return; - -- /* Disable all RX queues */ -+ /* Disable all Rx queues */ - for (i = 0; i < 4; i++) { -- rxdctl[i] = rd32(E1000_RXDCTL(i)); -- wr32(E1000_RXDCTL(i), -- rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); -+ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); -+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), -+ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); - } - /* Poll all queues to verify they have shut down */ - for (ms_wait = 0; ms_wait < 10; ms_wait++) { -- usleep_range(1000, 2000); -+ msec_delay(1); - rx_enabled = 0; - for (i = 0; i < 4; i++) -- rx_enabled |= rd32(E1000_RXDCTL(i)); -+ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); - if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) - break; - } - - if (ms_wait == 10) -- hw_dbg("Queue disable timed out after 10ms\n"); -+ DEBUGOUT("Queue disable timed out after 10ms\n"); - - /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all - * incoming packets are rejected. 
Set enable and wait 2ms so that - * any packet that was coming in as RCTL.EN was set is flushed - */ -- rfctl = rd32(E1000_RFCTL); -- wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); -+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); - -- rlpml = rd32(E1000_RLPML); -- wr32(E1000_RLPML, 0); -+ rlpml = E1000_READ_REG(hw, E1000_RLPML); -+ E1000_WRITE_REG(hw, E1000_RLPML, 0); - -- rctl = rd32(E1000_RCTL); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); - temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); - temp_rctl |= E1000_RCTL_LPE; - -- wr32(E1000_RCTL, temp_rctl); -- wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); -- wrfl(); -- usleep_range(2000, 3000); -+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); -+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); -+ E1000_WRITE_FLUSH(hw); -+ msec_delay(2); - -- /* Enable RX queues that were previously enabled and restore our -+ /* Enable Rx queues that were previously enabled and restore our - * previous state - */ - for (i = 0; i < 4; i++) -- wr32(E1000_RXDCTL(i), rxdctl[i]); -- wr32(E1000_RCTL, rctl); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); -+ E1000_WRITE_FLUSH(hw); - -- wr32(E1000_RLPML, rlpml); -- wr32(E1000_RFCTL, rfctl); -+ E1000_WRITE_REG(hw, E1000_RLPML, rlpml); -+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); - - /* Flush receive errors generated by workaround */ -- rd32(E1000_ROC); -- rd32(E1000_RNBC); -- rd32(E1000_MPC); -+ E1000_READ_REG(hw, E1000_ROC); -+ E1000_READ_REG(hw, E1000_RNBC); -+ E1000_READ_REG(hw, E1000_MPC); - } - - /** -- * igb_set_pcie_completion_timeout - set pci-e completion timeout -+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout - * @hw: pointer to the HW structure - * - * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, -@@ -1981,17 +2216,18 @@ - * increase the value to either 10ms to 200ms for capability version 1 config, - * or 16ms to 55ms for version 2. 
- **/ --static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) -+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) - { -- u32 gcr = rd32(E1000_GCR); -- s32 ret_val = 0; -+ u32 gcr = E1000_READ_REG(hw, E1000_GCR); -+ s32 ret_val = E1000_SUCCESS; - u16 pcie_devctl2; - - /* only take action if timeout value is defaulted to 0 */ - if (gcr & E1000_GCR_CMPL_TMOUT_MASK) - goto out; - -- /* if capabilities version is type 1 we can write the -+ /* -+ * if capababilities version is type 1 we can write the - * timeout of 10ms to 200ms through the GCR register - */ - if (!(gcr & E1000_GCR_CAP_VER2)) { -@@ -1999,36 +2235,37 @@ - goto out; - } - -- /* for version 2 capabilities we need to write the config space -+ /* -+ * for version 2 capabilities we need to write the config space - * directly in order to set the completion timeout value for - * 16ms to 55ms - */ -- ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, -- &pcie_devctl2); -+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, -+ &pcie_devctl2); - if (ret_val) - goto out; - - pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; - -- ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, -- &pcie_devctl2); -+ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, -+ &pcie_devctl2); - out: - /* disable completion timeout resend */ - gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; - -- wr32(E1000_GCR, gcr); -+ E1000_WRITE_REG(hw, E1000_GCR, gcr); - return ret_val; - } - - /** -- * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing -+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing - * @hw: pointer to the hardware struct - * @enable: state to enter, either enabled or disabled - * @pf: Physical Function pool - do not set anti-spoofing for the PF - * - * enables/disables L2 switch anti-spoofing functionality. - **/ --void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) -+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) - { - u32 reg_val, reg_offset; - -@@ -2044,7 +2281,7 @@ - return; - } - -- reg_val = rd32(reg_offset); -+ reg_val = E1000_READ_REG(hw, reg_offset); - if (enable) { - reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); -@@ -2056,66 +2293,67 @@ - reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - } -- wr32(reg_offset, reg_val); -+ E1000_WRITE_REG(hw, reg_offset, reg_val); - } - - /** -- * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback -+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback - * @hw: pointer to the hardware struct - * @enable: state to enter, either enabled or disabled - * - * enables/disables L2 switch loopback functionality. 
- **/ --void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) -+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) - { - u32 dtxswc; - - switch (hw->mac.type) { - case e1000_82576: -- dtxswc = rd32(E1000_DTXSWC); -+ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); - if (enable) - dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; - else - dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; -- wr32(E1000_DTXSWC, dtxswc); -+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); - break; -- case e1000_i354: - case e1000_i350: -- dtxswc = rd32(E1000_TXSWC); -+ case e1000_i354: -+ dtxswc = E1000_READ_REG(hw, E1000_TXSWC); - if (enable) - dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; - else - dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; -- wr32(E1000_TXSWC, dtxswc); -+ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); - break; - default: - /* Currently no other hardware supports loopback */ - break; - } - -+ - } - - /** -- * igb_vmdq_set_replication_pf - enable or disable vmdq replication -+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication - * @hw: pointer to the hardware struct - * @enable: state to enter, either enabled or disabled - * - * enables/disables replication of packets across multiple pools. - **/ --void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) -+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) - { -- u32 vt_ctl = rd32(E1000_VT_CTL); -+ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); - - if (enable) - vt_ctl |= E1000_VT_CTL_VM_REPL_EN; - else - vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; - -- wr32(E1000_VT_CTL, vt_ctl); -+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); - } - - /** -- * igb_read_phy_reg_82580 - Read 82580 MDI control register -+ * e1000_read_phy_reg_82580 - Read 82580 MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data -@@ -2123,15 +2361,17 @@ - * Reads the MDI control register in the PHY at offset and stores the - * information read to data. - **/ --static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) -+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) - { - s32 ret_val; - -+ DEBUGFUNC("e1000_read_phy_reg_82580"); -+ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - -- ret_val = igb_read_phy_reg_mdic(hw, offset, data); -+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data); - - hw->phy.ops.release(hw); - -@@ -2140,23 +2380,24 @@ - } - - /** -- * igb_write_phy_reg_82580 - Write 82580 MDI control register -+ * e1000_write_phy_reg_82580 - Write 82580 MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write to register at offset - * - * Writes data to MDI control register in the PHY at offset. 
- **/ --static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) -+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) - { - s32 ret_val; - -+ DEBUGFUNC("e1000_write_phy_reg_82580"); - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - -- ret_val = igb_write_phy_reg_mdic(hw, offset, data); -+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data); - - hw->phy.ops.release(hw); - -@@ -2165,123 +2406,133 @@ - } - - /** -- * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits -+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits - * @hw: pointer to the HW structure - * - * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on - * the values found in the EEPROM. This addresses an issue in which these - * bits are not restored from EEPROM after reset. - **/ --static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) -+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u32 mdicnfg; - u16 nvm_data = 0; - -+ DEBUGFUNC("e1000_reset_mdicnfg_82580"); -+ - if (hw->mac.type != e1000_82580) - goto out; -- if (!igb_sgmii_active_82575(hw)) -+ if (!e1000_sgmii_active_82575(hw)) - goto out; - - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + - NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, - &nvm_data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -+ DEBUGOUT("NVM Read Error\n"); - goto out; - } - -- mdicnfg = rd32(E1000_MDICNFG); -+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); - if (nvm_data & NVM_WORD24_EXT_MDIO) - mdicnfg |= E1000_MDICNFG_EXT_MDIO; - if (nvm_data & NVM_WORD24_COM_MDIO) - mdicnfg |= E1000_MDICNFG_COM_MDIO; -- wr32(E1000_MDICNFG, mdicnfg); -+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); - out: - return ret_val; - } - - /** -- * igb_reset_hw_82580 - Reset hardware -+ * e1000_reset_hw_82580 - Reset hardware - * @hw: pointer to the HW structure - * - * This resets function or entire device (all ports, etc.) - * to a known state. - **/ --static s32 igb_reset_hw_82580(struct e1000_hw *hw) -+static s32 e1000_reset_hw_82580(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - /* BH SW mailbox bit in SW_FW_SYNC */ - u16 swmbsw_mask = E1000_SW_SYNCH_MB; - u32 ctrl; - bool global_device_reset = hw->dev_spec._82575.global_device_reset; - -+ DEBUGFUNC("e1000_reset_hw_82580"); -+ - hw->dev_spec._82575.global_device_reset = false; - -- /* due to hw errata, global device reset doesn't always -- * work on 82580 -- */ -+ /* 82580 does not reliably do global_device_reset due to hw errata */ - if (hw->mac.type == e1000_82580) - global_device_reset = false; - - /* Get current control state. */ -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - -- /* Prevent the PCI-E bus from sticking if there is no TLP connection -+ /* -+ * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. 
- */ -- ret_val = igb_disable_pcie_master(hw); -+ ret_val = e1000_disable_pcie_master_generic(hw); - if (ret_val) -- hw_dbg("PCI-E Master disable polling has failed.\n"); -+ DEBUGOUT("PCI-E Master disable polling has failed.\n"); - -- hw_dbg("Masking off all interrupts\n"); -- wr32(E1000_IMC, 0xffffffff); -- wr32(E1000_RCTL, 0); -- wr32(E1000_TCTL, E1000_TCTL_PSP); -- wrfl(); -+ DEBUGOUT("Masking off all interrupts\n"); -+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); -+ E1000_WRITE_REG(hw, E1000_RCTL, 0); -+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); -+ E1000_WRITE_FLUSH(hw); - -- usleep_range(10000, 11000); -+ msec_delay(10); - - /* Determine whether or not a global dev reset is requested */ -- if (global_device_reset && -- hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) -+ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, -+ swmbsw_mask)) - global_device_reset = false; - -- if (global_device_reset && -- !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) -+ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & -+ E1000_STAT_DEV_RST_SET)) - ctrl |= E1000_CTRL_DEV_RST; - else - ctrl |= E1000_CTRL_RST; - -- wr32(E1000_CTRL, ctrl); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - -- /* Add delay to insure DEV_RST has time to complete */ -- if (global_device_reset) -- usleep_range(5000, 6000); -+ switch (hw->device_id) { -+ case E1000_DEV_ID_DH89XXCC_SGMII: -+ break; -+ default: -+ E1000_WRITE_FLUSH(hw); -+ break; -+ } -+ -+ /* Add delay to insure DEV_RST or RST has time to complete */ -+ msec_delay(5); - -- ret_val = igb_get_auto_rd_done(hw); -+ ret_val = e1000_get_auto_rd_done_generic(hw); - if (ret_val) { -- /* When auto config read does not complete, do not -+ /* -+ * When auto config read does not complete, do not - * return with an error. This can happen in situations - * where there is no eeprom and prevents getting link. - */ -- hw_dbg("Auto Read Done did not complete\n"); -+ DEBUGOUT("Auto Read Done did not complete\n"); - } - - /* clear global device reset status bit */ -- wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); -+ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); - - /* Clear any pending interrupt events. */ -- wr32(E1000_IMC, 0xffffffff); -- rd32(E1000_ICR); -+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); -+ E1000_READ_REG(hw, E1000_ICR); - -- ret_val = igb_reset_mdicnfg_82580(hw); -+ ret_val = e1000_reset_mdicnfg_82580(hw); - if (ret_val) -- hw_dbg("Could not reset MDICNFG based on EEPROM\n"); -+ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); - - /* Install any alternate MAC address into RAR0 */ -- ret_val = igb_check_alt_mac_addr(hw); -+ ret_val = igb_e1000_check_alt_mac_addr_generic(hw); - - /* Release semaphore */ - if (global_device_reset) -@@ -2291,7 +2542,7 @@ - } - - /** -- * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size -+ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size - * @data: data received by reading RXPBS register - * - * The 82580 uses a table based approach for packet buffer allocation sizes. 
-@@ -2300,398 +2551,1222 @@ - * 0x0 36 72 144 1 2 4 8 16 - * 0x8 35 70 140 rsv rsv rsv rsv rsv - */ --u16 igb_rxpbs_adjust_82580(u32 data) -+u16 e1000_rxpbs_adjust_82580(u32 data) - { - u16 ret_val = 0; - -- if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) -+ if (data < E1000_82580_RXPBS_TABLE_SIZE) - ret_val = e1000_82580_rxpbs_table[data]; - -- return ret_val; -+ return ret_val; -+} -+ -+/** -+ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM -+ * checksum -+ * @hw: pointer to the HW structure -+ * @offset: offset in words of the checksum protected region -+ * -+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM -+ * and then verifies that the sum of the EEPROM is equal to 0xBABA. -+ **/ -+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ u16 checksum = 0; -+ u16 i, nvm_data; -+ -+ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); -+ -+ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { -+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ goto out; -+ } -+ checksum += nvm_data; -+ } -+ -+ if (checksum != (u16) NVM_SUM) { -+ DEBUGOUT("NVM Checksum Invalid\n"); -+ ret_val = -E1000_ERR_NVM; -+ goto out; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_update_nvm_checksum_with_offset - Update EEPROM -+ * checksum -+ * @hw: pointer to the HW structure -+ * @offset: offset in words of the checksum protected region -+ * -+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM -+ * up to the checksum. Then calculates the EEPROM checksum and writes the -+ * value to the EEPROM. -+ **/ -+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) -+{ -+ s32 ret_val; -+ u16 checksum = 0; -+ u16 i, nvm_data; -+ -+ DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); -+ -+ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { -+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error while updating checksum.\n"); -+ goto out; -+ } -+ checksum += nvm_data; -+ } -+ checksum = (u16) NVM_SUM - checksum; -+ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, -+ &checksum); -+ if (ret_val) -+ DEBUGOUT("NVM Write Error while updating checksum.\n"); -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum -+ * @hw: pointer to the HW structure -+ * -+ * Calculates the EEPROM section checksum by reading/adding each word of -+ * the EEPROM and then verifies that the sum of the EEPROM is -+ * equal to 0xBABA. -+ **/ -+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ u16 eeprom_regions_count = 1; -+ u16 j, nvm_data; -+ u16 nvm_offset; -+ -+ DEBUGFUNC("e1000_validate_nvm_checksum_82580"); -+ -+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ goto out; -+ } -+ -+ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { -+ /* if chekcsums compatibility bit is set validate checksums -+ * for all 4 ports. 
*/ -+ eeprom_regions_count = 4; -+ } -+ -+ for (j = 0; j < eeprom_regions_count; j++) { -+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -+ ret_val = e1000_validate_nvm_checksum_with_offset(hw, -+ nvm_offset); -+ if (ret_val != E1000_SUCCESS) -+ goto out; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum -+ * @hw: pointer to the HW structure -+ * -+ * Updates the EEPROM section checksums for all 4 ports by reading/adding -+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM -+ * checksum and writes the value to the EEPROM. -+ **/ -+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ u16 j, nvm_data; -+ u16 nvm_offset; -+ -+ DEBUGFUNC("e1000_update_nvm_checksum_82580"); -+ -+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); -+ goto out; -+ } -+ -+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { -+ /* set compatibility bit to validate checksums appropriately */ -+ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; -+ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, -+ &nvm_data); -+ if (ret_val) { -+ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); -+ goto out; -+ } -+ } -+ -+ for (j = 0; j < 4; j++) { -+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); -+ if (ret_val) -+ goto out; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum -+ * @hw: pointer to the HW structure -+ * -+ * Calculates the EEPROM section checksum by reading/adding each word of -+ * the EEPROM and then verifies that the sum of the EEPROM is -+ * equal to 0xBABA. -+ **/ -+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ u16 j; -+ u16 nvm_offset; -+ -+ DEBUGFUNC("e1000_validate_nvm_checksum_i350"); -+ -+ for (j = 0; j < 4; j++) { -+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -+ ret_val = e1000_validate_nvm_checksum_with_offset(hw, -+ nvm_offset); -+ if (ret_val != E1000_SUCCESS) -+ goto out; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum -+ * @hw: pointer to the HW structure -+ * -+ * Updates the EEPROM section checksums for all 4 ports by reading/adding -+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM -+ * checksum and writes the value to the EEPROM. 
-+ **/ -+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ u16 j; -+ u16 nvm_offset; -+ -+ DEBUGFUNC("e1000_update_nvm_checksum_i350"); -+ -+ for (j = 0; j < 4; j++) { -+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); -+ if (ret_val != E1000_SUCCESS) -+ goto out; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * __e1000_access_emi_reg - Read/write EMI register -+ * @hw: pointer to the HW structure -+ * @addr: EMI address to program -+ * @data: pointer to value to read/write from/to the EMI address -+ * @read: boolean flag to indicate read or write -+ **/ -+static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, -+ u16 *data, bool read) -+{ -+ s32 ret_val; -+ -+ DEBUGFUNC("__e1000_access_emi_reg"); -+ -+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); -+ if (ret_val) -+ return ret_val; -+ -+ if (read) -+ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); -+ else -+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); -+ -+ return ret_val; -+} -+ -+/** -+ * e1000_read_emi_reg - Read Extended Management Interface register -+ * @hw: pointer to the HW structure -+ * @addr: EMI address to program -+ * @data: value to be read from the EMI address -+ **/ -+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) -+{ -+ DEBUGFUNC("e1000_read_emi_reg"); -+ -+ return __e1000_access_emi_reg(hw, addr, data, true); -+} -+ -+/** -+ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY -+ * @hw: pointer to the HW structure -+ * -+ * Initialize Marvell 1512 to work correctly with Avoton. -+ **/ -+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("e1000_initialize_M88E1512_phy"); -+ -+ /* Check if this is correct PHY. */ -+ if (phy->id != M88E1512_E_PHY_ID) -+ goto out; -+ -+ /* Switch to PHY page 0xFF. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); -+ if (ret_val) -+ goto out; -+ -+ /* Switch to PHY page 0xFB. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); -+ if (ret_val) -+ goto out; -+ -+ /* Switch to PHY page 0x12. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); -+ if (ret_val) -+ goto out; -+ -+ /* Change mode to SGMII-to-Copper */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); -+ if (ret_val) -+ goto out; -+ -+ /* Return the PHY to page 0. 
*/ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.commit(hw); -+ if (ret_val) { -+ DEBUGOUT("Error committing the PHY changes\n"); -+ return ret_val; -+ } -+ -+ msec_delay(1000); -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY -+ * @hw: pointer to the HW structure -+ * -+ * Initialize Marvell 1543 to work correctly with Avoton. -+ **/ -+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("e1000_initialize_M88E1543_phy"); -+ -+ /* Check if this is correct PHY. */ -+ if (phy->id != M88E1543_E_PHY_ID) -+ goto out; -+ -+ /* Switch to PHY page 0xFF. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); -+ if (ret_val) -+ goto out; -+ -+ /* Switch to PHY page 0xFB. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D); -+ if (ret_val) -+ goto out; -+ -+ /* Switch to PHY page 0x12. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); -+ if (ret_val) -+ goto out; -+ -+ /* Change mode to SGMII-to-Copper */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); -+ if (ret_val) -+ goto out; -+ -+ /* Switch to PHY page 1. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); -+ if (ret_val) -+ goto out; -+ -+ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); -+ if (ret_val) -+ goto out; -+ -+ /* Return the PHY to page 0. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.commit(hw); -+ if (ret_val) { -+ DEBUGOUT("Error committing the PHY changes\n"); -+ return ret_val; -+ } -+ -+ msec_delay(1000); -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_set_eee_i350 - Enable/disable EEE support -+ * @hw: pointer to the HW structure -+ * @adv1g: boolean flag enabling 1G EEE advertisement -+ * @adv100m: boolean flag enabling 100M EEE advertisement -+ * -+ * Enable/disable EEE based on setting in dev_spec structure. 
-+ * -+ **/ -+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) -+{ -+ u32 ipcnfg, eeer; -+ -+ DEBUGFUNC("e1000_set_eee_i350"); -+ -+ if ((hw->mac.type < e1000_i350) || -+ (hw->phy.media_type != e1000_media_type_copper)) -+ goto out; -+ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); -+ eeer = E1000_READ_REG(hw, E1000_EEER); -+ -+ /* enable or disable per user setting */ -+ if (!(hw->dev_spec._82575.eee_disable)) { -+ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); -+ -+ if (adv100M) -+ ipcnfg |= E1000_IPCNFG_EEE_100M_AN; -+ else -+ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; -+ -+ if (adv1G) -+ ipcnfg |= E1000_IPCNFG_EEE_1G_AN; -+ else -+ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; -+ -+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | -+ E1000_EEER_LPI_FC); -+ -+ /* This bit should not be set in normal operation. */ -+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP) -+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); -+ } else { -+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); -+ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | -+ E1000_EEER_LPI_FC); -+ } -+ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); -+ E1000_WRITE_REG(hw, E1000_EEER, eeer); -+ E1000_READ_REG(hw, E1000_IPCNFG); -+ E1000_READ_REG(hw, E1000_EEER); -+out: -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_set_eee_i354 - Enable/disable EEE support -+ * @hw: pointer to the HW structure -+ * @adv1g: boolean flag enabling 1G EEE advertisement -+ * @adv100m: boolean flag enabling 100M EEE advertisement -+ * -+ * Enable/disable EEE legacy mode based on setting in dev_spec structure. -+ * -+ **/ -+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val = E1000_SUCCESS; -+ u16 phy_data; -+ -+ DEBUGFUNC("e1000_set_eee_i354"); -+ -+ if ((hw->phy.media_type != e1000_media_type_copper) || -+ ((phy->id != M88E1543_E_PHY_ID) && -+ (phy->id != M88E1512_E_PHY_ID))) -+ goto out; -+ -+ if (!hw->dev_spec._82575.eee_disable) { -+ /* Switch to PHY page 18. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); -+ if (ret_val) -+ goto out; -+ -+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, -+ &phy_data); -+ if (ret_val) -+ goto out; -+ -+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, -+ phy_data); -+ if (ret_val) -+ goto out; -+ -+ /* Return the PHY to page 0. */ -+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); -+ if (ret_val) -+ goto out; -+ -+ /* Turn on EEE advertisement. */ -+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -+ E1000_EEE_ADV_DEV_I354, -+ &phy_data); -+ if (ret_val) -+ goto out; -+ -+ if (adv100M) -+ phy_data |= E1000_EEE_ADV_100_SUPPORTED; -+ else -+ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; -+ -+ if (adv1G) -+ phy_data |= E1000_EEE_ADV_1000_SUPPORTED; -+ else -+ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; -+ -+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -+ E1000_EEE_ADV_DEV_I354, -+ phy_data); -+ } else { -+ /* Turn off EEE advertisement. 
*/ -+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -+ E1000_EEE_ADV_DEV_I354, -+ &phy_data); -+ if (ret_val) -+ goto out; -+ -+ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | -+ E1000_EEE_ADV_1000_SUPPORTED); -+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -+ E1000_EEE_ADV_DEV_I354, -+ phy_data); -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_get_eee_status_i354 - Get EEE status -+ * @hw: pointer to the HW structure -+ * @status: EEE status -+ * -+ * Get EEE status by guessing based on whether Tx or Rx LPI indications have -+ * been received. -+ **/ -+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val = E1000_SUCCESS; -+ u16 phy_data; -+ -+ DEBUGFUNC("e1000_get_eee_status_i354"); -+ -+ /* Check if EEE is supported on this device. */ -+ if ((hw->phy.media_type != e1000_media_type_copper) || -+ ((phy->id != M88E1543_E_PHY_ID) && -+ (phy->id != M88E1512_E_PHY_ID))) -+ goto out; -+ -+ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, -+ E1000_PCS_STATUS_DEV_I354, -+ &phy_data); -+ if (ret_val) -+ goto out; -+ -+ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | -+ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; -+ -+out: -+ return ret_val; -+} -+ -+/* Due to a hw errata, if the host tries to configure the VFTA register -+ * while performing queries from the BMC or DMA, then the VFTA in some -+ * cases won't be written. -+ */ -+ -+/** -+ * e1000_clear_vfta_i350 - Clear VLAN filter table -+ * @hw: pointer to the HW structure -+ * -+ * Clears the register array which contains the VLAN filter table by -+ * setting all the values to 0. -+ **/ -+void e1000_clear_vfta_i350(struct e1000_hw *hw) -+{ -+ u32 offset; -+ int i; -+ -+ DEBUGFUNC("e1000_clear_vfta_350"); -+ -+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { -+ for (i = 0; i < 10; i++) -+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); -+ -+ E1000_WRITE_FLUSH(hw); -+ } -+} -+ -+/** -+ * e1000_write_vfta_i350 - Write value to VLAN filter table -+ * @hw: pointer to the HW structure -+ * @offset: register offset in VLAN filter table -+ * @value: register value written to VLAN filter table -+ * -+ * Writes value at the given offset in the register array which stores -+ * the VLAN filter table. 
-+ **/ -+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) -+{ -+ int i; -+ -+ DEBUGFUNC("e1000_write_vfta_350"); -+ -+ for (i = 0; i < 10; i++) -+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); -+ -+ E1000_WRITE_FLUSH(hw); -+} -+ -+/** -+ * e1000_set_i2c_bb - Enable I2C bit-bang -+ * @hw: pointer to the HW structure -+ * -+ * Enable I2C bit-bang interface -+ * -+ **/ -+s32 e1000_set_i2c_bb(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ u32 ctrl_ext, i2cparams; -+ -+ DEBUGFUNC("e1000_set_i2c_bb"); -+ -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ ctrl_ext |= E1000_CTRL_I2C_ENA; -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); -+ E1000_WRITE_FLUSH(hw); -+ -+ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ i2cparams |= E1000_I2CBB_EN; -+ i2cparams |= E1000_I2C_DATA_OE_N; -+ i2cparams |= E1000_I2C_CLK_OE_N; -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); -+ E1000_WRITE_FLUSH(hw); -+ -+ return ret_val; -+} -+ -+/** -+ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C -+ * @hw: pointer to hardware structure -+ * @byte_offset: byte offset to read -+ * @dev_addr: device address -+ * @data: value read -+ * -+ * Performs byte read operation over I2C interface at -+ * a specified device address. -+ **/ -+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, -+ u8 dev_addr, u8 *data) -+{ -+ s32 status = E1000_SUCCESS; -+ u32 max_retry = 10; -+ u32 retry = 1; -+ u16 swfw_mask = 0; -+ -+ bool nack = true; -+ -+ DEBUGFUNC("e1000_read_i2c_byte_generic"); -+ -+ swfw_mask = E1000_SWFW_PHY0_SM; -+ -+ do { -+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) -+ != E1000_SUCCESS) { -+ status = E1000_ERR_SWFW_SYNC; -+ goto read_byte_out; -+ } -+ -+ e1000_i2c_start(hw); -+ -+ /* Device Address and write indication */ -+ status = e1000_clock_out_i2c_byte(hw, dev_addr); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_get_i2c_ack(hw); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_clock_out_i2c_byte(hw, byte_offset); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_get_i2c_ack(hw); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ e1000_i2c_start(hw); -+ -+ /* Device Address and read indication */ -+ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_get_i2c_ack(hw); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_clock_in_i2c_byte(hw, data); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_clock_out_i2c_bit(hw, nack); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ e1000_i2c_stop(hw); -+ break; -+ -+fail: -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); -+ msec_delay(100); -+ e1000_i2c_bus_clear(hw); -+ retry++; -+ if (retry < max_retry) -+ DEBUGOUT("I2C byte read error - Retrying.\n"); -+ else -+ DEBUGOUT("I2C byte read error.\n"); -+ -+ } while (retry < max_retry); -+ -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); -+ -+read_byte_out: -+ -+ return status; -+} -+ -+/** -+ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C -+ * @hw: pointer to hardware structure -+ * @byte_offset: byte offset to write -+ * @dev_addr: device address -+ * @data: value to write -+ * -+ * Performs byte write operation over I2C interface at -+ * a specified device address. 
-+ **/ -+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, -+ u8 dev_addr, u8 data) -+{ -+ s32 status = E1000_SUCCESS; -+ u32 max_retry = 1; -+ u32 retry = 0; -+ u16 swfw_mask = 0; -+ -+ DEBUGFUNC("e1000_write_i2c_byte_generic"); -+ -+ swfw_mask = E1000_SWFW_PHY0_SM; -+ -+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { -+ status = E1000_ERR_SWFW_SYNC; -+ goto write_byte_out; -+ } -+ -+ do { -+ e1000_i2c_start(hw); -+ -+ status = e1000_clock_out_i2c_byte(hw, dev_addr); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_get_i2c_ack(hw); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_clock_out_i2c_byte(hw, byte_offset); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_get_i2c_ack(hw); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_clock_out_i2c_byte(hw, data); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ status = e1000_get_i2c_ack(hw); -+ if (status != E1000_SUCCESS) -+ goto fail; -+ -+ e1000_i2c_stop(hw); -+ break; -+ -+fail: -+ e1000_i2c_bus_clear(hw); -+ retry++; -+ if (retry < max_retry) -+ DEBUGOUT("I2C byte write error - Retrying.\n"); -+ else -+ DEBUGOUT("I2C byte write error.\n"); -+ } while (retry < max_retry); -+ -+ hw->mac.ops.release_swfw_sync(hw, swfw_mask); -+ -+write_byte_out: -+ -+ return status; -+} -+ -+/** -+ * e1000_i2c_start - Sets I2C start condition -+ * @hw: pointer to hardware structure -+ * -+ * Sets I2C start condition (High -> Low on SDA while SCL is High) -+ **/ -+static void e1000_i2c_start(struct e1000_hw *hw) -+{ -+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ -+ DEBUGFUNC("e1000_i2c_start"); -+ -+ /* Start condition must begin with data and clock high */ -+ e1000_set_i2c_data(hw, &i2cctl, 1); -+ e1000_raise_i2c_clk(hw, &i2cctl); -+ -+ /* Setup time for start condition (4.7us) */ -+ usec_delay(E1000_I2C_T_SU_STA); -+ -+ e1000_set_i2c_data(hw, &i2cctl, 0); -+ -+ /* Hold time for start condition (4us) */ -+ usec_delay(E1000_I2C_T_HD_STA); -+ -+ e1000_lower_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum low period of clock is 4.7 us */ -+ usec_delay(E1000_I2C_T_LOW); -+ - } - - /** -- * igb_validate_nvm_checksum_with_offset - Validate EEPROM -- * checksum -- * @hw: pointer to the HW structure -- * @offset: offset in words of the checksum protected region -+ * e1000_i2c_stop - Sets I2C stop condition -+ * @hw: pointer to hardware structure - * -- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM -- * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
-+ * Sets I2C stop condition (Low -> High on SDA while SCL is High) - **/ --static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, -- u16 offset) -+static void e1000_i2c_stop(struct e1000_hw *hw) - { -- s32 ret_val = 0; -- u16 checksum = 0; -- u16 i, nvm_data; -+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { -- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); -- if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -- } -- checksum += nvm_data; -- } -+ DEBUGFUNC("e1000_i2c_stop"); - -- if (checksum != (u16) NVM_SUM) { -- hw_dbg("NVM Checksum Invalid\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -- } -+ /* Stop condition must begin with data low and clock high */ -+ e1000_set_i2c_data(hw, &i2cctl, 0); -+ e1000_raise_i2c_clk(hw, &i2cctl); - --out: -- return ret_val; -+ /* Setup time for stop condition (4us) */ -+ usec_delay(E1000_I2C_T_SU_STO); -+ -+ e1000_set_i2c_data(hw, &i2cctl, 1); -+ -+ /* bus free time between stop and start (4.7us)*/ -+ usec_delay(E1000_I2C_T_BUF); - } - - /** -- * igb_update_nvm_checksum_with_offset - Update EEPROM -- * checksum -- * @hw: pointer to the HW structure -- * @offset: offset in words of the checksum protected region -+ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C -+ * @hw: pointer to hardware structure -+ * @data: data byte to clock in - * -- * Updates the EEPROM checksum by reading/adding each word of the EEPROM -- * up to the checksum. Then calculates the EEPROM checksum and writes the -- * value to the EEPROM. -+ * Clocks in one byte data via I2C data/clock - **/ --static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) -+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) - { -- s32 ret_val; -- u16 checksum = 0; -- u16 i, nvm_data; -+ s32 i; -+ bool bit = 0; - -- for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { -- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); -- if (ret_val) { -- hw_dbg("NVM Read Error while updating checksum.\n"); -- goto out; -- } -- checksum += nvm_data; -+ DEBUGFUNC("e1000_clock_in_i2c_byte"); -+ -+ *data = 0; -+ for (i = 7; i >= 0; i--) { -+ e1000_clock_in_i2c_bit(hw, &bit); -+ *data |= bit << i; - } -- checksum = (u16) NVM_SUM - checksum; -- ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, -- &checksum); -- if (ret_val) -- hw_dbg("NVM Write Error while updating checksum.\n"); - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum -- * @hw: pointer to the HW structure -+ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C -+ * @hw: pointer to hardware structure -+ * @data: data byte clocked out - * -- * Calculates the EEPROM section checksum by reading/adding each word of -- * the EEPROM and then verifies that the sum of the EEPROM is -- * equal to 0xBABA. 
-+ * Clocks out one byte data via I2C data/clock - **/ --static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) -+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) - { -- s32 ret_val = 0; -- u16 eeprom_regions_count = 1; -- u16 j, nvm_data; -- u16 nvm_offset; -+ s32 status = E1000_SUCCESS; -+ s32 i; -+ u32 i2cctl; -+ bool bit = 0; - -- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); -- if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -- } -+ DEBUGFUNC("e1000_clock_out_i2c_byte"); - -- if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { -- /* if checksums compatibility bit is set validate checksums -- * for all 4 ports. -- */ -- eeprom_regions_count = 4; -- } -+ for (i = 7; i >= 0; i--) { -+ bit = (data >> i) & 0x1; -+ status = e1000_clock_out_i2c_bit(hw, bit); - -- for (j = 0; j < eeprom_regions_count; j++) { -- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -- ret_val = igb_validate_nvm_checksum_with_offset(hw, -- nvm_offset); -- if (ret_val != 0) -- goto out; -+ if (status != E1000_SUCCESS) -+ break; - } - --out: -- return ret_val; -+ /* Release SDA line (set high) */ -+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ -+ i2cctl |= E1000_I2C_DATA_OE_N; -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); -+ E1000_WRITE_FLUSH(hw); -+ -+ return status; - } - - /** -- * igb_update_nvm_checksum_82580 - Update EEPROM checksum -- * @hw: pointer to the HW structure -+ * e1000_get_i2c_ack - Polls for I2C ACK -+ * @hw: pointer to hardware structure - * -- * Updates the EEPROM section checksums for all 4 ports by reading/adding -- * each word of the EEPROM up to the checksum. Then calculates the EEPROM -- * checksum and writes the value to the EEPROM. -+ * Clocks in/out one bit via I2C data/clock - **/ --static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) -+static s32 e1000_get_i2c_ack(struct e1000_hw *hw) - { -- s32 ret_val; -- u16 j, nvm_data; -- u16 nvm_offset; -+ s32 status = E1000_SUCCESS; -+ u32 i = 0; -+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ u32 timeout = 10; -+ bool ack = true; - -- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); -- if (ret_val) { -- hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); -- goto out; -- } -+ DEBUGFUNC("e1000_get_i2c_ack"); - -- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { -- /* set compatibility bit to validate checksums appropriately */ -- nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; -- ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, -- &nvm_data); -- if (ret_val) { -- hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); -- goto out; -- } -+ e1000_raise_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum high period of clock is 4us */ -+ usec_delay(E1000_I2C_T_HIGH); -+ -+ /* Wait until SCL returns high */ -+ for (i = 0; i < timeout; i++) { -+ usec_delay(1); -+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ if (i2cctl & E1000_I2C_CLK_IN) -+ break; - } -+ if (!(i2cctl & E1000_I2C_CLK_IN)) -+ return E1000_ERR_I2C; - -- for (j = 0; j < 4; j++) { -- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); -- if (ret_val) -- goto out; -+ ack = e1000_get_i2c_data(&i2cctl); -+ if (ack) { -+ DEBUGOUT("I2C ack was not received.\n"); -+ status = E1000_ERR_I2C; - } - --out: -- return ret_val; -+ e1000_lower_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum low period of clock is 4.7 us */ -+ usec_delay(E1000_I2C_T_LOW); -+ -+ return status; - } - - /** -- * igb_validate_nvm_checksum_i350 - 
Validate EEPROM checksum -- * @hw: pointer to the HW structure -+ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock -+ * @hw: pointer to hardware structure -+ * @data: read data value - * -- * Calculates the EEPROM section checksum by reading/adding each word of -- * the EEPROM and then verifies that the sum of the EEPROM is -- * equal to 0xBABA. -+ * Clocks in one bit via I2C data/clock - **/ --static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) -+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) - { -- s32 ret_val = 0; -- u16 j; -- u16 nvm_offset; -+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- for (j = 0; j < 4; j++) { -- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -- ret_val = igb_validate_nvm_checksum_with_offset(hw, -- nvm_offset); -- if (ret_val != 0) -- goto out; -- } -+ DEBUGFUNC("e1000_clock_in_i2c_bit"); - --out: -- return ret_val; -+ e1000_raise_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum high period of clock is 4us */ -+ usec_delay(E1000_I2C_T_HIGH); -+ -+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ *data = e1000_get_i2c_data(&i2cctl); -+ -+ e1000_lower_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum low period of clock is 4.7 us */ -+ usec_delay(E1000_I2C_T_LOW); -+ -+ return E1000_SUCCESS; - } - - /** -- * igb_update_nvm_checksum_i350 - Update EEPROM checksum -- * @hw: pointer to the HW structure -+ * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock -+ * @hw: pointer to hardware structure -+ * @data: data value to write - * -- * Updates the EEPROM section checksums for all 4 ports by reading/adding -- * each word of the EEPROM up to the checksum. Then calculates the EEPROM -- * checksum and writes the value to the EEPROM. -+ * Clocks out one bit via I2C data/clock - **/ --static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) -+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) - { -- s32 ret_val = 0; -- u16 j; -- u16 nvm_offset; -+ s32 status; -+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- for (j = 0; j < 4; j++) { -- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); -- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); -- if (ret_val != 0) -- goto out; -+ DEBUGFUNC("e1000_clock_out_i2c_bit"); -+ -+ status = e1000_set_i2c_data(hw, &i2cctl, data); -+ if (status == E1000_SUCCESS) { -+ e1000_raise_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum high period of clock is 4us */ -+ usec_delay(E1000_I2C_T_HIGH); -+ -+ e1000_lower_i2c_clk(hw, &i2cctl); -+ -+ /* Minimum low period of clock is 4.7 us. -+ * This also takes care of the data hold time. 
-+ */ -+ usec_delay(E1000_I2C_T_LOW); -+ } else { -+ status = E1000_ERR_I2C; -+ DEBUGOUT1("I2C data was not set to %X\n", data); - } - --out: -- return ret_val; -+ return status; - } -- - /** -- * __igb_access_emi_reg - Read/write EMI register -- * @hw: pointer to the HW structure -- * @addr: EMI address to program -- * @data: pointer to value to read/write from/to the EMI address -- * @read: boolean flag to indicate read or write -+ * e1000_raise_i2c_clk - Raises the I2C SCL clock -+ * @hw: pointer to hardware structure -+ * @i2cctl: Current value of I2CCTL register -+ * -+ * Raises the I2C clock line '0'->'1' - **/ --static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, -- u16 *data, bool read) -+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) - { -- s32 ret_val = 0; -- -- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); -- if (ret_val) -- return ret_val; -+ DEBUGFUNC("e1000_raise_i2c_clk"); - -- if (read) -- ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); -- else -- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); -+ *i2cctl |= E1000_I2C_CLK_OUT; -+ *i2cctl &= ~E1000_I2C_CLK_OE_N; -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); -+ E1000_WRITE_FLUSH(hw); - -- return ret_val; -+ /* SCL rise time (1000ns) */ -+ usec_delay(E1000_I2C_T_RISE); - } - - /** -- * igb_read_emi_reg - Read Extended Management Interface register -- * @hw: pointer to the HW structure -- * @addr: EMI address to program -- * @data: value to be read from the EMI address -+ * e1000_lower_i2c_clk - Lowers the I2C SCL clock -+ * @hw: pointer to hardware structure -+ * @i2cctl: Current value of I2CCTL register -+ * -+ * Lowers the I2C clock line '1'->'0' - **/ --s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) -+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) - { -- return __igb_access_emi_reg(hw, addr, data, true); -+ -+ DEBUGFUNC("e1000_lower_i2c_clk"); -+ -+ *i2cctl &= ~E1000_I2C_CLK_OUT; -+ *i2cctl &= ~E1000_I2C_CLK_OE_N; -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); -+ E1000_WRITE_FLUSH(hw); -+ -+ /* SCL fall time (300ns) */ -+ usec_delay(E1000_I2C_T_FALL); - } - - /** -- * igb_set_eee_i350 - Enable/disable EEE support -- * @hw: pointer to the HW structure -- * -- * Enable/disable EEE based on setting in dev_spec structure. -+ * e1000_set_i2c_data - Sets the I2C data bit -+ * @hw: pointer to hardware structure -+ * @i2cctl: Current value of I2CCTL register -+ * @data: I2C data value (0 or 1) to set - * -+ * Sets the I2C data bit - **/ --s32 igb_set_eee_i350(struct e1000_hw *hw) -+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) - { -- u32 ipcnfg, eeer; -+ s32 status = E1000_SUCCESS; - -- if ((hw->mac.type < e1000_i350) || -- (hw->phy.media_type != e1000_media_type_copper)) -- goto out; -- ipcnfg = rd32(E1000_IPCNFG); -- eeer = rd32(E1000_EEER); -+ DEBUGFUNC("e1000_set_i2c_data"); - -- /* enable or disable per user setting */ -- if (!(hw->dev_spec._82575.eee_disable)) { -- u32 eee_su = rd32(E1000_EEE_SU); -+ if (data) -+ *i2cctl |= E1000_I2C_DATA_OUT; -+ else -+ *i2cctl &= ~E1000_I2C_DATA_OUT; - -- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); -- eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | -- E1000_EEER_LPI_FC); -+ *i2cctl &= ~E1000_I2C_DATA_OE_N; -+ *i2cctl |= E1000_I2C_CLK_OE_N; -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); -+ E1000_WRITE_FLUSH(hw); - -- /* This bit should not be set in normal operation. 
*/ -- if (eee_su & E1000_EEE_SU_LPI_CLK_STP) -- hw_dbg("LPI Clock Stop Bit should not be set!\n"); -+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ -+ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); - -- } else { -- ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | -- E1000_IPCNFG_EEE_100M_AN); -- eeer &= ~(E1000_EEER_TX_LPI_EN | -- E1000_EEER_RX_LPI_EN | -- E1000_EEER_LPI_FC); -+ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ if (data != e1000_get_i2c_data(i2cctl)) { -+ status = E1000_ERR_I2C; -+ DEBUGOUT1("Error - I2C data was not set to %X.\n", data); - } -- wr32(E1000_IPCNFG, ipcnfg); -- wr32(E1000_EEER, eeer); -- rd32(E1000_IPCNFG); -- rd32(E1000_EEER); --out: - -- return 0; -+ return status; - } - - /** -- * igb_set_eee_i354 - Enable/disable EEE support -- * @hw: pointer to the HW structure -- * -- * Enable/disable EEE legacy mode based on setting in dev_spec structure. -+ * e1000_get_i2c_data - Reads the I2C SDA data bit -+ * @hw: pointer to hardware structure -+ * @i2cctl: Current value of I2CCTL register - * -+ * Returns the I2C data bit value - **/ --s32 igb_set_eee_i354(struct e1000_hw *hw) -+static bool e1000_get_i2c_data(u32 *i2cctl) - { -- struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -- u16 phy_data; -- -- if ((hw->phy.media_type != e1000_media_type_copper) || -- (phy->id != M88E1543_E_PHY_ID)) -- goto out; -- -- if (!hw->dev_spec._82575.eee_disable) { -- /* Switch to PHY page 18. */ -- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); -- if (ret_val) -- goto out; -- -- ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, -- &phy_data); -- if (ret_val) -- goto out; -- -- phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; -- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, -- phy_data); -- if (ret_val) -- goto out; -- -- /* Return the PHY to page 0. */ -- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); -- if (ret_val) -- goto out; -- -- /* Turn on EEE advertisement. */ -- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -- E1000_EEE_ADV_DEV_I354, -- &phy_data); -- if (ret_val) -- goto out; -+ bool data; - -- phy_data |= E1000_EEE_ADV_100_SUPPORTED | -- E1000_EEE_ADV_1000_SUPPORTED; -- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -- E1000_EEE_ADV_DEV_I354, -- phy_data); -- } else { -- /* Turn off EEE advertisement. */ -- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -- E1000_EEE_ADV_DEV_I354, -- &phy_data); -- if (ret_val) -- goto out; -+ DEBUGFUNC("e1000_get_i2c_data"); - -- phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | -- E1000_EEE_ADV_1000_SUPPORTED); -- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, -- E1000_EEE_ADV_DEV_I354, -- phy_data); -- } -+ if (*i2cctl & E1000_I2C_DATA_IN) -+ data = 1; -+ else -+ data = 0; - --out: -- return ret_val; -+ return data; - } - - /** -- * igb_get_eee_status_i354 - Get EEE status -- * @hw: pointer to the HW structure -- * @status: EEE status -+ * e1000_i2c_bus_clear - Clears the I2C bus -+ * @hw: pointer to hardware structure - * -- * Get EEE status by guessing based on whether Tx or Rx LPI indications have -- * been received. -+ * Clears the I2C bus by sending nine clock pulses. -+ * Used when data line is stuck low. 
- **/ --s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) -+void e1000_i2c_bus_clear(struct e1000_hw *hw) - { -- struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -- u16 phy_data; -+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); -+ u32 i; - -- /* Check if EEE is supported on this device. */ -- if ((hw->phy.media_type != e1000_media_type_copper) || -- (phy->id != M88E1543_E_PHY_ID)) -- goto out; -+ DEBUGFUNC("e1000_i2c_bus_clear"); - -- ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, -- E1000_PCS_STATUS_DEV_I354, -- &phy_data); -- if (ret_val) -- goto out; -+ e1000_i2c_start(hw); - -- *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | -- E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; -+ e1000_set_i2c_data(hw, &i2cctl, 1); - --out: -- return ret_val; -+ for (i = 0; i < 9; i++) { -+ e1000_raise_i2c_clk(hw, &i2cctl); -+ -+ /* Min high period of clock is 4us */ -+ usec_delay(E1000_I2C_T_HIGH); -+ -+ e1000_lower_i2c_clk(hw, &i2cctl); -+ -+ /* Min low period of clock is 4.7us*/ -+ usec_delay(E1000_I2C_T_LOW); -+ } -+ -+ e1000_i2c_start(hw); -+ -+ /* Put the i2c bus back to default state */ -+ e1000_i2c_stop(hw); - } - - static const u8 e1000_emc_temp_data[4] = { -@@ -2707,14 +3782,13 @@ - E1000_EMC_DIODE3_THERM_LIMIT - }; - --#ifdef CONFIG_IGB_HWMON - /** -- * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data -+ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data - * @hw: pointer to hardware structure - * - * Updates the temperatures in mac.thermal_sensor_data - **/ --static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) -+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw) - { - u16 ets_offset; - u16 ets_cfg; -@@ -2725,17 +3799,19 @@ - u8 i; - struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; - -+ DEBUGFUNC("e1000_get_thermal_sensor_data_generic"); -+ - if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) - return E1000_NOT_IMPLEMENTED; - -- data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); -+ data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF); - - /* Return the internal sensor only if ETS is unsupported */ -- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); -+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); - if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) -- return 0; -+ return E1000_SUCCESS; - -- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); -+ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); - if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) - != NVM_ETS_TYPE_EMC) - return E1000_NOT_IMPLEMENTED; -@@ -2745,7 +3821,7 @@ - num_sensors = E1000_MAX_SENSORS; - - for (i = 1; i < num_sensors; i++) { -- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); -+ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); - sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> - NVM_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> -@@ -2757,17 +3833,17 @@ - E1000_I2C_THERMAL_SENSOR_ADDR, - &data->sensor[i].temp); - } -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds -+ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds - * @hw: pointer to hardware structure - * - * Sets the thermal sensor thresholds according to the NVM map - * and save off the threshold and location values into mac.thermal_sensor_data - **/ --static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) -+s32 
e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) - { - u16 ets_offset; - u16 ets_cfg; -@@ -2780,6 +3856,8 @@ - u8 i; - struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; - -+ DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic"); -+ - if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) - return E1000_NOT_IMPLEMENTED; - -@@ -2787,16 +3865,16 @@ - - data->sensor[0].location = 0x1; - data->sensor[0].caution_thresh = -- (rd32(E1000_THHIGHTC) & 0xFF); -+ (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF); - data->sensor[0].max_op_thresh = -- (rd32(E1000_THLOWTC) & 0xFF); -+ (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF); - - /* Return the internal sensor only if ETS is unsupported */ -- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); -+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); - if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) -- return 0; -+ return E1000_SUCCESS; - -- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); -+ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); - if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) - != NVM_ETS_TYPE_EMC) - return E1000_NOT_IMPLEMENTED; -@@ -2806,7 +3884,7 @@ - num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); - - for (i = 1; i <= num_sensors; i++) { -- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); -+ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); - sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> - NVM_ETS_DATA_INDEX_SHIFT); - sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> -@@ -2825,41 +3903,5 @@ - low_thresh_delta; - } - } -- return 0; -+ return E1000_SUCCESS; - } -- --#endif --static struct e1000_mac_operations e1000_mac_ops_82575 = { -- .init_hw = igb_init_hw_82575, -- .check_for_link = igb_check_for_link_82575, -- .rar_set = igb_rar_set, -- .read_mac_addr = igb_read_mac_addr_82575, -- .get_speed_and_duplex = igb_get_link_up_info_82575, --#ifdef CONFIG_IGB_HWMON -- .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, -- .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, --#endif --}; -- --static struct e1000_phy_operations e1000_phy_ops_82575 = { -- .acquire = igb_acquire_phy_82575, -- .get_cfg_done = igb_get_cfg_done_82575, -- .release = igb_release_phy_82575, -- .write_i2c_byte = igb_write_i2c_byte, -- .read_i2c_byte = igb_read_i2c_byte, --}; -- --static struct e1000_nvm_operations e1000_nvm_ops_82575 = { -- .acquire = igb_acquire_nvm_82575, -- .read = igb_read_nvm_eerd, -- .release = igb_release_nvm_82575, -- .write = igb_write_nvm_spi, --}; -- --const struct e1000_info e1000_82575_info = { -- .get_invariants = igb_get_invariants_82575, -- .mac_ops = &e1000_mac_ops_82575, -- .phy_ops = &e1000_phy_ops_82575, -- .nvm_ops = &e1000_nvm_ops_82575, --}; -- -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h ---- a/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,67 +1,149 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_82575_H_ - #define _E1000_82575_H_ - --void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); --void igb_power_up_serdes_link_82575(struct e1000_hw *hw); --void igb_power_down_phy_copper_82575(struct e1000_hw *hw); --void igb_rx_fifo_flush_82575(struct e1000_hw *hw); --s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, -- u8 *data); --s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, -- u8 data); -- --#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ -- (ID_LED_DEF1_DEF2 << 8) | \ -- (ID_LED_DEF1_DEF2 << 4) | \ -- (ID_LED_OFF1_ON2)) -- --#define E1000_RAR_ENTRIES_82575 16 --#define E1000_RAR_ENTRIES_82576 24 --#define E1000_RAR_ENTRIES_82580 24 --#define E1000_RAR_ENTRIES_I350 32 -- --#define E1000_SW_SYNCH_MB 0x00000100 --#define E1000_STAT_DEV_RST_SET 0x00100000 --#define E1000_CTRL_DEV_RST 0x20000000 -- --/* SRRCTL bit definitions */ --#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ --#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ --#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 --#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 --#define E1000_SRRCTL_DROP_EN 0x80000000 --#define E1000_SRRCTL_TIMESTAMP 0x40000000 -+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ -+ (ID_LED_DEF1_DEF2 << 8) | \ -+ (ID_LED_DEF1_DEF2 << 4) | \ -+ (ID_LED_OFF1_ON2)) -+/* -+ * Receive Address Register Count -+ * Number of high/low register pairs in the RAR. The RAR (Receive Address -+ * Registers) holds the directed and multicast addresses that we monitor. -+ * These entries are also used for MAC-based filtering. -+ */ -+/* -+ * For 82576, there are an additional set of RARs that begin at an offset -+ * separate from the first set of RARs. 
-+ */ -+#define E1000_RAR_ENTRIES_82575 16 -+#define E1000_RAR_ENTRIES_82576 24 -+#define E1000_RAR_ENTRIES_82580 24 -+#define E1000_RAR_ENTRIES_I350 32 -+#define E1000_SW_SYNCH_MB 0x00000100 -+#define E1000_STAT_DEV_RST_SET 0x00100000 -+#define E1000_CTRL_DEV_RST 0x20000000 -+ -+struct e1000_adv_data_desc { -+ __le64 buffer_addr; /* Address of the descriptor's data buffer */ -+ union { -+ u32 data; -+ struct { -+ u32 datalen:16; /* Data buffer length */ -+ u32 rsvd:4; -+ u32 dtyp:4; /* Descriptor type */ -+ u32 dcmd:8; /* Descriptor command */ -+ } config; -+ } lower; -+ union { -+ u32 data; -+ struct { -+ u32 status:4; /* Descriptor status */ -+ u32 idx:4; -+ u32 popts:6; /* Packet Options */ -+ u32 paylen:18; /* Payload length */ -+ } options; -+ } upper; -+}; - -+#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ -+#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ -+#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ -+#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ -+#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ -+#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ -+#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ -+#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ -+#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ -+#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ -+#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ -+#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ -+#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ -+/* Extended Device Control */ -+#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ -+ -+struct e1000_adv_context_desc { -+ union { -+ u32 ip_config; -+ struct { -+ u32 iplen:9; -+ u32 maclen:7; -+ u32 vlan_tag:16; -+ } fields; -+ } ip_setup; -+ u32 seq_num; -+ union { -+ u64 l4_config; -+ struct { -+ u32 mkrloc:9; -+ u32 tucmd:11; -+ u32 dtyp:4; -+ u32 adv:8; -+ u32 rsvd:4; -+ u32 idx:4; -+ u32 l4len:8; -+ u32 mss:16; -+ } fields; -+ } l4_setup; -+}; - --#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 --#define E1000_MRQC_ENABLE_VMDQ 0x00000003 --#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 --#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 --#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 --#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 -+/* SRRCTL bit definitions */ -+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 -+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 -+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -+#define E1000_SRRCTL_TIMESTAMP 0x40000000 -+#define E1000_SRRCTL_DROP_EN 0x80000000 -+ -+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 -+ -+#define E1000_TX_HEAD_WB_ENABLE 0x1 -+#define E1000_TX_SEQNUM_WB_ENABLE 0x2 -+ -+#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 -+#define E1000_MRQC_ENABLE_VMDQ 0x00000003 -+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 -+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 -+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 
-+#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 -+ -+#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 -+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ -+ E1000_VMRCTL_MIRROR_PORT_SHIFT) -+#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) -+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) -+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) - - #define E1000_EICR_TX_QUEUE ( \ - E1000_EICR_TX_QUEUE0 | \ -@@ -75,42 +157,114 @@ - E1000_EICR_RX_QUEUE2 | \ - E1000_EICR_RX_QUEUE3) - -+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE -+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE -+ -+#define EIMS_ENABLE_MASK ( \ -+ E1000_EIMS_RX_QUEUE | \ -+ E1000_EIMS_TX_QUEUE | \ -+ E1000_EIMS_TCP_TIMER | \ -+ E1000_EIMS_OTHER) -+ - /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ --#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ --#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ -+#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ -+#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ -+#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -+#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ -+#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ -+#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ -+#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ -+#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ -+#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ -+#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ - - /* Receive Descriptor - Advanced */ - union e1000_adv_rx_desc { - struct { -- __le64 pkt_addr; /* Packet buffer address */ -- __le64 hdr_addr; /* Header buffer address */ -+ __le64 pkt_addr; /* Packet buffer address */ -+ __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { -- struct { -- __le16 pkt_info; /* RSS type, Packet type */ -- __le16 hdr_info; /* Split Head, buf len */ -+ union { -+ __le32 data; -+ struct { -+ __le16 pkt_info; /*RSS type, Pkt type*/ -+ /* Split Header, header buffer len */ -+ __le16 hdr_info; -+ } hs_rss; - } lo_dword; - union { -- __le32 rss; /* RSS Hash */ -+ __le32 rss; /* RSS Hash */ - struct { -- __le16 ip_id; /* IP id */ -- __le16 csum; /* Packet Checksum */ -+ __le16 ip_id; /* IP id */ -+ __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { -- __le32 status_error; /* ext status/error */ -- __le16 length; /* Packet length */ -- __le16 vlan; /* VLAN tag */ -+ __le32 status_error; /* ext status/error */ -+ __le16 length; /* Packet length */ -+ __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ - }; - --#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 --#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 --#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ --#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ -+#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F -+#define E1000_RXDADV_RSSTYPE_SHIFT 12 -+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 -+#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 -+#define E1000_RXDADV_SPH 0x8000 -+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ -+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ -+#define E1000_RXDADV_ERR_HBO 0x00800000 -+ -+/* RSS Hash results */ -+#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 -+#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 -+#define 
E1000_RXDADV_RSSTYPE_IPV4 0x00000002 -+#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 -+#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 -+#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 -+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 -+#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 -+#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 -+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 -+ -+/* RSS Packet Types as indicated in the receive descriptor */ -+#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 -+#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 -+#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 -+#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ -+#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ -+#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ -+#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ -+#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ -+#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ -+#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ -+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ -+ -+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ -+#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ -+#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ -+#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ -+#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ -+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ -+ -+/* LinkSec results */ -+/* Security Processing bit Indication */ -+#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 -+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 -+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 -+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 -+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 -+ -+#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 -+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 -+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 -+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 -+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 - - /* Transmit Descriptor - Advanced */ - union e1000_adv_tx_desc { -@@ -127,16 +281,26 @@ - }; - - /* Adv Transmit Descriptor Config Masks */ --#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ --#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ --#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ --#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ --#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ --#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ --#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ --#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ --#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ --#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ -+#define 
E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -+#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ -+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ -+#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ -+#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -+#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -+#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -+/* 1st & Last TSO-full iSCSI PDU*/ -+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 -+#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ -+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ - - /* Context descriptors */ - struct e1000_adv_tx_context_desc { -@@ -146,127 +310,174 @@ - __le32 mss_l4len_idx; - }; - --#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ --#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ --#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ --#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ -+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -+#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -+#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ - /* IPSec Encrypt Enable for ESP */ --#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ --#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ -+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 -+/* Req requires Markers and CRC */ -+#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 -+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - /* Adv ctxt IPSec SA IDX mask */ -+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF - /* Adv ctxt IPSec ESP len mask */ -+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF - - /* Additional Transmit Descriptor Control definitions */ --#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ -+#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ - /* Tx Queue Arbitration Priority 0=low, 1=high */ -+#define E1000_TXDCTL_PRIORITY 0x08000000 - - /* Additional Receive Descriptor Control definitions */ --#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ -+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ -+#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
wbk flushing */ - - /* Direct Cache Access (DCA) definitions */ --#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ --#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ -+#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ -+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ - --#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ --#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ --#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ --#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ --#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ -- --#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ --#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ --#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ --#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ --#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ -- --/* Additional DCA related definitions, note change in position of CPUID */ --#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ --#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ --#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ --#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ -+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ -+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ -+ -+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ -+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ -+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ -+ -+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ -+ -+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ -+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ -+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ -+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ -+ -+/* Additional interrupt register bit definitions */ -+#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ -+#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ -+#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ - - /* ETQF register bit definitions */ --#define E1000_ETQF_FILTER_ENABLE (1 << 26) --#define E1000_ETQF_1588 (1 << 30) -+#define E1000_ETQF_FILTER_ENABLE (1 << 26) -+#define E1000_ETQF_IMM_INT (1 << 29) -+#define E1000_ETQF_1588 (1 << 30) -+#define E1000_ETQF_QUEUE_ENABLE (1 << 31) -+/* -+ * ETQF filter list: one static filter per filter consumer. This is -+ * to avoid filter collisions later. Add new filters -+ * here!! 
-+ * -+ * Current filters: -+ * EAPOL 802.1x (0x888e): Filter 0 -+ */ -+#define E1000_ETQF_FILTER_EAPOL 0 - --/* FTQF register bit definitions */ --#define E1000_FTQF_VF_BP 0x00008000 --#define E1000_FTQF_1588_TIME_STAMP 0x08000000 --#define E1000_FTQF_MASK 0xF0000000 --#define E1000_FTQF_MASK_PROTO_BP 0x10000000 --#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 -- --#define E1000_NVM_APME_82575 0x0400 --#define MAX_NUM_VFS 8 -- --#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ --#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ --#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ --#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 --#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ -+#define E1000_FTQF_VF_BP 0x00008000 -+#define E1000_FTQF_1588_TIME_STAMP 0x08000000 -+#define E1000_FTQF_MASK 0xF0000000 -+#define E1000_FTQF_MASK_PROTO_BP 0x10000000 -+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 -+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 -+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 -+ -+#define E1000_NVM_APME_82575 0x0400 -+#define MAX_NUM_VFS 7 -+ -+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ -+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ -+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ -+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 -+#define E1000_DTXSWC_LLE_SHIFT 16 -+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ - - /* Easy defines for setting default pool, would normally be left a zero */ --#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 --#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) -+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 -+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) - - /* Other useful VMD_CTL register defines */ --#define E1000_VT_CTL_IGNORE_MAC (1 << 28) --#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) --#define E1000_VT_CTL_VM_REPL_EN (1 << 30) -+#define E1000_VT_CTL_IGNORE_MAC (1 << 28) -+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) -+#define E1000_VT_CTL_VM_REPL_EN (1 << 30) - - /* Per VM Offload register setup */ --#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ --#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ --#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ --#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ --#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ --#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ --#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ --#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ --#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ --#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ -- --#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ --#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ --#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ -- --#define E1000_VLVF_ARRAY_SIZE 32 --#define E1000_VLVF_VLANID_MASK 0x00000FFF --#define E1000_VLVF_POOLSEL_SHIFT 12 --#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) --#define E1000_VLVF_LVLAN 0x00100000 --#define E1000_VLVF_VLANID_ENABLE 0x80000000 -- --#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ --#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never 
insert VLAN tag */ -- --#define E1000_IOVCTL 0x05BBC --#define E1000_IOVCTL_REUSE_VFQ 0x00000001 -- --#define E1000_RPLOLR_STRVLAN 0x40000000 --#define E1000_RPLOLR_STRCRC 0x80000000 -- --#define E1000_DTXCTL_8023LL 0x0004 --#define E1000_DTXCTL_VLAN_ADDED 0x0008 --#define E1000_DTXCTL_OOS_ENABLE 0x0010 --#define E1000_DTXCTL_MDP_EN 0x0020 --#define E1000_DTXCTL_SPOOF_INT 0x0040 -+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ -+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ -+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ -+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ -+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ -+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ -+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ -+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ -+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ -+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ -+ -+#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ -+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ -+#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ -+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ -+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ -+ -+#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ -+#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ -+ -+#define E1000_VLVF_ARRAY_SIZE 32 -+#define E1000_VLVF_VLANID_MASK 0x00000FFF -+#define E1000_VLVF_POOLSEL_SHIFT 12 -+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) -+#define E1000_VLVF_LVLAN 0x00100000 -+#define E1000_VLVF_VLANID_ENABLE 0x80000000 -+ -+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ -+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ -+ -+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ -+ -+#define E1000_IOVCTL 0x05BBC -+#define E1000_IOVCTL_REUSE_VFQ 0x00000001 -+ -+#define E1000_RPLOLR_STRVLAN 0x40000000 -+#define E1000_RPLOLR_STRCRC 0x80000000 -+ -+#define E1000_TCTL_EXT_COLD 0x000FFC00 -+#define E1000_TCTL_EXT_COLD_SHIFT 10 -+ -+#define E1000_DTXCTL_8023LL 0x0004 -+#define E1000_DTXCTL_VLAN_ADDED 0x0008 -+#define E1000_DTXCTL_OOS_ENABLE 0x0010 -+#define E1000_DTXCTL_MDP_EN 0x0020 -+#define E1000_DTXCTL_SPOOF_INT 0x0040 - - #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) - --#define ALL_QUEUES 0xFFFF -- --/* RX packet buffer size defines */ --#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F --void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); --void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); --void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); --u16 igb_rxpbs_adjust_82580(u32 data); --s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); --s32 igb_set_eee_i350(struct e1000_hw *); --s32 igb_set_eee_i354(struct e1000_hw *); --s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); -+#define ALL_QUEUES 0xFFFF - -+/* Rx packet buffer size defines */ -+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F -+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); -+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); -+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); -+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); -+s32 e1000_init_hw_82575(struct 
e1000_hw *hw); -+ -+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); -+u16 e1000_rxpbs_adjust_82580(u32 data); -+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); -+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); -+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); -+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); -+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); -+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw); - #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 - #define E1000_EMC_INTERNAL_DATA 0x00 - #define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 -@@ -276,4 +487,26 @@ - #define E1000_EMC_DIODE2_THERM_LIMIT 0x1A - #define E1000_EMC_DIODE3_DATA 0x2A - #define E1000_EMC_DIODE3_THERM_LIMIT 0x30 --#endif -+ -+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw); -+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw); -+ -+/* I2C SDA and SCL timing parameters for standard mode */ -+#define E1000_I2C_T_HD_STA 4 -+#define E1000_I2C_T_LOW 5 -+#define E1000_I2C_T_HIGH 4 -+#define E1000_I2C_T_SU_STA 5 -+#define E1000_I2C_T_HD_DATA 5 -+#define E1000_I2C_T_SU_DATA 1 -+#define E1000_I2C_T_RISE 1 -+#define E1000_I2C_T_FALL 1 -+#define E1000_I2C_T_SU_STO 4 -+#define E1000_I2C_T_BUF 5 -+ -+s32 e1000_set_i2c_bb(struct e1000_hw *hw); -+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, -+ u8 dev_addr, u8 *data); -+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, -+ u8 dev_addr, u8 data); -+void e1000_i2c_bus_clear(struct e1000_hw *hw); -+#endif /* _E1000_82575_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.c b/drivers/net/ethernet/intel/igb/e1000_api.c ---- a/drivers/net/ethernet/intel/igb/e1000_api.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_api.c 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,1184 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "e1000_api.h" -+ -+/** -+ * e1000_init_mac_params - Initialize MAC function pointers -+ * @hw: pointer to the HW structure -+ * -+ * This function initializes the function pointers for the MAC -+ * set of functions. Called by drivers or by e1000_setup_init_funcs. 
-+ **/ -+s32 e1000_init_mac_params(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ -+ if (hw->mac.ops.init_params) { -+ ret_val = hw->mac.ops.init_params(hw); -+ if (ret_val) { -+ DEBUGOUT("MAC Initialization Error\n"); -+ goto out; -+ } -+ } else { -+ DEBUGOUT("mac.init_mac_params was NULL\n"); -+ ret_val = -E1000_ERR_CONFIG; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_init_nvm_params - Initialize NVM function pointers -+ * @hw: pointer to the HW structure -+ * -+ * This function initializes the function pointers for the NVM -+ * set of functions. Called by drivers or by e1000_setup_init_funcs. -+ **/ -+s32 e1000_init_nvm_params(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ -+ if (hw->nvm.ops.init_params) { -+ ret_val = hw->nvm.ops.init_params(hw); -+ if (ret_val) { -+ DEBUGOUT("NVM Initialization Error\n"); -+ goto out; -+ } -+ } else { -+ DEBUGOUT("nvm.init_nvm_params was NULL\n"); -+ ret_val = -E1000_ERR_CONFIG; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_init_phy_params - Initialize PHY function pointers -+ * @hw: pointer to the HW structure -+ * -+ * This function initializes the function pointers for the PHY -+ * set of functions. Called by drivers or by e1000_setup_init_funcs. -+ **/ -+s32 e1000_init_phy_params(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ -+ if (hw->phy.ops.init_params) { -+ ret_val = hw->phy.ops.init_params(hw); -+ if (ret_val) { -+ DEBUGOUT("PHY Initialization Error\n"); -+ goto out; -+ } -+ } else { -+ DEBUGOUT("phy.init_phy_params was NULL\n"); -+ ret_val = -E1000_ERR_CONFIG; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_init_mbx_params - Initialize mailbox function pointers -+ * @hw: pointer to the HW structure -+ * -+ * This function initializes the function pointers for the PHY -+ * set of functions. Called by drivers or by e1000_setup_init_funcs. -+ **/ -+s32 e1000_init_mbx_params(struct e1000_hw *hw) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ -+ if (hw->mbx.ops.init_params) { -+ ret_val = hw->mbx.ops.init_params(hw); -+ if (ret_val) { -+ DEBUGOUT("Mailbox Initialization Error\n"); -+ goto out; -+ } -+ } else { -+ DEBUGOUT("mbx.init_mbx_params was NULL\n"); -+ ret_val = -E1000_ERR_CONFIG; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * igb_e1000_set_mac_type - Sets MAC type -+ * @hw: pointer to the HW structure -+ * -+ * This function sets the mac type of the adapter based on the -+ * device ID stored in the hw structure. -+ * MUST BE FIRST FUNCTION CALLED (explicitly or through -+ * e1000_setup_init_funcs()). 
-+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_set_mac_type(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("igb_e1000_set_mac_type"); -+ -+ switch (hw->device_id) { -+ case E1000_DEV_ID_82575EB_COPPER: -+ case E1000_DEV_ID_82575EB_FIBER_SERDES: -+ case E1000_DEV_ID_82575GB_QUAD_COPPER: -+ mac->type = e1000_82575; -+ break; -+ case E1000_DEV_ID_82576: -+ case E1000_DEV_ID_82576_FIBER: -+ case E1000_DEV_ID_82576_SERDES: -+ case E1000_DEV_ID_82576_QUAD_COPPER: -+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2: -+ case E1000_DEV_ID_82576_NS: -+ case E1000_DEV_ID_82576_NS_SERDES: -+ case E1000_DEV_ID_82576_SERDES_QUAD: -+ mac->type = e1000_82576; -+ break; -+ case E1000_DEV_ID_82580_COPPER: -+ case E1000_DEV_ID_82580_FIBER: -+ case E1000_DEV_ID_82580_SERDES: -+ case E1000_DEV_ID_82580_SGMII: -+ case E1000_DEV_ID_82580_COPPER_DUAL: -+ case E1000_DEV_ID_82580_QUAD_FIBER: -+ case E1000_DEV_ID_DH89XXCC_SGMII: -+ case E1000_DEV_ID_DH89XXCC_SERDES: -+ case E1000_DEV_ID_DH89XXCC_BACKPLANE: -+ case E1000_DEV_ID_DH89XXCC_SFP: -+ mac->type = e1000_82580; -+ break; -+ case E1000_DEV_ID_I350_COPPER: -+ case E1000_DEV_ID_I350_FIBER: -+ case E1000_DEV_ID_I350_SERDES: -+ case E1000_DEV_ID_I350_SGMII: -+ case E1000_DEV_ID_I350_DA4: -+ mac->type = e1000_i350; -+ break; -+ case E1000_DEV_ID_I210_COPPER_FLASHLESS: -+ case E1000_DEV_ID_I210_SERDES_FLASHLESS: -+ case E1000_DEV_ID_I210_COPPER: -+ case E1000_DEV_ID_I210_COPPER_OEM1: -+ case E1000_DEV_ID_I210_COPPER_IT: -+ case E1000_DEV_ID_I210_FIBER: -+ case E1000_DEV_ID_I210_SERDES: -+ case E1000_DEV_ID_I210_SGMII: -+ mac->type = e1000_i210; -+ break; -+ case E1000_DEV_ID_I211_COPPER: -+ mac->type = e1000_i211; -+ break; -+ -+ case E1000_DEV_ID_I354_BACKPLANE_1GBPS: -+ case E1000_DEV_ID_I354_SGMII: -+ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: -+ mac->type = e1000_i354; -+ break; -+ default: -+ /* Should never have loaded on this device */ -+ ret_val = -E1000_ERR_MAC_INIT; -+ break; -+ } -+ -+ return ret_val; -+} -+ -+/** -+ * e1000_setup_init_funcs - Initializes function pointers -+ * @hw: pointer to the HW structure -+ * @init_device: true will initialize the rest of the function pointers -+ * getting the device ready for use. false will only set -+ * MAC type and the function pointers for the other init -+ * functions. Passing false will not generate any hardware -+ * reads or writes. -+ * -+ * This function must be called by a driver in order to use the rest -+ * of the 'shared' code files. Called by drivers only. -+ **/ -+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) -+{ -+ s32 ret_val; -+ -+ /* Can't do much good without knowing the MAC type. */ -+ ret_val = igb_e1000_set_mac_type(hw); -+ if (ret_val) { -+ DEBUGOUT("ERROR: MAC type could not be set properly.\n"); -+ goto out; -+ } -+ -+ if (!hw->hw_addr) { -+ DEBUGOUT("ERROR: Registers not mapped\n"); -+ ret_val = -E1000_ERR_CONFIG; -+ goto out; -+ } -+ -+ /* -+ * Init function pointers to generic implementations. We do this first -+ * allowing a driver module to override it afterward. -+ */ -+ e1000_init_mac_ops_generic(hw); -+ e1000_init_phy_ops_generic(hw); -+ e1000_init_nvm_ops_generic(hw); -+ e1000_init_mbx_ops_generic(hw); -+ -+ /* -+ * Set up the init function pointers. These are functions within the -+ * adapter family file that sets up function pointers for the rest of -+ * the functions in that family. 
-+ */ -+ switch (hw->mac.type) { -+ case e1000_82575: -+ case e1000_82576: -+ case e1000_82580: -+ case e1000_i350: -+ case e1000_i354: -+ e1000_init_function_pointers_82575(hw); -+ break; -+ case e1000_i210: -+ case e1000_i211: -+ e1000_init_function_pointers_i210(hw); -+ break; -+ default: -+ DEBUGOUT("Hardware not supported\n"); -+ ret_val = -E1000_ERR_CONFIG; -+ break; -+ } -+ -+ /* -+ * Initialize the rest of the function pointers. These require some -+ * register reads/writes in some cases. -+ */ -+ if (!(ret_val) && init_device) { -+ ret_val = e1000_init_mac_params(hw); -+ if (ret_val) -+ goto out; -+ -+ ret_val = e1000_init_nvm_params(hw); -+ if (ret_val) -+ goto out; -+ -+ ret_val = e1000_init_phy_params(hw); -+ if (ret_val) -+ goto out; -+ -+ ret_val = e1000_init_mbx_params(hw); -+ if (ret_val) -+ goto out; -+ } -+ -+out: -+ return ret_val; -+} -+ -+/** -+ * igb_e1000_get_bus_info - Obtain bus information for adapter -+ * @hw: pointer to the HW structure -+ * -+ * This will obtain information about the HW bus for which the -+ * adapter is attached and stores it in the hw structure. This is a -+ * function pointer entry point called by drivers. -+ **/ -+ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_get_bus_info(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.get_bus_info) -+ return hw->mac.ops.get_bus_info(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_clear_vfta - Clear VLAN filter table -+ * @hw: pointer to the HW structure -+ * -+ * This clears the VLAN filter table on the adapter. This is a function -+ * pointer entry point called by drivers. -+ **/ -+void e1000_clear_vfta(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.clear_vfta) -+ hw->mac.ops.clear_vfta(hw); -+} -+ -+/** -+ * igb_e1000_write_vfta - Write value to VLAN filter table -+ * @hw: pointer to the HW structure -+ * @offset: the 32-bit offset in which to write the value to. -+ * @value: the 32-bit value to write at location offset. -+ * -+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter -+ * table. This is a function pointer entry point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) -+{ -+ if (hw->mac.ops.write_vfta) -+ hw->mac.ops.write_vfta(hw, offset, value); -+} -+ -+/** -+ * e1000_update_mc_addr_list - Update Multicast addresses -+ * @hw: pointer to the HW structure -+ * @mc_addr_list: array of multicast addresses to program -+ * @mc_addr_count: number of multicast addresses to program -+ * -+ * Updates the Multicast Table Array. -+ * The caller must have a packed mc_addr_list of multicast addresses. -+ **/ -+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, -+ u32 mc_addr_count) -+{ -+ if (hw->mac.ops.update_mc_addr_list) -+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, -+ mc_addr_count); -+} -+ -+/** -+ * igb_e1000_force_mac_fc - Force MAC flow control -+ * @hw: pointer to the HW structure -+ * -+ * Force the MAC's flow control settings. Currently no func pointer exists -+ * and all implementations are handled in the generic version of this -+ * function. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_force_mac_fc(struct e1000_hw *hw) -+{ -+ return e1000_force_mac_fc_generic(hw); -+} -+ -+/** -+ * igb_e1000_check_for_link - Check/Store link connection -+ * @hw: pointer to the HW structure -+ * -+ * This checks the link condition of the adapter and stores the -+ * results in the hw->mac structure. 
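Editor's note, not part of the patch: the kernel-doc above states an explicit ordering contract -- igb_e1000_set_mac_type() (normally via e1000_setup_init_funcs()) must run before any other shared-code call, the registers must already be mapped, and passing init_device as true additionally runs the per-family init_params hooks. A minimal sketch of that call order under those assumptions follows; example_shared_code_bringup() is a hypothetical caller, not code from this patch.

/* illustrative only -- shows the bring-up order required by the comments
 * above; a real probe path would have filled in hw->device_id and
 * hw->hw_addr before this point
 */
static s32 example_shared_code_bringup(struct e1000_hw *hw)
{
	s32 ret;

	/* sets mac.type, wires every ops table, and (because of 'true')
	 * also runs the mac/nvm/phy/mbx init_params callbacks
	 */
	ret = e1000_setup_init_funcs(hw, true);
	if (ret)
		return ret;

	/* the wrapper entry points are now safe to use */
	return igb_e1000_get_bus_info(hw);
}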
This is a function pointer entry -+ * point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_check_for_link(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.check_for_link) -+ return hw->mac.ops.check_for_link(hw); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * e1000_check_mng_mode - Check management mode -+ * @hw: pointer to the HW structure -+ * -+ * This checks if the adapter has manageability enabled. -+ * This is a function pointer entry point called by drivers. -+ **/ -+bool e1000_check_mng_mode(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.check_mng_mode) -+ return hw->mac.ops.check_mng_mode(hw); -+ -+ return false; -+} -+ -+/** -+ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface -+ * @hw: pointer to the HW structure -+ * @buffer: pointer to the host interface -+ * @length: size of the buffer -+ * -+ * Writes the DHCP information to the host interface. -+ **/ -+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) -+{ -+ return e1000_mng_write_dhcp_info_generic(hw, buffer, length); -+} -+ -+/** -+ * igb_e1000_reset_hw - Reset hardware -+ * @hw: pointer to the HW structure -+ * -+ * This resets the hardware into a known state. This is a function pointer -+ * entry point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_reset_hw(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.reset_hw) -+ return hw->mac.ops.reset_hw(hw); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * igb_e1000_init_hw - Initialize hardware -+ * @hw: pointer to the HW structure -+ * -+ * This inits the hardware readying it for operation. This is a function -+ * pointer entry point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_init_hw(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.init_hw) -+ return hw->mac.ops.init_hw(hw); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * igb_e1000_setup_link - Configures link and flow control -+ * @hw: pointer to the HW structure -+ * -+ * This configures link and flow control settings for the adapter. This -+ * is a function pointer entry point called by drivers. While modules can -+ * also call this, they probably call their own version of this function. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_setup_link(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.setup_link) -+ return hw->mac.ops.setup_link(hw); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * igb_e1000_get_speed_and_duplex - Returns current speed and duplex -+ * @hw: pointer to the HW structure -+ * @speed: pointer to a 16-bit value to store the speed -+ * @duplex: pointer to a 16-bit value to store the duplex. -+ * -+ * This returns the speed and duplex of the adapter in the two 'out' -+ * variables passed in. This is a function pointer entry point called -+ * by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) -+{ -+ if (hw->mac.ops.get_link_up_info) -+ return hw->mac.ops.get_link_up_info(hw, speed, duplex); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * igb_e1000_setup_led - Configures SW controllable LED -+ * @hw: pointer to the HW structure -+ * -+ * This prepares the SW controllable LED for use and saves the current state -+ * of the LED so it can be later restored. This is a function pointer entry -+ * point called by drivers. 
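Editor's note, not part of the patch: the link entry points above are typically paired in a watchdog-style poll -- refresh the link bookkeeping, then read back the resolved speed and duplex. A short hedged sketch; example_poll_link() is a hypothetical caller.

/* illustrative only */
static void example_poll_link(struct e1000_hw *hw)
{
	u16 speed = 0, duplex = 0;

	/* lets the MAC-specific callback update hw->mac link state */
	if (igb_e1000_check_for_link(hw))
		return;

	/* read back what auto-negotiation (or forced mode) resolved to */
	if (!igb_e1000_get_speed_and_duplex(hw, &speed, &duplex))
		DEBUGOUT("link speed/duplex resolved\n");
}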
-+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_setup_led(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.setup_led) -+ return hw->mac.ops.setup_led(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_cleanup_led - Restores SW controllable LED -+ * @hw: pointer to the HW structure -+ * -+ * This restores the SW controllable LED to the value saved off by -+ * igb_e1000_setup_led. This is a function pointer entry point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_cleanup_led(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.cleanup_led) -+ return hw->mac.ops.cleanup_led(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_blink_led - Blink SW controllable LED -+ * @hw: pointer to the HW structure -+ * -+ * This starts the adapter LED blinking. Request the LED to be setup first -+ * and cleaned up after. This is a function pointer entry point called by -+ * drivers. -+ **/ -+s32 e1000_blink_led(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.blink_led) -+ return hw->mac.ops.blink_led(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_id_led_init - store LED configurations in SW -+ * @hw: pointer to the HW structure -+ * -+ * Initializes the LED config in SW. This is a function pointer entry point -+ * called by drivers. -+ **/ -+s32 e1000_id_led_init(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.id_led_init) -+ return hw->mac.ops.id_led_init(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_led_on - Turn on SW controllable LED -+ * @hw: pointer to the HW structure -+ * -+ * Turns the SW defined LED on. This is a function pointer entry point -+ * called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_led_on(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.led_on) -+ return hw->mac.ops.led_on(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_led_off - Turn off SW controllable LED -+ * @hw: pointer to the HW structure -+ * -+ * Turns the SW defined LED off. This is a function pointer entry point -+ * called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_led_off(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.led_off) -+ return hw->mac.ops.led_off(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_reset_adaptive - Reset adaptive IFS -+ * @hw: pointer to the HW structure -+ * -+ * Resets the adaptive IFS. Currently no func pointer exists and all -+ * implementations are handled in the generic version of this function. -+ **/ -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_reset_adaptive(struct e1000_hw *hw) -+{ -+ e1000_reset_adaptive_generic(hw); -+} -+ -+/** -+ * igb_e1000_update_adaptive - Update adaptive IFS -+ * @hw: pointer to the HW structure -+ * -+ * Updates adapter IFS. Currently no func pointer exists and all -+ * implementations are handled in the generic version of this function. -+ **/ -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_update_adaptive(struct e1000_hw *hw) -+{ -+ e1000_update_adaptive_generic(hw); -+} -+ -+/** -+ * e1000_disable_pcie_master - Disable PCI-Express master access -+ * @hw: pointer to the HW structure -+ * -+ * Disables PCI-Express master access and verifies there are no pending -+ * requests. Currently no func pointer exists and all implementations are -+ * handled in the generic version of this function. 
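Editor's note, not part of the patch: the LED comments above describe a save/blink/restore protocol (setup first, cleanup after). A sketch of an "identify port" helper built on that protocol; example_identify_led() is a hypothetical name.

/* illustrative only */
static s32 example_identify_led(struct e1000_hw *hw)
{
	s32 ret;

	ret = igb_e1000_setup_led(hw);	/* save current LED state */
	if (ret)
		return ret;

	ret = e1000_blink_led(hw);	/* start blinking */

	/* ... caller waits while the user locates the port ... */

	igb_e1000_cleanup_led(hw);	/* restore the saved state */
	return ret;
}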
-+ **/ -+s32 e1000_disable_pcie_master(struct e1000_hw *hw) -+{ -+ return e1000_disable_pcie_master_generic(hw); -+} -+ -+/** -+ * igb_e1000_config_collision_dist - Configure collision distance -+ * @hw: pointer to the HW structure -+ * -+ * Configures the collision distance to the default value and is used -+ * during link setup. -+ **/ -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_config_collision_dist(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.config_collision_dist) -+ hw->mac.ops.config_collision_dist(hw); -+} -+ -+/** -+ * igb_e1000_rar_set - Sets a receive address register -+ * @hw: pointer to the HW structure -+ * @addr: address to set the RAR to -+ * @index: the RAR to set -+ * -+ * Sets a Receive Address Register (RAR) to the specified address. -+ **/ -+/* Changed name, duplicated with e1000 */ -+int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) -+{ -+ if (hw->mac.ops.rar_set) -+ return hw->mac.ops.rar_set(hw, addr, index); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state -+ * @hw: pointer to the HW structure -+ * -+ * Ensures that the MDI/MDIX SW state is valid. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.validate_mdi_setting) -+ return hw->mac.ops.validate_mdi_setting(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_hash_mc_addr - Determines address location in multicast table -+ * @hw: pointer to the HW structure -+ * @mc_addr: Multicast address to hash. -+ * -+ * This hashes an address to determine its location in the multicast -+ * table. Currently no func pointer exists and all implementations -+ * are handled in the generic version of this function. -+ **/ -+/* Changed name, duplicated with e1000 */ -+u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) -+{ -+ return e1000_hash_mc_addr_generic(hw, mc_addr); -+} -+ -+/** -+ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX -+ * @hw: pointer to the HW structure -+ * -+ * Enables packet filtering on transmit packets if manageability is enabled -+ * and host interface is enabled. -+ * Currently no func pointer exists and all implementations are handled in the -+ * generic version of this function. -+ **/ -+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) -+{ -+ return e1000_enable_tx_pkt_filtering_generic(hw); -+} -+ -+/** -+ * e1000_mng_host_if_write - Writes to the manageability host interface -+ * @hw: pointer to the HW structure -+ * @buffer: pointer to the host interface buffer -+ * @length: size of the buffer -+ * @offset: location in the buffer to write to -+ * @sum: sum of the data (not checksum) -+ * -+ * This function writes the buffer content at the offset given on the host if. -+ * It also does alignment considerations to do the writes in most efficient -+ * way. Also fills up the sum of the buffer in *buffer parameter. -+ **/ -+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, -+ u16 offset, u8 *sum) -+{ -+ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); -+} -+ -+/** -+ * e1000_mng_write_cmd_header - Writes manageability command header -+ * @hw: pointer to the HW structure -+ * @hdr: pointer to the host interface command header -+ * -+ * Writes the command header after does the checksum calculation. 
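Editor's note, not part of the patch: a loose sketch of how the manageability helpers above might combine on a transmit path that mirrors DHCP traffic to firmware only when a management controller is present and host-interface filtering is active. The call pattern and example_maybe_forward_dhcp() are illustrative assumptions, not the driver's actual code path.

/* illustrative only */
static void example_maybe_forward_dhcp(struct e1000_hw *hw, u8 *pkt, u16 len)
{
	if (!e1000_check_mng_mode(hw))
		return;			/* no manageability firmware */

	if (!e1000_enable_tx_pkt_filtering(hw))
		return;			/* host interface filtering is off */

	/* hand the DHCP payload to the host interface */
	e1000_mng_write_dhcp_info(hw, pkt, len);
}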
-+ **/ -+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, -+ struct e1000_host_mng_command_header *hdr) -+{ -+ return e1000_mng_write_cmd_header_generic(hw, hdr); -+} -+ -+/** -+ * e1000_mng_enable_host_if - Checks host interface is enabled -+ * @hw: pointer to the HW structure -+ * -+ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND -+ * -+ * This function checks whether the HOST IF is enabled for command operation -+ * and also checks whether the previous command is completed. It busy waits -+ * in case of previous command is not completed. -+ **/ -+s32 e1000_mng_enable_host_if(struct e1000_hw *hw) -+{ -+ return e1000_mng_enable_host_if_generic(hw); -+} -+ -+/** -+ * e1000_check_reset_block - Verifies PHY can be reset -+ * @hw: pointer to the HW structure -+ * -+ * Checks if the PHY is in a state that can be reset or if manageability -+ * has it tied up. This is a function pointer entry point called by drivers. -+ **/ -+s32 e1000_check_reset_block(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.check_reset_block) -+ return hw->phy.ops.check_reset_block(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_read_phy_reg - Reads PHY register -+ * @hw: pointer to the HW structure -+ * @offset: the register to read -+ * @data: the buffer to store the 16-bit read. -+ * -+ * Reads the PHY register and returns the value in data. -+ * This is a function pointer entry point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) -+{ -+ if (hw->phy.ops.read_reg) -+ return hw->phy.ops.read_reg(hw, offset, data); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_write_phy_reg - Writes PHY register -+ * @hw: pointer to the HW structure -+ * @offset: the register to write -+ * @data: the value to write. -+ * -+ * Writes the PHY register at offset with the value in data. -+ * This is a function pointer entry point called by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) -+{ -+ if (hw->phy.ops.write_reg) -+ return hw->phy.ops.write_reg(hw, offset, data); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_release_phy - Generic release PHY -+ * @hw: pointer to the HW structure -+ * -+ * Return if silicon family does not require a semaphore when accessing the -+ * PHY. -+ **/ -+void e1000_release_phy(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.release) -+ hw->phy.ops.release(hw); -+} -+ -+/** -+ * e1000_acquire_phy - Generic acquire PHY -+ * @hw: pointer to the HW structure -+ * -+ * Return success if silicon family does not require a semaphore when -+ * accessing the PHY. -+ **/ -+s32 e1000_acquire_phy(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.acquire) -+ return hw->phy.ops.acquire(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_kmrn_reg - Reads register using Kumeran interface -+ * @hw: pointer to the HW structure -+ * @offset: the register to read -+ * @data: the location to store the 16-bit value read. -+ * -+ * Reads a register out of the Kumeran interface. Currently no func pointer -+ * exists and all implementations are handled in the generic version of -+ * this function. 
-+ **/ -+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) -+{ -+ return e1000_read_kmrn_reg_generic(hw, offset, data); -+} -+ -+/** -+ * e1000_write_kmrn_reg - Writes register using Kumeran interface -+ * @hw: pointer to the HW structure -+ * @offset: the register to write -+ * @data: the value to write. -+ * -+ * Writes a register to the Kumeran interface. Currently no func pointer -+ * exists and all implementations are handled in the generic version of -+ * this function. -+ **/ -+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) -+{ -+ return e1000_write_kmrn_reg_generic(hw, offset, data); -+} -+ -+/** -+ * e1000_get_cable_length - Retrieves cable length estimation -+ * @hw: pointer to the HW structure -+ * -+ * This function estimates the cable length and stores them in -+ * hw->phy.min_length and hw->phy.max_length. This is a function pointer -+ * entry point called by drivers. -+ **/ -+s32 e1000_get_cable_length(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.get_cable_length) -+ return hw->phy.ops.get_cable_length(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_get_phy_info - Retrieves PHY information from registers -+ * @hw: pointer to the HW structure -+ * -+ * This function gets some information from various PHY registers and -+ * populates hw->phy values with it. This is a function pointer entry -+ * point called by drivers. -+ **/ -+s32 e1000_get_phy_info(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.get_info) -+ return hw->phy.ops.get_info(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_phy_hw_reset - Hard PHY reset -+ * @hw: pointer to the HW structure -+ * -+ * Performs a hard PHY reset. This is a function pointer entry point called -+ * by drivers. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.reset) -+ return hw->phy.ops.reset(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_phy_commit - Soft PHY reset -+ * @hw: pointer to the HW structure -+ * -+ * Performs a soft PHY reset on those that apply. This is a function pointer -+ * entry point called by drivers. -+ **/ -+s32 e1000_phy_commit(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.commit) -+ return hw->phy.ops.commit(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_set_d0_lplu_state - Sets low power link up state for D0 -+ * @hw: pointer to the HW structure -+ * @active: boolean used to enable/disable lplu -+ * -+ * Success returns 0, Failure returns 1 -+ * -+ * The low power link up (lplu) state is set to the power management level D0 -+ * and SmartSpeed is disabled when active is true, else clear lplu for D0 -+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU -+ * is used during Dx states where the power conservation is most important. -+ * During driver activity, SmartSpeed should be enabled so performance is -+ * maintained. This is a function pointer entry point called by drivers. 
-+ **/ -+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) -+{ -+ if (hw->phy.ops.set_d0_lplu_state) -+ return hw->phy.ops.set_d0_lplu_state(hw, active); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_set_d3_lplu_state - Sets low power link up state for D3 -+ * @hw: pointer to the HW structure -+ * @active: boolean used to enable/disable lplu -+ * -+ * Success returns 0, Failure returns 1 -+ * -+ * The low power link up (lplu) state is set to the power management level D3 -+ * and SmartSpeed is disabled when active is true, else clear lplu for D3 -+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU -+ * is used during Dx states where the power conservation is most important. -+ * During driver activity, SmartSpeed should be enabled so performance is -+ * maintained. This is a function pointer entry point called by drivers. -+ **/ -+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) -+{ -+ if (hw->phy.ops.set_d3_lplu_state) -+ return hw->phy.ops.set_d3_lplu_state(hw, active); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_read_mac_addr - Reads MAC address -+ * @hw: pointer to the HW structure -+ * -+ * Reads the MAC address out of the adapter and stores it in the HW structure. -+ * Currently no func pointer exists and all implementations are handled in the -+ * generic version of this function. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_read_mac_addr(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.read_mac_addr) -+ return hw->mac.ops.read_mac_addr(hw); -+ -+ return igb_e1000_read_mac_addr_generic(hw); -+} -+ -+/** -+ * e1000_read_pba_string - Read device part number string -+ * @hw: pointer to the HW structure -+ * @pba_num: pointer to device part number -+ * @pba_num_size: size of part number buffer -+ * -+ * Reads the product board assembly (PBA) number from the EEPROM and stores -+ * the value in pba_num. -+ * Currently no func pointer exists and all implementations are handled in the -+ * generic version of this function. -+ **/ -+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) -+{ -+ return igb_e1000_read_pba_string_generic(hw, pba_num, pba_num_size); -+} -+ -+/** -+ * e1000_read_pba_length - Read device part number string length -+ * @hw: pointer to the HW structure -+ * @pba_num_size: size of part number buffer -+ * -+ * Reads the product board assembly (PBA) number length from the EEPROM and -+ * stores the value in pba_num. -+ * Currently no func pointer exists and all implementations are handled in the -+ * generic version of this function. -+ **/ -+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) -+{ -+ return e1000_read_pba_length_generic(hw, pba_num_size); -+} -+ -+/** -+ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum -+ * @hw: pointer to the HW structure -+ * -+ * Validates the NVM checksum is correct. This is a function pointer entry -+ * point called by drivers. -+ **/ -+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) -+{ -+ if (hw->nvm.ops.validate) -+ return hw->nvm.ops.validate(hw); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum -+ * @hw: pointer to the HW structure -+ * -+ * Updates the NVM checksum. Currently no func pointer exists and all -+ * implementations are handled in the generic version of this function. 
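Editor's note, not part of the patch: a small sketch tying the NVM entry points above together -- verify the EEPROM checksum, then pull the MAC address and board part number out of it. example_read_ids_from_nvm() and the 32-byte PBA buffer size are arbitrary choices made for the sketch.

/* illustrative only */
static s32 example_read_ids_from_nvm(struct e1000_hw *hw)
{
	u8 pba[32];	/* buffer size picked arbitrarily for the sketch */
	s32 ret;

	ret = e1000_validate_nvm_checksum(hw);
	if (ret)
		return ret;

	ret = igb_e1000_read_mac_addr(hw);	/* fills hw->mac.addr */
	if (ret)
		return ret;

	return e1000_read_pba_string(hw, pba, sizeof(pba));
}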
-+ **/ -+s32 e1000_update_nvm_checksum(struct e1000_hw *hw) -+{ -+ if (hw->nvm.ops.update) -+ return hw->nvm.ops.update(hw); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * e1000_reload_nvm - Reloads EEPROM -+ * @hw: pointer to the HW structure -+ * -+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the -+ * extended control register. -+ **/ -+void e1000_reload_nvm(struct e1000_hw *hw) -+{ -+ if (hw->nvm.ops.reload) -+ hw->nvm.ops.reload(hw); -+} -+ -+/** -+ * e1000_read_nvm - Reads NVM (EEPROM) -+ * @hw: pointer to the HW structure -+ * @offset: the word offset to read -+ * @words: number of 16-bit words to read -+ * @data: pointer to the properly sized buffer for the data. -+ * -+ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function -+ * pointer entry point called by drivers. -+ **/ -+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -+{ -+ if (hw->nvm.ops.read) -+ return hw->nvm.ops.read(hw, offset, words, data); -+ -+ return -E1000_ERR_CONFIG; -+} -+ -+/** -+ * e1000_write_nvm - Writes to NVM (EEPROM) -+ * @hw: pointer to the HW structure -+ * @offset: the word offset to read -+ * @words: number of 16-bit words to write -+ * @data: pointer to the properly sized buffer for the data. -+ * -+ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function -+ * pointer entry point called by drivers. -+ **/ -+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -+{ -+ if (hw->nvm.ops.write) -+ return hw->nvm.ops.write(hw, offset, words, data); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register -+ * @hw: pointer to the HW structure -+ * @reg: 32bit register offset -+ * @offset: the register to write -+ * @data: the value to write. -+ * -+ * Writes the PHY register at offset with the value in data. -+ * This is a function pointer entry point called by drivers. -+ **/ -+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, -+ u8 data) -+{ -+ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); -+} -+ -+/** -+ * igb_e1000_power_up_phy - Restores link in case of PHY power down -+ * @hw: pointer to the HW structure -+ * -+ * The phy may be powered down to save power, to turn off link when the -+ * driver is unloaded, or wake on lan is not enabled (among others). -+ **/ -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_power_up_phy(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.power_up) -+ hw->phy.ops.power_up(hw); -+ -+ igb_e1000_setup_link(hw); -+} -+ -+/** -+ * e1000_power_down_phy - Power down PHY -+ * @hw: pointer to the HW structure -+ * -+ * The phy may be powered down to save power, to turn off link when the -+ * driver is unloaded, or wake on lan is not enabled (among others). -+ **/ -+void e1000_power_down_phy(struct e1000_hw *hw) -+{ -+ if (hw->phy.ops.power_down) -+ hw->phy.ops.power_down(hw); -+} -+ -+/** -+ * e1000_power_up_fiber_serdes_link - Power up serdes link -+ * @hw: pointer to the HW structure -+ * -+ * Power on the optics and PCS. -+ **/ -+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.power_up_serdes) -+ hw->mac.ops.power_up_serdes(hw); -+} -+ -+/** -+ * e1000_shutdown_fiber_serdes_link - Remove link during power down -+ * @hw: pointer to the HW structure -+ * -+ * Shutdown the optics and PCS on driver unload. 
-+ **/ -+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.shutdown_serdes) -+ hw->mac.ops.shutdown_serdes(hw); -+} -+ -+/** -+ * e1000_get_thermal_sensor_data - Gathers thermal sensor data -+ * @hw: pointer to hardware structure -+ * -+ * Updates the temperatures in mac.thermal_sensor_data -+ **/ -+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.get_thermal_sensor_data) -+ return hw->mac.ops.get_thermal_sensor_data(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds -+ * @hw: pointer to hardware structure -+ * -+ * Sets the thermal sensor thresholds according to the NVM map -+ **/ -+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw) -+{ -+ if (hw->mac.ops.init_thermal_sensor_thresh) -+ return hw->mac.ops.init_thermal_sensor_thresh(hw); -+ -+ return E1000_SUCCESS; -+} -+ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.h b/drivers/net/ethernet/intel/igb/e1000_api.h ---- a/drivers/net/ethernet/intel/igb/e1000_api.h 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_api.h 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,152 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#ifndef _E1000_API_H_ -+#define _E1000_API_H_ -+ -+#include "e1000_hw.h" -+ -+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); -+extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); -+extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); -+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); -+extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); -+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); -+ -+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); -+s32 igb_e1000_set_mac_type(struct e1000_hw *hw); -+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); -+s32 e1000_init_mac_params(struct e1000_hw *hw); -+s32 e1000_init_nvm_params(struct e1000_hw *hw); -+s32 e1000_init_phy_params(struct e1000_hw *hw); -+s32 e1000_init_mbx_params(struct e1000_hw *hw); -+s32 igb_e1000_get_bus_info(struct e1000_hw *hw); -+void e1000_clear_vfta(struct e1000_hw *hw); -+void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); -+s32 igb_e1000_force_mac_fc(struct e1000_hw *hw); -+s32 igb_e1000_check_for_link(struct e1000_hw *hw); -+s32 igb_e1000_reset_hw(struct e1000_hw *hw); -+s32 igb_e1000_init_hw(struct e1000_hw *hw); -+s32 igb_e1000_setup_link(struct e1000_hw *hw); -+s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); -+s32 e1000_disable_pcie_master(struct e1000_hw *hw); -+void igb_e1000_config_collision_dist(struct e1000_hw *hw); -+int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); -+u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); -+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, -+ u32 mc_addr_count); -+s32 igb_e1000_setup_led(struct e1000_hw *hw); -+s32 igb_e1000_cleanup_led(struct e1000_hw *hw); -+s32 e1000_check_reset_block(struct e1000_hw *hw); -+s32 e1000_blink_led(struct e1000_hw *hw); -+s32 igb_e1000_led_on(struct e1000_hw *hw); -+s32 igb_e1000_led_off(struct e1000_hw *hw); -+s32 e1000_id_led_init(struct e1000_hw *hw); -+void igb_e1000_reset_adaptive(struct e1000_hw *hw); -+void igb_e1000_update_adaptive(struct e1000_hw *hw); -+s32 e1000_get_cable_length(struct e1000_hw *hw); -+s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw); -+s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, -+ u8 data); -+s32 e1000_get_phy_info(struct e1000_hw *hw); -+void e1000_release_phy(struct e1000_hw *hw); -+s32 e1000_acquire_phy(struct e1000_hw *hw); -+s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw); -+s32 e1000_phy_commit(struct e1000_hw *hw); -+void igb_e1000_power_up_phy(struct e1000_hw *hw); -+void e1000_power_down_phy(struct e1000_hw *hw); -+s32 igb_e1000_read_mac_addr(struct e1000_hw *hw); -+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); -+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); -+void e1000_reload_nvm(struct e1000_hw *hw); -+s32 e1000_update_nvm_checksum(struct e1000_hw *hw); -+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); -+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); -+s32 
e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); -+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); -+bool e1000_check_mng_mode(struct e1000_hw *hw); -+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); -+s32 e1000_mng_enable_host_if(struct e1000_hw *hw); -+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, -+ u16 offset, u8 *sum); -+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, -+ struct e1000_host_mng_command_header *hdr); -+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); -+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw); -+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw); -+ -+/* -+ * TBI_ACCEPT macro definition: -+ * -+ * This macro requires: -+ * a = a pointer to struct e1000_hw -+ * status = the 8 bit status field of the Rx descriptor with EOP set -+ * errors = the 8 bit error field of the Rx descriptor with EOP set -+ * length = the sum of all the length fields of the Rx descriptors that -+ * make up the current frame -+ * last_byte = the last byte of the frame DMAed by the hardware -+ * min_frame_size = the minimum frame length we want to accept. -+ * max_frame_size = the maximum frame length we want to accept. -+ * -+ * This macro is a conditional that should be used in the interrupt -+ * handler's Rx processing routine when RxErrors have been detected. -+ * -+ * Typical use: -+ * ... -+ * if (TBI_ACCEPT) { -+ * accept_frame = true; -+ * e1000_tbi_adjust_stats(adapter, MacAddress); -+ * frame_length--; -+ * } else { -+ * accept_frame = false; -+ * } -+ * ... -+ */ -+ -+/* The carrier extension symbol, as received by the NIC. */ -+#define CARRIER_EXTENSION 0x0F -+ -+#define TBI_ACCEPT(a, status, errors, length, last_byte, \ -+ min_frame_size, max_frame_size) \ -+ (e1000_tbi_sbp_enabled_82543(a) && \ -+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ -+ ((last_byte) == CARRIER_EXTENSION) && \ -+ (((status) & E1000_RXD_STAT_VP) ? \ -+ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ -+ ((length) <= ((max_frame_size) + 1))) : \ -+ (((length) > (min_frame_size)) && \ -+ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) -+ -+#ifndef E1000_MAX -+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b)) -+#endif -+#ifndef E1000_DIVIDE_ROUND_UP -+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ -+#endif -+#endif /* _E1000_API_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h ---- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,25 +1,26 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . 
-- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_DEFINES_H_ - #define _E1000_DEFINES_H_ -@@ -30,38 +31,55 @@ - - /* Definitions for power management and wakeup registers */ - /* Wake Up Control */ --#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ -+#define E1000_WUC_APME 0x00000001 /* APM Enable */ -+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ -+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ -+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ -+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ - - /* Wake Up Filter Control */ --#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ --#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ --#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ --#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ --#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ -+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ -+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ -+ -+/* Wake Up Status */ -+#define E1000_WUS_LNKC E1000_WUFC_LNKC -+#define E1000_WUS_MAG E1000_WUFC_MAG -+#define E1000_WUS_EX E1000_WUFC_EX -+#define E1000_WUS_MC E1000_WUFC_MC -+#define E1000_WUS_BC E1000_WUFC_BC - - /* Extended Device Control */ --#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ --#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ --#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ --#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ -- -+#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ -+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ -+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ -+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable 
Pin 3 data */ -+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ -+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ -+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ -+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ - /* Physical Func Reset Done Indication */ - #define E1000_CTRL_EXT_PFRSTD 0x00004000 - #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ -+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ -+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ - #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 --#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -+/* Offset of the link mode field in Ctrl Ext register */ -+#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 - #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 --#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 - #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 --#define E1000_CTRL_EXT_EIAME 0x01000000 -+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -+#define E1000_CTRL_EXT_EIAME 0x01000000 - #define E1000_CTRL_EXT_IRCA 0x00000001 --/* Interrupt delay cancellation */ --/* Driver loaded bit for FW */ --#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 --/* Interrupt acknowledge Auto-mask */ --/* Clear Interrupt timers after IMS clear */ --/* packet buffer parity error detection enabled */ --/* descriptor FIFO parity error detection enable */ -+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ -+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ - #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ - #define E1000_CTRL_EXT_PHYPDEN 0x00100000 - #define E1000_I2CCMD_REG_ADDR_SHIFT 16 -@@ -74,322 +92,446 @@ - #define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) - #define E1000_MAX_SGMII_PHY_REG_ADDR 255 - #define E1000_I2CCMD_PHY_TIMEOUT 200 --#define E1000_IVAR_VALID 0x80 --#define E1000_GPIE_NSICR 0x00000001 --#define E1000_GPIE_MSIX_MODE 0x00000010 --#define E1000_GPIE_EIAME 0x40000000 --#define E1000_GPIE_PBA 0x80000000 -+#define E1000_IVAR_VALID 0x80 -+#define E1000_GPIE_NSICR 0x00000001 -+#define E1000_GPIE_MSIX_MODE 0x00000010 -+#define E1000_GPIE_EIAME 0x40000000 -+#define E1000_GPIE_PBA 0x80000000 - - /* Receive Descriptor bit definitions */ --#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ --#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ --#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ --#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ --#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ --#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ --#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ -- --#define E1000_RXDEXT_STATERR_LB 0x00040000 --#define E1000_RXDEXT_STATERR_CE 0x01000000 --#define E1000_RXDEXT_STATERR_SE 0x02000000 --#define E1000_RXDEXT_STATERR_SEQ 0x04000000 --#define E1000_RXDEXT_STATERR_CXE 0x10000000 --#define E1000_RXDEXT_STATERR_TCPE 0x20000000 --#define E1000_RXDEXT_STATERR_IPE 0x40000000 --#define E1000_RXDEXT_STATERR_RXE 0x80000000 -+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -+#define 
E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ -+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -+#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ -+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ -+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ -+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ -+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ -+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ -+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -+ -+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ -+#define E1000_RXDEXT_STATERR_LB 0x00040000 -+#define E1000_RXDEXT_STATERR_CE 0x01000000 -+#define E1000_RXDEXT_STATERR_SE 0x02000000 -+#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -+#define E1000_RXDEXT_STATERR_CXE 0x10000000 -+#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -+#define E1000_RXDEXT_STATERR_IPE 0x40000000 -+#define E1000_RXDEXT_STATERR_RXE 0x80000000 -+ -+/* mask to determine if packets should be dropped due to frame errors */ -+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ -+ E1000_RXD_ERR_CE | \ -+ E1000_RXD_ERR_SE | \ -+ E1000_RXD_ERR_SEQ | \ -+ E1000_RXD_ERR_CXE | \ -+ E1000_RXD_ERR_RXE) - - /* Same mask, but for extended and packet split descriptors */ - #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ -- E1000_RXDEXT_STATERR_CE | \ -- E1000_RXDEXT_STATERR_SE | \ -- E1000_RXDEXT_STATERR_SEQ | \ -- E1000_RXDEXT_STATERR_CXE | \ -+ E1000_RXDEXT_STATERR_CE | \ -+ E1000_RXDEXT_STATERR_SE | \ -+ E1000_RXDEXT_STATERR_SEQ | \ -+ E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - --#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 --#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 --#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 --#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 --#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 -+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 -+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 -+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 -+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 -+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 - -+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 - - /* Management Control */ --#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ --#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ --#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ --/* Enable Neighbor Discovery Filtering */ --#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ --#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ -+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ -+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ -+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ -+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ -+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ - /* Enable MAC address filtering */ --#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 -+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 -+/* Enable MNG packets to host memory */ -+#define E1000_MANC_EN_MNG2HOST 0x00200000 -+ -+#define E1000_MANC2H_PORT_623 0x00000020 
/* Port 0x26f */ -+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ -+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ -+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ - - /* Receive Control */ --#define E1000_RCTL_EN 0x00000002 /* enable */ --#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ --#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ --#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ --#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ --#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ --#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ --#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ --#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ --#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ --#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ --#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ --#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ --#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ --#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ --#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ --#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ -+#define E1000_RCTL_RST 0x00000001 /* Software reset */ -+#define E1000_RCTL_EN 0x00000002 /* enable */ -+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -+#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ -+#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ -+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ -+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ -+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ -+#define E1000_RCTL_RDMTS_HEX 0x00010000 -+#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX -+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ -+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ -+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ -+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ -+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ -+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ -+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ -+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ -+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ -+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ -+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ -+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ -+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ -+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ -+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ -+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ -+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ -+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ - - /* Use byte values for the following shift parameters - * Usage: - * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & -- * E1000_PSRCTL_BSIZE0_MASK) | -- * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & -- * 
E1000_PSRCTL_BSIZE1_MASK) | -- * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & -- * E1000_PSRCTL_BSIZE2_MASK) | -- * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; -- * E1000_PSRCTL_BSIZE3_MASK)) -+ * E1000_PSRCTL_BSIZE0_MASK) | -+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & -+ * E1000_PSRCTL_BSIZE1_MASK) | -+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & -+ * E1000_PSRCTL_BSIZE2_MASK) | -+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; -+ * E1000_PSRCTL_BSIZE3_MASK)) - * where value0 = [128..16256], default=256 - * value1 = [1024..64512], default=4096 - * value2 = [0..64512], default=4096 - * value3 = [0..64512], default=0 - */ - --#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F --#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 --#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 --#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 -- --#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ --#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ --#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ --#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ -+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F -+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 -+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 -+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 -+ -+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ -+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ -+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ -+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ - - /* SWFW_SYNC Definitions */ --#define E1000_SWFW_EEP_SM 0x1 --#define E1000_SWFW_PHY0_SM 0x2 --#define E1000_SWFW_PHY1_SM 0x4 --#define E1000_SWFW_PHY2_SM 0x20 --#define E1000_SWFW_PHY3_SM 0x40 -+#define E1000_SWFW_EEP_SM 0x01 -+#define E1000_SWFW_PHY0_SM 0x02 -+#define E1000_SWFW_PHY1_SM 0x04 -+#define E1000_SWFW_CSR_SM 0x08 -+#define E1000_SWFW_PHY2_SM 0x20 -+#define E1000_SWFW_PHY3_SM 0x40 -+#define E1000_SWFW_SW_MNG_SM 0x400 - --/* FACTPS Definitions */ - /* Device Control */ --#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ --#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ --#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ --#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ --#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ --#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ --#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ --#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ --#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ --#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ --#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ --/* Defined polarity of Dock/Undock indication in SDP[0] */ --/* Reset both PHY ports, through PHYRST_N pin */ --/* enable link status from external LINK_0 and LINK_1 pins */ --#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ --#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ --#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ --#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ --#define E1000_CTRL_RST 0x04000000 /* Global reset */ --#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ --#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ --#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ --#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ --/* Initiate an interrupt to manageability engine */ --#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ -- --/* Bit definitions for the Management Data IO (MDIO) and Management Data -- * Clock (MDC) pins in the Device Control Register. -- */ -+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ -+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ -+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ -+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ -+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ -+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ -+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ -+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ -+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -+#define E1000_CTRL_RST 0x04000000 /* Global reset */ -+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ - --#define E1000_CONNSW_ENRGSRC 0x4 -+#define E1000_CONNSW_ENRGSRC 0x4 - #define E1000_CONNSW_PHYSD 0x400 - #define E1000_CONNSW_PHY_PDN 0x800 - #define E1000_CONNSW_SERDESD 0x200 - #define E1000_CONNSW_AUTOSENSE_CONF 0x2 - #define E1000_CONNSW_AUTOSENSE_EN 0x1 --#define E1000_PCS_CFG_PCS_EN 8 --#define E1000_PCS_LCTL_FLV_LINK_UP 1 --#define E1000_PCS_LCTL_FSV_100 2 --#define E1000_PCS_LCTL_FSV_1000 4 --#define E1000_PCS_LCTL_FDV_FULL 8 --#define E1000_PCS_LCTL_FSD 0x10 --#define E1000_PCS_LCTL_FORCE_LINK 0x20 --#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 --#define E1000_PCS_LCTL_AN_ENABLE 0x10000 --#define E1000_PCS_LCTL_AN_RESTART 0x20000 --#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 --#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 -- --#define E1000_PCS_LSTS_LINK_OK 1 --#define E1000_PCS_LSTS_SPEED_100 2 --#define E1000_PCS_LSTS_SPEED_1000 4 --#define E1000_PCS_LSTS_DUPLEX_FULL 8 --#define E1000_PCS_LSTS_SYNK_OK 0x10 -+#define E1000_PCS_CFG_PCS_EN 8 -+#define E1000_PCS_LCTL_FLV_LINK_UP 1 -+#define E1000_PCS_LCTL_FSV_10 0 -+#define E1000_PCS_LCTL_FSV_100 2 -+#define E1000_PCS_LCTL_FSV_1000 4 -+#define E1000_PCS_LCTL_FDV_FULL 8 -+#define E1000_PCS_LCTL_FSD 0x10 -+#define E1000_PCS_LCTL_FORCE_LINK 0x20 -+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 -+#define E1000_PCS_LCTL_AN_ENABLE 0x10000 -+#define E1000_PCS_LCTL_AN_RESTART 0x20000 -+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 -+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 -+ -+#define E1000_PCS_LSTS_LINK_OK 1 -+#define E1000_PCS_LSTS_SPEED_100 2 -+#define E1000_PCS_LSTS_SPEED_1000 4 -+#define E1000_PCS_LSTS_DUPLEX_FULL 8 -+#define E1000_PCS_LSTS_SYNK_OK 0x10 -+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 - - /* Device Status */ --#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ --#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ --#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ --#define E1000_STATUS_FUNC_SHIFT 2 --#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ --#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ --#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ --#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ --/* Change in Dock/Undock state. Clear on write '0'. 
*/ --/* Status of Master requests. */ --#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 --/* BMC external code execution disabled */ -- -+#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ -+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ -+#define E1000_STATUS_FUNC_SHIFT 2 -+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ -+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -+#define E1000_STATUS_SPEED_MASK 0x000000C0 -+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ -+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ -+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ -+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ - #define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ - #define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ --/* Constants used to intrepret the masked PCI-X bus speed. */ - --#define SPEED_10 10 --#define SPEED_100 100 --#define SPEED_1000 1000 --#define SPEED_2500 2500 --#define HALF_DUPLEX 1 --#define FULL_DUPLEX 2 -- -- --#define ADVERTISE_10_HALF 0x0001 --#define ADVERTISE_10_FULL 0x0002 --#define ADVERTISE_100_HALF 0x0004 --#define ADVERTISE_100_FULL 0x0008 --#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ --#define ADVERTISE_1000_FULL 0x0020 -+#define SPEED_10 10 -+#define SPEED_100 100 -+#define SPEED_1000 1000 -+#define SPEED_2500 2500 -+#define HALF_DUPLEX 1 -+#define FULL_DUPLEX 2 -+ -+#define ADVERTISE_10_HALF 0x0001 -+#define ADVERTISE_10_FULL 0x0002 -+#define ADVERTISE_100_HALF 0x0004 -+#define ADVERTISE_100_FULL 0x0008 -+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ -+#define ADVERTISE_1000_FULL 0x0020 - - /* 1000/H is not supported, nor spec-compliant. 
*/ --#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ -- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ -- ADVERTISE_1000_FULL) --#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ -- ADVERTISE_100_HALF | ADVERTISE_100_FULL) --#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) --#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) --#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ -- ADVERTISE_1000_FULL) --#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) -+#define E1000_ALL_SPEED_DUPLEX ( \ -+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ -+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL) -+#define E1000_ALL_NOT_GIG ( \ -+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ -+ ADVERTISE_100_FULL) -+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) -+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) -+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) - --#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX -+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX - - /* LED Control */ --#define E1000_LEDCTL_LED0_MODE_SHIFT 0 --#define E1000_LEDCTL_LED0_BLINK 0x00000080 - #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F -+#define E1000_LEDCTL_LED0_MODE_SHIFT 0 - #define E1000_LEDCTL_LED0_IVRT 0x00000040 -+#define E1000_LEDCTL_LED0_BLINK 0x00000080 - --#define E1000_LEDCTL_MODE_LED_ON 0xE --#define E1000_LEDCTL_MODE_LED_OFF 0xF -+#define E1000_LEDCTL_MODE_LED_ON 0xE -+#define E1000_LEDCTL_MODE_LED_OFF 0xF - - /* Transmit Descriptor bit definitions */ --#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ --#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ --#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ --#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ --#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ --#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ --#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ --/* Extended desc bits for Linksec and timesync */ -+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ -+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ -+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ -+#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ -+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ -+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ -+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ -+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ -+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ -+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ -+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ - - /* Transmit Control */ --#define 
E1000_TCTL_EN 0x00000002 /* enable tx */ --#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ --#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ --#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ --#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ -- --/* DMA Coalescing register fields */ --#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ --#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ --#define E1000_DMACR_DMACTHR_SHIFT 16 --#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ --#define E1000_DMACR_DMAC_LX_SHIFT 28 --#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ --/* DMA Coalescing BMC-to-OS Watchdog Enable */ --#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 -- --#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ -- --#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ -- --#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ --#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ -+#define E1000_TCTL_EN 0x00000002 /* enable Tx */ -+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ -+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ -+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ -+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ -+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ - --#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ -- --#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ --#define E1000_FCRTC_RTH_COAL_SHIFT 4 --#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ -- --/* Timestamp in Rx buffer */ --#define E1000_RXPBS_CFG_TS_EN 0x80000000 -- --#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ --#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ -+/* Transmit Arbitration Count */ -+#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ - - /* SerDes Control */ --#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 -+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 -+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 - - /* Receive Checksum Control */ --#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ --#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ --#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ --#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ -+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ -+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ -+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ -+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ -+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ - - /* Header split receive */ --#define E1000_RFCTL_LEF 0x00040000 -+#define E1000_RFCTL_NFSW_DIS 0x00000040 -+#define E1000_RFCTL_NFSR_DIS 0x00000080 -+#define E1000_RFCTL_ACK_DIS 0x00001000 -+#define E1000_RFCTL_EXTEN 0x00008000 -+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 -+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 -+#define E1000_RFCTL_LEF 0x00040000 - - /* Collision related configuration parameters */ --#define E1000_COLLISION_THRESHOLD 15 --#define E1000_CT_SHIFT 4 --#define E1000_COLLISION_DISTANCE 63 --#define E1000_COLD_SHIFT 12 -+#define E1000_COLLISION_THRESHOLD 15 -+#define E1000_CT_SHIFT 4 -+#define 
E1000_COLLISION_DISTANCE 63 -+#define E1000_COLD_SHIFT 12 -+ -+/* Default values for the transmit IPG register */ -+#define DEFAULT_82543_TIPG_IPGT_FIBER 9 -+#define DEFAULT_82543_TIPG_IPGT_COPPER 8 -+ -+#define E1000_TIPG_IPGT_MASK 0x000003FF -+ -+#define DEFAULT_82543_TIPG_IPGR1 8 -+#define E1000_TIPG_IPGR1_SHIFT 10 -+ -+#define DEFAULT_82543_TIPG_IPGR2 6 -+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 -+#define E1000_TIPG_IPGR2_SHIFT 20 - - /* Ethertype field values */ --#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ -+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ -+ -+#define ETHERNET_FCS_SIZE 4 -+#define MAX_JUMBO_FRAME_SIZE 0x3F00 -+/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ -+#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 -+#define E1000_TX_PTR_GAP 0x1F -+ -+/* Extended Configuration Control and Size */ -+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 -+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 -+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 -+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 -+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 -+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 -+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 -+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 -+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 -+ -+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 -+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 -+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 -+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 - --#define MAX_JUMBO_FRAME_SIZE 0x3F00 -+#define E1000_KABGTXD_BGSQLBIAS 0x00050000 - - /* PBA constants */ --#define E1000_PBA_34K 0x0022 --#define E1000_PBA_64K 0x0040 /* 64KB */ -+#define E1000_PBA_8K 0x0008 /* 8KB */ -+#define E1000_PBA_10K 0x000A /* 10KB */ -+#define E1000_PBA_12K 0x000C /* 12KB */ -+#define E1000_PBA_14K 0x000E /* 14KB */ -+#define E1000_PBA_16K 0x0010 /* 16KB */ -+#define E1000_PBA_18K 0x0012 -+#define E1000_PBA_20K 0x0014 -+#define E1000_PBA_22K 0x0016 -+#define E1000_PBA_24K 0x0018 -+#define E1000_PBA_26K 0x001A -+#define E1000_PBA_30K 0x001E -+#define E1000_PBA_32K 0x0020 -+#define E1000_PBA_34K 0x0022 -+#define E1000_PBA_35K 0x0023 -+#define E1000_PBA_38K 0x0026 -+#define E1000_PBA_40K 0x0028 -+#define E1000_PBA_48K 0x0030 /* 48KB */ -+#define E1000_PBA_64K 0x0040 /* 64KB */ -+ -+#define E1000_PBA_RXA_MASK 0xFFFF -+ -+#define E1000_PBS_16K E1000_PBA_16K -+ -+/* Uncorrectable/correctable ECC Error counts and enable bits */ -+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF -+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 -+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 -+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 -+ -+#define IFS_MAX 80 -+#define IFS_MIN 40 -+#define IFS_RATIO 4 -+#define IFS_STEP 10 -+#define MIN_NUM_XMITS 1000 - - /* SW Semaphore Register */ --#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ --#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ -+ -+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ - - /* Interrupt Cause Read */ --#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ --#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ --#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ --#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ --#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ --#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ --#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ --#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ -+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ -+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ -+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ -+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ -+#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ -+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ -+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ -+#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ -+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ -+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ -+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ -+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ -+#define E1000_ICR_TXD_LOW 0x00008000 -+#define E1000_ICR_MNG 0x00040000 /* Manageability event */ -+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ -+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ -+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ - /* If this bit asserted, the driver should claim the interrupt */ --#define E1000_ICR_INT_ASSERTED 0x80000000 --/* LAN connected device generates an interrupt */ --#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ -+#define E1000_ICR_INT_ASSERTED 0x80000000 -+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ -+#define E1000_ICR_FER 0x00400000 /* Fatal Error */ -+ -+#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ -+#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ - - /* Extended Interrupt Cause Read */ --#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ --#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ --#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ --#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ --#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ --#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ --#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ --#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ --#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ -+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ -+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ -+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ -+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ -+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ -+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ -+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ -+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ -+#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ -+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ - /* TCP Timer */ -+#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ -+#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ -+#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ -+#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ - - /* This defines the bits that are set in the Interrupt Mask - * Set/Read Register. 
Each bit is documented below: -@@ -404,194 +546,207 @@ - E1000_IMS_TXDW | \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ | \ -- E1000_IMS_LSC | \ -- E1000_IMS_DOUTSYNC) -+ E1000_IMS_LSC) - - /* Interrupt Mask Set */ --#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ --#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ --#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ --#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ --#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ --#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ --#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ --#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ --#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ -+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ -+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ -+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ -+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ -+#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ -+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ -+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW -+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ -+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ -+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ -+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ -+#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ - -+#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ -+#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ - /* Extended Interrupt Mask Set */ --#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ -+#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ -+#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ -+#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ -+#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ -+#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ -+#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ -+#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ -+#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ -+#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ -+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ - - /* Interrupt Cause Set */ --#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ --#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ --#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ -+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ -+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ - - /* Extended Interrupt Cause Set */ --/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ --#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ -+#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ -+#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ -+#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ -+#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ -+#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ -+#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ -+#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ -+#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ -+#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ -+#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ - -+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF -+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ -+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ -+#define E1000_EITR_INTERVAL 0x00007FFC - - /* Transmit Descriptor Control */ -+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ -+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ -+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ -+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ -+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ -+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ - /* Enable the counting of descriptors still to be processed. */ -+#define E1000_TXDCTL_COUNT_DESC 0x00400000 - - /* Flow Control Constants */ --#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 --#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 --#define FLOW_CONTROL_TYPE 0x8808 -- --/* Transmit Config Word */ --#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ --#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ -+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 -+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 -+#define FLOW_CONTROL_TYPE 0x8808 - - /* 802.1q VLAN Packet Size */ --#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ --#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ -+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ -+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ - --/* Receive Address */ --/* Number of high/low register pairs in the RAR. The RAR (Receive Address -+/* Receive Address -+ * Number of high/low register pairs in the RAR. The RAR (Receive Address - * Registers) holds the directed and multicast addresses that we monitor. - * Technically, we have 16 spots. However, we reserve one of these spots - * (RAR[15]) for our directed address used by controllers with - * manageability enabled, allowing us room for 15 multicast addresses. 
- */ --#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ --#define E1000_RAL_MAC_ADDR_LEN 4 --#define E1000_RAH_MAC_ADDR_LEN 2 --#define E1000_RAH_POOL_MASK 0x03FC0000 --#define E1000_RAH_POOL_1 0x00040000 -+#define E1000_RAR_ENTRIES 15 -+#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ -+#define E1000_RAL_MAC_ADDR_LEN 4 -+#define E1000_RAH_MAC_ADDR_LEN 2 -+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 -+#define E1000_RAH_POOL_1 0x00040000 - - /* Error Codes */ --#define E1000_ERR_NVM 1 --#define E1000_ERR_PHY 2 --#define E1000_ERR_CONFIG 3 --#define E1000_ERR_PARAM 4 --#define E1000_ERR_MAC_INIT 5 --#define E1000_ERR_RESET 9 --#define E1000_ERR_MASTER_REQUESTS_PENDING 10 --#define E1000_BLK_PHY_RESET 12 --#define E1000_ERR_SWFW_SYNC 13 --#define E1000_NOT_IMPLEMENTED 14 --#define E1000_ERR_MBX 15 --#define E1000_ERR_INVALID_ARGUMENT 16 --#define E1000_ERR_NO_SPACE 17 --#define E1000_ERR_NVM_PBA_SECTION 18 --#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 --#define E1000_ERR_I2C 20 -+#define E1000_SUCCESS 0 -+#define E1000_ERR_NVM 1 -+#define E1000_ERR_PHY 2 -+#define E1000_ERR_CONFIG 3 -+#define E1000_ERR_PARAM 4 -+#define E1000_ERR_MAC_INIT 5 -+#define E1000_ERR_PHY_TYPE 6 -+#define E1000_ERR_RESET 9 -+#define E1000_ERR_MASTER_REQUESTS_PENDING 10 -+#define E1000_ERR_HOST_INTERFACE_COMMAND 11 -+#define E1000_BLK_PHY_RESET 12 -+#define E1000_ERR_SWFW_SYNC 13 -+#define E1000_NOT_IMPLEMENTED 14 -+#define E1000_ERR_MBX 15 -+#define E1000_ERR_INVALID_ARGUMENT 16 -+#define E1000_ERR_NO_SPACE 17 -+#define E1000_ERR_NVM_PBA_SECTION 18 -+#define E1000_ERR_I2C 19 -+#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 - - /* Loop limit on how long we wait for auto-negotiation to complete */ --#define COPPER_LINK_UP_LIMIT 10 --#define PHY_AUTO_NEG_LIMIT 45 --#define PHY_FORCE_LIMIT 20 -+#define FIBER_LINK_UP_LIMIT 50 -+#define COPPER_LINK_UP_LIMIT 10 -+#define PHY_AUTO_NEG_LIMIT 45 -+#define PHY_FORCE_LIMIT 20 - /* Number of 100 microseconds we wait for PCI Express master disable */ --#define MASTER_DISABLE_TIMEOUT 800 -+#define MASTER_DISABLE_TIMEOUT 800 - /* Number of milliseconds we wait for PHY configuration done after MAC reset */ --#define PHY_CFG_TIMEOUT 100 -+#define PHY_CFG_TIMEOUT 100 - /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ -+#define MDIO_OWNERSHIP_TIMEOUT 10 - /* Number of milliseconds for NVM auto read done after MAC reset. 
*/ --#define AUTO_READ_DONE_TIMEOUT 10 -+#define AUTO_READ_DONE_TIMEOUT 10 - - /* Flow Control */ --#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ -+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ -+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ -+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ - --#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ --#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ -+/* Transmit Configuration Word */ -+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ -+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ -+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ -+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ -+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ - --#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ --#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ --#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 --#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 --#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 --#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 --#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A --#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ -- --#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF --#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 --#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 --#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 --#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 --#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 -- --#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 --#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 --#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 --#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 --#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 --#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 --#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 --#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 --#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 --#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 --#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 -- --#define E1000_TIMINCA_16NS_SHIFT 24 -- --/* Time Sync Interrupt Cause/Mask Register Bits */ -- --#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ --#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ --#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ --#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ --#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ --#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ --#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ --#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ -- --#define TSYNC_INTERRUPTS TSINTR_TXTS --#define E1000_TSICR_TXTS TSINTR_TXTS -- --/* TSAUXC Configuration Bits */ --#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ --#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ --#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ --#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ --#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ --#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ --#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. 
*/ --#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ --#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ --#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ --#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ --#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ --#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ --#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ -- --/* SDP Configuration Bits */ --#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ --#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ --#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ --#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ --#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ --#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ --#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ --#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ --#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ --#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ --#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ --#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ --#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ --#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ --#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ --#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ --#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ --#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ --#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ --#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ --#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ --#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ --#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ --#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ --#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ --#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ --#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ --#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ --#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ --#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. 
*/ -- --#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ --#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ --#define E1000_MDICNFG_PHY_MASK 0x03E00000 --#define E1000_MDICNFG_PHY_SHIFT 21 -+/* Receive Configuration Word */ -+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ -+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ -+#define E1000_RXCW_C 0x20000000 /* Receive config */ -+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ -+ -+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ -+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ -+ -+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ -+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ -+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 -+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 -+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 -+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 -+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A -+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ -+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ -+ -+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF -+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 -+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 -+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 -+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 -+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 -+ -+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 -+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 -+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 -+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 -+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 -+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 -+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 -+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 -+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 -+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 -+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 -+ -+#define E1000_TIMINCA_16NS_SHIFT 24 -+#define E1000_TIMINCA_INCPERIOD_SHIFT 24 -+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF -+ -+#define E1000_TSICR_TXTS 0x00000002 -+#define E1000_TSIM_TXTS 0x00000002 -+/* TUPLE Filtering Configuration */ -+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ -+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ -+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ -+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ -+#define E1000_TTQF_PROTOCOL_TCP 0x0 -+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ -+#define E1000_TTQF_PROTOCOL_UDP 0x1 -+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ -+#define E1000_TTQF_PROTOCOL_SCTP 0x2 -+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ -+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ -+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ -+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ -+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ -+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ -+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ -+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ -+ -+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ -+#define E1000_MDICNFG_COM_MDIO 
0x40000000 /* MDI shared w/ lan 0 */ -+#define E1000_MDICNFG_PHY_MASK 0x03E00000 -+#define E1000_MDICNFG_PHY_SHIFT 21 - - #define E1000_MEDIA_PORT_COPPER 1 - #define E1000_MEDIA_PORT_OTHER 2 -@@ -604,95 +759,209 @@ - #define E1000_M88E1112_PAGE_ADDR 0x16 - #define E1000_M88E1112_STATUS 0x01 - -+#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ -+#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ -+#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ -+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ -+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ -+ -+/* I350 EEE defines */ -+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ -+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ -+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ -+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ -+#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ -+/* EEE status */ -+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ -+#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ -+#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ -+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ -+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ -+#define E1000_M88E1543_EEE_CTRL_1 0x0 -+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ -+#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ -+#define E1000_EEE_ADV_DEV_I354 7 -+#define E1000_EEE_ADV_ADDR_I354 60 -+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ -+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ -+#define E1000_PCS_STATUS_DEV_I354 3 -+#define E1000_PCS_STATUS_ADDR_I354 1 -+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 -+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 -+#define E1000_M88E1512_CFG_REG_1 0x0010 -+#define E1000_M88E1512_CFG_REG_2 0x0011 -+#define E1000_M88E1512_CFG_REG_3 0x0007 -+#define E1000_M88E1512_MODE 0x0014 -+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ -+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ -+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ - /* PCI Express Control */ --#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 --#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 --#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 --#define E1000_GCR_CAP_VER2 0x00040000 -- --/* mPHY Address Control and Data Registers */ --#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ --#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 --#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ -- --/* mPHY PCS CLK Register */ --#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ --/* mPHY Near End Digital Loopback Override Bit */ --#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 -- --#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 --#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 -+#define E1000_GCR_RXD_NO_SNOOP 0x00000001 -+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 -+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 -+#define E1000_GCR_TXD_NO_SNOOP 0x00000008 -+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 -+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 -+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 -+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 -+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 -+#define E1000_GCR_CAP_VER2 0x00040000 -+ -+#define 
PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ -+ E1000_GCR_RXDSCW_NO_SNOOP | \ -+ E1000_GCR_RXDSCR_NO_SNOOP | \ -+ E1000_GCR_TXD_NO_SNOOP | \ -+ E1000_GCR_TXDSCW_NO_SNOOP | \ -+ E1000_GCR_TXDSCR_NO_SNOOP) -+ -+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ -+ -+/* mPHY address control and data registers */ -+#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ -+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 -+#define E1000_MPHY_DATA 0x0E10 /* Data Register */ -+ -+/* AFE CSR Offset for PCS CLK */ -+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 -+/* Override for near end digital loopback. */ -+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 - - /* PHY Control Register */ --#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ --#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ --#define MII_CR_POWER_DOWN 0x0800 /* Power down */ --#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ --#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ --#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ --#define MII_CR_SPEED_1000 0x0040 --#define MII_CR_SPEED_100 0x2000 --#define MII_CR_SPEED_10 0x0000 -+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ -+#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -+#define MII_CR_SPEED_1000 0x0040 -+#define MII_CR_SPEED_100 0x2000 -+#define MII_CR_SPEED_10 0x0000 - - /* PHY Status Register */ --#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ --#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ -+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - - /* Autoneg Advertisement Register */ --#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ --#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ --#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ --#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ --#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ --#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ -+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ -+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ -+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ -+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ -+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ -+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ - - /* Link Partner Ability Register (Base Page) */ --#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ --#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ -+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ -+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ -+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ -+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ -+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ -+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ -+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ -+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ -+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ -+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ -+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ - - /* Autoneg Expansion Register */ -+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ -+#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ -+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ -+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ -+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ - - /* 1000BASE-T Control Register */ --#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ --#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ --#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ -- /* 0=Configure PHY as Slave */ --#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ -- /* 0=Automatic Master/Slave config */ -+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ -+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -+#define CR_1000T_FD_CAPS 0x0200 /* 
Advertise 1000T FD capability */ -+/* 1=Repeater/switch device port 0=DTE device */ -+#define CR_1000T_REPEATER_DTE 0x0400 -+/* 1=Configure PHY as Master 0=Configure PHY as Slave */ -+#define CR_1000T_MS_VALUE 0x0800 -+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ -+#define CR_1000T_MS_ENABLE 0x1000 -+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ -+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ -+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ -+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ -+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ - - /* 1000BASE-T Status Register */ --#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ --#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ -+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ -+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ -+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ -+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ -+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ -+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ -+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ - -+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 - - /* PHY 1000 MII Register/Bit Definitions */ - /* PHY Registers defined by IEEE */ --#define PHY_CONTROL 0x00 /* Control Register */ --#define PHY_STATUS 0x01 /* Status Register */ --#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ --#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ --#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ --#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ --#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ --#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -+#define PHY_CONTROL 0x00 /* Control Register */ -+#define PHY_STATUS 0x01 /* Status Register */ -+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ -+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ -+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ -+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ -+ -+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ - - /* NVM Control */ --#define E1000_EECD_SK 0x00000001 /* NVM Clock */ --#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ --#define E1000_EECD_DI 0x00000004 /* NVM Data In */ --#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ --#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ --#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ --#define E1000_EECD_PRES 0x00000100 /* NVM Present */ -+#define E1000_EECD_SK 0x00000001 /* NVM Clock */ -+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ -+#define E1000_EECD_DI 0x00000004 /* NVM Data In */ -+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ -+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ -+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ -+#define E1000_EECD_PRES 0x00000100 /* NVM Present */ -+#define E1000_EECD_SIZE 
0x00000200 /* NVM Size (0=64 word 1=256 word) */ -+#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ -+#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ -+#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ -+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ - /* NVM Addressing bits based on type 0=small, 1=large */ --#define E1000_EECD_ADDR_BITS 0x00000400 --#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ --#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ --#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ --#define E1000_EECD_SIZE_EX_SHIFT 11 -+#define E1000_EECD_ADDR_BITS 0x00000400 -+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ -+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ -+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ -+#define E1000_EECD_SIZE_EX_SHIFT 11 -+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ -+#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ -+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ -+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) - #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ --#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ -+#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ - #define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ -+#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ - #define E1000_FLUDONE_ATTEMPTS 20000 - #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ - #define E1000_I210_FIFO_SEL_RX 0x00 -@@ -700,53 +969,32 @@ - #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) - #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 - #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 -+ - #define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ - /* Secure FLASH mode requires removing MSb */ - #define E1000_I210_FW_PTR_MASK 0x7FFF - /* Firmware code revision field word offset*/ - #define E1000_I210_FW_VER_OFFSET 328 --#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ --#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ --#define E1000_FLUDONE_ATTEMPTS 20000 --#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ --#define E1000_I210_FIFO_SEL_RX 0x00 --#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) --#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) --#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 --#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 -- - --/* Offset to data in NVM read/write registers */ --#define E1000_NVM_RW_REG_DATA 16 --#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ --#define E1000_NVM_RW_REG_START 1 /* Start operation */ --#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ --#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ -+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ -+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -+#define E1000_NVM_RW_REG_START 1 /* Start operation */ -+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ -+#define E1000_FLASH_UPDATES 2000 - - /* NVM Word Offsets */ --#define NVM_COMPAT 0x0003 --#define 
NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ --#define NVM_VERSION 0x0005 --#define NVM_INIT_CONTROL2_REG 0x000F --#define NVM_INIT_CONTROL3_PORT_B 0x0014 --#define NVM_INIT_CONTROL3_PORT_A 0x0024 --#define NVM_ALT_MAC_ADDR_PTR 0x0037 --#define NVM_CHECKSUM_REG 0x003F --#define NVM_COMPATIBILITY_REG_3 0x0003 --#define NVM_COMPATIBILITY_BIT_MASK 0x8000 --#define NVM_MAC_ADDR 0x0000 --#define NVM_SUB_DEV_ID 0x000B --#define NVM_SUB_VEN_ID 0x000C --#define NVM_DEV_ID 0x000D --#define NVM_VEN_ID 0x000E --#define NVM_INIT_CTRL_2 0x000F --#define NVM_INIT_CTRL_4 0x0013 --#define NVM_LED_1_CFG 0x001C --#define NVM_LED_0_2_CFG 0x001F --#define NVM_ETRACK_WORD 0x0042 --#define NVM_ETRACK_HIWORD 0x0043 --#define NVM_COMB_VER_OFF 0x0083 --#define NVM_COMB_VER_PTR 0x003d -+#define NVM_COMPAT 0x0003 -+#define NVM_ID_LED_SETTINGS 0x0004 -+#define NVM_VERSION 0x0005 -+#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 -+#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 -+#define NVM_FUTURE_INIT_WORD1 0x0019 -+#define NVM_ETRACK_WORD 0x0042 -+#define NVM_ETRACK_HIWORD 0x0043 -+#define NVM_COMB_VER_OFF 0x0083 -+#define NVM_COMB_VER_PTR 0x003d - - /* NVM version defines */ - #define NVM_MAJOR_MASK 0xF000 -@@ -763,6 +1011,31 @@ - #define NVM_HEX_CONV 16 - #define NVM_HEX_TENS 10 - -+/* FW version defines */ -+/* Offset of "Loader patch ptr" in Firmware Header */ -+#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 -+/* Patch generation hour & minutes */ -+#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 -+/* Patch generation month & day */ -+#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 -+/* Patch generation year */ -+#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 -+/* Patch major & minor numbers */ -+#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 -+ -+#define NVM_MAC_ADDR 0x0000 -+#define NVM_SUB_DEV_ID 0x000B -+#define NVM_SUB_VEN_ID 0x000C -+#define NVM_DEV_ID 0x000D -+#define NVM_VEN_ID 0x000E -+#define NVM_INIT_CTRL_2 0x000F -+#define NVM_INIT_CTRL_4 0x0013 -+#define NVM_LED_1_CFG 0x001C -+#define NVM_LED_0_2_CFG 0x001F -+ -+#define NVM_COMPAT_VALID_CSUM 0x0001 -+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 -+ - #define NVM_ETS_CFG 0x003E - #define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 - #define NVM_ETS_LTHRES_DELTA_SHIFT 6 -@@ -775,236 +1048,292 @@ - #define NVM_ETS_DATA_INDEX_MASK 0x0300 - #define NVM_ETS_DATA_INDEX_SHIFT 8 - #define NVM_ETS_DATA_HTHRESH_MASK 0x00FF -+#define NVM_INIT_CONTROL2_REG 0x000F -+#define NVM_INIT_CONTROL3_PORT_B 0x0014 -+#define NVM_INIT_3GIO_3 0x001A -+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 -+#define NVM_INIT_CONTROL3_PORT_A 0x0024 -+#define NVM_CFG 0x0012 -+#define NVM_ALT_MAC_ADDR_PTR 0x0037 -+#define NVM_CHECKSUM_REG 0x003F -+#define NVM_COMPATIBILITY_REG_3 0x0003 -+#define NVM_COMPATIBILITY_BIT_MASK 0x8000 -+ -+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ -+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ -+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ -+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ - --#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ --#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ --#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ --#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ -- --#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) -+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? 
(0x40 + (0x40 * (a))) : 0) - - /* Mask bits for fields in Word 0x24 of the NVM */ --#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ --#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ -+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ -+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ -+/* Offset of Link Mode bits for 82575/82576 */ -+#define NVM_WORD24_LNK_MODE_OFFSET 8 -+/* Offset of Link Mode bits for 82580 up */ -+#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 - - /* Mask bits for fields in Word 0x0f of the NVM */ --#define NVM_WORD0F_PAUSE_MASK 0x3000 --#define NVM_WORD0F_ASM_DIR 0x2000 -+#define NVM_WORD0F_PAUSE_MASK 0x3000 -+#define NVM_WORD0F_PAUSE 0x1000 -+#define NVM_WORD0F_ASM_DIR 0x2000 - - /* Mask bits for fields in Word 0x1a of the NVM */ -+#define NVM_WORD1A_ASPM_MASK 0x000C - --/* length of string needed to store part num */ --#define E1000_PBANUM_LENGTH 11 -+/* Mask bits for fields in Word 0x03 of the EEPROM */ -+#define NVM_COMPAT_LOM 0x0800 -+ -+/* length of string needed to store PBA number */ -+#define E1000_PBANUM_LENGTH 11 - - /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ --#define NVM_SUM 0xBABA -+#define NVM_SUM 0xBABA - --#define NVM_PBA_OFFSET_0 8 --#define NVM_PBA_OFFSET_1 9 -+/* PBA (printed board assembly) number words */ -+#define NVM_PBA_OFFSET_0 8 -+#define NVM_PBA_OFFSET_1 9 -+#define NVM_PBA_PTR_GUARD 0xFAFA - #define NVM_RESERVED_WORD 0xFFFF --#define NVM_PBA_PTR_GUARD 0xFAFA --#define NVM_WORD_SIZE_BASE_SHIFT 6 -- --/* NVM Commands - Microwire */ -+#define NVM_WORD_SIZE_BASE_SHIFT 6 - - /* NVM Commands - SPI */ --#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ --#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ --#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ --#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ --#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ --#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ -+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ -+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ -+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ -+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ -+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ - - /* SPI NVM Status Register */ --#define NVM_STATUS_RDY_SPI 0x01 -+#define NVM_STATUS_RDY_SPI 0x01 - - /* Word definitions for ID LED Settings */ --#define ID_LED_RESERVED_0000 0x0000 --#define ID_LED_RESERVED_FFFF 0xFFFF --#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ -- (ID_LED_OFF1_OFF2 << 8) | \ -- (ID_LED_DEF1_DEF2 << 4) | \ -- (ID_LED_DEF1_DEF2)) --#define ID_LED_DEF1_DEF2 0x1 --#define ID_LED_DEF1_ON2 0x2 --#define ID_LED_DEF1_OFF2 0x3 --#define ID_LED_ON1_DEF2 0x4 --#define ID_LED_ON1_ON2 0x5 --#define ID_LED_ON1_OFF2 0x6 --#define ID_LED_OFF1_DEF2 0x7 --#define ID_LED_OFF1_ON2 0x8 --#define ID_LED_OFF1_OFF2 0x9 -- --#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF --#define IGP_ACTIVITY_LED_ENABLE 0x0300 --#define IGP_LED3_MODE 0x07000000 -+#define ID_LED_RESERVED_0000 0x0000 -+#define ID_LED_RESERVED_FFFF 0xFFFF -+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ -+ (ID_LED_OFF1_OFF2 << 8) | \ -+ (ID_LED_DEF1_DEF2 << 4) | \ -+ (ID_LED_DEF1_DEF2)) -+#define ID_LED_DEF1_DEF2 0x1 -+#define ID_LED_DEF1_ON2 0x2 -+#define ID_LED_DEF1_OFF2 0x3 -+#define ID_LED_ON1_DEF2 0x4 -+#define 
ID_LED_ON1_ON2 0x5 -+#define ID_LED_ON1_OFF2 0x6 -+#define ID_LED_OFF1_DEF2 0x7 -+#define ID_LED_OFF1_ON2 0x8 -+#define ID_LED_OFF1_OFF2 0x9 -+ -+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF -+#define IGP_ACTIVITY_LED_ENABLE 0x0300 -+#define IGP_LED3_MODE 0x07000000 - - /* PCI/PCI-X/PCI-EX Config space */ --#define PCIE_DEVICE_CONTROL2 0x28 --#define PCIE_DEVICE_CONTROL2_16ms 0x0005 -+#define PCIX_COMMAND_REGISTER 0xE6 -+#define PCIX_STATUS_REGISTER_LO 0xE8 -+#define PCIX_STATUS_REGISTER_HI 0xEA -+#define PCI_HEADER_TYPE_REGISTER 0x0E -+#define PCIE_LINK_STATUS 0x12 -+#define PCIE_DEVICE_CONTROL2 0x28 -+ -+#define PCIX_COMMAND_MMRBC_MASK 0x000C -+#define PCIX_COMMAND_MMRBC_SHIFT 0x2 -+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 -+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 -+#define PCIX_STATUS_HI_MMRBC_4K 0x3 -+#define PCIX_STATUS_HI_MMRBC_2K 0x2 -+#define PCIX_STATUS_LO_FUNC_MASK 0x7 -+#define PCI_HEADER_TYPE_MULTIFUNC 0x80 -+#define PCIE_LINK_WIDTH_MASK 0x3F0 -+#define PCIE_LINK_WIDTH_SHIFT 4 -+#define PCIE_LINK_SPEED_MASK 0x0F -+#define PCIE_LINK_SPEED_2500 0x01 -+#define PCIE_LINK_SPEED_5000 0x02 -+#define PCIE_DEVICE_CONTROL2_16ms 0x0005 - --#define PHY_REVISION_MASK 0xFFFFFFF0 --#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ --#define MAX_PHY_MULTI_PAGE_REG 0xF -+#ifndef ETH_ADDR_LEN -+#define ETH_ADDR_LEN 6 -+#endif -+ -+#define PHY_REVISION_MASK 0xFFFFFFF0 -+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -+#define MAX_PHY_MULTI_PAGE_REG 0xF - --/* Bit definitions for valid PHY IDs. */ --/* I = Integrated -+/* Bit definitions for valid PHY IDs. -+ * I = Integrated - * E = External - */ --#define M88E1111_I_PHY_ID 0x01410CC0 --#define M88E1112_E_PHY_ID 0x01410C90 --#define I347AT4_E_PHY_ID 0x01410DC0 --#define IGP03E1000_E_PHY_ID 0x02A80390 --#define I82580_I_PHY_ID 0x015403A0 --#define I350_I_PHY_ID 0x015403B0 --#define M88_VENDOR 0x0141 --#define I210_I_PHY_ID 0x01410C00 --#define M88E1543_E_PHY_ID 0x01410EA0 -+#define M88E1000_E_PHY_ID 0x01410C50 -+#define M88E1000_I_PHY_ID 0x01410C30 -+#define M88E1011_I_PHY_ID 0x01410C20 -+#define IGP01E1000_I_PHY_ID 0x02A80380 -+#define M88E1111_I_PHY_ID 0x01410CC0 -+#define M88E1543_E_PHY_ID 0x01410EA0 -+#define M88E1512_E_PHY_ID 0x01410DD0 -+#define M88E1112_E_PHY_ID 0x01410C90 -+#define I347AT4_E_PHY_ID 0x01410DC0 -+#define M88E1340M_E_PHY_ID 0x01410DF0 -+#define GG82563_E_PHY_ID 0x01410CA0 -+#define IGP03E1000_E_PHY_ID 0x02A80390 -+#define IFE_E_PHY_ID 0x02A80330 -+#define IFE_PLUS_E_PHY_ID 0x02A80320 -+#define IFE_C_E_PHY_ID 0x02A80310 -+#define I82580_I_PHY_ID 0x015403A0 -+#define I350_I_PHY_ID 0x015403B0 -+#define I210_I_PHY_ID 0x01410C00 -+#define IGP04E1000_E_PHY_ID 0x02A80391 -+#define M88_VENDOR 0x0141 - - /* M88E1000 Specific Registers */ --#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ --#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ --#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ -+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ -+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ -+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ -+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ - --#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ --#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ -+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ -+#define M88E1000_PHY_GEN_CONTROL 0x1E /* 
meaning depends on reg 29 */ - - /* M88E1000 PHY Specific Control Register */ --#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ --/* 1=CLK125 low, 0=CLK125 toggling */ --#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ -- /* Manual MDI configuration */ --#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ -+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ -+/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ -+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 -+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ - /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ --#define M88E1000_PSCR_AUTO_X_1000T 0x0040 -+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 - /* Auto crossover enabled all speeds */ --#define M88E1000_PSCR_AUTO_X_MODE 0x0060 --/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold -- * 0=Normal 10BASE-T Rx Threshold -- */ --/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ --#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ -+#define M88E1000_PSCR_AUTO_X_MODE 0x0060 -+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ - - /* M88E1000 PHY Specific Status Register */ --#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ --#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ --#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ -+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ -+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ -+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ - /* 0 = <50M - * 1 = 50-80M - * 2 = 80-110M - * 3 = 110-140M - * 4 = >140M - */ --#define M88E1000_PSSR_CABLE_LENGTH 0x0380 --#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ --#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ -- --#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 -- --/* M88E1000 Extended PHY Specific Control Register */ --/* 1 = Lost lock detect enabled. 
-- * Will assert lost lock and bring -- * link down if idle not seen -- * within 1ms in 1000BASE-T -- */ -+#define M88E1000_PSSR_CABLE_LENGTH 0x0380 -+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ -+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ -+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ -+ -+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 -+ - /* Number of times we will attempt to autonegotiate before downshifting if we - * are the master - */ --#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 --#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 -+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 -+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 - /* Number of times we will attempt to autonegotiate before downshifting if we - * are the slave - */ --#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 --#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 --#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ -+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 -+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 -+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ -+ -+/* Intel I347AT4 Registers */ -+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ -+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ -+#define I347AT4_PAGE_SELECT 0x16 - --/* Intel i347-AT4 Registers */ -+/* I347AT4 Extended PHY Specific Control Register */ - --#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ --#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ --#define I347AT4_PAGE_SELECT 0x16 -- --/* i347-AT4 Extended PHY Specific Control Register */ -- --/* Number of times we will attempt to autonegotiate before downshifting if we -- * are the master -+/* Number of times we will attempt to autonegotiate before downshifting if we -+ * are the master - */ --#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 --#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 --#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 --#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 --#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 --#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 --#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 --#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 --#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 --#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 -+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 -+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 -+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 -+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 -+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 -+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 -+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 -+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 -+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 -+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 - --/* i347-AT4 PHY Cable Diagnostics Control */ --#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ -+/* I347AT4 PHY Cable Diagnostics Control */ -+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ - --/* Marvell 1112 only registers */ --#define M88E1112_VCT_DSP_DISTANCE 0x001A -+/* M88E1112 only registers */ -+#define M88E1112_VCT_DSP_DISTANCE 0x001A - - /* M88EC018 Rev 2 specific DownShift settings */ --#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 --#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 -+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 -+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 -+ -+/* Bits... 
-+ * 15-5: page -+ * 4-0: register offset -+ */ -+#define GG82563_PAGE_SHIFT 5 -+#define GG82563_REG(page, reg) \ -+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) -+#define GG82563_MIN_ALT_REG 30 -+ -+/* GG82563 Specific Registers */ -+#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ -+#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ -+#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ -+#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ -+ -+/* MAC Specific Control Register */ -+#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) -+ -+#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ -+ -+/* Page 193 - Port Control Registers */ -+/* Kumeran Mode Control */ -+#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) -+#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ -+ -+/* Page 194 - KMRN Registers */ -+#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ - - /* MDI Control */ --#define E1000_MDIC_DATA_MASK 0x0000FFFF --#define E1000_MDIC_REG_MASK 0x001F0000 --#define E1000_MDIC_REG_SHIFT 16 --#define E1000_MDIC_PHY_MASK 0x03E00000 --#define E1000_MDIC_PHY_SHIFT 21 --#define E1000_MDIC_OP_WRITE 0x04000000 --#define E1000_MDIC_OP_READ 0x08000000 --#define E1000_MDIC_READY 0x10000000 --#define E1000_MDIC_INT_EN 0x20000000 --#define E1000_MDIC_ERROR 0x40000000 --#define E1000_MDIC_DEST 0x80000000 -- --/* Thermal Sensor */ --#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ --#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ -- --/* Energy Efficient Ethernet */ --#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ --#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ --#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ --#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ --#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ --#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ --#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ --#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ --#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ --#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ --#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ --#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ --#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ --#define E1000_M88E1543_EEE_CTRL_1 0x0 --#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ --#define E1000_EEE_ADV_DEV_I354 7 --#define E1000_EEE_ADV_ADDR_I354 60 --#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ --#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ --#define E1000_PCS_STATUS_DEV_I354 3 --#define E1000_PCS_STATUS_ADDR_I354 1 --#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ --#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 --#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 -+#define E1000_MDIC_REG_MASK 0x001F0000 -+#define E1000_MDIC_REG_SHIFT 16 -+#define E1000_MDIC_PHY_MASK 0x03E00000 -+#define E1000_MDIC_PHY_SHIFT 21 -+#define E1000_MDIC_OP_WRITE 0x04000000 -+#define E1000_MDIC_OP_READ 0x08000000 -+#define E1000_MDIC_READY 0x10000000 -+#define E1000_MDIC_ERROR 0x40000000 -+#define E1000_MDIC_DEST 0x80000000 - - /* SerDes Control */ --#define E1000_GEN_CTL_READY 0x80000000 
--#define E1000_GEN_CTL_ADDRESS_SHIFT 8 --#define E1000_GEN_POLL_TIMEOUT 640 -- --#define E1000_VFTA_ENTRY_SHIFT 5 --#define E1000_VFTA_ENTRY_MASK 0x7F --#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F -- --/* DMA Coalescing register fields */ --#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ -+#define E1000_GEN_CTL_READY 0x80000000 -+#define E1000_GEN_CTL_ADDRESS_SHIFT 8 -+#define E1000_GEN_POLL_TIMEOUT 640 -+ -+/* LinkSec register fields */ -+#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 -+#define E1000_LSECTXCAP_SUM_SHIFT 16 -+#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 -+#define E1000_LSECRXCAP_SUM_SHIFT 16 -+ -+#define E1000_LSECTXCTRL_EN_MASK 0x00000003 -+#define E1000_LSECTXCTRL_DISABLE 0x0 -+#define E1000_LSECTXCTRL_AUTH 0x1 -+#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 -+#define E1000_LSECTXCTRL_AISCI 0x00000020 -+#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 -+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 -+ -+#define E1000_LSECRXCTRL_EN_MASK 0x0000000C -+#define E1000_LSECRXCTRL_EN_SHIFT 2 -+#define E1000_LSECRXCTRL_DISABLE 0x0 -+#define E1000_LSECRXCTRL_CHECK 0x1 -+#define E1000_LSECRXCTRL_STRICT 0x2 -+#define E1000_LSECRXCTRL_DROP 0x3 -+#define E1000_LSECRXCTRL_PLSH 0x00000040 -+#define E1000_LSECRXCTRL_RP 0x00000080 -+#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 - - /* Tx Rate-Scheduler Config fields */ - #define E1000_RTTBCNRC_RS_ENA 0x80000000 -@@ -1013,4 +1342,70 @@ - #define E1000_RTTBCNRC_RF_INT_MASK \ - (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) - --#endif -+/* DMA Coalescing register fields */ -+/* DMA Coalescing Watchdog Timer */ -+#define E1000_DMACR_DMACWT_MASK 0x00003FFF -+/* DMA Coalescing Rx Threshold */ -+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 -+#define E1000_DMACR_DMACTHR_SHIFT 16 -+/* Lx when no PCIe transactions */ -+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 -+#define E1000_DMACR_DMAC_LX_SHIFT 28 -+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ -+/* DMA Coalescing BMC-to-OS Watchdog Enable */ -+#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 -+ -+/* DMA Coalescing Transmit Threshold */ -+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF -+ -+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ -+ -+/* Rx Traffic Rate Threshold */ -+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF -+/* Rx packet rate in current window */ -+#define E1000_DMCRTRH_LRPRCW 0x80000000 -+ -+/* DMA Coal Rx Traffic Current Count */ -+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF -+ -+/* Flow ctrl Rx Threshold High val */ -+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 -+#define E1000_FCRTC_RTH_COAL_SHIFT 4 -+/* Lx power decision based on DMA coal */ -+#define E1000_PCIEMISC_LX_DECISION 0x00000080 -+ -+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ -+#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ -+#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ -+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ -+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ -+ -+/* Proxy Filter Control */ -+#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ -+#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ -+#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ -+#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ -+#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ -+#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ -+#define E1000_PROXYFC_IPV6 0x00000080 /* 
Directed IPv6 Enable */ -+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ -+#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ -+/* Proxy Status */ -+#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ -+ -+/* Firmware Status */ -+#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ -+/* VF Control */ -+#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ -+ -+#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ -+/* Lan ID bit field offset in status register */ -+#define E1000_STATUS_LAN_ID_OFFSET 2 -+#define E1000_VFTA_ENTRIES 128 -+#ifndef E1000_UNUSEDARG -+#define E1000_UNUSEDARG -+#endif /* E1000_UNUSEDARG */ -+#ifndef ERROR_REPORT -+#define ERROR_REPORT(fmt) do { } while (0) -+#endif /* ERROR_REPORT */ -+#endif /* _E1000_DEFINES_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h ---- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,33 +1,31 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_HW_H_ - #define _E1000_HW_H_ - --#include --#include --#include --#include -- -+#include "e1000_osdep.h" - #include "e1000_regs.h" - #include "e1000_defines.h" - -@@ -50,15 +48,14 @@ - #define E1000_DEV_ID_82580_SGMII 0x1511 - #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 - #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 --#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 --#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A --#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C --#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 - #define E1000_DEV_ID_I350_COPPER 0x1521 - #define E1000_DEV_ID_I350_FIBER 0x1522 - #define E1000_DEV_ID_I350_SERDES 0x1523 - #define E1000_DEV_ID_I350_SGMII 0x1524 -+#define E1000_DEV_ID_I350_DA4 0x1546 - #define E1000_DEV_ID_I210_COPPER 0x1533 -+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 -+#define E1000_DEV_ID_I210_COPPER_IT 0x1535 - #define E1000_DEV_ID_I210_FIBER 0x1536 - #define E1000_DEV_ID_I210_SERDES 0x1537 - #define E1000_DEV_ID_I210_SGMII 0x1538 -@@ -68,19 +65,26 @@ - #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 - #define E1000_DEV_ID_I354_SGMII 0x1F41 - #define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 -+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 -+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A -+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C -+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 - --#define E1000_REVISION_2 2 --#define E1000_REVISION_4 4 -- --#define E1000_FUNC_0 0 --#define E1000_FUNC_1 1 --#define E1000_FUNC_2 2 --#define E1000_FUNC_3 3 -- --#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 --#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 --#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 --#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 -+#define E1000_REVISION_0 0 -+#define E1000_REVISION_1 1 -+#define E1000_REVISION_2 2 -+#define E1000_REVISION_3 3 -+#define E1000_REVISION_4 4 -+ -+#define E1000_FUNC_0 0 -+#define E1000_FUNC_1 1 -+#define E1000_FUNC_2 2 -+#define E1000_FUNC_3 3 -+ -+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 -+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 -+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 -+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 - - enum e1000_mac_type { - e1000_undefined = 0, -@@ -127,6 +131,7 @@ - e1000_phy_igp_3, - e1000_phy_ife, - e1000_phy_82580, -+ e1000_phy_vf, - e1000_phy_i210, - }; - -@@ -181,6 +186,177 @@ - e1000_fc_default = 0xFF - }; - -+enum e1000_ms_type { -+ e1000_ms_hw_default = 0, -+ e1000_ms_force_master, -+ e1000_ms_force_slave, -+ e1000_ms_auto -+}; -+ -+enum e1000_smart_speed { -+ e1000_smart_speed_default = 0, -+ e1000_smart_speed_on, -+ e1000_smart_speed_off -+}; -+ -+enum e1000_serdes_link_state { -+ e1000_serdes_link_down = 0, -+ e1000_serdes_link_autoneg_progress, -+ e1000_serdes_link_autoneg_complete, -+ e1000_serdes_link_forced_up -+}; -+ -+#ifndef __le16 -+#define __le16 u16 -+#endif -+#ifndef __le32 -+#define __le32 u32 -+#endif -+#ifndef __le64 -+#define __le64 u64 -+#endif -+/* Receive Descriptor */ -+struct e1000_rx_desc { -+ __le64 buffer_addr; /* Address of the descriptor's data buffer */ -+ __le16 length; /* Length of data DMAed into data buffer */ -+ __le16 csum; /* Packet checksum */ -+ u8 status; /* Descriptor status */ -+ u8 errors; /* Descriptor Errors */ -+ __le16 special; -+}; -+ -+/* Receive Descriptor - Extended */ -+union e1000_rx_desc_extended { -+ struct { -+ __le64 buffer_addr; -+ __le64 reserved; -+ } read; -+ struct { -+ struct { -+ __le32 mrq; /* Multiple 
Rx Queues */ -+ union { -+ __le32 rss; /* RSS Hash */ -+ struct { -+ __le16 ip_id; /* IP id */ -+ __le16 csum; /* Packet Checksum */ -+ } csum_ip; -+ } hi_dword; -+ } lower; -+ struct { -+ __le32 status_error; /* ext status/error */ -+ __le16 length; -+ __le16 vlan; /* VLAN tag */ -+ } upper; -+ } wb; /* writeback */ -+}; -+ -+#define MAX_PS_BUFFERS 4 -+ -+/* Number of packet split data buffers (not including the header buffer) */ -+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) -+ -+/* Receive Descriptor - Packet Split */ -+union e1000_rx_desc_packet_split { -+ struct { -+ /* one buffer for protocol header(s), three data buffers */ -+ __le64 buffer_addr[MAX_PS_BUFFERS]; -+ } read; -+ struct { -+ struct { -+ __le32 mrq; /* Multiple Rx Queues */ -+ union { -+ __le32 rss; /* RSS Hash */ -+ struct { -+ __le16 ip_id; /* IP id */ -+ __le16 csum; /* Packet Checksum */ -+ } csum_ip; -+ } hi_dword; -+ } lower; -+ struct { -+ __le32 status_error; /* ext status/error */ -+ __le16 length0; /* length of buffer 0 */ -+ __le16 vlan; /* VLAN tag */ -+ } middle; -+ struct { -+ __le16 header_status; -+ /* length of buffers 1-3 */ -+ __le16 length[PS_PAGE_BUFFERS]; -+ } upper; -+ __le64 reserved; -+ } wb; /* writeback */ -+}; -+ -+/* Transmit Descriptor */ -+struct e1000_tx_desc { -+ __le64 buffer_addr; /* Address of the descriptor's data buffer */ -+ union { -+ __le32 data; -+ struct { -+ __le16 length; /* Data buffer length */ -+ u8 cso; /* Checksum offset */ -+ u8 cmd; /* Descriptor control */ -+ } flags; -+ } lower; -+ union { -+ __le32 data; -+ struct { -+ u8 status; /* Descriptor status */ -+ u8 css; /* Checksum start */ -+ __le16 special; -+ } fields; -+ } upper; -+}; -+ -+/* Offload Context Descriptor */ -+struct e1000_context_desc { -+ union { -+ __le32 ip_config; -+ struct { -+ u8 ipcss; /* IP checksum start */ -+ u8 ipcso; /* IP checksum offset */ -+ __le16 ipcse; /* IP checksum end */ -+ } ip_fields; -+ } lower_setup; -+ union { -+ __le32 tcp_config; -+ struct { -+ u8 tucss; /* TCP checksum start */ -+ u8 tucso; /* TCP checksum offset */ -+ __le16 tucse; /* TCP checksum end */ -+ } tcp_fields; -+ } upper_setup; -+ __le32 cmd_and_length; -+ union { -+ __le32 data; -+ struct { -+ u8 status; /* Descriptor status */ -+ u8 hdr_len; /* Header length */ -+ __le16 mss; /* Maximum segment size */ -+ } fields; -+ } tcp_seg_setup; -+}; -+ -+/* Offload data descriptor */ -+struct e1000_data_desc { -+ __le64 buffer_addr; /* Address of the descriptor's buffer address */ -+ union { -+ __le32 data; -+ struct { -+ __le16 length; /* Data buffer length */ -+ u8 typ_len_ext; -+ u8 cmd; -+ } flags; -+ } lower; -+ union { -+ __le32 data; -+ struct { -+ u8 status; /* Descriptor status */ -+ u8 popts; /* Packet Options */ -+ __le16 special; -+ } fields; -+ } upper; -+}; -+ - /* Statistics counters collected by the MAC */ - struct e1000_hw_stats { - u64 crcerrs; -@@ -289,7 +465,7 @@ - u8 checksum; - }; - --#define E1000_HI_MAX_DATA_LENGTH 252 -+#define E1000_HI_MAX_DATA_LENGTH 252 - struct e1000_host_command_info { - struct e1000_host_command_header command_header; - u8 command_data[E1000_HI_MAX_DATA_LENGTH]; -@@ -304,7 +480,7 @@ - u16 command_length; - }; - --#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 -+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 - struct e1000_host_mng_command_info { - struct e1000_host_mng_command_header command_header; - u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; -@@ -313,52 +489,95 @@ - #include "e1000_mac.h" - #include "e1000_phy.h" - #include "e1000_nvm.h" -+#include "e1000_manage.h" - #include 
"e1000_mbx.h" - -+/* Function pointers for the MAC. */ - struct e1000_mac_operations { -- s32 (*check_for_link)(struct e1000_hw *); -- s32 (*reset_hw)(struct e1000_hw *); -- s32 (*init_hw)(struct e1000_hw *); -+ s32 (*init_params)(struct e1000_hw *); -+ s32 (*id_led_init)(struct e1000_hw *); -+ s32 (*blink_led)(struct e1000_hw *); - bool (*check_mng_mode)(struct e1000_hw *); -- s32 (*setup_physical_interface)(struct e1000_hw *); -- void (*rar_set)(struct e1000_hw *, u8 *, u32); -- s32 (*read_mac_addr)(struct e1000_hw *); -- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); -- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); -- void (*release_swfw_sync)(struct e1000_hw *, u16); --#ifdef CONFIG_IGB_HWMON -+ s32 (*check_for_link)(struct e1000_hw *); -+ s32 (*cleanup_led)(struct e1000_hw *); -+ void (*clear_hw_cntrs)(struct e1000_hw *); -+ void (*clear_vfta)(struct e1000_hw *); -+ s32 (*get_bus_info)(struct e1000_hw *); -+ void (*set_lan_id)(struct e1000_hw *); -+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); -+ s32 (*led_on)(struct e1000_hw *); -+ s32 (*led_off)(struct e1000_hw *); -+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); -+ s32 (*reset_hw)(struct e1000_hw *); -+ s32 (*init_hw)(struct e1000_hw *); -+ void (*shutdown_serdes)(struct e1000_hw *); -+ void (*power_up_serdes)(struct e1000_hw *); -+ s32 (*setup_link)(struct e1000_hw *); -+ s32 (*setup_physical_interface)(struct e1000_hw *); -+ s32 (*setup_led)(struct e1000_hw *); -+ void (*write_vfta)(struct e1000_hw *, u32, u32); -+ void (*config_collision_dist)(struct e1000_hw *); -+ int (*rar_set)(struct e1000_hw *, u8*, u32); -+ s32 (*read_mac_addr)(struct e1000_hw *); -+ s32 (*validate_mdi_setting)(struct e1000_hw *); - s32 (*get_thermal_sensor_data)(struct e1000_hw *); - s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); --#endif -- -+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); -+ void (*release_swfw_sync)(struct e1000_hw *, u16); - }; - -+/* When to use various PHY register access functions: -+ * -+ * Func Caller -+ * Function Does Does When to use -+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -+ * X_reg L,P,A n/a for simple PHY reg accesses -+ * X_reg_locked P,A L for multiple accesses of different regs -+ * on different pages -+ * X_reg_page A L,P for multiple accesses of different regs -+ * on the same page -+ * -+ * Where X=[read|write], L=locking, P=sets page, A=register access -+ * -+ */ - struct e1000_phy_operations { -- s32 (*acquire)(struct e1000_hw *); -- s32 (*check_polarity)(struct e1000_hw *); -- s32 (*check_reset_block)(struct e1000_hw *); -- s32 (*force_speed_duplex)(struct e1000_hw *); -- s32 (*get_cfg_done)(struct e1000_hw *hw); -- s32 (*get_cable_length)(struct e1000_hw *); -- s32 (*get_phy_info)(struct e1000_hw *); -- s32 (*read_reg)(struct e1000_hw *, u32, u16 *); -+ s32 (*init_params)(struct e1000_hw *); -+ s32 (*acquire)(struct e1000_hw *); -+ s32 (*check_polarity)(struct e1000_hw *); -+ s32 (*check_reset_block)(struct e1000_hw *); -+ s32 (*commit)(struct e1000_hw *); -+ s32 (*force_speed_duplex)(struct e1000_hw *); -+ s32 (*get_cfg_done)(struct e1000_hw *hw); -+ s32 (*get_cable_length)(struct e1000_hw *); -+ s32 (*get_info)(struct e1000_hw *); -+ s32 (*set_page)(struct e1000_hw *, u16); -+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *); -+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); -+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); - void (*release)(struct e1000_hw *); -- s32 (*reset)(struct e1000_hw *); -- s32 
(*set_d0_lplu_state)(struct e1000_hw *, bool); -- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); -- s32 (*write_reg)(struct e1000_hw *, u32, u16); -+ s32 (*reset)(struct e1000_hw *); -+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); -+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); -+ s32 (*write_reg)(struct e1000_hw *, u32, u16); -+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); -+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16); -+ void (*power_up)(struct e1000_hw *); -+ void (*power_down)(struct e1000_hw *); - s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); - s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); - }; - -+/* Function pointers for the NVM. */ - struct e1000_nvm_operations { -- s32 (*acquire)(struct e1000_hw *); -- s32 (*read)(struct e1000_hw *, u16, u16, u16 *); -+ s32 (*init_params)(struct e1000_hw *); -+ s32 (*acquire)(struct e1000_hw *); -+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *); - void (*release)(struct e1000_hw *); -- s32 (*write)(struct e1000_hw *, u16, u16, u16 *); -- s32 (*update)(struct e1000_hw *); -- s32 (*validate)(struct e1000_hw *); -- s32 (*valid_led_default)(struct e1000_hw *, u16 *); -+ void (*reload)(struct e1000_hw *); -+ s32 (*update)(struct e1000_hw *); -+ s32 (*valid_led_default)(struct e1000_hw *, u16 *); -+ s32 (*validate)(struct e1000_hw *); -+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *); - }; - - #define E1000_MAX_SENSORS 3 -@@ -374,49 +593,45 @@ - struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; - }; - --struct e1000_info { -- s32 (*get_invariants)(struct e1000_hw *); -- struct e1000_mac_operations *mac_ops; -- struct e1000_phy_operations *phy_ops; -- struct e1000_nvm_operations *nvm_ops; --}; -- --extern const struct e1000_info e1000_82575_info; -- - struct e1000_mac_info { - struct e1000_mac_operations ops; -- -- u8 addr[6]; -- u8 perm_addr[6]; -+ u8 addr[ETH_ADDR_LEN]; -+ u8 perm_addr[ETH_ADDR_LEN]; - - enum e1000_mac_type type; - -+ u32 collision_delta; - u32 ledctl_default; - u32 ledctl_mode1; - u32 ledctl_mode2; - u32 mc_filter_type; -+ u32 tx_packet_delta; - u32 txcw; - -+ u16 current_ifs_val; -+ u16 ifs_max_val; -+ u16 ifs_min_val; -+ u16 ifs_ratio; -+ u16 ifs_step_size; - u16 mta_reg_count; - u16 uta_reg_count; - - /* Maximum size of the MTA register table in all supported adapters */ -- #define MAX_MTA_REG 128 -+#define MAX_MTA_REG 128 - u32 mta_shadow[MAX_MTA_REG]; - u16 rar_entry_count; - - u8 forced_speed_duplex; - - bool adaptive_ifs; -+ bool has_fwsm; - bool arc_subsystem_valid; - bool asf_firmware_present; - bool autoneg; - bool autoneg_failed; -- bool disable_hw_init_bits; - bool get_link_status; -- bool ifs_params_forced; - bool in_ifs_mode; -- bool report_tx_early; -+ enum e1000_serdes_link_state serdes_link_state; - bool serdes_has_link; - bool tx_pkt_filtering; - struct e1000_thermal_sensor_data thermal_sensor_data; -@@ -424,7 +639,6 @@ - - struct e1000_phy_info { - struct e1000_phy_operations ops; -- - enum e1000_phy_type type; - - enum e1000_1000t_rx_status local_rx; -@@ -477,20 +691,19 @@ - enum e1000_bus_speed speed; - enum e1000_bus_width width; - -- u32 snoop; -- - u16 func; - u16 pci_cmd_word; - }; - - struct e1000_fc_info { -- u32 high_water; /* Flow control high-water mark */ -- u32 low_water; /* Flow control low-water mark */ -- u16 pause_time; /* Flow control pause timer */ -- bool send_xon; /* Flow control send XON */ -- bool strict_ieee; /* Strict IEEE mode */ -- enum e1000_fc_mode current_mode; /* Type of flow control */ -- enum e1000_fc_mode requested_mode; 
-+ u32 high_water; /* Flow control high-water mark */ -+ u32 low_water; /* Flow control low-water mark */ -+ u16 pause_time; /* Flow control pause timer */ -+ u16 refresh_time; /* Flow control refresh timer */ -+ bool send_xon; /* Flow control send XON */ -+ bool strict_ieee; /* Strict IEEE mode */ -+ enum e1000_fc_mode current_mode; /* FC mode in effect */ -+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ - }; - - struct e1000_mbx_operations { -@@ -525,12 +738,17 @@ - bool sgmii_active; - bool global_device_reset; - bool eee_disable; -- bool clear_semaphore_once; -- struct e1000_sfp_flags eth_flags; - bool module_plugged; -+ bool clear_semaphore_once; -+ u32 mtu; -+ struct sfp_e1000_flags eth_flags; - u8 media_port; - bool media_changed; -- bool mas_capable; -+}; -+ -+struct e1000_dev_spec_vf { -+ u32 vf_number; -+ u32 v2p_mailbox; - }; - - struct e1000_hw { -@@ -549,7 +767,8 @@ - struct e1000_host_mng_dhcp_cookie mng_cookie; - - union { -- struct e1000_dev_spec_82575 _82575; -+ struct e1000_dev_spec_82575 _82575; -+ struct e1000_dev_spec_vf vf; - } dev_spec; - - u16 device_id; -@@ -560,14 +779,13 @@ - u8 revision_id; - }; - --struct net_device *igb_get_hw_dev(struct e1000_hw *hw); --#define hw_dbg(format, arg...) \ -- netdev_dbg(igb_get_hw_dev(hw), format, ##arg) -+#include "e1000_82575.h" -+#include "e1000_i210.h" - - /* These functions must be implemented by drivers */ --s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); --s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); -+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); - --void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); --void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); --#endif /* _E1000_HW_H_ */ -+#endif -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c ---- a/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,107 +1,40 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* - --/* e1000_i210 -- * e1000_i211 -- */ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. 
- --#include --#include -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. - --#include "e1000_hw.h" --#include "e1000_i210.h" -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. - --static s32 igb_update_flash_i210(struct e1000_hw *hw); -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". - --/** -- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore -- * @hw: pointer to the HW structure -- * -- * Acquire the HW semaphore to access the PHY or NVM -- */ --static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) --{ -- u32 swsm; -- s32 timeout = hw->nvm.word_size + 1; -- s32 i = 0; -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -- /* Get the SW semaphore */ -- while (i < timeout) { -- swsm = rd32(E1000_SWSM); -- if (!(swsm & E1000_SWSM_SMBI)) -- break; -+*******************************************************************************/ - -- udelay(50); -- i++; -- } -+#include "e1000_api.h" - -- if (i == timeout) { -- /* In rare circumstances, the SW semaphore may already be held -- * unintentionally. Clear the semaphore once before giving up. -- */ -- if (hw->dev_spec._82575.clear_semaphore_once) { -- hw->dev_spec._82575.clear_semaphore_once = false; -- igb_put_hw_semaphore(hw); -- for (i = 0; i < timeout; i++) { -- swsm = rd32(E1000_SWSM); -- if (!(swsm & E1000_SWSM_SMBI)) -- break; - -- udelay(50); -- } -- } -- -- /* If we do not have the semaphore here, we have to give up. */ -- if (i == timeout) { -- hw_dbg("Driver can't access device - SMBI bit is set.\n"); -- return -E1000_ERR_NVM; -- } -- } -- -- /* Get the FW semaphore. */ -- for (i = 0; i < timeout; i++) { -- swsm = rd32(E1000_SWSM); -- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); -- -- /* Semaphore acquired if bit latched */ -- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) -- break; -- -- udelay(50); -- } -- -- if (i == timeout) { -- /* Release semaphores */ -- igb_put_hw_semaphore(hw); -- hw_dbg("Driver can't access the NVM\n"); -- return -E1000_ERR_NVM; -- } -- -- return 0; --} -+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); -+static void e1000_release_nvm_i210(struct e1000_hw *hw); -+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); -+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, -+ u16 *data); -+static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); -+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); - - /** -- * igb_acquire_nvm_i210 - Request for access to EEPROM -+ * e1000_acquire_nvm_i210 - Request for access to EEPROM - * @hw: pointer to the HW structure - * - * Acquire the necessary semaphores for exclusive access to the EEPROM. -@@ -109,93 +42,178 @@ - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). 
- **/ --static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) -+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) - { -- return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_acquire_nvm_i210"); -+ -+ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); -+ -+ return ret_val; - } - - /** -- * igb_release_nvm_i210 - Release exclusive access to EEPROM -+ * e1000_release_nvm_i210 - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit, - * then release the semaphores acquired. - **/ --static void igb_release_nvm_i210(struct e1000_hw *hw) -+static void e1000_release_nvm_i210(struct e1000_hw *hw) - { -- igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); -+ DEBUGFUNC("e1000_release_nvm_i210"); -+ -+ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); - } - - /** -- * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore -+ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Acquire the SW/FW semaphore to access the PHY or NVM. The mask - * will also specify which port we're acquiring the lock for. - **/ --s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) -+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) - { - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 16; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ - -+ DEBUGFUNC("e1000_acquire_swfw_sync_i210"); -+ - while (i < timeout) { -- if (igb_get_hw_semaphore_i210(hw)) { -+ if (e1000_get_hw_semaphore_i210(hw)) { - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } - -- swfw_sync = rd32(E1000_SW_FW_SYNC); -+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); - if (!(swfw_sync & (fwmask | swmask))) - break; - -- /* Firmware currently using resource (fwmask) */ -- igb_put_hw_semaphore(hw); -- mdelay(5); -+ /* -+ * Firmware currently using resource (fwmask) -+ * or other software thread using resource (swmask) -+ */ -+ e1000_put_hw_semaphore_generic(hw); -+ msec_delay_irq(5); - i++; - } - - if (i == timeout) { -- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); -+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } - - swfw_sync |= swmask; -- wr32(E1000_SW_FW_SYNC, swfw_sync); -+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); -+ -+ e1000_put_hw_semaphore_generic(hw); - -- igb_put_hw_semaphore(hw); - out: - return ret_val; - } - - /** -- * igb_release_swfw_sync_i210 - Release SW/FW semaphore -+ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Release the SW/FW semaphore used to access the PHY or NVM. The mask - * will also specify which port we're releasing the lock for. 
- **/ --void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) -+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) - { - u32 swfw_sync; - -- while (igb_get_hw_semaphore_i210(hw)) -+ DEBUGFUNC("e1000_release_swfw_sync_i210"); -+ -+ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) - ; /* Empty */ - -- swfw_sync = rd32(E1000_SW_FW_SYNC); -+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); - swfw_sync &= ~mask; -- wr32(E1000_SW_FW_SYNC, swfw_sync); -+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); - -- igb_put_hw_semaphore(hw); -+ e1000_put_hw_semaphore_generic(hw); - } - - /** -- * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register -+ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore -+ * @hw: pointer to the HW structure -+ * -+ * Acquire the HW semaphore to access the PHY or NVM -+ **/ -+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) -+{ -+ u32 swsm; -+ s32 timeout = hw->nvm.word_size + 1; -+ s32 i = 0; -+ -+ DEBUGFUNC("e1000_get_hw_semaphore_i210"); -+ -+ /* Get the SW semaphore */ -+ while (i < timeout) { -+ swsm = E1000_READ_REG(hw, E1000_SWSM); -+ if (!(swsm & E1000_SWSM_SMBI)) -+ break; -+ -+ usec_delay(50); -+ i++; -+ } -+ -+ if (i == timeout) { -+ /* In rare circumstances, the SW semaphore may already be held -+ * unintentionally. Clear the semaphore once before giving up. -+ */ -+ if (hw->dev_spec._82575.clear_semaphore_once) { -+ hw->dev_spec._82575.clear_semaphore_once = false; -+ e1000_put_hw_semaphore_generic(hw); -+ for (i = 0; i < timeout; i++) { -+ swsm = E1000_READ_REG(hw, E1000_SWSM); -+ if (!(swsm & E1000_SWSM_SMBI)) -+ break; -+ -+ usec_delay(50); -+ } -+ } -+ -+ /* If we do not have the semaphore here, we have to give up. */ -+ if (i == timeout) { -+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); -+ return -E1000_ERR_NVM; -+ } -+ } -+ -+ /* Get the FW semaphore. */ -+ for (i = 0; i < timeout; i++) { -+ swsm = E1000_READ_REG(hw, E1000_SWSM); -+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); -+ -+ /* Semaphore acquired if bit latched */ -+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) -+ break; -+ -+ usec_delay(50); -+ } -+ -+ if (i == timeout) { -+ /* Release semaphores */ -+ e1000_put_hw_semaphore_generic(hw); -+ DEBUGOUT("Driver can't access the NVM\n"); -+ return -E1000_ERR_NVM; -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register - * @hw: pointer to the HW structure - * @offset: offset of word in the Shadow Ram to read - * @words: number of words to read -@@ -204,28 +222,74 @@ - * Reads a 16 bit word from the Shadow Ram using the EERD register. - * Uses necessary synchronization semaphores. - **/ --static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, -- u16 *data) -+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, -+ u16 *data) - { -- s32 status = 0; -+ s32 status = E1000_SUCCESS; - u16 i, count; - -+ DEBUGFUNC("e1000_read_nvm_srrd_i210"); -+ - /* We cannot hold synchronization semaphores for too long, - * because of forceful takeover procedure. However it is more efficient -- * to read in bursts than synchronizing access for each word. -- */ -+ * to read in bursts than synchronizing access for each word. */ - for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { - count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 
- E1000_EERD_EEWR_MAX_COUNT : (words - i); -- if (!(hw->nvm.ops.acquire(hw))) { -- status = igb_read_nvm_eerd(hw, offset, count, -+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { -+ status = e1000_read_nvm_eerd(hw, offset, count, - data + i); - hw->nvm.ops.release(hw); - } else { - status = E1000_ERR_SWFW_SYNC; - } - -- if (status) -+ if (status != E1000_SUCCESS) -+ break; -+ } -+ -+ return status; -+} -+ -+/** -+ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR -+ * @hw: pointer to the HW structure -+ * @offset: offset within the Shadow RAM to be written to -+ * @words: number of words to write -+ * @data: 16 bit word(s) to be written to the Shadow RAM -+ * -+ * Writes data to Shadow RAM at offset using EEWR register. -+ * -+ * If e1000_update_nvm_checksum is not called after this function , the -+ * data will not be committed to FLASH and also Shadow RAM will most likely -+ * contain an invalid checksum. -+ * -+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer -+ * partially written. -+ **/ -+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, -+ u16 *data) -+{ -+ s32 status = E1000_SUCCESS; -+ u16 i, count; -+ -+ DEBUGFUNC("e1000_write_nvm_srwr_i210"); -+ -+ /* We cannot hold synchronization semaphores for too long, -+ * because of forceful takeover procedure. However it is more efficient -+ * to write in bursts than synchronizing access for each word. */ -+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { -+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? -+ E1000_EERD_EEWR_MAX_COUNT : (words - i); -+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { -+ status = e1000_write_nvm_srwr(hw, offset, count, -+ data + i); -+ hw->nvm.ops.release(hw); -+ } else { -+ status = E1000_ERR_SWFW_SYNC; -+ } -+ -+ if (status != E1000_SUCCESS) - break; - } - -@@ -233,7 +297,7 @@ - } - - /** -- * igb_write_nvm_srwr - Write to Shadow Ram using EEWR -+ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR - * @hw: pointer to the HW structure - * @offset: offset within the Shadow Ram to be written to - * @words: number of words to write -@@ -241,23 +305,26 @@ - * - * Writes data to Shadow Ram at offset using EEWR register. - * -- * If igb_update_nvm_checksum is not called after this function , the -+ * If e1000_update_nvm_checksum is not called after this function , the - * Shadow Ram will most likely contain an invalid checksum. - **/ --static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, -+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) - { - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i, k, eewr = 0; - u32 attempts = 100000; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - -- /* A check for invalid values: offset too large, too many words, -+ DEBUGFUNC("e1000_write_nvm_srwr"); -+ -+ /* -+ * A check for invalid values: offset too large, too many words, - * too many words for the offset, and not enough words. 
- */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { -- hw_dbg("nvm parameter(s) out of bounds\n"); -+ DEBUGOUT("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } -@@ -267,19 +334,19 @@ - (data[i] << E1000_NVM_RW_REG_DATA) | - E1000_NVM_RW_REG_START; - -- wr32(E1000_SRWR, eewr); -+ E1000_WRITE_REG(hw, E1000_SRWR, eewr); - - for (k = 0; k < attempts; k++) { - if (E1000_NVM_RW_REG_DONE & -- rd32(E1000_SRWR)) { -- ret_val = 0; -+ E1000_READ_REG(hw, E1000_SRWR)) { -+ ret_val = E1000_SUCCESS; - break; - } -- udelay(5); -- } -+ usec_delay(5); -+ } - -- if (ret_val) { -- hw_dbg("Shadow RAM write EEWR timed out\n"); -+ if (ret_val != E1000_SUCCESS) { -+ DEBUGOUT("Shadow RAM write EEWR timed out\n"); - break; - } - } -@@ -288,52 +355,7 @@ - return ret_val; - } - --/** -- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR -- * @hw: pointer to the HW structure -- * @offset: offset within the Shadow RAM to be written to -- * @words: number of words to write -- * @data: 16 bit word(s) to be written to the Shadow RAM -- * -- * Writes data to Shadow RAM at offset using EEWR register. -- * -- * If e1000_update_nvm_checksum is not called after this function , the -- * data will not be committed to FLASH and also Shadow RAM will most likely -- * contain an invalid checksum. -- * -- * If error code is returned, data and Shadow RAM may be inconsistent - buffer -- * partially written. -- **/ --static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, -- u16 *data) --{ -- s32 status = 0; -- u16 i, count; -- -- /* We cannot hold synchronization semaphores for too long, -- * because of forceful takeover procedure. However it is more efficient -- * to write in bursts than synchronizing access for each word. -- */ -- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { -- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? -- E1000_EERD_EEWR_MAX_COUNT : (words - i); -- if (!(hw->nvm.ops.acquire(hw))) { -- status = igb_write_nvm_srwr(hw, offset, count, -- data + i); -- hw->nvm.ops.release(hw); -- } else { -- status = E1000_ERR_SWFW_SYNC; -- } -- -- if (status) -- break; -- } -- -- return status; --} -- --/** -- * igb_read_invm_word_i210 - Reads OTP -+/** e1000_read_invm_word_i210 - Reads OTP - * @hw: pointer to the HW structure - * @address: the word address (aka eeprom offset) to read - * @data: pointer to the data read -@@ -341,15 +363,17 @@ - * Reads 16-bit words from the OTP. Return error when the word is not - * stored in OTP. 
- **/ --static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) -+static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) - { - s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; - u32 invm_dword; - u16 i; - u8 record_type, word_address; - -+ DEBUGFUNC("e1000_read_invm_word_i210"); -+ - for (i = 0; i < E1000_INVM_SIZE; i++) { -- invm_dword = rd32(E1000_INVM_DATA_REG(i)); -+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); - /* Get record type */ - record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); - if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) -@@ -362,75 +386,76 @@ - word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); - if (word_address == address) { - *data = INVM_DWORD_TO_WORD_DATA(invm_dword); -- hw_dbg("Read INVM Word 0x%02x = %x\n", -+ DEBUGOUT2("Read INVM Word 0x%02x = %x", - address, *data); -- status = 0; -+ status = E1000_SUCCESS; - break; - } - } - } -- if (status) -- hw_dbg("Requested word 0x%02x not found in OTP\n", address); -+ if (status != E1000_SUCCESS) -+ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); - return status; - } - --/** -- * igb_read_invm_i210 - Read invm wrapper function for I210/I211 -+/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 - * @hw: pointer to the HW structure -- * @words: number of words to read -+ * @address: the word address (aka eeprom offset) to read - * @data: pointer to the data read - * - * Wrapper function to return data formerly found in the NVM. - **/ --static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, -- u16 words __always_unused, u16 *data) -+static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, -+ u16 E1000_UNUSEDARG words, u16 *data) - { -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("e1000_read_invm_i210"); - - /* Only the MAC addr is required to be present in the iNVM */ - switch (offset) { - case NVM_MAC_ADDR: -- ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); -- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, -+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); -+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, - &data[1]); -- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, -+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, - &data[2]); -- if (ret_val) -- hw_dbg("MAC Addr not found in iNVM\n"); -+ if (ret_val != E1000_SUCCESS) -+ DEBUGOUT("MAC Addr not found in iNVM\n"); - break; - case NVM_INIT_CTRL_2: -- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); -- if (ret_val) { -+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); -+ if (ret_val != E1000_SUCCESS) { - *data = NVM_INIT_CTRL_2_DEFAULT_I211; -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - } - break; - case NVM_INIT_CTRL_4: -- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); -- if (ret_val) { -+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); -+ if (ret_val != E1000_SUCCESS) { - *data = NVM_INIT_CTRL_4_DEFAULT_I211; -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - } - break; - case NVM_LED_1_CFG: -- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); -- if (ret_val) { -+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); -+ if (ret_val != E1000_SUCCESS) { - *data = NVM_LED_1_CFG_DEFAULT_I211; -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - } - break; - case NVM_LED_0_2_CFG: -- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); -- if (ret_val) { -+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); -+ if (ret_val != 
E1000_SUCCESS) { - *data = NVM_LED_0_2_CFG_DEFAULT_I211; -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - } - break; - case NVM_ID_LED_SETTINGS: -- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); -- if (ret_val) { -+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); -+ if (ret_val != E1000_SUCCESS) { - *data = ID_LED_RESERVED_FFFF; -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - } - break; - case NVM_SUB_DEV_ID: -@@ -446,7 +471,7 @@ - *data = hw->vendor_id; - break; - default: -- hw_dbg("NVM word 0x%02x is not mapped.\n", offset); -+ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); - *data = NVM_RESERVED_WORD; - break; - } -@@ -454,14 +479,15 @@ - } - - /** -- * igb_read_invm_version - Reads iNVM version and image type -+ * e1000_read_invm_version - Reads iNVM version and image type - * @hw: pointer to the HW structure - * @invm_ver: version structure for the version read - * - * Reads iNVM version and image type. - **/ --s32 igb_read_invm_version(struct e1000_hw *hw, -- struct e1000_fw_version *invm_ver) { -+s32 e1000_read_invm_version(struct e1000_hw *hw, -+ struct e1000_fw_version *invm_ver) -+{ - u32 *record = NULL; - u32 *next_record = NULL; - u32 i = 0; -@@ -472,9 +498,11 @@ - s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; - u16 version = 0; - -+ DEBUGFUNC("e1000_read_invm_version"); -+ - /* Read iNVM memory */ - for (i = 0; i < E1000_INVM_SIZE; i++) { -- invm_dword = rd32(E1000_INVM_DATA_REG(i)); -+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); - buffer[i] = invm_dword; - } - -@@ -486,17 +514,18 @@ - /* Check if we have first version location used */ - if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { - version = 0; -- status = 0; -+ status = E1000_SUCCESS; - break; - } - /* Check if we have second version location used */ - else if ((i == 1) && - ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { - version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; -- status = 0; -+ status = E1000_SUCCESS; - break; - } -- /* Check if we have odd version location -+ /* -+ * Check if we have odd version location - * used and it is the last one used - */ - else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && -@@ -504,21 +533,22 @@ - (i != 1))) { - version = (*next_record & E1000_INVM_VER_FIELD_TWO) - >> 13; -- status = 0; -+ status = E1000_SUCCESS; - break; - } -- /* Check if we have even version location -+ /* -+ * Check if we have even version location - * used and it is the last one used - */ - else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && - ((*record & 0x3) == 0)) { - version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; -- status = 0; -+ status = E1000_SUCCESS; - break; - } - } - -- if (!status) { -+ if (status == E1000_SUCCESS) { - invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) - >> E1000_INVM_MAJOR_SHIFT; - invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; -@@ -531,7 +561,7 @@ - /* Check if we have image type in first location used */ - if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { - invm_ver->invm_img_type = 0; -- status = 0; -+ status = E1000_SUCCESS; - break; - } - /* Check if we have image type in first location used */ -@@ -540,7 +570,7 @@ - ((((*record & 0x3) != 0) && (i != 1)))) { - invm_ver->invm_img_type = - (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; -- status = 0; -+ status = E1000_SUCCESS; - break; - } - } -@@ -548,27 +578,30 @@ - } - - /** -- * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum -+ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum - * @hw: pointer to the HW 
structure - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ --static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) -+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) - { -- s32 status = 0; -+ s32 status = E1000_SUCCESS; - s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); - -- if (!(hw->nvm.ops.acquire(hw))) { -+ DEBUGFUNC("e1000_validate_nvm_checksum_i210"); -+ -+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { - -- /* Replace the read function with semaphore grabbing with -+ /* -+ * Replace the read function with semaphore grabbing with - * the one that skips this for a while. - * We have semaphore taken already here. - */ - read_op_ptr = hw->nvm.ops.read; -- hw->nvm.ops.read = igb_read_nvm_eerd; -+ hw->nvm.ops.read = e1000_read_nvm_eerd; - -- status = igb_validate_nvm_checksum(hw); -+ status = e1000_validate_nvm_checksum_generic(hw); - - /* Revert original read operation. */ - hw->nvm.ops.read = read_op_ptr; -@@ -581,147 +614,208 @@ - return status; - } - -+ - /** -- * igb_update_nvm_checksum_i210 - Update EEPROM checksum -+ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. Next commit EEPROM data onto the Flash. - **/ --static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) -+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - -- /* Read the first word from the EEPROM. If this times out or fails, do -+ DEBUGFUNC("e1000_update_nvm_checksum_i210"); -+ -+ /* -+ * Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ -- ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); -- if (ret_val) { -- hw_dbg("EEPROM read failed\n"); -+ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); -+ if (ret_val != E1000_SUCCESS) { -+ DEBUGOUT("EEPROM read failed\n"); - goto out; - } - -- if (!(hw->nvm.ops.acquire(hw))) { -- /* Do not use hw->nvm.ops.write, hw->nvm.ops.read -+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { -+ /* -+ * Do not use hw->nvm.ops.write, hw->nvm.ops.read - * because we do not want to take the synchronization - * semaphores twice here. 
- */ - - for (i = 0; i < NVM_CHECKSUM_REG; i++) { -- ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); -+ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); - if (ret_val) { - hw->nvm.ops.release(hw); -- hw_dbg("NVM Read Error while updating checksum.\n"); -+ DEBUGOUT("NVM Read Error while updating checksum.\n"); - goto out; - } - checksum += nvm_data; - } - checksum = (u16) NVM_SUM - checksum; -- ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, -+ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, - &checksum); -- if (ret_val) { -+ if (ret_val != E1000_SUCCESS) { - hw->nvm.ops.release(hw); -- hw_dbg("NVM Write Error while updating checksum.\n"); -+ DEBUGOUT("NVM Write Error while updating checksum.\n"); - goto out; - } - - hw->nvm.ops.release(hw); - -- ret_val = igb_update_flash_i210(hw); -+ ret_val = e1000_update_flash_i210(hw); - } else { -- ret_val = -E1000_ERR_SWFW_SYNC; -+ ret_val = E1000_ERR_SWFW_SYNC; -+ } -+out: -+ return ret_val; -+} -+ -+/** -+ * e1000_get_flash_presence_i210 - Check if flash device is detected. -+ * @hw: pointer to the HW structure -+ * -+ **/ -+bool e1000_get_flash_presence_i210(struct e1000_hw *hw) -+{ -+ u32 eec = 0; -+ bool ret_val = false; -+ -+ DEBUGFUNC("e1000_get_flash_presence_i210"); -+ -+ eec = E1000_READ_REG(hw, E1000_EECD); -+ -+ if (eec & E1000_EECD_FLASH_DETECTED_I210) -+ ret_val = true; -+ -+ return ret_val; -+} -+ -+/** -+ * e1000_update_flash_i210 - Commit EEPROM to the flash -+ * @hw: pointer to the HW structure -+ * -+ **/ -+s32 e1000_update_flash_i210(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ u32 flup; -+ -+ DEBUGFUNC("e1000_update_flash_i210"); -+ -+ ret_val = e1000_pool_flash_update_done_i210(hw); -+ if (ret_val == -E1000_ERR_NVM) { -+ DEBUGOUT("Flash update time out\n"); -+ goto out; - } -+ -+ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; -+ E1000_WRITE_REG(hw, E1000_EECD, flup); -+ -+ ret_val = e1000_pool_flash_update_done_i210(hw); -+ if (ret_val == E1000_SUCCESS) -+ DEBUGOUT("Flash update complete\n"); -+ else -+ DEBUGOUT("Flash update time out\n"); -+ - out: - return ret_val; - } - - /** -- * igb_pool_flash_update_done_i210 - Pool FLUDONE status. -+ * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. - * @hw: pointer to the HW structure - * - **/ --static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) -+s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) - { - s32 ret_val = -E1000_ERR_NVM; - u32 i, reg; - -+ DEBUGFUNC("e1000_pool_flash_update_done_i210"); -+ - for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { -- reg = rd32(E1000_EECD); -+ reg = E1000_READ_REG(hw, E1000_EECD); - if (reg & E1000_EECD_FLUDONE_I210) { -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - break; - } -- udelay(5); -+ usec_delay(5); - } - - return ret_val; - } - - /** -- * igb_get_flash_presence_i210 - Check if flash device is detected. -+ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers - * @hw: pointer to the HW structure - * -+ * Initialize the i210/i211 NVM parameters and function pointers. 
- **/ --bool igb_get_flash_presence_i210(struct e1000_hw *hw) -+static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) - { -- u32 eec = 0; -- bool ret_val = false; -+ s32 ret_val; -+ struct e1000_nvm_info *nvm = &hw->nvm; - -- eec = rd32(E1000_EECD); -- if (eec & E1000_EECD_FLASH_DETECTED_I210) -- ret_val = true; -+ DEBUGFUNC("e1000_init_nvm_params_i210"); - -+ ret_val = e1000_init_nvm_params_82575(hw); -+ nvm->ops.acquire = e1000_acquire_nvm_i210; -+ nvm->ops.release = e1000_release_nvm_i210; -+ nvm->ops.valid_led_default = e1000_valid_led_default_i210; -+ if (e1000_get_flash_presence_i210(hw)) { -+ hw->nvm.type = e1000_nvm_flash_hw; -+ nvm->ops.read = e1000_read_nvm_srrd_i210; -+ nvm->ops.write = e1000_write_nvm_srwr_i210; -+ nvm->ops.validate = e1000_validate_nvm_checksum_i210; -+ nvm->ops.update = e1000_update_nvm_checksum_i210; -+ } else { -+ hw->nvm.type = e1000_nvm_invm; -+ nvm->ops.read = e1000_read_invm_i210; -+ nvm->ops.write = e1000_null_write_nvm; -+ nvm->ops.validate = e1000_null_ops_generic; -+ nvm->ops.update = e1000_null_ops_generic; -+ } - return ret_val; - } - - /** -- * igb_update_flash_i210 - Commit EEPROM to the flash -+ * e1000_init_function_pointers_i210 - Init func ptrs. - * @hw: pointer to the HW structure - * -+ * Called to initialize all function pointers and parameters. - **/ --static s32 igb_update_flash_i210(struct e1000_hw *hw) -+void e1000_init_function_pointers_i210(struct e1000_hw *hw) - { -- s32 ret_val = 0; -- u32 flup; -- -- ret_val = igb_pool_flash_update_done_i210(hw); -- if (ret_val == -E1000_ERR_NVM) { -- hw_dbg("Flash update time out\n"); -- goto out; -- } -+ e1000_init_function_pointers_82575(hw); -+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210; - -- flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; -- wr32(E1000_EECD, flup); -- -- ret_val = igb_pool_flash_update_done_i210(hw); -- if (ret_val) -- hw_dbg("Flash update complete\n"); -- else -- hw_dbg("Flash update time out\n"); -- --out: -- return ret_val; -+ return; - } - - /** -- * igb_valid_led_default_i210 - Verify a valid default LED config -+ * e1000_valid_led_default_i210 - Verify a valid default LED config - * @hw: pointer to the HW structure - * @data: pointer to the NVM (EEPROM) - * - * Read the EEPROM for the current default LED configuration. If the - * LED configuration is not valid, set to a valid LED configuration. 
- **/ --s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) -+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) - { - s32 ret_val; - -+ DEBUGFUNC("e1000_valid_led_default_i210"); -+ - ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -+ DEBUGOUT("NVM Read Error\n"); - goto out; - } - -@@ -741,17 +835,19 @@ - } - - /** -- * __igb_access_xmdio_reg - Read/write XMDIO register -+ * __e1000_access_xmdio_reg - Read/write XMDIO register - * @hw: pointer to the HW structure - * @address: XMDIO address to program - * @dev_addr: device address to program - * @data: pointer to value to read/write from/to the XMDIO address - * @read: boolean flag to indicate read or write - **/ --static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, -- u8 dev_addr, u16 *data, bool read) -+static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, -+ u8 dev_addr, u16 *data, bool read) - { -- s32 ret_val = 0; -+ s32 ret_val; -+ -+ DEBUGFUNC("__e1000_access_xmdio_reg"); - - ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); - if (ret_val) -@@ -782,67 +878,41 @@ - } - - /** -- * igb_read_xmdio_reg - Read XMDIO register -+ * e1000_read_xmdio_reg - Read XMDIO register - * @hw: pointer to the HW structure - * @addr: XMDIO address to program - * @dev_addr: device address to program - * @data: value to be read from the EMI address - **/ --s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) -+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) - { -- return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); -+ DEBUGFUNC("e1000_read_xmdio_reg"); -+ -+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); - } - - /** -- * igb_write_xmdio_reg - Write XMDIO register -+ * e1000_write_xmdio_reg - Write XMDIO register - * @hw: pointer to the HW structure - * @addr: XMDIO address to program - * @dev_addr: device address to program - * @data: value to be written to the XMDIO address - **/ --s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) --{ -- return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); --} -- --/** -- * igb_init_nvm_params_i210 - Init NVM func ptrs. -- * @hw: pointer to the HW structure -- **/ --s32 igb_init_nvm_params_i210(struct e1000_hw *hw) -+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) - { -- s32 ret_val = 0; -- struct e1000_nvm_info *nvm = &hw->nvm; -+ DEBUGFUNC("e1000_read_xmdio_reg"); - -- nvm->ops.acquire = igb_acquire_nvm_i210; -- nvm->ops.release = igb_release_nvm_i210; -- nvm->ops.valid_led_default = igb_valid_led_default_i210; -- -- /* NVM Function Pointers */ -- if (igb_get_flash_presence_i210(hw)) { -- hw->nvm.type = e1000_nvm_flash_hw; -- nvm->ops.read = igb_read_nvm_srrd_i210; -- nvm->ops.write = igb_write_nvm_srwr_i210; -- nvm->ops.validate = igb_validate_nvm_checksum_i210; -- nvm->ops.update = igb_update_nvm_checksum_i210; -- } else { -- hw->nvm.type = e1000_nvm_invm; -- nvm->ops.read = igb_read_invm_i210; -- nvm->ops.write = NULL; -- nvm->ops.validate = NULL; -- nvm->ops.update = NULL; -- } -- return ret_val; -+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); - } - - /** -- * igb_pll_workaround_i210 -+ * e1000_pll_workaround_i210 - * @hw: pointer to the HW structure - * - * Works around an errata in the PLL circuit where it occasionally - * provides the wrong clock frequency after power up. 
- **/ --s32 igb_pll_workaround_i210(struct e1000_hw *hw) -+static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) - { - s32 ret_val; - u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; -@@ -850,53 +920,104 @@ - int i; - - /* Get and set needed register values */ -- wuc = rd32(E1000_WUC); -- mdicnfg = rd32(E1000_MDICNFG); -+ wuc = E1000_READ_REG(hw, E1000_WUC); -+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); - reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; -- wr32(E1000_MDICNFG, reg_val); -+ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); - - /* Get data from NVM, or set default */ -- ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, -- &nvm_word); -- if (ret_val) -+ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, -+ &nvm_word); -+ if (ret_val != E1000_SUCCESS) - nvm_word = E1000_INVM_DEFAULT_AL; - tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; - for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { - /* check current state directly from internal PHY */ -- igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | -+ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | - E1000_PHY_PLL_FREQ_REG), &phy_word); - if ((phy_word & E1000_PHY_PLL_UNCONF) - != E1000_PHY_PLL_UNCONF) { -- ret_val = 0; -+ ret_val = E1000_SUCCESS; - break; - } else { - ret_val = -E1000_ERR_PHY; - } - /* directly reset the internal PHY */ -- ctrl = rd32(E1000_CTRL); -- wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); - -- ctrl_ext = rd32(E1000_CTRL_EXT); -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); - ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); -- wr32(E1000_CTRL_EXT, ctrl_ext); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); - -- wr32(E1000_WUC, 0); -+ E1000_WRITE_REG(hw, E1000_WUC, 0); - reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); -- wr32(E1000_EEARBC_I210, reg_val); -+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); - -- igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); -+ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); - pci_word |= E1000_PCI_PMCSR_D3; -- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); -- usleep_range(1000, 2000); -+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); -+ msec_delay(1); - pci_word &= ~E1000_PCI_PMCSR_D3; -- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); -+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); - reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); -- wr32(E1000_EEARBC_I210, reg_val); -+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); - - /* restore WUC register */ -- wr32(E1000_WUC, wuc); -+ E1000_WRITE_REG(hw, E1000_WUC, wuc); - } - /* restore MDICNFG setting */ -- wr32(E1000_MDICNFG, mdicnfg); -+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); -+ return ret_val; -+} -+ -+/** -+ * e1000_get_cfg_done_i210 - Read config done bit -+ * @hw: pointer to the HW structure -+ * -+ * Read the management control register for the config done bit for -+ * completion status. NOTE: silicon which is EEPROM-less will fail trying -+ * to read the config done bit, so an error is *ONLY* logged and returns -+ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon -+ * would not be able to be reset or change link. 
-+ **/ -+static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) -+{ -+ s32 timeout = PHY_CFG_TIMEOUT; -+ u32 mask = E1000_NVM_CFG_DONE_PORT_0; -+ -+ DEBUGFUNC("e1000_get_cfg_done_i210"); -+ -+ while (timeout) { -+ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) -+ break; -+ msec_delay(1); -+ timeout--; -+ } -+ if (!timeout) -+ DEBUGOUT("MNG configuration cycle has not completed.\n"); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_init_hw_i210 - Init hw for I210/I211 -+ * @hw: pointer to the HW structure -+ * -+ * Called to initialize hw for i210 hw family. -+ **/ -+s32 e1000_init_hw_i210(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_init_hw_i210"); -+ if ((hw->mac.type >= e1000_i210) && -+ !(e1000_get_flash_presence_i210(hw))) { -+ ret_val = e1000_pll_workaround_i210(hw); -+ if (ret_val != E1000_SUCCESS) -+ return ret_val; -+ } -+ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; -+ ret_val = e1000_init_hw_82575(hw); - return ret_val; - } -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h ---- a/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,39 +1,47 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_I210_H_ - #define _E1000_I210_H_ - --s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); --void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); --s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); --s32 igb_read_invm_version(struct e1000_hw *hw, -- struct e1000_fw_version *invm_ver); --s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); --s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); --s32 igb_init_nvm_params_i210(struct e1000_hw *hw); --bool igb_get_flash_presence_i210(struct e1000_hw *hw); --s32 igb_pll_workaround_i210(struct e1000_hw *hw); -+bool e1000_get_flash_presence_i210(struct e1000_hw *hw); -+s32 e1000_update_flash_i210(struct e1000_hw *hw); -+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); -+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); -+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, -+ u16 words, u16 *data); -+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, -+ u16 words, u16 *data); -+s32 e1000_read_invm_version(struct e1000_hw *hw, -+ struct e1000_fw_version *invm_ver); -+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); -+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); -+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, -+ u16 *data); -+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, -+ u16 data); -+s32 e1000_init_hw_i210(struct e1000_hw *hw); - - #define E1000_STM_OPCODE 0xDB00 - #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 -@@ -56,15 +64,15 @@ - - #define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 - #define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 --#define E1000_INVM_ULT_BYTES_SIZE 8 --#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 --#define E1000_INVM_VER_FIELD_ONE 0x1FF8 --#define E1000_INVM_VER_FIELD_TWO 0x7FE000 --#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 -- --#define E1000_INVM_MAJOR_MASK 0x3F0 --#define E1000_INVM_MINOR_MASK 0xF --#define E1000_INVM_MAJOR_SHIFT 4 -+#define E1000_INVM_ULT_BYTES_SIZE 8 -+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 -+#define E1000_INVM_VER_FIELD_ONE 0x1FF8 -+#define E1000_INVM_VER_FIELD_TWO 0x7FE000 -+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 -+ -+#define E1000_INVM_MAJOR_MASK 0x3F0 -+#define E1000_INVM_MINOR_MASK 0xF -+#define E1000_INVM_MAJOR_SHIFT 4 - - #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ -@@ -73,7 +81,7 @@ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_OFF1_ON2)) - --/* NVM offset defaults for i211 device */ -+/* NVM offset defaults for I211 devices */ - #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 - #define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 - #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c ---- a/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,68 +1,179 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. 
-- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "e1000_api.h" -+ -+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); -+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); -+static void e1000_config_collision_dist_generic(struct e1000_hw *hw); -+static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); -+ -+/** -+ * e1000_init_mac_ops_generic - Initialize MAC function pointers -+ * @hw: pointer to the HW structure - * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+ * Setups up the function pointers to no-op functions -+ **/ -+void e1000_init_mac_ops_generic(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ DEBUGFUNC("e1000_init_mac_ops_generic"); -+ -+ /* General Setup */ -+ mac->ops.init_params = e1000_null_ops_generic; -+ mac->ops.init_hw = e1000_null_ops_generic; -+ mac->ops.reset_hw = e1000_null_ops_generic; -+ mac->ops.setup_physical_interface = e1000_null_ops_generic; -+ mac->ops.get_bus_info = e1000_null_ops_generic; -+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; -+ mac->ops.read_mac_addr = igb_e1000_read_mac_addr_generic; -+ mac->ops.config_collision_dist = e1000_config_collision_dist_generic; -+ mac->ops.clear_hw_cntrs = e1000_null_mac_generic; -+ /* LED */ -+ mac->ops.cleanup_led = e1000_null_ops_generic; -+ mac->ops.setup_led = e1000_null_ops_generic; -+ mac->ops.blink_led = e1000_null_ops_generic; -+ mac->ops.led_on = e1000_null_ops_generic; -+ mac->ops.led_off = e1000_null_ops_generic; -+ /* LINK */ -+ mac->ops.setup_link = e1000_null_ops_generic; -+ mac->ops.get_link_up_info = e1000_null_link_info; -+ mac->ops.check_for_link = e1000_null_ops_generic; -+ /* Management */ -+ mac->ops.check_mng_mode = e1000_null_mng_mode; -+ /* VLAN, MC, etc. 
*/ -+ mac->ops.update_mc_addr_list = e1000_null_update_mc; -+ mac->ops.clear_vfta = e1000_null_mac_generic; -+ mac->ops.write_vfta = e1000_null_write_vfta; -+ mac->ops.rar_set = e1000_rar_set_generic; -+ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; -+} -+ -+/** -+ * e1000_null_ops_generic - No-op function, returns 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_null_ops_generic"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_null_mac_generic - No-op function, return void -+ * @hw: pointer to the HW structure -+ **/ -+void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_null_mac_generic"); -+ return; -+} - --#include --#include --#include --#include --#include -+/** -+ * e1000_null_link_info - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) -+{ -+ DEBUGFUNC("e1000_null_link_info"); -+ return E1000_SUCCESS; -+} - --#include "e1000_mac.h" -+/** -+ * e1000_null_mng_mode - No-op function, return false -+ * @hw: pointer to the HW structure -+ **/ -+bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_null_mng_mode"); -+ return false; -+} - --#include "igb.h" -+/** -+ * e1000_null_update_mc - No-op function, return void -+ * @hw: pointer to the HW structure -+ **/ -+void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, -+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) -+{ -+ DEBUGFUNC("e1000_null_update_mc"); -+ return; -+} - --static s32 igb_set_default_fc(struct e1000_hw *hw); --static s32 igb_set_fc_watermarks(struct e1000_hw *hw); -+/** -+ * e1000_null_write_vfta - No-op function, return void -+ * @hw: pointer to the HW structure -+ **/ -+void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, -+ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) -+{ -+ DEBUGFUNC("e1000_null_write_vfta"); -+ return; -+} - - /** -- * igb_get_bus_info_pcie - Get PCIe bus information -+ * e1000_null_rar_set - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, -+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) -+{ -+ DEBUGFUNC("e1000_null_rar_set"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_get_bus_info_pcie_generic - Get PCIe bus information - * @hw: pointer to the HW structure - * - * Determines and stores the system bus information for a particular - * network interface. The following bus information is determined and stored: - * bus speed, bus width, type (PCIe), and PCIe function. 
- **/ --s32 igb_get_bus_info_pcie(struct e1000_hw *hw) -+s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) - { -+ struct e1000_mac_info *mac = &hw->mac; - struct e1000_bus_info *bus = &hw->bus; - s32 ret_val; -- u32 reg; - u16 pcie_link_status; - -+ DEBUGFUNC("igb_e1000_get_bus_info_pcie_generic"); -+ - bus->type = e1000_bus_type_pci_express; - -- ret_val = igb_read_pcie_cap_reg(hw, -- PCI_EXP_LNKSTA, -- &pcie_link_status); -+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, -+ &pcie_link_status); - if (ret_val) { - bus->width = e1000_bus_width_unknown; - bus->speed = e1000_bus_speed_unknown; - } else { -- switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { -- case PCI_EXP_LNKSTA_CLS_2_5GB: -+ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { -+ case PCIE_LINK_SPEED_2500: - bus->speed = e1000_bus_speed_2500; - break; -- case PCI_EXP_LNKSTA_CLS_5_0GB: -+ case PCIE_LINK_SPEED_5000: - bus->speed = e1000_bus_speed_5000; - break; - default: -@@ -71,75 +182,70 @@ - } - - bus->width = (enum e1000_bus_width)((pcie_link_status & -- PCI_EXP_LNKSTA_NLW) >> -- PCI_EXP_LNKSTA_NLW_SHIFT); -+ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); - } - -- reg = rd32(E1000_STATUS); -- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; -+ mac->ops.set_lan_id(hw); - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_clear_vfta - Clear VLAN filter table -+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices -+ * - * @hw: pointer to the HW structure - * -- * Clears the register array which contains the VLAN filter table by -- * setting all the values to 0. -+ * Determines the LAN function id by reading memory-mapped registers -+ * and swaps the port value if requested. - **/ --void igb_clear_vfta(struct e1000_hw *hw) -+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) - { -- u32 offset; -+ struct e1000_bus_info *bus = &hw->bus; -+ u32 reg; - -- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { -- array_wr32(E1000_VFTA, offset, 0); -- wrfl(); -- } -+ /* The status register reports the correct function number -+ * for the device regardless of function swap state. -+ */ -+ reg = E1000_READ_REG(hw, E1000_STATUS); -+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; - } - - /** -- * igb_write_vfta - Write value to VLAN filter table -+ * igb_e1000_set_lan_id_single_port - Set LAN id for a single port device - * @hw: pointer to the HW structure -- * @offset: register offset in VLAN filter table -- * @value: register value written to VLAN filter table - * -- * Writes value at the given offset in the register array which stores -- * the VLAN filter table. -+ * Sets the LAN function id to zero for a single port device. - **/ --static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw) - { -- array_wr32(E1000_VFTA, offset, value); -- wrfl(); --} -+ struct e1000_bus_info *bus = &hw->bus; - --/* Due to a hw errata, if the host tries to configure the VFTA register -- * while performing queries from the BMC or DMA, then the VFTA in some -- * cases won't be written. -- */ -+ bus->func = 0; -+} - - /** -- * igb_clear_vfta_i350 - Clear VLAN filter table -+ * igb_e1000_clear_vfta_generic - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. 
- **/ --void igb_clear_vfta_i350(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_clear_vfta_generic(struct e1000_hw *hw) - { - u32 offset; -- int i; - -- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { -- for (i = 0; i < 10; i++) -- array_wr32(E1000_VFTA, offset, 0); -+ DEBUGFUNC("igb_e1000_clear_vfta_generic"); - -- wrfl(); -+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { -+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); -+ E1000_WRITE_FLUSH(hw); - } - } - - /** -- * igb_write_vfta_i350 - Write value to VLAN filter table -+ * igb_e1000_write_vfta_generic - Write value to VLAN filter table - * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table -@@ -147,113 +253,85 @@ - * Writes value at the given offset in the register array which stores - * the VLAN filter table. - **/ --static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) - { -- int i; -- -- for (i = 0; i < 10; i++) -- array_wr32(E1000_VFTA, offset, value); -+ DEBUGFUNC("igb_e1000_write_vfta_generic"); - -- wrfl(); -+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); -+ E1000_WRITE_FLUSH(hw); - } - - /** -- * igb_init_rx_addrs - Initialize receive address's -+ * e1000_init_rx_addrs_generic - Initialize receive address's - * @hw: pointer to the HW structure - * @rar_count: receive address registers - * -- * Setups the receive address registers by setting the base receive address -+ * Setup the receive address registers by setting the base receive address - * register to the devices MAC address and clearing all the other receive - * address registers to 0. 
- **/ --void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) -+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) - { - u32 i; -- u8 mac_addr[ETH_ALEN] = {0}; -+ u8 mac_addr[ETH_ADDR_LEN] = {0}; -+ -+ DEBUGFUNC("e1000_init_rx_addrs_generic"); - - /* Setup the receive address */ -- hw_dbg("Programming MAC Address into RAR[0]\n"); -+ DEBUGOUT("Programming MAC Address into RAR[0]\n"); - - hw->mac.ops.rar_set(hw, hw->mac.addr, 0); - - /* Zero out the other (rar_entry_count - 1) receive addresses */ -- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); -+ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); - for (i = 1; i < rar_count; i++) - hw->mac.ops.rar_set(hw, mac_addr, i); - } - - /** -- * igb_vfta_set - enable or disable vlan in VLAN filter table -- * @hw: pointer to the HW structure -- * @vid: VLAN id to add or remove -- * @add: if true add filter, if false remove -- * -- * Sets or clears a bit in the VLAN filter table array based on VLAN id -- * and if we are adding or removing the filter -- **/ --s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) --{ -- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; -- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); -- u32 vfta; -- struct igb_adapter *adapter = hw->back; -- s32 ret_val = 0; -- -- vfta = adapter->shadow_vfta[index]; -- -- /* bit was set/cleared before we started */ -- if ((!!(vfta & mask)) == add) { -- ret_val = -E1000_ERR_CONFIG; -- } else { -- if (add) -- vfta |= mask; -- else -- vfta &= ~mask; -- } -- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) -- igb_write_vfta_i350(hw, index, vfta); -- else -- igb_write_vfta(hw, index, vfta); -- adapter->shadow_vfta[index] = vfta; -- -- return ret_val; --} -- --/** -- * igb_check_alt_mac_addr - Check for alternate MAC addr -+ * igb_e1000_check_alt_mac_addr_generic - Check for alternate MAC addr - * @hw: pointer to the HW structure - * - * Checks the nvm for an alternate MAC address. An alternate MAC address - * can be setup by pre-boot software and must be treated like a permanent -- * address and must override the actual permanent MAC address. If an -- * alternate MAC address is found it is saved in the hw struct and -- * programmed into RAR0 and the function returns success, otherwise the -- * function returns an error. -+ * address and must override the actual permanent MAC address. If an -+ * alternate MAC address is found it is programmed into RAR0, replacing -+ * the permanent address that was installed into RAR0 by the Si on reset. -+ * This function will return SUCCESS unless it encounters an error while -+ * reading the EEPROM. - **/ --s32 igb_check_alt_mac_addr(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) - { - u32 i; -- s32 ret_val = 0; -+ s32 ret_val; - u16 offset, nvm_alt_mac_addr_offset, nvm_data; -- u8 alt_mac_addr[ETH_ALEN]; -+ u8 alt_mac_addr[ETH_ADDR_LEN]; -+ -+ DEBUGFUNC("igb_e1000_check_alt_mac_addr_generic"); -+ -+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); -+ if (ret_val) -+ return ret_val; - - /* Alternate MAC address is handled by the option ROM for 82580 - * and newer. SW support not required. 
- */ - if (hw->mac.type >= e1000_82580) -- goto out; -+ return E1000_SUCCESS; - - ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, -- &nvm_alt_mac_addr_offset); -+ &nvm_alt_mac_addr_offset); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - - if ((nvm_alt_mac_addr_offset == 0xFFFF) || - (nvm_alt_mac_addr_offset == 0x0000)) - /* There is no Alternate MAC Address */ -- goto out; -+ return E1000_SUCCESS; - - if (hw->bus.func == E1000_FUNC_1) - nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; -@@ -262,12 +340,12 @@ - - if (hw->bus.func == E1000_FUNC_3) - nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; -- for (i = 0; i < ETH_ALEN; i += 2) { -+ for (i = 0; i < ETH_ADDR_LEN; i += 2) { - offset = nvm_alt_mac_addr_offset + (i >> 1); - ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - - alt_mac_addr[i] = (u8)(nvm_data & 0xFF); -@@ -275,9 +353,9 @@ - } - - /* if multicast bit is set, the alternate address will not be used */ -- if (is_multicast_ether_addr(alt_mac_addr)) { -- hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); -- goto out; -+ if (alt_mac_addr[0] & 0x01) { -+ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); -+ return E1000_SUCCESS; - } - - /* We have a valid alternate MAC address, and we want to treat it the -@@ -286,12 +364,11 @@ - */ - hw->mac.ops.rar_set(hw, alt_mac_addr, 0); - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_rar_set - Set receive address register -+ * e1000_rar_set_generic - Set receive address register - * @hw: pointer to the HW structure - * @addr: pointer to the receive address - * @index: receive address array register -@@ -299,16 +376,17 @@ - * Sets the receive address array register at index to the address passed - * in by addr. - **/ --void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) -+static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) - { - u32 rar_low, rar_high; - -+ DEBUGFUNC("e1000_rar_set_generic"); -+ - /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian - */ -- rar_low = ((u32) addr[0] | -- ((u32) addr[1] << 8) | -- ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); -+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | -+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - -@@ -320,60 +398,29 @@ - * a single burst write, which will malfunction on some parts. - * The flushes avoid this. - */ -- wr32(E1000_RAL(index), rar_low); -- wrfl(); -- wr32(E1000_RAH(index), rar_high); -- wrfl(); --} -+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); -+ E1000_WRITE_FLUSH(hw); -+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); -+ E1000_WRITE_FLUSH(hw); - --/** -- * igb_mta_set - Set multicast filter table address -- * @hw: pointer to the HW structure -- * @hash_value: determines the MTA register and bit to set -- * -- * The multicast table address is a register array of 32-bit registers. -- * The hash_value is used to determine what register the bit is in, the -- * current value is read, the new bit is OR'd in and the new value is -- * written back into the register. -- **/ --void igb_mta_set(struct e1000_hw *hw, u32 hash_value) --{ -- u32 hash_bit, hash_reg, mta; -- -- /* The MTA is a register array of 32-bit registers. 
It is -- * treated like an array of (32*mta_reg_count) bits. We want to -- * set bit BitArray[hash_value]. So we figure out what register -- * the bit is in, read it, OR in the new bit, then write -- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a -- * mask to bits 31:5 of the hash value which gives us the -- * register we're modifying. The hash bit within that register -- * is determined by the lower 5 bits of the hash value. -- */ -- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); -- hash_bit = hash_value & 0x1F; -- -- mta = array_rd32(E1000_MTA, hash_reg); -- -- mta |= (1 << hash_bit); -- -- array_wr32(E1000_MTA, hash_reg, mta); -- wrfl(); -+ return E1000_SUCCESS; - } - - /** -- * igb_hash_mc_addr - Generate a multicast hash value -+ * e1000_hash_mc_addr_generic - Generate a multicast hash value - * @hw: pointer to the HW structure - * @mc_addr: pointer to a multicast address - * - * Generates a multicast address hash value which is used to determine -- * the multicast filter table array address and new table value. See -- * igb_mta_set() -+ * the multicast filter table array address and new table value. - **/ --static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) -+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) - { - u32 hash_value, hash_mask; - u8 bit_shift = 0; - -+ DEBUGFUNC("e1000_hash_mc_addr_generic"); -+ - /* Register count multiplied by bits per register */ - hash_mask = (hw->mac.mta_reg_count * 32) - 1; - -@@ -401,7 +448,7 @@ - * values resulting from each mc_filter_type... - * [0] [1] [2] [3] [4] [5] - * 01 AA 00 12 34 56 -- * LSB MSB -+ * LSB MSB - * - * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 - * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 -@@ -430,7 +477,7 @@ - } - - /** -- * igb_update_mc_addr_list - Update Multicast addresses -+ * e1000_update_mc_addr_list_generic - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program -@@ -438,150 +485,412 @@ - * Updates entire Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. 
- **/ --void igb_update_mc_addr_list(struct e1000_hw *hw, -- u8 *mc_addr_list, u32 mc_addr_count) -+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, -+ u8 *mc_addr_list, u32 mc_addr_count) - { - u32 hash_value, hash_bit, hash_reg; - int i; - -+ DEBUGFUNC("e1000_update_mc_addr_list_generic"); -+ - /* clear mta_shadow */ - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - - /* update mta_shadow from mc_addr_list */ - for (i = 0; (u32) i < mc_addr_count; i++) { -- hash_value = igb_hash_mc_addr(hw, mc_addr_list); -+ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); - - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); -- mc_addr_list += (ETH_ALEN); -+ mc_addr_list += (ETH_ADDR_LEN); - } - - /* replace the entire MTA table */ - for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) -- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); -- wrfl(); -+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); -+ E1000_WRITE_FLUSH(hw); - } - - /** -- * igb_clear_hw_cntrs_base - Clear base hardware counters -+ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value -+ * @hw: pointer to the HW structure -+ * -+ * In certain situations, a system BIOS may report that the PCIx maximum -+ * memory read byte count (MMRBC) value is higher than than the actual -+ * value. We check the PCIx command register with the current PCIx status -+ * register. -+ **/ -+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) -+{ -+ u16 cmd_mmrbc; -+ u16 pcix_cmd; -+ u16 pcix_stat_hi_word; -+ u16 stat_mmrbc; -+ -+ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); -+ -+ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ -+ if (hw->bus.type != e1000_bus_type_pcix) -+ return; -+ -+ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); -+ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); -+ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> -+ PCIX_COMMAND_MMRBC_SHIFT; -+ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> -+ PCIX_STATUS_HI_MMRBC_SHIFT; -+ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) -+ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; -+ if (cmd_mmrbc > stat_mmrbc) { -+ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; -+ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; -+ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); -+ } -+} -+ -+/** -+ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters - * @hw: pointer to the HW structure - * - * Clears the base hardware counters by reading the counter registers. 
- **/ --void igb_clear_hw_cntrs_base(struct e1000_hw *hw) -+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) - { -- rd32(E1000_CRCERRS); -- rd32(E1000_SYMERRS); -- rd32(E1000_MPC); -- rd32(E1000_SCC); -- rd32(E1000_ECOL); -- rd32(E1000_MCC); -- rd32(E1000_LATECOL); -- rd32(E1000_COLC); -- rd32(E1000_DC); -- rd32(E1000_SEC); -- rd32(E1000_RLEC); -- rd32(E1000_XONRXC); -- rd32(E1000_XONTXC); -- rd32(E1000_XOFFRXC); -- rd32(E1000_XOFFTXC); -- rd32(E1000_FCRUC); -- rd32(E1000_GPRC); -- rd32(E1000_BPRC); -- rd32(E1000_MPRC); -- rd32(E1000_GPTC); -- rd32(E1000_GORCL); -- rd32(E1000_GORCH); -- rd32(E1000_GOTCL); -- rd32(E1000_GOTCH); -- rd32(E1000_RNBC); -- rd32(E1000_RUC); -- rd32(E1000_RFC); -- rd32(E1000_ROC); -- rd32(E1000_RJC); -- rd32(E1000_TORL); -- rd32(E1000_TORH); -- rd32(E1000_TOTL); -- rd32(E1000_TOTH); -- rd32(E1000_TPR); -- rd32(E1000_TPT); -- rd32(E1000_MPTC); -- rd32(E1000_BPTC); -+ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); -+ -+ E1000_READ_REG(hw, E1000_CRCERRS); -+ E1000_READ_REG(hw, E1000_SYMERRS); -+ E1000_READ_REG(hw, E1000_MPC); -+ E1000_READ_REG(hw, E1000_SCC); -+ E1000_READ_REG(hw, E1000_ECOL); -+ E1000_READ_REG(hw, E1000_MCC); -+ E1000_READ_REG(hw, E1000_LATECOL); -+ E1000_READ_REG(hw, E1000_COLC); -+ E1000_READ_REG(hw, E1000_DC); -+ E1000_READ_REG(hw, E1000_SEC); -+ E1000_READ_REG(hw, E1000_RLEC); -+ E1000_READ_REG(hw, E1000_XONRXC); -+ E1000_READ_REG(hw, E1000_XONTXC); -+ E1000_READ_REG(hw, E1000_XOFFRXC); -+ E1000_READ_REG(hw, E1000_XOFFTXC); -+ E1000_READ_REG(hw, E1000_FCRUC); -+ E1000_READ_REG(hw, E1000_GPRC); -+ E1000_READ_REG(hw, E1000_BPRC); -+ E1000_READ_REG(hw, E1000_MPRC); -+ E1000_READ_REG(hw, E1000_GPTC); -+ E1000_READ_REG(hw, E1000_GORCL); -+ E1000_READ_REG(hw, E1000_GORCH); -+ E1000_READ_REG(hw, E1000_GOTCL); -+ E1000_READ_REG(hw, E1000_GOTCH); -+ E1000_READ_REG(hw, E1000_RNBC); -+ E1000_READ_REG(hw, E1000_RUC); -+ E1000_READ_REG(hw, E1000_RFC); -+ E1000_READ_REG(hw, E1000_ROC); -+ E1000_READ_REG(hw, E1000_RJC); -+ E1000_READ_REG(hw, E1000_TORL); -+ E1000_READ_REG(hw, E1000_TORH); -+ E1000_READ_REG(hw, E1000_TOTL); -+ E1000_READ_REG(hw, E1000_TOTH); -+ E1000_READ_REG(hw, E1000_TPR); -+ E1000_READ_REG(hw, E1000_TPT); -+ E1000_READ_REG(hw, E1000_MPTC); -+ E1000_READ_REG(hw, E1000_BPTC); - } - - /** -- * igb_check_for_copper_link - Check for link (Copper) -+ * e1000_check_for_copper_link_generic - Check for link (Copper) - * @hw: pointer to the HW structure - * - * Checks to see of the link status of the hardware has changed. If a - * change in link status has been detected, then we read the PHY registers - * to get the current speed/duplex if link exists. - **/ --s32 igb_check_for_copper_link(struct e1000_hw *hw) -+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) - { - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - bool link; - -+ DEBUGFUNC("e1000_check_for_copper_link"); -+ - /* We only want to go out to the PHY registers to see if Auto-Neg - * has completed and/or if our link status has changed. The - * get_link_status flag is set upon receiving a Link Status - * Change or Rx Sequence Error interrupt. - */ -- if (!mac->get_link_status) { -- ret_val = 0; -- goto out; -- } -+ if (!mac->get_link_status) -+ return E1000_SUCCESS; - - /* First we want to see if the MII Status Register reports - * link. If so, then we want to get the current speed/duplex - * of the PHY. 
- */ -- ret_val = igb_phy_has_link(hw, 1, 0, &link); -+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) -- goto out; /* No link detected */ -+ return E1000_SUCCESS; /* No link detected */ - - mac->get_link_status = false; - - /* Check if there was DownShift, must be checked - * immediately after link-up - */ -- igb_check_downshift(hw); -+ e1000_check_downshift_generic(hw); - - /* If we are forcing speed/duplex, then we simply return since - * we have already determined whether we have link or not. - */ -- if (!mac->autoneg) { -- ret_val = -E1000_ERR_CONFIG; -- goto out; -- } -+ if (!mac->autoneg) -+ return -E1000_ERR_CONFIG; - - /* Auto-Neg is enabled. Auto Speed Detection takes care - * of MAC speed/duplex configuration. So we only need to - * configure Collision Distance in the MAC. - */ -- igb_config_collision_dist(hw); -+ mac->ops.config_collision_dist(hw); - - /* Configure Flow Control now that Auto-Neg has completed. - * First, we need to restore the desired flow control - * settings because we may have had to re-autoneg with a - * different link partner. - */ -- ret_val = igb_config_fc_after_link_up(hw); -+ ret_val = e1000_config_fc_after_link_up_generic(hw); - if (ret_val) -- hw_dbg("Error configuring flow control\n"); -+ DEBUGOUT("Error configuring flow control\n"); - --out: - return ret_val; - } - - /** -- * igb_setup_link - Setup flow control and link settings -+ * e1000_check_for_fiber_link_generic - Check for link (Fiber) -+ * @hw: pointer to the HW structure -+ * -+ * Checks for link up on the hardware. If link is not up and we have -+ * a signal, then we need to force link up. -+ **/ -+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ u32 rxcw; -+ u32 ctrl; -+ u32 status; -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_check_for_fiber_link_generic"); -+ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ rxcw = E1000_READ_REG(hw, E1000_RXCW); -+ -+ /* If we don't have link (auto-negotiation failed or link partner -+ * cannot auto-negotiate), the cable is plugged in (we have signal), -+ * and our link partner is not trying to auto-negotiate with us (we -+ * are receiving idles or data), we need to force link up. We also -+ * need to give auto-negotiation time to complete, in case the cable -+ * was just plugged in. The autoneg_failed flag does this. -+ */ -+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ -+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && -+ !(rxcw & E1000_RXCW_C)) { -+ if (!mac->autoneg_failed) { -+ mac->autoneg_failed = true; -+ return E1000_SUCCESS; -+ } -+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); -+ -+ /* Disable auto-negotiation in the TXCW register */ -+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); -+ -+ /* Force link-up and also force full-duplex. */ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ -+ /* Configure Flow Control after forcing link up. 
*/ -+ ret_val = e1000_config_fc_after_link_up_generic(hw); -+ if (ret_val) { -+ DEBUGOUT("Error configuring flow control\n"); -+ return ret_val; -+ } -+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { -+ /* If we are forcing link and we are receiving /C/ ordered -+ * sets, re-enable auto-negotiation in the TXCW register -+ * and disable forced link in the Device Control register -+ * in an attempt to auto-negotiate with our link partner. -+ */ -+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); -+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); -+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); -+ -+ mac->serdes_has_link = true; -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_check_for_serdes_link_generic - Check for link (Serdes) -+ * @hw: pointer to the HW structure -+ * -+ * Checks for link up on the hardware. If link is not up and we have -+ * a signal, then we need to force link up. -+ **/ -+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ u32 rxcw; -+ u32 ctrl; -+ u32 status; -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_check_for_serdes_link_generic"); -+ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ rxcw = E1000_READ_REG(hw, E1000_RXCW); -+ -+ /* If we don't have link (auto-negotiation failed or link partner -+ * cannot auto-negotiate), and our link partner is not trying to -+ * auto-negotiate with us (we are receiving idles or data), -+ * we need to force link up. We also need to give auto-negotiation -+ * time to complete. -+ */ -+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ -+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { -+ if (!mac->autoneg_failed) { -+ mac->autoneg_failed = true; -+ return E1000_SUCCESS; -+ } -+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); -+ -+ /* Disable auto-negotiation in the TXCW register */ -+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); -+ -+ /* Force link-up and also force full-duplex. */ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ -+ /* Configure Flow Control after forcing link up. */ -+ ret_val = e1000_config_fc_after_link_up_generic(hw); -+ if (ret_val) { -+ DEBUGOUT("Error configuring flow control\n"); -+ return ret_val; -+ } -+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { -+ /* If we are forcing link and we are receiving /C/ ordered -+ * sets, re-enable auto-negotiation in the TXCW register -+ * and disable forced link in the Device Control register -+ * in an attempt to auto-negotiate with our link partner. -+ */ -+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); -+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); -+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); -+ -+ mac->serdes_has_link = true; -+ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { -+ /* If we force link for non-auto-negotiation switch, check -+ * link status based on MAC synchronization for internal -+ * serdes media type. -+ */ -+ /* SYNCH bit and IV bit are sticky. 
*/ -+ usec_delay(10); -+ rxcw = E1000_READ_REG(hw, E1000_RXCW); -+ if (rxcw & E1000_RXCW_SYNCH) { -+ if (!(rxcw & E1000_RXCW_IV)) { -+ mac->serdes_has_link = true; -+ DEBUGOUT("SERDES: Link up - forced.\n"); -+ } -+ } else { -+ mac->serdes_has_link = false; -+ DEBUGOUT("SERDES: Link down - force failed.\n"); -+ } -+ } -+ -+ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ if (status & E1000_STATUS_LU) { -+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */ -+ usec_delay(10); -+ rxcw = E1000_READ_REG(hw, E1000_RXCW); -+ if (rxcw & E1000_RXCW_SYNCH) { -+ if (!(rxcw & E1000_RXCW_IV)) { -+ mac->serdes_has_link = true; -+ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); -+ } else { -+ mac->serdes_has_link = false; -+ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); -+ } -+ } else { -+ mac->serdes_has_link = false; -+ DEBUGOUT("SERDES: Link down - no sync.\n"); -+ } -+ } else { -+ mac->serdes_has_link = false; -+ DEBUGOUT("SERDES: Link down - autoneg failed\n"); -+ } -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_set_default_fc_generic - Set flow control default values -+ * @hw: pointer to the HW structure -+ * -+ * Read the EEPROM for the default values for flow control and store the -+ * values. -+ **/ -+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ u16 nvm_data; -+ u16 nvm_offset = 0; -+ -+ DEBUGFUNC("e1000_set_default_fc_generic"); -+ -+ /* Read and store word 0x0F of the EEPROM. This word contains bits -+ * that determine the hardware's default PAUSE (flow control) mode, -+ * a bit that determines whether the HW defaults to enabling or -+ * disabling auto-negotiation, and the direction of the -+ * SW defined pins. If there is no SW over-ride of the flow -+ * control setting, then the variable hw->fc will -+ * be initialized based on a value in the EEPROM. -+ */ -+ if (hw->mac.type == e1000_i350) { -+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); -+ ret_val = hw->nvm.ops.read(hw, -+ NVM_INIT_CONTROL2_REG + -+ nvm_offset, -+ 1, &nvm_data); -+ } else { -+ ret_val = hw->nvm.ops.read(hw, -+ NVM_INIT_CONTROL2_REG, -+ 1, &nvm_data); -+ } -+ -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; -+ } -+ -+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) -+ hw->fc.requested_mode = e1000_fc_none; -+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == -+ NVM_WORD0F_ASM_DIR) -+ hw->fc.requested_mode = e1000_fc_tx_pause; -+ else -+ hw->fc.requested_mode = e1000_fc_full; -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_setup_link_generic - Setup flow control and link settings - * @hw: pointer to the HW structure - * - * Determines which flow control settings to use, then configures flow -@@ -590,91 +899,260 @@ - * should be established. Assumes the hardware has previously been reset - * and the transmitter and receiver are not enabled. - **/ --s32 igb_setup_link(struct e1000_hw *hw) -+s32 e1000_setup_link_generic(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_setup_link_generic"); - - /* In the case of the phy reset being blocked, we already have a link. - * We do not need to set it up again. - */ -- if (igb_check_reset_block(hw)) -- goto out; -+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) -+ return E1000_SUCCESS; - - /* If requested flow control is set to default, set flow control - * based on the EEPROM flow control settings. 
- */ - if (hw->fc.requested_mode == e1000_fc_default) { -- ret_val = igb_set_default_fc(hw); -+ ret_val = e1000_set_default_fc_generic(hw); - if (ret_val) -- goto out; -+ return ret_val; - } - -- /* We want to save off the original Flow Control configuration just -- * in case we get disconnected and then reconnected into a different -- * hub or switch with different Flow Control capabilities. -+ /* Save off the requested flow control mode for use later. Depending -+ * on the link partner's capabilities, we may or may not use this mode. - */ - hw->fc.current_mode = hw->fc.requested_mode; - -- hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); -+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", -+ hw->fc.current_mode); - - /* Call the necessary media_type subroutine to configure the link. */ - ret_val = hw->mac.ops.setup_physical_interface(hw); - if (ret_val) -- goto out; -+ return ret_val; - - /* Initialize the flow control address, type, and PAUSE timer - * registers to their default values. This is done even if flow - * control is disabled, because it does not hurt anything to - * initialize these registers. - */ -- hw_dbg("Initializing the Flow Control address, type and timer regs\n"); -- wr32(E1000_FCT, FLOW_CONTROL_TYPE); -- wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); -- wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); -+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); -+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); -+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); -+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); -+ -+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); - -- wr32(E1000_FCTTV, hw->fc.pause_time); -+ return e1000_set_fc_watermarks_generic(hw); -+} - -- ret_val = igb_set_fc_watermarks(hw); -+/** -+ * e1000_commit_fc_settings_generic - Configure flow control -+ * @hw: pointer to the HW structure -+ * -+ * Write the flow control settings to the Transmit Config Word Register (TXCW) -+ * base on the flow control settings in e1000_mac_info. -+ **/ -+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ u32 txcw; - --out: -+ DEBUGFUNC("e1000_commit_fc_settings_generic"); -+ -+ /* Check for a software override of the flow control settings, and -+ * setup the device accordingly. If auto-negotiation is enabled, then -+ * software will have to set the "PAUSE" bits to the correct value in -+ * the Transmit Config Word Register (TXCW) and re-start auto- -+ * negotiation. However, if auto-negotiation is disabled, then -+ * software will have to manually configure the two flow control enable -+ * bits in the CTRL register. -+ * -+ * The possible values of the "fc" parameter are: -+ * 0: Flow control is completely disabled -+ * 1: Rx flow control is enabled (we can receive pause frames, -+ * but not send pause frames). -+ * 2: Tx flow control is enabled (we can send pause frames but we -+ * do not support receiving pause frames). -+ * 3: Both Rx and Tx flow control (symmetric) are enabled. -+ */ -+ switch (hw->fc.current_mode) { -+ case e1000_fc_none: -+ /* Flow control completely disabled by a software over-ride. */ -+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); -+ break; -+ case e1000_fc_rx_pause: -+ /* Rx Flow control is enabled and Tx Flow control is disabled -+ * by a software over-ride. 
Since there really isn't a way to -+ * advertise that we are capable of Rx Pause ONLY, we will -+ * advertise that we support both symmetric and asymmetric Rx -+ * PAUSE. Later, we will disable the adapter's ability to send -+ * PAUSE frames. -+ */ -+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); -+ break; -+ case e1000_fc_tx_pause: -+ /* Tx Flow control is enabled, and Rx Flow control is disabled, -+ * by a software over-ride. -+ */ -+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); -+ break; -+ case e1000_fc_full: -+ /* Flow control (both Rx and Tx) is enabled by a software -+ * over-ride. -+ */ -+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); -+ break; -+ default: -+ DEBUGOUT("Flow control param set incorrectly\n"); -+ return -E1000_ERR_CONFIG; -+ break; -+ } -+ -+ E1000_WRITE_REG(hw, E1000_TXCW, txcw); -+ mac->txcw = txcw; -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_poll_fiber_serdes_link_generic - Poll for link up -+ * @hw: pointer to the HW structure -+ * -+ * Polls for link up by reading the status register, if link fails to come -+ * up with auto-negotiation, then the link is forced if a signal is detected. -+ **/ -+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ u32 i, status; -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); -+ -+ /* If we have a signal (the cable is plugged in, or assumed true for -+ * serdes media) then poll for a "Link-Up" indication in the Device -+ * Status Register. Time-out if a link isn't seen in 500 milliseconds -+ * seconds (Auto-negotiation should complete in less than 500 -+ * milliseconds even if the other end is doing it in SW). -+ */ -+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { -+ msec_delay(10); -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ if (status & E1000_STATUS_LU) -+ break; -+ } -+ if (i == FIBER_LINK_UP_LIMIT) { -+ DEBUGOUT("Never got a valid link from auto-neg!!!\n"); -+ mac->autoneg_failed = true; -+ /* AutoNeg failed to achieve a link, so we'll call -+ * mac->check_for_link. This routine will force the -+ * link up if we detect a signal. This will allow us to -+ * communicate with non-autonegotiating link partners. -+ */ -+ ret_val = mac->ops.check_for_link(hw); -+ if (ret_val) { -+ DEBUGOUT("Error while checking for link\n"); -+ return ret_val; -+ } -+ mac->autoneg_failed = false; -+ } else { -+ mac->autoneg_failed = false; -+ DEBUGOUT("Valid Link Found\n"); -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes -+ * @hw: pointer to the HW structure -+ * -+ * Configures collision distance and flow control for fiber and serdes -+ * links. Upon successful setup, poll for link. -+ **/ -+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) -+{ -+ u32 ctrl; -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); -+ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ -+ /* Take the link out of reset */ -+ ctrl &= ~E1000_CTRL_LRST; -+ -+ hw->mac.ops.config_collision_dist(hw); -+ -+ ret_val = e1000_commit_fc_settings_generic(hw); -+ if (ret_val) -+ return ret_val; -+ -+ /* Since auto-negotiation is enabled, take the link out of reset (the -+ * link will be in reset, because we previously reset the chip). This -+ * will restart auto-negotiation. 
If auto-negotiation is successful -+ * then the link-up status bit will be set and the flow control enable -+ * bits (RFCE and TFCE) will be set according to their negotiated value. -+ */ -+ DEBUGOUT("Auto-negotiation enabled\n"); -+ -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ E1000_WRITE_FLUSH(hw); -+ msec_delay(1); -+ -+ /* For these adapters, the SW definable pin 1 is set when the optics -+ * detect a signal. If we have a signal, then poll for a "Link-Up" -+ * indication. -+ */ -+ if (hw->phy.media_type == e1000_media_type_internal_serdes || -+ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { -+ ret_val = e1000_poll_fiber_serdes_link_generic(hw); -+ } else { -+ DEBUGOUT("No signal detected\n"); -+ } - - return ret_val; - } - - /** -- * igb_config_collision_dist - Configure collision distance -+ * e1000_config_collision_dist_generic - Configure collision distance - * @hw: pointer to the HW structure - * - * Configures the collision distance to the default value and is used -- * during link setup. Currently no func pointer exists and all -- * implementations are handled in the generic version of this function. -+ * during link setup. - **/ --void igb_config_collision_dist(struct e1000_hw *hw) -+static void e1000_config_collision_dist_generic(struct e1000_hw *hw) - { - u32 tctl; - -- tctl = rd32(E1000_TCTL); -+ DEBUGFUNC("e1000_config_collision_dist_generic"); -+ -+ tctl = E1000_READ_REG(hw, E1000_TCTL); - - tctl &= ~E1000_TCTL_COLD; - tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; - -- wr32(E1000_TCTL, tctl); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_TCTL, tctl); -+ E1000_WRITE_FLUSH(hw); - } - - /** -- * igb_set_fc_watermarks - Set flow control high/low watermarks -+ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks - * @hw: pointer to the HW structure - * - * Sets the flow control high/low threshold (watermark) registers. If - * flow control XON frame transmission is enabled, then set XON frame -- * tansmission as well. -+ * transmission as well. - **/ --static s32 igb_set_fc_watermarks(struct e1000_hw *hw) -+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) - { -- s32 ret_val = 0; - u32 fcrtl = 0, fcrth = 0; - -+ DEBUGFUNC("e1000_set_fc_watermarks_generic"); -+ - /* Set the flow control receive threshold registers. Normally, - * these registers will be set to a default threshold that may be - * adjusted later by the driver's runtime code. However, if the -@@ -692,61 +1170,14 @@ - - fcrth = hw->fc.high_water; - } -- wr32(E1000_FCRTL, fcrtl); -- wr32(E1000_FCRTH, fcrth); -+ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); -+ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); - -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_set_default_fc - Set flow control default values -- * @hw: pointer to the HW structure -- * -- * Read the EEPROM for the default values for flow control and store the -- * values. -- **/ --static s32 igb_set_default_fc(struct e1000_hw *hw) --{ -- s32 ret_val = 0; -- u16 lan_offset; -- u16 nvm_data; -- -- /* Read and store word 0x0F of the EEPROM. This word contains bits -- * that determine the hardware's default PAUSE (flow control) mode, -- * a bit that determines whether the HW defaults to enabling or -- * disabling auto-negotiation, and the direction of the -- * SW defined pins. If there is no SW over-ride of the flow -- * control setting, then the variable hw->fc will -- * be initialized based on a value in the EEPROM. 
-- */ -- if (hw->mac.type == e1000_i350) { -- lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); -- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG -- + lan_offset, 1, &nvm_data); -- } else { -- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, -- 1, &nvm_data); -- } -- -- if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -- } -- -- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) -- hw->fc.requested_mode = e1000_fc_none; -- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == -- NVM_WORD0F_ASM_DIR) -- hw->fc.requested_mode = e1000_fc_tx_pause; -- else -- hw->fc.requested_mode = e1000_fc_full; -- --out: -- return ret_val; --} -- --/** -- * igb_force_mac_fc - Force the MAC's flow control settings -+ * e1000_force_mac_fc_generic - Force the MAC's flow control settings - * @hw: pointer to the HW structure - * - * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the -@@ -755,12 +1186,13 @@ - * autonegotiation is managed by the PHY rather than the MAC. Software must - * also configure these bits when link is forced on a fiber connection. - **/ --s32 igb_force_mac_fc(struct e1000_hw *hw) -+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) - { - u32 ctrl; -- s32 ret_val = 0; - -- ctrl = rd32(E1000_CTRL); -+ DEBUGFUNC("e1000_force_mac_fc_generic"); -+ -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - - /* Because we didn't get link via the internal auto-negotiation - * mechanism (we either forced link or we got link via PHY -@@ -776,10 +1208,10 @@ - * frames but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * frames but we do not receive pause frames). -- * 3: Both Rx and TX flow control (symmetric) is enabled. -+ * 3: Both Rx and Tx flow control (symmetric) is enabled. - * other: No other values should be possible at this point. - */ -- hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); -+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); - - switch (hw->fc.current_mode) { - case e1000_fc_none: -@@ -797,19 +1229,17 @@ - ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); - break; - default: -- hw_dbg("Flow control param set incorrectly\n"); -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ DEBUGOUT("Flow control param set incorrectly\n"); -+ return -E1000_ERR_CONFIG; - } - -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_config_fc_after_link_up - Configures flow control after link -+ * e1000_config_fc_after_link_up_generic - Configures flow control after link - * @hw: pointer to the HW structure - * - * Checks the status of auto-negotiation after link up to ensure that the -@@ -818,29 +1248,32 @@ - * and did not fail, then we configure flow control based on our link - * partner. - **/ --s32 igb_config_fc_after_link_up(struct e1000_hw *hw) -+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) - { - struct e1000_mac_info *mac = &hw->mac; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; - u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; - u16 speed, duplex; - -+ DEBUGFUNC("e1000_config_fc_after_link_up_generic"); -+ - /* Check for the case where we have fiber media and auto-neg failed - * so we had to force link. In this case, we need to force the - * configuration of the MAC to match the "fc" parameter. 
- */ - if (mac->autoneg_failed) { -- if (hw->phy.media_type == e1000_media_type_internal_serdes) -- ret_val = igb_force_mac_fc(hw); -+ if (hw->phy.media_type == e1000_media_type_fiber || -+ hw->phy.media_type == e1000_media_type_internal_serdes) -+ ret_val = e1000_force_mac_fc_generic(hw); - } else { - if (hw->phy.media_type == e1000_media_type_copper) -- ret_val = igb_force_mac_fc(hw); -+ ret_val = e1000_force_mac_fc_generic(hw); - } - - if (ret_val) { -- hw_dbg("Error forcing flow control settings\n"); -- goto out; -+ DEBUGOUT("Error forcing flow control settings\n"); -+ return ret_val; - } - - /* Check for the case where we have copper media and auto-neg is -@@ -853,18 +1286,16 @@ - * has completed. We read this twice because this reg has - * some "sticky" (latched) bits. - */ -- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, -- &mii_status_reg); -+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) -- goto out; -- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, -- &mii_status_reg); -+ return ret_val; -+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) -- goto out; -+ return ret_val; - - if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { -- hw_dbg("Copper PHY and Auto Neg has not completed.\n"); -- goto out; -+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); -+ return ret_val; - } - - /* The AutoNeg process has completed, so we now need to -@@ -874,13 +1305,13 @@ - * flow control was negotiated. - */ - ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, -- &mii_nway_adv_reg); -+ &mii_nway_adv_reg); - if (ret_val) -- goto out; -+ return ret_val; - ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, -- &mii_nway_lp_ability_reg); -+ &mii_nway_lp_ability_reg); - if (ret_val) -- goto out; -+ return ret_val; - - /* Two bits in the Auto Negotiation Advertisement Register - * (Address 4) and two bits in the Auto Negotiation Base -@@ -917,18 +1348,18 @@ - */ - if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { -- /* Now we need to check if the user selected RX ONLY -+ /* Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise -- * FULL flow control because we could not advertise RX -+ * FULL flow control because we could not advertise Rx - * ONLY. Hence, we must now check to see if we need to -- * turn OFF the TRANSMISSION of PAUSE frames. -+ * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == e1000_fc_full) { - hw->fc.current_mode = e1000_fc_full; -- hw_dbg("Flow Control = FULL.\n"); -+ DEBUGOUT("Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = e1000_fc_rx_pause; -- hw_dbg("Flow Control = RX PAUSE frames only.\n"); -+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); - } - } - /* For receiving PAUSE frames ONLY. -@@ -943,7 +1374,7 @@ - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_tx_pause; -- hw_dbg("Flow Control = TX PAUSE frames only.\n"); -+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); - } - /* For transmitting PAUSE frames ONLY. - * -@@ -957,46 +1388,23 @@ - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_rx_pause; -- hw_dbg("Flow Control = RX PAUSE frames only.\n"); -- } -- /* Per the IEEE spec, at this point flow control should be -- * disabled. 
However, we want to consider that we could -- * be connected to a legacy switch that doesn't advertise -- * desired flow control, but can be forced on the link -- * partner. So if we advertised no flow control, that is -- * what we will resolve to. If we advertised some kind of -- * receive capability (Rx Pause Only or Full Flow Control) -- * and the link partner advertised none, we will configure -- * ourselves to enable Rx Flow Control only. We can do -- * this safely for two reasons: If the link partner really -- * didn't want flow control enabled, and we enable Rx, no -- * harm done since we won't be receiving any PAUSE frames -- * anyway. If the intent on the link partner was to have -- * flow control enabled, then by us enabling RX only, we -- * can at least receive pause frames and process them. -- * This is a good idea because in most cases, since we are -- * predominantly a server NIC, more times than not we will -- * be asked to delay transmission of packets than asking -- * our link partner to pause transmission of frames. -- */ -- else if ((hw->fc.requested_mode == e1000_fc_none) || -- (hw->fc.requested_mode == e1000_fc_tx_pause) || -- (hw->fc.strict_ieee)) { -- hw->fc.current_mode = e1000_fc_none; -- hw_dbg("Flow Control = NONE.\n"); -+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); - } else { -- hw->fc.current_mode = e1000_fc_rx_pause; -- hw_dbg("Flow Control = RX PAUSE frames only.\n"); -+ /* Per the IEEE spec, at this point flow control -+ * should be disabled. -+ */ -+ hw->fc.current_mode = e1000_fc_none; -+ DEBUGOUT("Flow Control = NONE.\n"); - } - - /* Now we need to do one last check... If we auto- - * negotiated to HALF DUPLEX, flow control should not be - * enabled per IEEE 802.3 spec. - */ -- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); -+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); - if (ret_val) { -- hw_dbg("Error getting link speed and duplex\n"); -- goto out; -+ DEBUGOUT("Error getting link speed and duplex\n"); -+ return ret_val; - } - - if (duplex == HALF_DUPLEX) -@@ -1005,26 +1413,27 @@ - /* Now we call a subroutine to actually force the MAC - * controller to use the correct flow control settings. - */ -- ret_val = igb_force_mac_fc(hw); -+ ret_val = e1000_force_mac_fc_generic(hw); - if (ret_val) { -- hw_dbg("Error forcing flow control settings\n"); -- goto out; -+ DEBUGOUT("Error forcing flow control settings\n"); -+ return ret_val; - } - } -+ - /* Check for the case where we have SerDes media and auto-neg is - * enabled. In this case, we need to check and see if Auto-Neg - * has completed, and if so, how the PHY and link partner has - * flow control configured. - */ -- if ((hw->phy.media_type == e1000_media_type_internal_serdes) -- && mac->autoneg) { -+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) && -+ mac->autoneg) { - /* Read the PCS_LSTS and check to see if AutoNeg - * has completed. - */ -- pcs_status_reg = rd32(E1000_PCS_LSTAT); -+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); - - if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { -- hw_dbg("PCS Auto Neg has not completed.\n"); -+ DEBUGOUT("PCS Auto Neg has not completed.\n"); - return ret_val; - } - -@@ -1034,8 +1443,8 @@ - * Page Ability Register (PCS_LPAB) to determine how - * flow control was negotiated. 
- */ -- pcs_adv_reg = rd32(E1000_PCS_ANADV); -- pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); -+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); -+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); - - /* Two bits in the Auto Negotiation Advertisement Register - * (PCS_ANADV) and two bits in the Auto Negotiation Base -@@ -1080,10 +1489,10 @@ - */ - if (hw->fc.requested_mode == e1000_fc_full) { - hw->fc.current_mode = e1000_fc_full; -- hw_dbg("Flow Control = FULL.\n"); -+ DEBUGOUT("Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = e1000_fc_rx_pause; -- hw_dbg("Flow Control = Rx PAUSE frames only.\n"); -+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); - } - } - /* For receiving PAUSE frames ONLY. -@@ -1098,7 +1507,7 @@ - (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && - (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_tx_pause; -- hw_dbg("Flow Control = Tx PAUSE frames only.\n"); -+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); - } - /* For transmitting PAUSE frames ONLY. - * -@@ -1112,35 +1521,34 @@ - !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && - (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_rx_pause; -- hw_dbg("Flow Control = Rx PAUSE frames only.\n"); -+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); - } else { - /* Per the IEEE spec, at this point flow control - * should be disabled. - */ - hw->fc.current_mode = e1000_fc_none; -- hw_dbg("Flow Control = NONE.\n"); -+ DEBUGOUT("Flow Control = NONE.\n"); - } - - /* Now we call a subroutine to actually force the MAC - * controller to use the correct flow control settings. - */ -- pcs_ctrl_reg = rd32(E1000_PCS_LCTL); -+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); - pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; -- wr32(E1000_PCS_LCTL, pcs_ctrl_reg); -+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); - -- ret_val = igb_force_mac_fc(hw); -+ ret_val = e1000_force_mac_fc_generic(hw); - if (ret_val) { -- hw_dbg("Error forcing flow control settings\n"); -+ DEBUGOUT("Error forcing flow control settings\n"); - return ret_val; - } - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex -+ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex -@@ -1148,172 +1556,185 @@ - * Read the status register for the current speed/duplex and store the current - * speed and duplex for copper connections. 
- **/ --s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, -- u16 *duplex) -+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, -+ u16 *duplex) - { - u32 status; - -- status = rd32(E1000_STATUS); -+ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); -+ -+ status = E1000_READ_REG(hw, E1000_STATUS); - if (status & E1000_STATUS_SPEED_1000) { - *speed = SPEED_1000; -- hw_dbg("1000 Mbs, "); -+ DEBUGOUT("1000 Mbs, "); - } else if (status & E1000_STATUS_SPEED_100) { - *speed = SPEED_100; -- hw_dbg("100 Mbs, "); -+ DEBUGOUT("100 Mbs, "); - } else { - *speed = SPEED_10; -- hw_dbg("10 Mbs, "); -+ DEBUGOUT("10 Mbs, "); - } - - if (status & E1000_STATUS_FD) { - *duplex = FULL_DUPLEX; -- hw_dbg("Full Duplex\n"); -+ DEBUGOUT("Full Duplex\n"); - } else { - *duplex = HALF_DUPLEX; -- hw_dbg("Half Duplex\n"); -+ DEBUGOUT("Half Duplex\n"); - } - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_get_hw_semaphore - Acquire hardware semaphore -+ * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex -+ * @hw: pointer to the HW structure -+ * @speed: stores the current speed -+ * @duplex: stores the current duplex -+ * -+ * Sets the speed and duplex to gigabit full duplex (the only possible option) -+ * for fiber/serdes links. -+ **/ -+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 *speed, u16 *duplex) -+{ -+ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); -+ -+ *speed = SPEED_1000; -+ *duplex = FULL_DUPLEX; -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore to access the PHY or NVM - **/ --s32 igb_get_hw_semaphore(struct e1000_hw *hw) -+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) - { - u32 swsm; -- s32 ret_val = 0; - s32 timeout = hw->nvm.word_size + 1; - s32 i = 0; - -+ DEBUGFUNC("e1000_get_hw_semaphore_generic"); -+ - /* Get the SW semaphore */ - while (i < timeout) { -- swsm = rd32(E1000_SWSM); -+ swsm = E1000_READ_REG(hw, E1000_SWSM); - if (!(swsm & E1000_SWSM_SMBI)) - break; - -- udelay(50); -+ usec_delay(50); - i++; - } - - if (i == timeout) { -- hw_dbg("Driver can't access device - SMBI bit is set.\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); -+ return -E1000_ERR_NVM; - } - - /* Get the FW semaphore. 
*/ - for (i = 0; i < timeout; i++) { -- swsm = rd32(E1000_SWSM); -- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); -+ swsm = E1000_READ_REG(hw, E1000_SWSM); -+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); - - /* Semaphore acquired if bit latched */ -- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) -+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) - break; - -- udelay(50); -+ usec_delay(50); - } - - if (i == timeout) { - /* Release semaphores */ -- igb_put_hw_semaphore(hw); -- hw_dbg("Driver can't access the NVM\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -+ e1000_put_hw_semaphore_generic(hw); -+ DEBUGOUT("Driver can't access the NVM\n"); -+ return -E1000_ERR_NVM; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_put_hw_semaphore - Release hardware semaphore -+ * e1000_put_hw_semaphore_generic - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used to access the PHY or NVM - **/ --void igb_put_hw_semaphore(struct e1000_hw *hw) -+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) - { - u32 swsm; - -- swsm = rd32(E1000_SWSM); -+ DEBUGFUNC("e1000_put_hw_semaphore_generic"); -+ -+ swsm = E1000_READ_REG(hw, E1000_SWSM); - - swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); - -- wr32(E1000_SWSM, swsm); -+ E1000_WRITE_REG(hw, E1000_SWSM, swsm); - } - - /** -- * igb_get_auto_rd_done - Check for auto read completion -+ * e1000_get_auto_rd_done_generic - Check for auto read completion - * @hw: pointer to the HW structure - * - * Check EEPROM for Auto Read done bit. - **/ --s32 igb_get_auto_rd_done(struct e1000_hw *hw) -+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) - { - s32 i = 0; -- s32 ret_val = 0; - -+ DEBUGFUNC("e1000_get_auto_rd_done_generic"); - - while (i < AUTO_READ_DONE_TIMEOUT) { -- if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) -+ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) - break; -- usleep_range(1000, 2000); -+ msec_delay(1); - i++; - } - - if (i == AUTO_READ_DONE_TIMEOUT) { -- hw_dbg("Auto read by HW from NVM has not completed.\n"); -- ret_val = -E1000_ERR_RESET; -- goto out; -+ DEBUGOUT("Auto read by HW from NVM has not completed.\n"); -+ return -E1000_ERR_RESET; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_valid_led_default - Verify a valid default LED config -+ * e1000_valid_led_default_generic - Verify a valid default LED config - * @hw: pointer to the HW structure - * @data: pointer to the NVM (EEPROM) - * - * Read the EEPROM for the current default LED configuration. If the - * LED configuration is not valid, set to a valid LED configuration. 
- **/ --static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) -+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) - { - s32 ret_val; - -+ DEBUGFUNC("e1000_valid_led_default_generic"); -+ - ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - -- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { -- switch (hw->phy.media_type) { -- case e1000_media_type_internal_serdes: -- *data = ID_LED_DEFAULT_82575_SERDES; -- break; -- case e1000_media_type_copper: -- default: -- *data = ID_LED_DEFAULT; -- break; -- } -- } --out: -- return ret_val; -+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) -+ *data = ID_LED_DEFAULT; -+ -+ return E1000_SUCCESS; - } - - /** -- * igb_id_led_init - -+ * e1000_id_led_init_generic - - * @hw: pointer to the HW structure - * - **/ --s32 igb_id_led_init(struct e1000_hw *hw) -+s32 e1000_id_led_init_generic(struct e1000_hw *hw) - { - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; -@@ -1323,17 +1744,13 @@ - u16 data, i, temp; - const u16 led_mask = 0x0F; - -- /* i210 and i211 devices have different LED mechanism */ -- if ((hw->mac.type == e1000_i210) || -- (hw->mac.type == e1000_i211)) -- ret_val = igb_valid_led_default_i210(hw, &data); -- else -- ret_val = igb_valid_led_default(hw, &data); -+ DEBUGFUNC("e1000_id_led_init_generic"); - -+ ret_val = hw->nvm.ops.valid_led_default(hw, &data); - if (ret_val) -- goto out; -+ return ret_val; - -- mac->ledctl_default = rd32(E1000_LEDCTL); -+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); - mac->ledctl_mode1 = mac->ledctl_default; - mac->ledctl_mode2 = mac->ledctl_default; - -@@ -1375,34 +1792,69 @@ - } - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_cleanup_led - Set LED config to default operation -+ * e1000_setup_led_generic - Configures SW controllable LED -+ * @hw: pointer to the HW structure -+ * -+ * This prepares the SW controllable LED for use and saves the current state -+ * of the LED so it can be later restored. -+ **/ -+s32 e1000_setup_led_generic(struct e1000_hw *hw) -+{ -+ u32 ledctl; -+ -+ DEBUGFUNC("e1000_setup_led_generic"); -+ -+ if (hw->mac.ops.setup_led != e1000_setup_led_generic) -+ return -E1000_ERR_CONFIG; -+ -+ if (hw->phy.media_type == e1000_media_type_fiber) { -+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); -+ hw->mac.ledctl_default = ledctl; -+ /* Turn off LED0 */ -+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | -+ E1000_LEDCTL_LED0_MODE_MASK); -+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF << -+ E1000_LEDCTL_LED0_MODE_SHIFT); -+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); -+ } else if (hw->phy.media_type == e1000_media_type_copper) { -+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_cleanup_led_generic - Set LED config to default operation - * @hw: pointer to the HW structure - * - * Remove the current LED configuration and set the LED configuration - * to the default value, saved from the EEPROM. 
- **/ --s32 igb_cleanup_led(struct e1000_hw *hw) -+s32 e1000_cleanup_led_generic(struct e1000_hw *hw) - { -- wr32(E1000_LEDCTL, hw->mac.ledctl_default); -- return 0; -+ DEBUGFUNC("e1000_cleanup_led_generic"); -+ -+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); -+ return E1000_SUCCESS; - } - - /** -- * igb_blink_led - Blink LED -+ * e1000_blink_led_generic - Blink LED - * @hw: pointer to the HW structure - * -- * Blink the led's which are set to be on. -+ * Blink the LEDs which are set to be on. - **/ --s32 igb_blink_led(struct e1000_hw *hw) -+s32 e1000_blink_led_generic(struct e1000_hw *hw) - { - u32 ledctl_blink = 0; - u32 i; - -+ DEBUGFUNC("e1000_blink_led_generic"); -+ - if (hw->phy.media_type == e1000_media_type_fiber) { - /* always blink LED0 for PCI-E fiber */ - ledctl_blink = E1000_LEDCTL_LED0_BLINK | -@@ -1432,100 +1884,239 @@ - } - } - -- wr32(E1000_LEDCTL, ledctl_blink); -+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_led_on_generic - Turn LED on -+ * @hw: pointer to the HW structure -+ * -+ * Turn LED on. -+ **/ -+s32 e1000_led_on_generic(struct e1000_hw *hw) -+{ -+ u32 ctrl; -+ -+ DEBUGFUNC("e1000_led_on_generic"); -+ -+ switch (hw->phy.media_type) { -+ case e1000_media_type_fiber: -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ ctrl &= ~E1000_CTRL_SWDPIN0; -+ ctrl |= E1000_CTRL_SWDPIO0; -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ break; -+ case e1000_media_type_copper: -+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); -+ break; -+ default: -+ break; -+ } - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_led_off - Turn LED off -+ * e1000_led_off_generic - Turn LED off - * @hw: pointer to the HW structure - * - * Turn LED off. - **/ --s32 igb_led_off(struct e1000_hw *hw) -+s32 e1000_led_off_generic(struct e1000_hw *hw) - { -+ u32 ctrl; -+ -+ DEBUGFUNC("e1000_led_off_generic"); -+ - switch (hw->phy.media_type) { -+ case e1000_media_type_fiber: -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ ctrl |= E1000_CTRL_SWDPIN0; -+ ctrl |= E1000_CTRL_SWDPIO0; -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ break; - case e1000_media_type_copper: -- wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); -+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); - break; - default: - break; - } - -- return 0; -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities -+ * @hw: pointer to the HW structure -+ * @no_snoop: bitmap of snoop events -+ * -+ * Set the PCI-express register to snoop for events enabled in 'no_snoop'. -+ **/ -+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) -+{ -+ u32 gcr; -+ -+ DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); -+ -+ if (hw->bus.type != e1000_bus_type_pci_express) -+ return; -+ -+ if (no_snoop) { -+ gcr = E1000_READ_REG(hw, E1000_GCR); -+ gcr &= ~(PCIE_NO_SNOOP_ALL); -+ gcr |= no_snoop; -+ E1000_WRITE_REG(hw, E1000_GCR, gcr); -+ } - } - - /** -- * igb_disable_pcie_master - Disables PCI-express master access -+ * e1000_disable_pcie_master_generic - Disables PCI-express master access - * @hw: pointer to the HW structure - * -- * Returns 0 (0) if successful, else returns -10 -+ * Returns E1000_SUCCESS if successful, else returns -10 - * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused - * the master requests to be disabled. - * - * Disables PCI-Express master access and verifies there are no pending - * requests. 
- **/ --s32 igb_disable_pcie_master(struct e1000_hw *hw) -+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) - { - u32 ctrl; - s32 timeout = MASTER_DISABLE_TIMEOUT; -- s32 ret_val = 0; -+ -+ DEBUGFUNC("e1000_disable_pcie_master_generic"); - - if (hw->bus.type != e1000_bus_type_pci_express) -- goto out; -+ return E1000_SUCCESS; - -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - - while (timeout) { -- if (!(rd32(E1000_STATUS) & -- E1000_STATUS_GIO_MASTER_ENABLE)) -+ if (!(E1000_READ_REG(hw, E1000_STATUS) & -+ E1000_STATUS_GIO_MASTER_ENABLE) || -+ E1000_REMOVED(hw->hw_addr)) - break; -- udelay(100); -+ usec_delay(100); - timeout--; - } - - if (!timeout) { -- hw_dbg("Master requests are pending.\n"); -- ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; -- goto out; -+ DEBUGOUT("Master requests are pending.\n"); -+ return -E1000_ERR_MASTER_REQUESTS_PENDING; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_validate_mdi_setting - Verify MDI/MDIx settings -+ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing - * @hw: pointer to the HW structure - * -- * Verify that when not using auto-negotitation that MDI/MDIx is correctly -- * set, which is forced to MDI mode only. -+ * Reset the Adaptive Interframe Spacing throttle to default values. - **/ --s32 igb_validate_mdi_setting(struct e1000_hw *hw) -+void e1000_reset_adaptive_generic(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ struct e1000_mac_info *mac = &hw->mac; - -- /* All MDI settings are supported on 82580 and newer. */ -- if (hw->mac.type >= e1000_82580) -- goto out; -+ DEBUGFUNC("e1000_reset_adaptive_generic"); -+ -+ if (!mac->adaptive_ifs) { -+ DEBUGOUT("Not in Adaptive IFS mode!\n"); -+ return; -+ } -+ -+ mac->current_ifs_val = 0; -+ mac->ifs_min_val = IFS_MIN; -+ mac->ifs_max_val = IFS_MAX; -+ mac->ifs_step_size = IFS_STEP; -+ mac->ifs_ratio = IFS_RATIO; -+ -+ mac->in_ifs_mode = false; -+ E1000_WRITE_REG(hw, E1000_AIT, 0); -+} -+ -+/** -+ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing -+ * @hw: pointer to the HW structure -+ * -+ * Update the Adaptive Interframe Spacing Throttle value based on the -+ * time between transmitted packets and time between collisions. -+ **/ -+void e1000_update_adaptive_generic(struct e1000_hw *hw) -+{ -+ struct e1000_mac_info *mac = &hw->mac; -+ -+ DEBUGFUNC("e1000_update_adaptive_generic"); -+ -+ if (!mac->adaptive_ifs) { -+ DEBUGOUT("Not in Adaptive IFS mode!\n"); -+ return; -+ } -+ -+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { -+ if (mac->tx_packet_delta > MIN_NUM_XMITS) { -+ mac->in_ifs_mode = true; -+ if (mac->current_ifs_val < mac->ifs_max_val) { -+ if (!mac->current_ifs_val) -+ mac->current_ifs_val = mac->ifs_min_val; -+ else -+ mac->current_ifs_val += -+ mac->ifs_step_size; -+ E1000_WRITE_REG(hw, E1000_AIT, -+ mac->current_ifs_val); -+ } -+ } -+ } else { -+ if (mac->in_ifs_mode && -+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) { -+ mac->current_ifs_val = 0; -+ mac->in_ifs_mode = false; -+ E1000_WRITE_REG(hw, E1000_AIT, 0); -+ } -+ } -+} -+ -+/** -+ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings -+ * @hw: pointer to the HW structure -+ * -+ * Verify that when not using auto-negotiation that MDI/MDIx is correctly -+ * set, which is forced to MDI mode only. 
-+ **/ -+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) -+{ -+ DEBUGFUNC("e1000_validate_mdi_setting_generic"); - - if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { -- hw_dbg("Invalid MDI setting detected\n"); -+ DEBUGOUT("Invalid MDI setting detected\n"); - hw->phy.mdix = 1; -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ return -E1000_ERR_CONFIG; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register -+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings -+ * @hw: pointer to the HW structure -+ * -+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced -+ * operation. -+ **/ -+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register - * @hw: pointer to the HW structure - * @reg: 32bit register offset such as E1000_SCTL - * @offset: register offset to write to -@@ -1535,72 +2126,28 @@ - * and they all have the format address << 8 | data and bit 31 is polled for - * completion. - **/ --s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, -- u32 offset, u8 data) -+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, -+ u32 offset, u8 data) - { - u32 i, regvalue = 0; -- s32 ret_val = 0; -+ -+ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); - - /* Set up the address and data */ - regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); -- wr32(reg, regvalue); -+ E1000_WRITE_REG(hw, reg, regvalue); - - /* Poll the ready bit to see if the MDI read completed */ - for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { -- udelay(5); -- regvalue = rd32(reg); -+ usec_delay(5); -+ regvalue = E1000_READ_REG(hw, reg); - if (regvalue & E1000_GEN_CTL_READY) - break; - } - if (!(regvalue & E1000_GEN_CTL_READY)) { -- hw_dbg("Reg %08x did not indicate ready\n", reg); -- ret_val = -E1000_ERR_PHY; -- goto out; -- } -- --out: -- return ret_val; --} -- --/** -- * igb_enable_mng_pass_thru - Enable processing of ARP's -- * @hw: pointer to the HW structure -- * -- * Verifies the hardware needs to leave interface enabled so that frames can -- * be directed to and from the management interface. 
-- **/ --bool igb_enable_mng_pass_thru(struct e1000_hw *hw) --{ -- u32 manc; -- u32 fwsm, factps; -- bool ret_val = false; -- -- if (!hw->mac.asf_firmware_present) -- goto out; -- -- manc = rd32(E1000_MANC); -- -- if (!(manc & E1000_MANC_RCV_TCO_EN)) -- goto out; -- -- if (hw->mac.arc_subsystem_valid) { -- fwsm = rd32(E1000_FWSM); -- factps = rd32(E1000_FACTPS); -- -- if (!(factps & E1000_FACTPS_MNGCG) && -- ((fwsm & E1000_FWSM_MODE_MASK) == -- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { -- ret_val = true; -- goto out; -- } -- } else { -- if ((manc & E1000_MANC_SMBUS_EN) && -- !(manc & E1000_MANC_ASF_EN)) { -- ret_val = true; -- goto out; -- } -+ DEBUGOUT1("Reg %08x did not indicate ready\n", reg); -+ return -E1000_ERR_PHY; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h ---- a/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,87 +1,81 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* - --#ifndef _E1000_MAC_H_ --#define _E1000_MAC_H_ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. - --#include "e1000_hw.h" -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. 
- --#include "e1000_phy.h" --#include "e1000_nvm.h" --#include "e1000_defines.h" --#include "e1000_i210.h" -- --/* Functions that should not be called directly from drivers but can be used -- * by other files in this 'shared code' -- */ --s32 igb_blink_led(struct e1000_hw *hw); --s32 igb_check_for_copper_link(struct e1000_hw *hw); --s32 igb_cleanup_led(struct e1000_hw *hw); --s32 igb_config_fc_after_link_up(struct e1000_hw *hw); --s32 igb_disable_pcie_master(struct e1000_hw *hw); --s32 igb_force_mac_fc(struct e1000_hw *hw); --s32 igb_get_auto_rd_done(struct e1000_hw *hw); --s32 igb_get_bus_info_pcie(struct e1000_hw *hw); --s32 igb_get_hw_semaphore(struct e1000_hw *hw); --s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, -- u16 *duplex); --s32 igb_id_led_init(struct e1000_hw *hw); --s32 igb_led_off(struct e1000_hw *hw); --void igb_update_mc_addr_list(struct e1000_hw *hw, -- u8 *mc_addr_list, u32 mc_addr_count); --s32 igb_setup_link(struct e1000_hw *hw); --s32 igb_validate_mdi_setting(struct e1000_hw *hw); --s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, -- u32 offset, u8 data); -- --void igb_clear_hw_cntrs_base(struct e1000_hw *hw); --void igb_clear_vfta(struct e1000_hw *hw); --void igb_clear_vfta_i350(struct e1000_hw *hw); --s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); --void igb_config_collision_dist(struct e1000_hw *hw); --void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); --void igb_mta_set(struct e1000_hw *hw, u32 hash_value); --void igb_put_hw_semaphore(struct e1000_hw *hw); --void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); --s32 igb_check_alt_mac_addr(struct e1000_hw *hw); -- --bool igb_enable_mng_pass_thru(struct e1000_hw *hw); -- --enum e1000_mng_mode { -- e1000_mng_mode_none = 0, -- e1000_mng_mode_asf, -- e1000_mng_mode_pt, -- e1000_mng_mode_ipmi, -- e1000_mng_mode_host_if_only --}; -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". - --#define E1000_FACTPS_MNGCG 0x20000000 -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - --#define E1000_FWSM_MODE_MASK 0xE --#define E1000_FWSM_MODE_SHIFT 1 -+*******************************************************************************/ - --#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 -+#ifndef _E1000_MAC_H_ -+#define _E1000_MAC_H_ - --void e1000_init_function_pointers_82575(struct e1000_hw *hw); -+void e1000_init_mac_ops_generic(struct e1000_hw *hw); -+#ifndef E1000_REMOVED -+#define E1000_REMOVED(a) (0) -+#endif /* E1000_REMOVED */ -+void e1000_null_mac_generic(struct e1000_hw *hw); -+s32 e1000_null_ops_generic(struct e1000_hw *hw); -+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); -+bool e1000_null_mng_mode(struct e1000_hw *hw); -+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); -+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); -+int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); -+s32 e1000_blink_led_generic(struct e1000_hw *hw); -+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); -+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); -+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); -+s32 e1000_cleanup_led_generic(struct e1000_hw *hw); -+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); -+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); -+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); -+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); -+s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); -+void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw); -+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); -+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, -+ u16 *duplex); -+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, -+ u16 *speed, u16 *duplex); -+s32 e1000_id_led_init_generic(struct e1000_hw *hw); -+s32 e1000_led_on_generic(struct e1000_hw *hw); -+s32 e1000_led_off_generic(struct e1000_hw *hw); -+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, -+ u8 *mc_addr_list, u32 mc_addr_count); -+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); -+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); -+s32 e1000_setup_led_generic(struct e1000_hw *hw); -+s32 e1000_setup_link_generic(struct e1000_hw *hw); -+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); -+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, -+ u32 offset, u8 data); -+ -+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); -+ -+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); -+void igb_e1000_clear_vfta_generic(struct e1000_hw *hw); -+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); -+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); -+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); -+s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); -+void e1000_reset_adaptive_generic(struct e1000_hw *hw); -+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); -+void e1000_update_adaptive_generic(struct e1000_hw *hw); -+void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); - - #endif -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.c b/drivers/net/ethernet/intel/igb/e1000_manage.c ---- a/drivers/net/ethernet/intel/igb/e1000_manage.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_manage.c 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,552 @@ 
-+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "e1000_api.h" -+/** -+ * e1000_calculate_checksum - Calculate checksum for buffer -+ * @buffer: pointer to EEPROM -+ * @length: size of EEPROM to calculate a checksum for -+ * -+ * Calculates the checksum for some buffer on a specified length. The -+ * checksum calculated is returned. -+ **/ -+u8 e1000_calculate_checksum(u8 *buffer, u32 length) -+{ -+ u32 i; -+ u8 sum = 0; -+ -+ DEBUGFUNC("e1000_calculate_checksum"); -+ -+ if (!buffer) -+ return 0; -+ -+ for (i = 0; i < length; i++) -+ sum += buffer[i]; -+ -+ return (u8) (0 - sum); -+} -+ -+/** -+ * e1000_mng_enable_host_if_generic - Checks host interface is enabled -+ * @hw: pointer to the HW structure -+ * -+ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND -+ * -+ * This function checks whether the HOST IF is enabled for command operation -+ * and also checks whether the previous command is completed. It busy waits -+ * in case of previous command is not completed. -+ **/ -+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) -+{ -+ u32 hicr; -+ u8 i; -+ -+ DEBUGFUNC("e1000_mng_enable_host_if_generic"); -+ -+ if (!hw->mac.arc_subsystem_valid) { -+ DEBUGOUT("ARC subsystem not valid.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ /* Check that the host interface is enabled. */ -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ if (!(hicr & E1000_HICR_EN)) { -+ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ /* check the previous command is completed */ -+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ if (!(hicr & E1000_HICR_C)) -+ break; -+ msec_delay_irq(1); -+ } -+ -+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { -+ DEBUGOUT("Previous command timeout failed .\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_check_mng_mode_generic - Generic check management mode -+ * @hw: pointer to the HW structure -+ * -+ * Reads the firmware semaphore register and returns true (>0) if -+ * manageability is enabled, else false (0). 
-+ **/ -+bool e1000_check_mng_mode_generic(struct e1000_hw *hw) -+{ -+ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); -+ -+ DEBUGFUNC("e1000_check_mng_mode_generic"); -+ -+ -+ return (fwsm & E1000_FWSM_MODE_MASK) == -+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); -+} -+ -+/** -+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx -+ * @hw: pointer to the HW structure -+ * -+ * Enables packet filtering on transmit packets if manageability is enabled -+ * and host interface is enabled. -+ **/ -+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) -+{ -+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; -+ u32 *buffer = (u32 *)&hw->mng_cookie; -+ u32 offset; -+ s32 ret_val, hdr_csum, csum; -+ u8 i, len; -+ -+ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); -+ -+ hw->mac.tx_pkt_filtering = true; -+ -+ /* No manageability, no filtering */ -+ if (!hw->mac.ops.check_mng_mode(hw)) { -+ hw->mac.tx_pkt_filtering = false; -+ return hw->mac.tx_pkt_filtering; -+ } -+ -+ /* If we can't read from the host interface for whatever -+ * reason, disable filtering. -+ */ -+ ret_val = e1000_mng_enable_host_if_generic(hw); -+ if (ret_val != E1000_SUCCESS) { -+ hw->mac.tx_pkt_filtering = false; -+ return hw->mac.tx_pkt_filtering; -+ } -+ -+ /* Read in the header. Length and offset are in dwords. */ -+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; -+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; -+ for (i = 0; i < len; i++) -+ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, -+ offset + i); -+ hdr_csum = hdr->checksum; -+ hdr->checksum = 0; -+ csum = e1000_calculate_checksum((u8 *)hdr, -+ E1000_MNG_DHCP_COOKIE_LENGTH); -+ /* If either the checksums or signature don't match, then -+ * the cookie area isn't considered valid, in which case we -+ * take the safe route of assuming Tx filtering is enabled. -+ */ -+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { -+ hw->mac.tx_pkt_filtering = true; -+ return hw->mac.tx_pkt_filtering; -+ } -+ -+ /* Cookie area is valid, make the final check for filtering. */ -+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) -+ hw->mac.tx_pkt_filtering = false; -+ -+ return hw->mac.tx_pkt_filtering; -+} -+ -+/** -+ * e1000_mng_write_cmd_header_generic - Writes manageability command header -+ * @hw: pointer to the HW structure -+ * @hdr: pointer to the host interface command header -+ * -+ * Writes the command header after does the checksum calculation. -+ **/ -+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, -+ struct e1000_host_mng_command_header *hdr) -+{ -+ u16 i, length = sizeof(struct e1000_host_mng_command_header); -+ -+ DEBUGFUNC("e1000_mng_write_cmd_header_generic"); -+ -+ /* Write the whole command header structure with new checksum. */ -+ -+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); -+ -+ length >>= 2; -+ /* Write the relevant command block into the ram area. */ -+ for (i = 0; i < length; i++) { -+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, -+ *((u32 *) hdr + i)); -+ E1000_WRITE_FLUSH(hw); -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_mng_host_if_write_generic - Write to the manageability host interface -+ * @hw: pointer to the HW structure -+ * @buffer: pointer to the host interface buffer -+ * @length: size of the buffer -+ * @offset: location in the buffer to write to -+ * @sum: sum of the data (not checksum) -+ * -+ * This function writes the buffer content at the offset given on the host if. 
-+ * It also does alignment considerations to do the writes in most efficient -+ * way. Also fills up the sum of the buffer in *buffer parameter. -+ **/ -+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, -+ u16 length, u16 offset, u8 *sum) -+{ -+ u8 *tmp; -+ u8 *bufptr = buffer; -+ u32 data = 0; -+ u16 remaining, i, j, prev_bytes; -+ -+ DEBUGFUNC("e1000_mng_host_if_write_generic"); -+ -+ /* sum = only sum of the data and it is not checksum */ -+ -+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) -+ return -E1000_ERR_PARAM; -+ -+ tmp = (u8 *)&data; -+ prev_bytes = offset & 0x3; -+ offset >>= 2; -+ -+ if (prev_bytes) { -+ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); -+ for (j = prev_bytes; j < sizeof(u32); j++) { -+ *(tmp + j) = *bufptr++; -+ *sum += *(tmp + j); -+ } -+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); -+ length -= j - prev_bytes; -+ offset++; -+ } -+ -+ remaining = length & 0x3; -+ length -= remaining; -+ -+ /* Calculate length in DWORDs */ -+ length >>= 2; -+ -+ /* The device driver writes the relevant command block into the -+ * ram area. -+ */ -+ for (i = 0; i < length; i++) { -+ for (j = 0; j < sizeof(u32); j++) { -+ *(tmp + j) = *bufptr++; -+ *sum += *(tmp + j); -+ } -+ -+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, -+ data); -+ } -+ if (remaining) { -+ for (j = 0; j < sizeof(u32); j++) { -+ if (j < remaining) -+ *(tmp + j) = *bufptr++; -+ else -+ *(tmp + j) = 0; -+ -+ *sum += *(tmp + j); -+ } -+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, -+ data); -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface -+ * @hw: pointer to the HW structure -+ * @buffer: pointer to the host interface -+ * @length: size of the buffer -+ * -+ * Writes the DHCP information to the host interface. -+ **/ -+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, -+ u16 length) -+{ -+ struct e1000_host_mng_command_header hdr; -+ s32 ret_val; -+ u32 hicr; -+ -+ DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); -+ -+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; -+ hdr.command_length = length; -+ hdr.reserved1 = 0; -+ hdr.reserved2 = 0; -+ hdr.checksum = 0; -+ -+ /* Enable the host interface */ -+ ret_val = e1000_mng_enable_host_if_generic(hw); -+ if (ret_val) -+ return ret_val; -+ -+ /* Populate the host interface with the contents of "buffer". */ -+ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, -+ sizeof(hdr), &(hdr.checksum)); -+ if (ret_val) -+ return ret_val; -+ -+ /* Write the manageability command header */ -+ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); -+ if (ret_val) -+ return ret_val; -+ -+ /* Tell the ARC a new command is pending. */ -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_enable_mng_pass_thru - Check if management passthrough is needed -+ * @hw: pointer to the HW structure -+ * -+ * Verifies the hardware needs to leave interface enabled so that frames can -+ * be directed to and from the management interface. 
-+ **/ -+/* Changed name, duplicated with e1000 */ -+bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw) -+{ -+ u32 manc; -+ u32 fwsm, factps; -+ -+ DEBUGFUNC("igb_e1000_enable_mng_pass_thru"); -+ -+ if (!hw->mac.asf_firmware_present) -+ return false; -+ -+ manc = E1000_READ_REG(hw, E1000_MANC); -+ -+ if (!(manc & E1000_MANC_RCV_TCO_EN)) -+ return false; -+ -+ if (hw->mac.has_fwsm) { -+ fwsm = E1000_READ_REG(hw, E1000_FWSM); -+ factps = E1000_READ_REG(hw, E1000_FACTPS); -+ -+ if (!(factps & E1000_FACTPS_MNGCG) && -+ ((fwsm & E1000_FWSM_MODE_MASK) == -+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) -+ return true; -+ } else if ((manc & E1000_MANC_SMBUS_EN) && -+ !(manc & E1000_MANC_ASF_EN)) { -+ return true; -+ } -+ -+ return false; -+} -+ -+/** -+ * e1000_host_interface_command - Writes buffer to host interface -+ * @hw: pointer to the HW structure -+ * @buffer: contains a command to write -+ * @length: the byte length of the buffer, must be multiple of 4 bytes -+ * -+ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS -+ * else returns E1000_ERR_HOST_INTERFACE_COMMAND. -+ **/ -+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) -+{ -+ u32 hicr, i; -+ -+ DEBUGFUNC("e1000_host_interface_command"); -+ -+ if (!(hw->mac.arc_subsystem_valid)) { -+ DEBUGOUT("Hardware doesn't support host interface command.\n"); -+ return E1000_SUCCESS; -+ } -+ -+ if (!hw->mac.asf_firmware_present) { -+ DEBUGOUT("Firmware is not present.\n"); -+ return E1000_SUCCESS; -+ } -+ -+ if (length == 0 || length & 0x3 || -+ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { -+ DEBUGOUT("Buffer length failure.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ /* Check that the host interface is enabled. */ -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ if (!(hicr & E1000_HICR_EN)) { -+ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ /* Calculate length in DWORDs */ -+ length >>= 2; -+ -+ /* The device driver writes the relevant command block -+ * into the ram area. -+ */ -+ for (i = 0; i < length; i++) -+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, -+ *((u32 *)buffer + i)); -+ -+ /* Setting this bit tells the ARC that a new command is pending. */ -+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); -+ -+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ if (!(hicr & E1000_HICR_C)) -+ break; -+ msec_delay(1); -+ } -+ -+ /* Check command successful completion. */ -+ if (i == E1000_HI_COMMAND_TIMEOUT || -+ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { -+ DEBUGOUT("Command has failed with no status valid.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ for (i = 0; i < length; i++) -+ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, -+ E1000_HOST_IF, -+ i); -+ -+ return E1000_SUCCESS; -+} -+/** -+ * e1000_load_firmware - Writes proxy FW code buffer to host interface -+ * and execute. -+ * @hw: pointer to the HW structure -+ * @buffer: contains a firmware to write -+ * @length: the byte length of the buffer, must be multiple of 4 bytes -+ * -+ * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled -+ * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. 
-+ **/ -+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) -+{ -+ u32 hicr, hibba, fwsm, icr, i; -+ -+ DEBUGFUNC("e1000_load_firmware"); -+ -+ if (hw->mac.type < e1000_i210) { -+ DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); -+ return -E1000_ERR_CONFIG; -+ } -+ -+ /* Check that the host interface is enabled. */ -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ if (!(hicr & E1000_HICR_EN)) { -+ DEBUGOUT("E1000_HOST_EN bit disabled.\n"); -+ return -E1000_ERR_CONFIG; -+ } -+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { -+ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); -+ return -E1000_ERR_CONFIG; -+ } -+ -+ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { -+ DEBUGOUT("Buffer length failure.\n"); -+ return -E1000_ERR_INVALID_ARGUMENT; -+ } -+ -+ /* Clear notification from ROM-FW by reading ICR register */ -+ icr = E1000_READ_REG(hw, E1000_ICR_V2); -+ -+ /* Reset ROM-FW */ -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ hicr |= E1000_HICR_FW_RESET_ENABLE; -+ E1000_WRITE_REG(hw, E1000_HICR, hicr); -+ hicr |= E1000_HICR_FW_RESET; -+ E1000_WRITE_REG(hw, E1000_HICR, hicr); -+ E1000_WRITE_FLUSH(hw); -+ -+ /* Wait till MAC notifies about its readiness after ROM-FW reset */ -+ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { -+ icr = E1000_READ_REG(hw, E1000_ICR_V2); -+ if (icr & E1000_ICR_MNG) -+ break; -+ msec_delay(1); -+ } -+ -+ /* Check for timeout */ -+ if (i == E1000_HI_COMMAND_TIMEOUT) { -+ DEBUGOUT("FW reset failed.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ /* Wait till MAC is ready to accept new FW code */ -+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { -+ fwsm = E1000_READ_REG(hw, E1000_FWSM); -+ if ((fwsm & E1000_FWSM_FW_VALID) && -+ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == -+ E1000_FWSM_HI_EN_ONLY_MODE)) -+ break; -+ msec_delay(1); -+ } -+ -+ /* Check for timeout */ -+ if (i == E1000_HI_COMMAND_TIMEOUT) { -+ DEBUGOUT("FW reset failed.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ /* Calculate length in DWORDs */ -+ length >>= 2; -+ -+ /* The device driver writes the relevant FW code block -+ * into the ram area in DWORDs via 1kB ram addressing window. -+ */ -+ for (i = 0; i < length; i++) { -+ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { -+ /* Point to correct 1kB ram window */ -+ hibba = E1000_HI_FW_BASE_ADDRESS + -+ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * -+ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); -+ -+ E1000_WRITE_REG(hw, E1000_HIBBA, hibba); -+ } -+ -+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, -+ i % E1000_HI_FW_BLOCK_DWORD_LENGTH, -+ *((u32 *)buffer + i)); -+ } -+ -+ /* Setting this bit tells the ARC that a new FW is ready to execute. */ -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); -+ -+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { -+ hicr = E1000_READ_REG(hw, E1000_HICR); -+ if (!(hicr & E1000_HICR_C)) -+ break; -+ msec_delay(1); -+ } -+ -+ /* Check for successful FW start. 
*/ -+ if (i == E1000_HI_COMMAND_TIMEOUT) { -+ DEBUGOUT("New FW did not start within timeout period.\n"); -+ return -E1000_ERR_HOST_INTERFACE_COMMAND; -+ } -+ -+ return E1000_SUCCESS; -+} -+ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.h b/drivers/net/ethernet/intel/igb/e1000_manage.h ---- a/drivers/net/ethernet/intel/igb/e1000_manage.h 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_manage.h 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,86 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#ifndef _E1000_MANAGE_H_ -+#define _E1000_MANAGE_H_ -+ -+bool e1000_check_mng_mode_generic(struct e1000_hw *hw); -+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); -+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); -+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, -+ u16 length, u16 offset, u8 *sum); -+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, -+ struct e1000_host_mng_command_header *hdr); -+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, -+ u8 *buffer, u16 length); -+bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw); -+u8 e1000_calculate_checksum(u8 *buffer, u32 length); -+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); -+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); -+ -+enum e1000_mng_mode { -+ e1000_mng_mode_none = 0, -+ e1000_mng_mode_asf, -+ e1000_mng_mode_pt, -+ e1000_mng_mode_ipmi, -+ e1000_mng_mode_host_if_only -+}; -+ -+#define E1000_FACTPS_MNGCG 0x20000000 -+ -+#define E1000_FWSM_MODE_MASK 0xE -+#define E1000_FWSM_MODE_SHIFT 1 -+#define E1000_FWSM_FW_VALID 0x00008000 -+#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 -+ -+#define E1000_MNG_IAMT_MODE 0x3 -+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 -+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 -+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 -+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 -+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 -+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 -+ -+#define E1000_VFTA_ENTRY_SHIFT 5 -+#define E1000_VFTA_ENTRY_MASK 0x7F -+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F -+ -+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -+#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ -+#define E1000_HI_FW_BASE_ADDRESS 0x10000 -+#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ -+#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ -+#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ -+#define 
E1000_HICR_EN 0x01 /* Enable bit - RO */ -+/* Driver sets this bit when done to put command in RAM */ -+#define E1000_HICR_C 0x02 -+#define E1000_HICR_SV 0x04 /* Status Validity */ -+#define E1000_HICR_FW_RESET_ENABLE 0x40 -+#define E1000_HICR_FW_RESET 0x80 -+ -+/* Intel(R) Active Management Technology signature */ -+#define E1000_IAMT_SIGNATURE 0x544D4149 -+ -+#endif -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c ---- a/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,42 +1,71 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #include "e1000_mbx.h" - - /** -- * igb_read_mbx - Reads a message from the mailbox -+ * e1000_null_mbx_check_for_flag - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 E1000_UNUSEDARG mbx_id) -+{ -+ DEBUGFUNC("e1000_null_mbx_check_flag"); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_null_mbx_transact - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, -+ u32 E1000_UNUSEDARG *msg, -+ u16 E1000_UNUSEDARG size, -+ u16 E1000_UNUSEDARG mbx_id) -+{ -+ DEBUGFUNC("e1000_null_mbx_rw_msg"); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_mbx - Reads a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to read - * -- * returns SUCCESS if it successfully read message from buffer -+ * returns SUCCESS if it successfuly read message from buffer - **/ --s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) -+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_read_mbx"); -+ - /* limit read to size of mailbox */ - if (size > mbx->size) - size = mbx->size; -@@ -48,7 +77,7 @@ - } - - /** -- * igb_write_mbx - Write a message to the mailbox -+ * e1000_write_mbx - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer -@@ -56,10 +85,12 @@ - * - * returns SUCCESS if it successfully copied message into the buffer - **/ --s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) -+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("e1000_write_mbx"); - - if (size > mbx->size) - ret_val = -E1000_ERR_MBX; -@@ -71,17 +102,19 @@ - } - - /** -- * igb_check_for_msg - checks to see if someone sent us mail -+ * e1000_check_for_msg - checks to see if someone sent us mail - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ --s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) -+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_check_for_msg"); -+ - if (mbx->ops.check_for_msg) - ret_val = mbx->ops.check_for_msg(hw, mbx_id); - -@@ -89,17 +122,19 @@ - } - - /** -- * igb_check_for_ack - checks to see if someone sent us ACK -+ * e1000_check_for_ack - checks to see if someone sent us ACK - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ --s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) -+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_check_for_ack"); -+ - if (mbx->ops.check_for_ack) - ret_val = mbx->ops.check_for_ack(hw, mbx_id); - -@@ -107,17 +142,19 @@ - } - - /** -- * igb_check_for_rst - checks to see if other side has reset -+ * 
e1000_check_for_rst - checks to see if other side has reset - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ --s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) -+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_check_for_rst"); -+ - if (mbx->ops.check_for_rst) - ret_val = mbx->ops.check_for_rst(hw, mbx_id); - -@@ -125,17 +162,19 @@ - } - - /** -- * igb_poll_for_msg - Wait for message notification -+ * e1000_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification - **/ --static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) -+static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - -+ DEBUGFUNC("e1000_poll_for_msg"); -+ - if (!countdown || !mbx->ops.check_for_msg) - goto out; - -@@ -143,28 +182,30 @@ - countdown--; - if (!countdown) - break; -- udelay(mbx->usec_delay); -+ usec_delay(mbx->usec_delay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - out: -- return countdown ? 0 : -E1000_ERR_MBX; -+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; - } - - /** -- * igb_poll_for_ack - Wait for message acknowledgement -+ * e1000_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message acknowledgement - **/ --static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) -+static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - -+ DEBUGFUNC("e1000_poll_for_ack"); -+ - if (!countdown || !mbx->ops.check_for_ack) - goto out; - -@@ -172,18 +213,18 @@ - countdown--; - if (!countdown) - break; -- udelay(mbx->usec_delay); -+ usec_delay(mbx->usec_delay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - out: -- return countdown ? 0 : -E1000_ERR_MBX; -+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; - } - - /** -- * igb_read_posted_mbx - Wait for message notification and receive message -+ * e1000_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer -@@ -192,17 +233,19 @@ - * returns SUCCESS if it successfully received a message notification and - * copied it into the receive buffer. 
- **/ --static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, -- u16 mbx_id) -+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_read_posted_mbx"); -+ - if (!mbx->ops.read) - goto out; - -- ret_val = igb_poll_for_msg(hw, mbx_id); -+ ret_val = e1000_poll_for_msg(hw, mbx_id); - -+ /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); - out: -@@ -210,7 +253,7 @@ - } - - /** -- * igb_write_posted_mbx - Write a message to the mailbox, wait for ack -+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer -@@ -219,12 +262,13 @@ - * returns SUCCESS if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ --static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, -- u16 mbx_id) -+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) - { - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_write_posted_mbx"); -+ - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; -@@ -234,37 +278,58 @@ - - /* if msg sent wait until we receive an ack */ - if (!ret_val) -- ret_val = igb_poll_for_ack(hw, mbx_id); -+ ret_val = e1000_poll_for_ack(hw, mbx_id); - out: - return ret_val; - } - --static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) -+/** -+ * e1000_init_mbx_ops_generic - Initialize mbx function pointers -+ * @hw: pointer to the HW structure -+ * -+ * Sets the function pointers to no-op functions -+ **/ -+void e1000_init_mbx_ops_generic(struct e1000_hw *hw) - { -- u32 mbvficr = rd32(E1000_MBVFICR); -+ struct e1000_mbx_info *mbx = &hw->mbx; -+ mbx->ops.init_params = e1000_null_ops_generic; -+ mbx->ops.read = e1000_null_mbx_transact; -+ mbx->ops.write = e1000_null_mbx_transact; -+ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; -+ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; -+ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; -+ mbx->ops.read_posted = e1000_read_posted_mbx; -+ mbx->ops.write_posted = e1000_write_posted_mbx; -+} -+ -+static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) -+{ -+ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); - s32 ret_val = -E1000_ERR_MBX; - - if (mbvficr & mask) { -- ret_val = 0; -- wr32(E1000_MBVFICR, mask); -+ ret_val = E1000_SUCCESS; -+ E1000_WRITE_REG(hw, E1000_MBVFICR, mask); - } - - return ret_val; - } - - /** -- * igb_check_for_msg_pf - checks to see if the VF has sent mail -+ * e1000_check_for_msg_pf - checks to see if the VF has sent mail - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ --static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) -+static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) - { - s32 ret_val = -E1000_ERR_MBX; - -- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { -- ret_val = 0; -+ DEBUGFUNC("e1000_check_for_msg_pf"); -+ -+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { -+ ret_val = E1000_SUCCESS; - hw->mbx.stats.reqs++; - } - -@@ -272,18 +337,20 @@ - } - - /** -- * igb_check_for_ack_pf - 
checks to see if the VF has ACKed -+ * e1000_check_for_ack_pf - checks to see if the VF has ACKed - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ --static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) -+static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) - { - s32 ret_val = -E1000_ERR_MBX; - -- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { -- ret_val = 0; -+ DEBUGFUNC("e1000_check_for_ack_pf"); -+ -+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { -+ ret_val = E1000_SUCCESS; - hw->mbx.stats.acks++; - } - -@@ -291,20 +358,22 @@ - } - - /** -- * igb_check_for_rst_pf - checks to see if the VF has reset -+ * e1000_check_for_rst_pf - checks to see if the VF has reset - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ --static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) -+static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) - { -- u32 vflre = rd32(E1000_VFLRE); -+ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); - s32 ret_val = -E1000_ERR_MBX; - -+ DEBUGFUNC("e1000_check_for_rst_pf"); -+ - if (vflre & (1 << vf_number)) { -- ret_val = 0; -- wr32(E1000_VFLRE, (1 << vf_number)); -+ ret_val = E1000_SUCCESS; -+ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); - hw->mbx.stats.rsts++; - } - -@@ -312,30 +381,40 @@ - } - - /** -- * igb_obtain_mbx_lock_pf - obtain mailbox lock -+ * e1000_obtain_mbx_lock_pf - obtain mailbox lock - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * return SUCCESS if we obtained the mailbox lock - **/ --static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) -+static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) - { - s32 ret_val = -E1000_ERR_MBX; - u32 p2v_mailbox; -+ int count = 10; - -- /* Take ownership of the buffer */ -- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); -+ DEBUGFUNC("e1000_obtain_mbx_lock_pf"); - -- /* reserve mailbox for vf use */ -- p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); -- if (p2v_mailbox & E1000_P2VMAILBOX_PFU) -- ret_val = 0; -+ do { -+ /* Take ownership of the buffer */ -+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), -+ E1000_P2VMAILBOX_PFU); -+ -+ /* reserve mailbox for pf use */ -+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); -+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { -+ ret_val = E1000_SUCCESS; -+ break; -+ } -+ usec_delay(1000); -+ } while (count-- > 0); - - return ret_val; -+ - } - - /** -- * igb_write_mbx_pf - Places a message in the mailbox -+ * e1000_write_mbx_pf - Places a message in the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer -@@ -343,27 +422,29 @@ - * - * returns SUCCESS if it successfully copied message into the buffer - **/ --static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, -- u16 vf_number) -+static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, -+ u16 vf_number) - { - s32 ret_val; - u16 i; - -+ DEBUGFUNC("e1000_write_mbx_pf"); -+ - /* lock the mailbox to prevent pf/vf race condition */ -- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); -+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the message buffer */ -- igb_check_for_msg_pf(hw, vf_number); -- 
igb_check_for_ack_pf(hw, vf_number); -+ e1000_check_for_msg_pf(hw, vf_number); -+ e1000_check_for_ack_pf(hw, vf_number); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) -- array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); -+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); - - /* Interrupt VF to tell it a message has been sent and release buffer*/ -- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); -+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); - - /* update stats */ - hw->mbx.stats.msgs_tx++; -@@ -374,7 +455,7 @@ - } - - /** -- * igb_read_mbx_pf - Read a message from the mailbox -+ * e1000_read_mbx_pf - Read a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer -@@ -384,23 +465,25 @@ - * memory buffer. The presumption is that the caller knows that there was - * a message due to a VF request so no polling for message is needed. - **/ --static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, -- u16 vf_number) -+static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, -+ u16 vf_number) - { - s32 ret_val; - u16 i; - -+ DEBUGFUNC("e1000_read_mbx_pf"); -+ - /* lock the mailbox to prevent pf/vf race condition */ -- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); -+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_read; - - /* copy the message to the mailbox memory buffer */ - for (i = 0; i < size; i++) -- msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); -+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); - - /* Acknowledge the message and release buffer */ -- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); -+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; -@@ -415,29 +498,34 @@ - * - * Initializes the hw->mbx struct to correct values for pf mailbox - */ --s32 igb_init_mbx_params_pf(struct e1000_hw *hw) -+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) - { - struct e1000_mbx_info *mbx = &hw->mbx; - -- mbx->timeout = 0; -- mbx->usec_delay = 0; -- -- mbx->size = E1000_VFMAILBOX_SIZE; -- -- mbx->ops.read = igb_read_mbx_pf; -- mbx->ops.write = igb_write_mbx_pf; -- mbx->ops.read_posted = igb_read_posted_mbx; -- mbx->ops.write_posted = igb_write_posted_mbx; -- mbx->ops.check_for_msg = igb_check_for_msg_pf; -- mbx->ops.check_for_ack = igb_check_for_ack_pf; -- mbx->ops.check_for_rst = igb_check_for_rst_pf; -+ switch (hw->mac.type) { -+ case e1000_82576: -+ case e1000_i350: -+ case e1000_i354: -+ mbx->timeout = 0; -+ mbx->usec_delay = 0; - -- mbx->stats.msgs_tx = 0; -- mbx->stats.msgs_rx = 0; -- mbx->stats.reqs = 0; -- mbx->stats.acks = 0; -- mbx->stats.rsts = 0; -+ mbx->size = E1000_VFMAILBOX_SIZE; - -- return 0; -+ mbx->ops.read = e1000_read_mbx_pf; -+ mbx->ops.write = e1000_write_mbx_pf; -+ mbx->ops.read_posted = e1000_read_posted_mbx; -+ mbx->ops.write_posted = e1000_write_posted_mbx; -+ mbx->ops.check_for_msg = e1000_check_for_msg_pf; -+ mbx->ops.check_for_ack = e1000_check_for_ack_pf; -+ mbx->ops.check_for_rst = e1000_check_for_rst_pf; -+ -+ mbx->stats.msgs_tx = 0; -+ mbx->stats.msgs_rx = 0; -+ mbx->stats.reqs = 0; -+ mbx->stats.acks = 0; -+ mbx->stats.rsts = 0; -+ default: -+ return E1000_SUCCESS; -+ } - } - -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h ---- a/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-13 
09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,30 +1,31 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_MBX_H_ - #define _E1000_MBX_H_ - --#include "e1000_hw.h" -+#include "e1000_api.h" - - #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ - #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -@@ -32,10 +33,10 @@ - #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ - #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - --#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ --#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ --#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ --#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ -+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ -+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ -+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - - #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ - -@@ -43,31 +44,41 @@ - * PF. The reverse is true if it is E1000_PF_*. 
- * Message ACK's are the value or'd with 0xF0000000 - */ --/* Messages below or'd with this are the ACK */ -+/* Msgs below or'd with this are the ACK */ - #define E1000_VT_MSGTYPE_ACK 0x80000000 --/* Messages below or'd with this are the NACK */ -+/* Msgs below or'd with this are the NACK */ - #define E1000_VT_MSGTYPE_NACK 0x40000000 - /* Indicates that VF is still clear to send requests */ - #define E1000_VT_MSGTYPE_CTS 0x20000000 - #define E1000_VT_MSGINFO_SHIFT 16 --/* bits 23:16 are used for exra info for certain messages */ -+/* bits 23:16 are used for extra info for certain messages */ - #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) - --#define E1000_VF_RESET 0x01 /* VF requests reset */ --#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ --#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ --#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ --#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ --#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ -+#define E1000_VF_RESET 0x01 /* VF requests reset */ -+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ -+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ -+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) -+#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) -+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ -+#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) -+#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ -+#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ -+#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) - #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) - --#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -+ -+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -+#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ - --s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); --s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); --s32 igb_check_for_msg(struct e1000_hw *, u16); --s32 igb_check_for_ack(struct e1000_hw *, u16); --s32 igb_check_for_rst(struct e1000_hw *, u16); --s32 igb_init_mbx_params_pf(struct e1000_hw *); -+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); -+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); -+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); -+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); -+s32 e1000_check_for_msg(struct e1000_hw *, u16); -+s32 e1000_check_for_ack(struct e1000_hw *, u16); -+s32 e1000_check_for_rst(struct e1000_hw *, u16); -+void e1000_init_mbx_ops_generic(struct e1000_hw *hw); -+s32 e1000_init_mbx_params_pf(struct e1000_hw *); - - #endif /* _E1000_MBX_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c ---- a/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,63 +1,131 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. 
-- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "e1000_api.h" -+ -+static void e1000_reload_nvm_generic(struct e1000_hw *hw); -+ -+/** -+ * e1000_init_nvm_ops_generic - Initialize NVM function pointers -+ * @hw: pointer to the HW structure - * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+ * Setups up the function pointers to no-op functions -+ **/ -+void e1000_init_nvm_ops_generic(struct e1000_hw *hw) -+{ -+ struct e1000_nvm_info *nvm = &hw->nvm; -+ DEBUGFUNC("e1000_init_nvm_ops_generic"); -+ -+ /* Initialize function pointers */ -+ nvm->ops.init_params = e1000_null_ops_generic; -+ nvm->ops.acquire = e1000_null_ops_generic; -+ nvm->ops.read = e1000_null_read_nvm; -+ nvm->ops.release = e1000_null_nvm_generic; -+ nvm->ops.reload = e1000_reload_nvm_generic; -+ nvm->ops.update = e1000_null_ops_generic; -+ nvm->ops.valid_led_default = e1000_null_led_default; -+ nvm->ops.validate = e1000_null_ops_generic; -+ nvm->ops.write = e1000_null_write_nvm; -+} - --#include --#include -+/** -+ * e1000_null_nvm_read - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, -+ u16 E1000_UNUSEDARG *c) -+{ -+ DEBUGFUNC("e1000_null_read_nvm"); -+ return E1000_SUCCESS; -+} - --#include "e1000_mac.h" --#include "e1000_nvm.h" -+/** -+ * e1000_null_nvm_generic - No-op function, return void -+ * @hw: pointer to the HW structure -+ **/ -+void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_null_nvm_generic"); -+ return; -+} - - /** -- * igb_raise_eec_clk - Raise EEPROM clock -+ * e1000_null_led_default - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 E1000_UNUSEDARG *data) -+{ -+ DEBUGFUNC("e1000_null_led_default"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_null_write_nvm - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 
e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, -+ u16 E1000_UNUSEDARG *c) -+{ -+ DEBUGFUNC("e1000_null_write_nvm"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_raise_eec_clk - Raise EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Enable/Raise the EEPROM clock bit. - **/ --static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) -+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) - { - *eecd = *eecd | E1000_EECD_SK; -- wr32(E1000_EECD, *eecd); -- wrfl(); -- udelay(hw->nvm.delay_usec); -+ E1000_WRITE_REG(hw, E1000_EECD, *eecd); -+ E1000_WRITE_FLUSH(hw); -+ usec_delay(hw->nvm.delay_usec); - } - - /** -- * igb_lower_eec_clk - Lower EEPROM clock -+ * e1000_lower_eec_clk - Lower EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Clear/Lower the EEPROM clock bit. - **/ --static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) -+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) - { - *eecd = *eecd & ~E1000_EECD_SK; -- wr32(E1000_EECD, *eecd); -- wrfl(); -- udelay(hw->nvm.delay_usec); -+ E1000_WRITE_REG(hw, E1000_EECD, *eecd); -+ E1000_WRITE_FLUSH(hw); -+ usec_delay(hw->nvm.delay_usec); - } - - /** -- * igb_shift_out_eec_bits - Shift data bits our to the EEPROM -+ * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM - * @hw: pointer to the HW structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out -@@ -66,12 +134,14 @@ - * "data" parameter will be shifted out to the EEPROM one bit at a time. - * In order to do this, "data" must be broken down into bits. - **/ --static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) -+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) - { - struct e1000_nvm_info *nvm = &hw->nvm; -- u32 eecd = rd32(E1000_EECD); -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); - u32 mask; - -+ DEBUGFUNC("e1000_shift_out_eec_bits"); -+ - mask = 0x01 << (count - 1); - if (nvm->type == e1000_nvm_eeprom_spi) - eecd |= E1000_EECD_DO; -@@ -82,23 +152,23 @@ - if (data & mask) - eecd |= E1000_EECD_DI; - -- wr32(E1000_EECD, eecd); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); -+ E1000_WRITE_FLUSH(hw); - -- udelay(nvm->delay_usec); -+ usec_delay(nvm->delay_usec); - -- igb_raise_eec_clk(hw, &eecd); -- igb_lower_eec_clk(hw, &eecd); -+ e1000_raise_eec_clk(hw, &eecd); -+ e1000_lower_eec_clk(hw, &eecd); - - mask >>= 1; - } while (mask); - - eecd &= ~E1000_EECD_DI; -- wr32(E1000_EECD, eecd); -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); - } - - /** -- * igb_shift_in_eec_bits - Shift data bits in from the EEPROM -+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM - * @hw: pointer to the HW structure - * @count: number of bits to shift in - * -@@ -108,121 +178,124 @@ - * "DO" bit. During this "shifting in" process the data in "DI" bit should - * always be clear. 
- **/ --static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) -+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) - { - u32 eecd; - u32 i; - u16 data; - -- eecd = rd32(E1000_EECD); -+ DEBUGFUNC("e1000_shift_in_eec_bits"); -+ -+ eecd = E1000_READ_REG(hw, E1000_EECD); - - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); - data = 0; - - for (i = 0; i < count; i++) { - data <<= 1; -- igb_raise_eec_clk(hw, &eecd); -+ e1000_raise_eec_clk(hw, &eecd); - -- eecd = rd32(E1000_EECD); -+ eecd = E1000_READ_REG(hw, E1000_EECD); - - eecd &= ~E1000_EECD_DI; - if (eecd & E1000_EECD_DO) - data |= 1; - -- igb_lower_eec_clk(hw, &eecd); -+ e1000_lower_eec_clk(hw, &eecd); - } - - return data; - } - - /** -- * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion -+ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion - * @hw: pointer to the HW structure - * @ee_reg: EEPROM flag for polling - * - * Polls the EEPROM status bit for either read or write completion based - * upon the value of 'ee_reg'. - **/ --static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) -+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) - { - u32 attempts = 100000; - u32 i, reg = 0; -- s32 ret_val = -E1000_ERR_NVM; -+ -+ DEBUGFUNC("e1000_poll_eerd_eewr_done"); - - for (i = 0; i < attempts; i++) { - if (ee_reg == E1000_NVM_POLL_READ) -- reg = rd32(E1000_EERD); -+ reg = E1000_READ_REG(hw, E1000_EERD); - else -- reg = rd32(E1000_EEWR); -+ reg = E1000_READ_REG(hw, E1000_EEWR); - -- if (reg & E1000_NVM_RW_REG_DONE) { -- ret_val = 0; -- break; -- } -+ if (reg & E1000_NVM_RW_REG_DONE) -+ return E1000_SUCCESS; - -- udelay(5); -+ usec_delay(5); - } - -- return ret_val; -+ return -E1000_ERR_NVM; - } - - /** -- * igb_acquire_nvm - Generic request for access to EEPROM -+ * e1000_acquire_nvm_generic - Generic request for access to EEPROM - * @hw: pointer to the HW structure - * - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). - **/ --s32 igb_acquire_nvm(struct e1000_hw *hw) -+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) - { -- u32 eecd = rd32(E1000_EECD); -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); - s32 timeout = E1000_NVM_GRANT_ATTEMPTS; -- s32 ret_val = 0; - -+ DEBUGFUNC("e1000_acquire_nvm_generic"); - -- wr32(E1000_EECD, eecd | E1000_EECD_REQ); -- eecd = rd32(E1000_EECD); -+ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); -+ eecd = E1000_READ_REG(hw, E1000_EECD); - - while (timeout) { - if (eecd & E1000_EECD_GNT) - break; -- udelay(5); -- eecd = rd32(E1000_EECD); -+ usec_delay(5); -+ eecd = E1000_READ_REG(hw, E1000_EECD); - timeout--; - } - - if (!timeout) { - eecd &= ~E1000_EECD_REQ; -- wr32(E1000_EECD, eecd); -- hw_dbg("Could not acquire NVM grant\n"); -- ret_val = -E1000_ERR_NVM; -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); -+ DEBUGOUT("Could not acquire NVM grant\n"); -+ return -E1000_ERR_NVM; - } - -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_standby_nvm - Return EEPROM to standby state -+ * e1000_standby_nvm - Return EEPROM to standby state - * @hw: pointer to the HW structure - * - * Return the EEPROM to a standby state. 
- **/ --static void igb_standby_nvm(struct e1000_hw *hw) -+static void e1000_standby_nvm(struct e1000_hw *hw) - { - struct e1000_nvm_info *nvm = &hw->nvm; -- u32 eecd = rd32(E1000_EECD); -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); -+ -+ DEBUGFUNC("e1000_standby_nvm"); - - if (nvm->type == e1000_nvm_eeprom_spi) { - /* Toggle CS to flush commands */ - eecd |= E1000_EECD_CS; -- wr32(E1000_EECD, eecd); -- wrfl(); -- udelay(nvm->delay_usec); -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); -+ E1000_WRITE_FLUSH(hw); -+ usec_delay(nvm->delay_usec); - eecd &= ~E1000_EECD_CS; -- wr32(E1000_EECD, eecd); -- wrfl(); -- udelay(nvm->delay_usec); -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); -+ E1000_WRITE_FLUSH(hw); -+ usec_delay(nvm->delay_usec); - } - } - -@@ -236,53 +309,57 @@ - { - u32 eecd; - -- eecd = rd32(E1000_EECD); -+ DEBUGFUNC("e1000_stop_nvm"); -+ -+ eecd = E1000_READ_REG(hw, E1000_EECD); - if (hw->nvm.type == e1000_nvm_eeprom_spi) { - /* Pull CS high */ - eecd |= E1000_EECD_CS; -- igb_lower_eec_clk(hw, &eecd); -+ e1000_lower_eec_clk(hw, &eecd); - } - } - - /** -- * igb_release_nvm - Release exclusive access to EEPROM -+ * e1000_release_nvm_generic - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. - **/ --void igb_release_nvm(struct e1000_hw *hw) -+void e1000_release_nvm_generic(struct e1000_hw *hw) - { - u32 eecd; - -+ DEBUGFUNC("e1000_release_nvm_generic"); -+ - e1000_stop_nvm(hw); - -- eecd = rd32(E1000_EECD); -+ eecd = E1000_READ_REG(hw, E1000_EECD); - eecd &= ~E1000_EECD_REQ; -- wr32(E1000_EECD, eecd); -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); - } - - /** -- * igb_ready_nvm_eeprom - Prepares EEPROM for read/write -+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write - * @hw: pointer to the HW structure - * - * Setups the EEPROM for reading and writing. - **/ --static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) -+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) - { - struct e1000_nvm_info *nvm = &hw->nvm; -- u32 eecd = rd32(E1000_EECD); -- s32 ret_val = 0; -- u16 timeout = 0; -+ u32 eecd = E1000_READ_REG(hw, E1000_EECD); - u8 spi_stat_reg; - -+ DEBUGFUNC("e1000_ready_nvm_eeprom"); - - if (nvm->type == e1000_nvm_eeprom_spi) { -+ u16 timeout = NVM_MAX_RETRY_SPI; -+ - /* Clear SK and CS */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); -- wr32(E1000_EECD, eecd); -- wrfl(); -- udelay(1); -- timeout = NVM_MAX_RETRY_SPI; -+ E1000_WRITE_REG(hw, E1000_EECD, eecd); -+ E1000_WRITE_FLUSH(hw); -+ usec_delay(1); - - /* Read "Status Register" repeatedly until the LSB is cleared. - * The EEPROM will signal that the command has been completed -@@ -290,30 +367,28 @@ - * not cleared within 'timeout', then error out. 
- */ - while (timeout) { -- igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, -- hw->nvm.opcode_bits); -- spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); -+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, -+ hw->nvm.opcode_bits); -+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); - if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) - break; - -- udelay(5); -- igb_standby_nvm(hw); -+ usec_delay(5); -+ e1000_standby_nvm(hw); - timeout--; - } - - if (!timeout) { -- hw_dbg("SPI NVM Status error\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -+ DEBUGOUT("SPI NVM Status error\n"); -+ return -E1000_ERR_NVM; - } - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_read_nvm_spi - Read EEPROM's using SPI -+ * e1000_read_nvm_spi - Read EEPROM's using SPI - * @hw: pointer to the HW structure - * @offset: offset of word in the EEPROM to read - * @words: number of words to read -@@ -321,7 +396,7 @@ - * - * Reads a 16 bit word from the EEPROM. - **/ --s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) - { - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i = 0; -@@ -329,51 +404,51 @@ - u16 word_in; - u8 read_opcode = NVM_READ_OPCODE_SPI; - -+ DEBUGFUNC("e1000_read_nvm_spi"); -+ - /* A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { -- hw_dbg("nvm parameter(s) out of bounds\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -+ DEBUGOUT("nvm parameter(s) out of bounds\n"); -+ return -E1000_ERR_NVM; - } - - ret_val = nvm->ops.acquire(hw); - if (ret_val) -- goto out; -+ return ret_val; - -- ret_val = igb_ready_nvm_eeprom(hw); -+ ret_val = e1000_ready_nvm_eeprom(hw); - if (ret_val) - goto release; - -- igb_standby_nvm(hw); -+ e1000_standby_nvm(hw); - - if ((nvm->address_bits == 8) && (offset >= 128)) - read_opcode |= NVM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ -- igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); -- igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); -+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); -+ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); - - /* Read the data. SPI NVMs increment the address with each byte - * read and will roll over if reading beyond the end. This allows - * us to read the whole NVM from any offset - */ - for (i = 0; i < words; i++) { -- word_in = igb_shift_in_eec_bits(hw, 16); -+ word_in = e1000_shift_in_eec_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); - } - - release: - nvm->ops.release(hw); - --out: - return ret_val; - } - - /** -- * igb_read_nvm_eerd - Reads EEPROM using EERD register -+ * e1000_read_nvm_eerd - Reads EEPROM using EERD register - * @hw: pointer to the HW structure - * @offset: offset of word in the EEPROM to read - * @words: number of words to read -@@ -381,41 +456,44 @@ - * - * Reads a 16 bit word from the EEPROM using the EERD register. - **/ --s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) - { - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i, eerd = 0; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("e1000_read_nvm_eerd"); - - /* A check for invalid values: offset too large, too many words, -- * and not enough words. -+ * too many words for the offset, and not enough words. 
- */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { -- hw_dbg("nvm parameter(s) out of bounds\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -+ DEBUGOUT("nvm parameter(s) out of bounds\n"); -+ return -E1000_ERR_NVM; - } - - for (i = 0; i < words; i++) { - eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + -- E1000_NVM_RW_REG_START; -+ E1000_NVM_RW_REG_START; - -- wr32(E1000_EERD, eerd); -- ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); -+ E1000_WRITE_REG(hw, E1000_EERD, eerd); -+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); - if (ret_val) - break; - -- data[i] = (rd32(E1000_EERD) >> -- E1000_NVM_RW_REG_DATA); -+ data[i] = (E1000_READ_REG(hw, E1000_EERD) >> -+ E1000_NVM_RW_REG_DATA); - } - --out: -+ if (ret_val) -+ DEBUGOUT1("NVM read error: %d\n", ret_val); -+ - return ret_val; - } - - /** -- * igb_write_nvm_spi - Write to EEPROM using SPI -+ * e1000_write_nvm_spi - Write to EEPROM using SPI - * @hw: pointer to the HW structure - * @offset: offset within the EEPROM to be written to - * @words: number of words to write -@@ -424,21 +502,23 @@ - * Writes data to EEPROM at offset using SPI interface. - * - * If e1000_update_nvm_checksum is not called after this function , the -- * EEPROM will most likley contain an invalid checksum. -+ * EEPROM will most likely contain an invalid checksum. - **/ --s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) - { - struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val = -E1000_ERR_NVM; - u16 widx = 0; - -+ DEBUGFUNC("e1000_write_nvm_spi"); -+ - /* A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { -- hw_dbg("nvm parameter(s) out of bounds\n"); -- return ret_val; -+ DEBUGOUT("nvm parameter(s) out of bounds\n"); -+ return -E1000_ERR_NVM; - } - - while (widx < words) { -@@ -448,19 +528,19 @@ - if (ret_val) - return ret_val; - -- ret_val = igb_ready_nvm_eeprom(hw); -+ ret_val = e1000_ready_nvm_eeprom(hw); - if (ret_val) { - nvm->ops.release(hw); - return ret_val; - } - -- igb_standby_nvm(hw); -+ e1000_standby_nvm(hw); - - /* Send the WRITE ENABLE command (8 bit opcode) */ -- igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, -+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, - nvm->opcode_bits); - -- igb_standby_nvm(hw); -+ e1000_standby_nvm(hw); - - /* Some SPI eeproms use the 8th address bit embedded in the - * opcode -@@ -469,24 +549,23 @@ - write_opcode |= NVM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ -- igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); -- igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), -+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); -+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), - nvm->address_bits); - - /* Loop to allow for up to whole page write of eeprom */ - while (widx < words) { - u16 word_out = data[widx]; -- - word_out = (word_out >> 8) | (word_out << 8); -- igb_shift_out_eec_bits(hw, word_out, 16); -+ e1000_shift_out_eec_bits(hw, word_out, 16); - widx++; - - if ((((offset + widx) * 2) % nvm->page_size) == 0) { -- igb_standby_nvm(hw); -+ e1000_standby_nvm(hw); - break; - } - } -- usleep_range(1000, 2000); -+ msec_delay(10); - nvm->ops.release(hw); - } - -@@ -494,132 +573,199 @@ - } - - /** -- * igb_read_part_string - Read device part number -+ * 
igb_e1000_read_pba_string_generic - Read device part number - * @hw: pointer to the HW structure -- * @part_num: pointer to device part number -- * @part_num_size: size of part number buffer -+ * @pba_num: pointer to device part number -+ * @pba_num_size: size of part number buffer - * - * Reads the product board assembly (PBA) number from the EEPROM and stores -- * the value in part_num. -+ * the value in pba_num. - **/ --s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, -+ u32 pba_num_size) - { - s32 ret_val; - u16 nvm_data; -- u16 pointer; -+ u16 pba_ptr; - u16 offset; - u16 length; - -- if (part_num == NULL) { -- hw_dbg("PBA string buffer was null\n"); -- ret_val = E1000_ERR_INVALID_ARGUMENT; -- goto out; -+ DEBUGFUNC("igb_e1000_read_pba_string_generic"); -+ -+ if ((hw->mac.type >= e1000_i210) && -+ !e1000_get_flash_presence_i210(hw)) { -+ DEBUGOUT("Flashless no PBA string\n"); -+ return -E1000_ERR_NVM_PBA_SECTION; -+ } -+ -+ if (pba_num == NULL) { -+ DEBUGOUT("PBA string buffer was null\n"); -+ return -E1000_ERR_INVALID_ARGUMENT; - } - - ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - -- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); -+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - - /* if nvm_data is not ptr guard the PBA must be in legacy format which -- * means pointer is actually our second data word for the PBA number -+ * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (nvm_data != NVM_PBA_PTR_GUARD) { -- hw_dbg("NVM PBA number is not stored as string\n"); -+ DEBUGOUT("NVM PBA number is not stored as string\n"); - -- /* we will need 11 characters to store the PBA */ -- if (part_num_size < 11) { -- hw_dbg("PBA string buffer too small\n"); -+ /* make sure callers buffer is big enough to store the PBA */ -+ if (pba_num_size < E1000_PBANUM_LENGTH) { -+ DEBUGOUT("PBA string buffer too small\n"); - return E1000_ERR_NO_SPACE; - } - -- /* extract hex string from data and pointer */ -- part_num[0] = (nvm_data >> 12) & 0xF; -- part_num[1] = (nvm_data >> 8) & 0xF; -- part_num[2] = (nvm_data >> 4) & 0xF; -- part_num[3] = nvm_data & 0xF; -- part_num[4] = (pointer >> 12) & 0xF; -- part_num[5] = (pointer >> 8) & 0xF; -- part_num[6] = '-'; -- part_num[7] = 0; -- part_num[8] = (pointer >> 4) & 0xF; -- part_num[9] = pointer & 0xF; -+ /* extract hex string from data and pba_ptr */ -+ pba_num[0] = (nvm_data >> 12) & 0xF; -+ pba_num[1] = (nvm_data >> 8) & 0xF; -+ pba_num[2] = (nvm_data >> 4) & 0xF; -+ pba_num[3] = nvm_data & 0xF; -+ pba_num[4] = (pba_ptr >> 12) & 0xF; -+ pba_num[5] = (pba_ptr >> 8) & 0xF; -+ pba_num[6] = '-'; -+ pba_num[7] = 0; -+ pba_num[8] = (pba_ptr >> 4) & 0xF; -+ pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ -- part_num[10] = '\0'; -+ pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { -- if (part_num[offset] < 0xA) -- part_num[offset] += '0'; -- else if (part_num[offset] < 0x10) -- part_num[offset] += 'A' - 0xA; -+ if (pba_num[offset] < 0xA) -+ pba_num[offset] += '0'; -+ else 
if (pba_num[offset] < 0x10) -+ pba_num[offset] += 'A' - 0xA; - } - -- goto out; -+ return E1000_SUCCESS; - } - -- ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); -+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - - if (length == 0xFFFF || length == 0) { -- hw_dbg("NVM PBA number section invalid length\n"); -- ret_val = E1000_ERR_NVM_PBA_SECTION; -- goto out; -- } -- /* check if part_num buffer is big enough */ -- if (part_num_size < (((u32)length * 2) - 1)) { -- hw_dbg("PBA string buffer too small\n"); -- ret_val = E1000_ERR_NO_SPACE; -- goto out; -+ DEBUGOUT("NVM PBA number section invalid length\n"); -+ return -E1000_ERR_NVM_PBA_SECTION; -+ } -+ /* check if pba_num buffer is big enough */ -+ if (pba_num_size < (((u32)length * 2) - 1)) { -+ DEBUGOUT("PBA string buffer too small\n"); -+ return -E1000_ERR_NO_SPACE; - } - - /* trim pba length from start of string */ -- pointer++; -+ pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { -- ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); -+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } -- part_num[offset * 2] = (u8)(nvm_data >> 8); -- part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); -+ pba_num[offset * 2] = (u8)(nvm_data >> 8); -+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); - } -- part_num[offset * 2] = '\0'; -+ pba_num[offset * 2] = '\0'; - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_read_mac_addr - Read device MAC address -+ * e1000_read_pba_length_generic - Read device part number length -+ * @hw: pointer to the HW structure -+ * @pba_num_size: size of part number buffer -+ * -+ * Reads the product board assembly (PBA) number length from the EEPROM and -+ * stores the value in pba_num_size. -+ **/ -+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) -+{ -+ s32 ret_val; -+ u16 nvm_data; -+ u16 pba_ptr; -+ u16 length; -+ -+ DEBUGFUNC("e1000_read_pba_length_generic"); -+ -+ if (pba_num_size == NULL) { -+ DEBUGOUT("PBA buffer size was null\n"); -+ return -E1000_ERR_INVALID_ARGUMENT; -+ } -+ -+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; -+ } -+ -+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; -+ } -+ -+ /* if data is not ptr guard the PBA must be in legacy format */ -+ if (nvm_data != NVM_PBA_PTR_GUARD) { -+ *pba_num_size = E1000_PBANUM_LENGTH; -+ return E1000_SUCCESS; -+ } -+ -+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); -+ if (ret_val) { -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; -+ } -+ -+ if (length == 0xFFFF || length == 0) { -+ DEBUGOUT("NVM PBA number section invalid length\n"); -+ return -E1000_ERR_NVM_PBA_SECTION; -+ } -+ -+ /* Convert from length in u16 values to u8 chars, add 1 for NULL, -+ * and subtract 2 because length field is included in length. -+ */ -+ *pba_num_size = ((u32)length * 2) - 1; -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * igb_e1000_read_mac_addr_generic - Read device MAC address - * @hw: pointer to the HW structure - * - * Reads the device MAC address from the EEPROM and stores the value. 
- * Since devices with two ports use the same EEPROM, we increment the - * last bit in the MAC address for the second port. - **/ --s32 igb_read_mac_addr(struct e1000_hw *hw) -+ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw) - { - u32 rar_high; - u32 rar_low; - u16 i; - -- rar_high = rd32(E1000_RAH(0)); -- rar_low = rd32(E1000_RAL(0)); -+ rar_high = E1000_READ_REG(hw, E1000_RAH(0)); -+ rar_low = E1000_READ_REG(hw, E1000_RAL(0)); - - for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); -@@ -627,83 +773,104 @@ - for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); - -- for (i = 0; i < ETH_ALEN; i++) -+ for (i = 0; i < ETH_ADDR_LEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i]; - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_validate_nvm_checksum - Validate EEPROM checksum -+ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ --s32 igb_validate_nvm_checksum(struct e1000_hw *hw) -+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - -+ DEBUGFUNC("e1000_validate_nvm_checksum_generic"); -+ - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); - if (ret_val) { -- hw_dbg("NVM Read Error\n"); -- goto out; -+ DEBUGOUT("NVM Read Error\n"); -+ return ret_val; - } - checksum += nvm_data; - } - - if (checksum != (u16) NVM_SUM) { -- hw_dbg("NVM Checksum Invalid\n"); -- ret_val = -E1000_ERR_NVM; -- goto out; -+ DEBUGOUT("NVM Checksum Invalid\n"); -+ return -E1000_ERR_NVM; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_update_nvm_checksum - Update EEPROM checksum -+ * e1000_update_nvm_checksum_generic - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. - **/ --s32 igb_update_nvm_checksum(struct e1000_hw *hw) -+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) - { -- s32 ret_val; -+ s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - -+ DEBUGFUNC("e1000_update_nvm_checksum"); -+ - for (i = 0; i < NVM_CHECKSUM_REG; i++) { - ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); - if (ret_val) { -- hw_dbg("NVM Read Error while updating checksum.\n"); -- goto out; -+ DEBUGOUT("NVM Read Error while updating checksum.\n"); -+ return ret_val; - } - checksum += nvm_data; - } - checksum = (u16) NVM_SUM - checksum; - ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); - if (ret_val) -- hw_dbg("NVM Write Error while updating checksum.\n"); -+ DEBUGOUT("NVM Write Error while updating checksum.\n"); - --out: - return ret_val; - } - - /** -- * igb_get_fw_version - Get firmware version information -+ * e1000_reload_nvm_generic - Reloads EEPROM - * @hw: pointer to the HW structure -- * @fw_vers: pointer to output structure - * -- * unsupported MAC types will return all 0 version structure -+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the -+ * extended control register. 
- **/ --void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) -+static void e1000_reload_nvm_generic(struct e1000_hw *hw) -+{ -+ u32 ctrl_ext; -+ -+ DEBUGFUNC("e1000_reload_nvm_generic"); -+ -+ usec_delay(10); -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ ctrl_ext |= E1000_CTRL_EXT_EE_RST; -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); -+ E1000_WRITE_FLUSH(hw); -+} -+ -+/** -+ * e1000_get_fw_version - Get firmware version information -+ * @hw: pointer to the HW structure -+ * @fw_vers: pointer to output version structure -+ * -+ * unsupported/not present features return 0 in version structure -+ **/ -+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) - { - u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; - u8 q, hval, rem, result; -@@ -711,17 +878,18 @@ - - memset(fw_vers, 0, sizeof(struct e1000_fw_version)); - -- /* basic eeprom version numbers and bits used vary by part and by tool -- * used to create the nvm images. Check which data format we have. -- */ -- hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); -+ /* basic eeprom version numbers, bits used vary by part and by tool -+ * used to create the nvm images */ -+ /* Check which data format we have */ - switch (hw->mac.type) { - case e1000_i211: -- igb_read_invm_version(hw, fw_vers); -+ e1000_read_invm_version(hw, fw_vers); - return; - case e1000_82575: - case e1000_82576: - case e1000_82580: -+ case e1000_i354: -+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); - /* Use this format, unless EETRACK ID exists, - * then use alternate format - */ -@@ -736,12 +904,13 @@ - } - break; - case e1000_i210: -- if (!(igb_get_flash_presence_i210(hw))) { -- igb_read_invm_version(hw, fw_vers); -+ if (!(e1000_get_flash_presence_i210(hw))) { -+ e1000_read_invm_version(hw, fw_vers); - return; - } - /* fall through */ - case e1000_i350: -+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); - /* find combo image version */ - hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); - if ((comb_offset != 0x0) && -@@ -769,6 +938,7 @@ - } - break; - default: -+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); - return; - } - hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); -@@ -797,5 +967,11 @@ - hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); - fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) - | eeprom_verl; -+ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { -+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); -+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); -+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | -+ eeprom_verl; - } - } -+ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h ---- a/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,41 +1,30 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. 
-- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_NVM_H_ - #define _E1000_NVM_H_ - --s32 igb_acquire_nvm(struct e1000_hw *hw); --void igb_release_nvm(struct e1000_hw *hw); --s32 igb_read_mac_addr(struct e1000_hw *hw); --s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); --s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, -- u32 part_num_size); --s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); --s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); --s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); --s32 igb_validate_nvm_checksum(struct e1000_hw *hw); --s32 igb_update_nvm_checksum(struct e1000_hw *hw); -- - struct e1000_fw_version { - u32 etrack_id; - u16 eep_major; -@@ -51,6 +40,31 @@ - u16 or_build; - u16 or_patch; - }; --void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers); -+ -+void e1000_init_nvm_ops_generic(struct e1000_hw *hw); -+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); -+void e1000_null_nvm_generic(struct e1000_hw *hw); -+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); -+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); -+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); -+ -+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); -+s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw); -+s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, -+ u32 pba_num_size); -+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); -+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, -+ u16 *data); -+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); -+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); -+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, -+ u16 *data); -+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); -+void e1000_release_nvm_generic(struct e1000_hw *hw); -+void e1000_get_fw_version(struct e1000_hw *hw, -+ struct e1000_fw_version *fw_vers); -+ -+#define E1000_STM_OPCODE 0xDB00 - - #endif -diff -Nu 
a/drivers/net/ethernet/intel/igb/e1000_osdep.h b/drivers/net/ethernet/intel/igb/e1000_osdep.h ---- a/drivers/net/ethernet/intel/igb/e1000_osdep.h 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_osdep.h 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,141 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+/* glue for the OS independent part of e1000 -+ * includes register access macros -+ */ -+ -+#ifndef _E1000_OSDEP_H_ -+#define _E1000_OSDEP_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include "kcompat.h" -+ -+#define usec_delay(x) udelay(x) -+#define usec_delay_irq(x) udelay(x) -+#ifndef msec_delay -+#define msec_delay(x) do { \ -+ /* Don't mdelay in interrupt context! */ \ -+ if (in_interrupt()) \ -+ BUG(); \ -+ else \ -+ msleep(x); \ -+} while (0) -+ -+/* Some workarounds require millisecond delays and are run during interrupt -+ * context. Most notably, when establishing link, the phy may need tweaking -+ * but cannot process phy register reads/writes faster than millisecond -+ * intervals...and we establish link due to a "link status change" interrupt. -+ */ -+#define msec_delay_irq(x) mdelay(x) -+ -+#define E1000_READ_REG(x, y) e1000_read_reg(x, y) -+#endif -+ -+#define PCI_COMMAND_REGISTER PCI_COMMAND -+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE -+#define ETH_ADDR_LEN ETH_ALEN -+ -+#ifdef __BIG_ENDIAN -+#define E1000_BIG_ENDIAN __BIG_ENDIAN -+#endif -+ -+#ifdef DEBUG -+#define DEBUGOUT(S) pr_debug(S) -+#define DEBUGOUT1(S, A...) pr_debug(S, ## A) -+#else -+#define DEBUGOUT(S) -+#define DEBUGOUT1(S, A...) 
-+#endif -+ -+#ifdef DEBUG_FUNC -+#define DEBUGFUNC(F) DEBUGOUT(F "\n") -+#else -+#define DEBUGFUNC(F) -+#endif -+#define DEBUGOUT2 DEBUGOUT1 -+#define DEBUGOUT3 DEBUGOUT2 -+#define DEBUGOUT7 DEBUGOUT3 -+ -+#define E1000_REGISTER(a, reg) reg -+ -+/* forward declaration */ -+struct e1000_hw; -+ -+/* write operations, indexed using DWORDS */ -+#define E1000_WRITE_REG(hw, reg, val) \ -+do { \ -+ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ -+ if (!E1000_REMOVED(hw_addr)) \ -+ writel((val), &hw_addr[(reg)]); \ -+} while (0) -+ -+u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); -+ -+#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \ -+ E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val)) -+ -+#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \ -+ e1000_read_reg((hw), (reg) + ((idx) << 2))) -+ -+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY -+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY -+ -+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ -+ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \ -+ ((offset) << 1)))) -+ -+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ -+ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) -+ -+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ -+ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) -+ -+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ -+ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) -+ -+#define E1000_WRITE_REG_IO(a, reg, offset) do { \ -+ outl(reg, ((a)->io_base)); \ -+ outl(offset, ((a)->io_base + 4)); \ -+ } while (0) -+ -+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) -+ -+#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ -+ writel((value), ((a)->flash_address + reg))) -+ -+#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ -+ writew((value), ((a)->flash_address + reg))) -+ -+#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) -+ -+#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg)) -+ -+#define E1000_REMOVED(h) unlikely(!(h)) -+ -+#endif /* _E1000_OSDEP_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c ---- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,147 +1,271 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -- --#include --#include -- --#include "e1000_mac.h" --#include "e1000_phy.h" -- --static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); --static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, -- u16 *phy_ctrl); --static s32 igb_wait_autoneg(struct e1000_hw *hw); --static s32 igb_set_master_slave_mode(struct e1000_hw *hw); -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -+*******************************************************************************/ -+ -+#include "e1000_api.h" -+ -+static s32 e1000_wait_autoneg(struct e1000_hw *hw); - /* Cable length tables */ - static const u16 e1000_m88_cable_length_table[] = { - 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; - #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ -- (sizeof(e1000_m88_cable_length_table) / \ -- sizeof(e1000_m88_cable_length_table[0])) -+ (sizeof(e1000_m88_cable_length_table) / \ -+ sizeof(e1000_m88_cable_length_table[0])) - - static const u16 e1000_igp_2_cable_length_table[] = { -- 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, -- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, -- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, -- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, -- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, -- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, -- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, -- 104, 109, 114, 118, 121, 124}; -+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, -+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, -+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, -+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, -+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, -+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, -+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, -+ 124}; - #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ -- (sizeof(e1000_igp_2_cable_length_table) / \ -- sizeof(e1000_igp_2_cable_length_table[0])) -+ (sizeof(e1000_igp_2_cable_length_table) / \ -+ sizeof(e1000_igp_2_cable_length_table[0])) -+ -+/** -+ * e1000_init_phy_ops_generic - Initialize PHY function pointers -+ * @hw: pointer to the HW structure -+ * -+ * Setups up the function pointers to no-op functions -+ **/ -+void e1000_init_phy_ops_generic(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ DEBUGFUNC("e1000_init_phy_ops_generic"); -+ -+ /* Initialize function pointers */ -+ phy->ops.init_params = e1000_null_ops_generic; -+ phy->ops.acquire = e1000_null_ops_generic; -+ phy->ops.check_polarity = 
e1000_null_ops_generic; -+ phy->ops.check_reset_block = e1000_null_ops_generic; -+ phy->ops.commit = e1000_null_ops_generic; -+ phy->ops.force_speed_duplex = e1000_null_ops_generic; -+ phy->ops.get_cfg_done = e1000_null_ops_generic; -+ phy->ops.get_cable_length = e1000_null_ops_generic; -+ phy->ops.get_info = e1000_null_ops_generic; -+ phy->ops.set_page = e1000_null_set_page; -+ phy->ops.read_reg = e1000_null_read_reg; -+ phy->ops.read_reg_locked = e1000_null_read_reg; -+ phy->ops.read_reg_page = e1000_null_read_reg; -+ phy->ops.release = e1000_null_phy_generic; -+ phy->ops.reset = e1000_null_ops_generic; -+ phy->ops.set_d0_lplu_state = e1000_null_lplu_state; -+ phy->ops.set_d3_lplu_state = e1000_null_lplu_state; -+ phy->ops.write_reg = e1000_null_write_reg; -+ phy->ops.write_reg_locked = e1000_null_write_reg; -+ phy->ops.write_reg_page = e1000_null_write_reg; -+ phy->ops.power_up = e1000_null_phy_generic; -+ phy->ops.power_down = e1000_null_phy_generic; -+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; -+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; -+} -+ -+/** -+ * e1000_null_set_page - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, -+ u16 E1000_UNUSEDARG data) -+{ -+ DEBUGFUNC("e1000_null_set_page"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_null_read_reg - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, -+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) -+{ -+ DEBUGFUNC("e1000_null_read_reg"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_null_phy_generic - No-op function, return void -+ * @hw: pointer to the HW structure -+ **/ -+void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_null_phy_generic"); -+ return; -+} -+ -+/** -+ * e1000_null_lplu_state - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, -+ bool E1000_UNUSEDARG active) -+{ -+ DEBUGFUNC("e1000_null_lplu_state"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_null_write_reg - No-op function, return 0 -+ * @hw: pointer to the HW structure -+ **/ -+s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, -+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) -+{ -+ DEBUGFUNC("e1000_null_write_reg"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_i2c_byte_null - No-op function, return 0 -+ * @hw: pointer to hardware structure -+ * @byte_offset: byte offset to write -+ * @dev_addr: device address -+ * @data: data value read -+ * -+ **/ -+s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, -+ u8 E1000_UNUSEDARG byte_offset, -+ u8 E1000_UNUSEDARG dev_addr, -+ u8 E1000_UNUSEDARG *data) -+{ -+ DEBUGFUNC("e1000_read_i2c_byte_null"); -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_write_i2c_byte_null - No-op function, return 0 -+ * @hw: pointer to hardware structure -+ * @byte_offset: byte offset to write -+ * @dev_addr: device address -+ * @data: data value to write -+ * -+ **/ -+s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, -+ u8 E1000_UNUSEDARG byte_offset, -+ u8 E1000_UNUSEDARG dev_addr, -+ u8 E1000_UNUSEDARG data) -+{ -+ DEBUGFUNC("e1000_write_i2c_byte_null"); -+ return E1000_SUCCESS; -+} - - /** -- * igb_check_reset_block - Check if PHY reset is blocked -+ * e1000_check_reset_block_generic - Check if PHY reset is blocked - * @hw: pointer to the HW structure - 
* - * Read the PHY management control register and check whether a PHY reset -- * is blocked. If a reset is not blocked return 0, otherwise -+ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise - * return E1000_BLK_PHY_RESET (12). - **/ --s32 igb_check_reset_block(struct e1000_hw *hw) -+s32 e1000_check_reset_block_generic(struct e1000_hw *hw) - { - u32 manc; - -- manc = rd32(E1000_MANC); -+ DEBUGFUNC("e1000_check_reset_block"); -+ -+ manc = E1000_READ_REG(hw, E1000_MANC); - -- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; -+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? -+ E1000_BLK_PHY_RESET : E1000_SUCCESS; - } - - /** -- * igb_get_phy_id - Retrieve the PHY ID and revision -+ * e1000_get_phy_id - Retrieve the PHY ID and revision - * @hw: pointer to the HW structure - * - * Reads the PHY registers and stores the PHY ID and possibly the PHY - * revision in the hardware structure. - **/ --s32 igb_get_phy_id(struct e1000_hw *hw) -+s32 e1000_get_phy_id(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u16 phy_id; - -+ DEBUGFUNC("e1000_get_phy_id"); -+ -+ if (!phy->ops.read_reg) -+ return E1000_SUCCESS; -+ - ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); - if (ret_val) -- goto out; -+ return ret_val; - - phy->id = (u32)(phy_id << 16); -- udelay(20); -+ usec_delay(20); - ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); - if (ret_val) -- goto out; -+ return ret_val; - - phy->id |= (u32)(phy_id & PHY_REVISION_MASK); - phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_phy_reset_dsp - Reset PHY DSP -+ * e1000_phy_reset_dsp_generic - Reset PHY DSP - * @hw: pointer to the HW structure - * - * Reset the digital signal processor. - **/ --static s32 igb_phy_reset_dsp(struct e1000_hw *hw) -+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val; - -- if (!(hw->phy.ops.write_reg)) -- goto out; -+ DEBUGFUNC("e1000_phy_reset_dsp_generic"); -+ -+ if (!hw->phy.ops.write_reg) -+ return E1000_SUCCESS; - - ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); - if (ret_val) -- goto out; -- -- ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); -+ return ret_val; - --out: -- return ret_val; -+ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); - } - - /** -- * igb_read_phy_reg_mdic - Read MDI control register -+ * e1000_read_phy_reg_mdic - Read MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * -- * Reads the MDI control regsiter in the PHY at offset and stores the -+ * Reads the MDI control register in the PHY at offset and stores the - * information read to data. 
- **/ --s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) -+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) - { - struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; -- s32 ret_val = 0; -+ -+ DEBUGFUNC("e1000_read_phy_reg_mdic"); - - if (offset > MAX_PHY_REG_ADDRESS) { -- hw_dbg("PHY Address %d is out of range\n", offset); -- ret_val = -E1000_ERR_PARAM; -- goto out; -+ DEBUGOUT1("PHY Address %d is out of range\n", offset); -+ return -E1000_ERR_PARAM; - } - - /* Set up Op-code, Phy Address, and register offset in the MDI -@@ -152,52 +276,55 @@ - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); - -- wr32(E1000_MDIC, mdic); -+ E1000_WRITE_REG(hw, E1000_MDIC, mdic); - - /* Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { -- udelay(50); -- mdic = rd32(E1000_MDIC); -+ usec_delay_irq(50); -+ mdic = E1000_READ_REG(hw, E1000_MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { -- hw_dbg("MDI Read did not complete\n"); -- ret_val = -E1000_ERR_PHY; -- goto out; -+ DEBUGOUT("MDI Read did not complete\n"); -+ return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { -- hw_dbg("MDI Error\n"); -- ret_val = -E1000_ERR_PHY; -- goto out; -+ DEBUGOUT("MDI Error\n"); -+ return -E1000_ERR_PHY; -+ } -+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { -+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", -+ offset, -+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); -+ return -E1000_ERR_PHY; - } - *data = (u16) mdic; - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_write_phy_reg_mdic - Write MDI control register -+ * e1000_write_phy_reg_mdic - Write MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write to register at offset - * - * Writes data to MDI control register in the PHY at offset. 
- **/ --s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) -+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) - { - struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; -- s32 ret_val = 0; -+ -+ DEBUGFUNC("e1000_write_phy_reg_mdic"); - - if (offset > MAX_PHY_REG_ADDRESS) { -- hw_dbg("PHY Address %d is out of range\n", offset); -- ret_val = -E1000_ERR_PARAM; -- goto out; -+ DEBUGOUT1("PHY Address %d is out of range\n", offset); -+ return -E1000_ERR_PARAM; - } - - /* Set up Op-code, Phy Address, and register offset in the MDI -@@ -209,35 +336,38 @@ - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); - -- wr32(E1000_MDIC, mdic); -+ E1000_WRITE_REG(hw, E1000_MDIC, mdic); - - /* Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { -- udelay(50); -- mdic = rd32(E1000_MDIC); -+ usec_delay_irq(50); -+ mdic = E1000_READ_REG(hw, E1000_MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { -- hw_dbg("MDI Write did not complete\n"); -- ret_val = -E1000_ERR_PHY; -- goto out; -+ DEBUGOUT("MDI Write did not complete\n"); -+ return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { -- hw_dbg("MDI Error\n"); -- ret_val = -E1000_ERR_PHY; -- goto out; -+ DEBUGOUT("MDI Error\n"); -+ return -E1000_ERR_PHY; -+ } -+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { -+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", -+ offset, -+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); -+ return -E1000_ERR_PHY; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_read_phy_reg_i2c - Read PHY register using i2c -+ * e1000_read_phy_reg_i2c - Read PHY register using i2c - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data -@@ -245,11 +375,13 @@ - * Reads the PHY register at offset using the i2c interface and stores the - * retrieved information in data. - **/ --s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) -+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) - { - struct e1000_phy_info *phy = &hw->phy; - u32 i, i2ccmd = 0; - -+ DEBUGFUNC("e1000_read_phy_reg_i2c"); -+ - /* Set up Op-code, Phy Address, and register address in the I2CCMD - * register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. -@@ -258,47 +390,49 @@ - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - (E1000_I2CCMD_OPCODE_READ)); - -- wr32(E1000_I2CCMD, i2ccmd); -+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); - - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { -- udelay(50); -- i2ccmd = rd32(E1000_I2CCMD); -+ usec_delay(50); -+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) - break; - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { -- hw_dbg("I2CCMD Read did not complete\n"); -+ DEBUGOUT("I2CCMD Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { -- hw_dbg("I2CCMD Error bit set\n"); -+ DEBUGOUT("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } - - /* Need to byte-swap the 16-bit value. 
*/ - *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_write_phy_reg_i2c - Write PHY register using i2c -+ * e1000_write_phy_reg_i2c - Write PHY register using i2c - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes the data to PHY register at the offset using the i2c interface. - **/ --s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) -+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) - { - struct e1000_phy_info *phy = &hw->phy; - u32 i, i2ccmd = 0; - u16 phy_data_swapped; - -+ DEBUGFUNC("e1000_write_phy_reg_i2c"); -+ - /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ - if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { -- hw_dbg("PHY I2C Address %d is out of range.\n", -+ DEBUGOUT1("PHY I2C Address %d is out of range.\n", - hw->phy.addr); - return -E1000_ERR_CONFIG; - } -@@ -315,29 +449,29 @@ - E1000_I2CCMD_OPCODE_WRITE | - phy_data_swapped); - -- wr32(E1000_I2CCMD, i2ccmd); -+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); - - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { -- udelay(50); -- i2ccmd = rd32(E1000_I2CCMD); -+ usec_delay(50); -+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) - break; - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { -- hw_dbg("I2CCMD Write did not complete\n"); -+ DEBUGOUT("I2CCMD Write did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { -- hw_dbg("I2CCMD Error bit set\n"); -+ DEBUGOUT("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_read_sfp_data_byte - Reads SFP module data. -+ * e1000_read_sfp_data_byte - Reads SFP module data. - * @hw: pointer to the HW structure - * @offset: byte location offset to be read - * @data: read data buffer pointer -@@ -349,14 +483,16 @@ - * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters - * access - **/ --s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) -+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) - { - u32 i = 0; - u32 i2ccmd = 0; - u32 data_local = 0; - -+ DEBUGFUNC("e1000_read_sfp_data_byte"); -+ - if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { -- hw_dbg("I2CCMD command address exceeds upper limit\n"); -+ DEBUGOUT("I2CCMD command address exceeds upper limit\n"); - return -E1000_ERR_PHY; - } - -@@ -367,30 +503,103 @@ - i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_READ); - -- wr32(E1000_I2CCMD, i2ccmd); -+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); - - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { -- udelay(50); -- data_local = rd32(E1000_I2CCMD); -+ usec_delay(50); -+ data_local = E1000_READ_REG(hw, E1000_I2CCMD); - if (data_local & E1000_I2CCMD_READY) - break; - } - if (!(data_local & E1000_I2CCMD_READY)) { -- hw_dbg("I2CCMD Read did not complete\n"); -+ DEBUGOUT("I2CCMD Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (data_local & E1000_I2CCMD_ERROR) { -- hw_dbg("I2CCMD Error bit set\n"); -+ DEBUGOUT("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } - *data = (u8) data_local & 0xFF; - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_read_phy_reg_igp - Read igp PHY register -+ * e1000_write_sfp_data_byte - Writes SFP module data. 
-+ * @hw: pointer to the HW structure -+ * @offset: byte location offset to write to -+ * @data: data to write -+ * -+ * Writes one byte to SFP module data stored -+ * in SFP resided EEPROM memory or SFP diagnostic area. -+ * Function should be called with -+ * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access -+ * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters -+ * access -+ **/ -+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) -+{ -+ u32 i = 0; -+ u32 i2ccmd = 0; -+ u32 data_local = 0; -+ -+ DEBUGFUNC("e1000_write_sfp_data_byte"); -+ -+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { -+ DEBUGOUT("I2CCMD command address exceeds upper limit\n"); -+ return -E1000_ERR_PHY; -+ } -+ /* The programming interface is 16 bits wide -+ * so we need to read the whole word first -+ * then update appropriate byte lane and write -+ * the updated word back. -+ */ -+ /* Set up Op-code, EEPROM Address,in the I2CCMD -+ * register. The MAC will take care of interfacing -+ * with an EEPROM to write the data given. -+ */ -+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | -+ E1000_I2CCMD_OPCODE_READ); -+ /* Set a command to read single word */ -+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); -+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { -+ usec_delay(50); -+ /* Poll the ready bit to see if lastly -+ * launched I2C operation completed -+ */ -+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); -+ if (i2ccmd & E1000_I2CCMD_READY) { -+ /* Check if this is READ or WRITE phase */ -+ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == -+ E1000_I2CCMD_OPCODE_READ) { -+ /* Write the selected byte -+ * lane and update whole word -+ */ -+ data_local = i2ccmd & 0xFF00; -+ data_local |= data; -+ i2ccmd = ((offset << -+ E1000_I2CCMD_REG_ADDR_SHIFT) | -+ E1000_I2CCMD_OPCODE_WRITE | data_local); -+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); -+ } else { -+ break; -+ } -+ } -+ } -+ if (!(i2ccmd & E1000_I2CCMD_READY)) { -+ DEBUGOUT("I2CCMD Write did not complete\n"); -+ return -E1000_ERR_PHY; -+ } -+ if (i2ccmd & E1000_I2CCMD_ERROR) { -+ DEBUGOUT("I2CCMD Error bit set\n"); -+ return -E1000_ERR_PHY; -+ } -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_phy_reg_m88 - Read m88 PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data -@@ -399,38 +608,29 @@ - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. 
- **/ --s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) -+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) - { -- s32 ret_val = 0; -+ s32 ret_val; - -- if (!(hw->phy.ops.acquire)) -- goto out; -+ DEBUGFUNC("e1000_read_phy_reg_m88"); -+ -+ if (!hw->phy.ops.acquire) -+ return E1000_SUCCESS; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) -- goto out; -- -- if (offset > MAX_PHY_MULTI_PAGE_REG) { -- ret_val = igb_write_phy_reg_mdic(hw, -- IGP01E1000_PHY_PAGE_SELECT, -- (u16)offset); -- if (ret_val) { -- hw->phy.ops.release(hw); -- goto out; -- } -- } -+ return ret_val; - -- ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, -- data); -+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, -+ data); - - hw->phy.ops.release(hw); - --out: - return ret_val; - } - - /** -- * igb_write_phy_reg_igp - Write igp PHY register -+ * e1000_write_phy_reg_m88 - Write m88 PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset -@@ -438,80 +638,415 @@ - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ --s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) -+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) - { -- s32 ret_val = 0; -+ s32 ret_val; -+ -+ DEBUGFUNC("e1000_write_phy_reg_m88"); - -- if (!(hw->phy.ops.acquire)) -- goto out; -+ if (!hw->phy.ops.acquire) -+ return E1000_SUCCESS; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) -- goto out; -+ return ret_val; - -- if (offset > MAX_PHY_MULTI_PAGE_REG) { -- ret_val = igb_write_phy_reg_mdic(hw, -- IGP01E1000_PHY_PAGE_SELECT, -- (u16)offset); -- if (ret_val) { -- hw->phy.ops.release(hw); -- goto out; -- } -+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, -+ data); -+ -+ hw->phy.ops.release(hw); -+ -+ return ret_val; -+} -+ -+/** -+ * igb_e1000_set_page_igp - Set page as on IGP-like PHY(s) -+ * @hw: pointer to the HW structure -+ * @page: page to set (shifted left when necessary) -+ * -+ * Sets PHY page required for PHY register access. Assumes semaphore is -+ * already acquired. Note, this function sets phy.addr to 1 so the caller -+ * must set it appropriately (if necessary) after this function returns. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page) -+{ -+ DEBUGFUNC("igb_e1000_set_page_igp"); -+ -+ DEBUGOUT1("Setting page 0x%x\n", page); -+ -+ hw->phy.addr = 1; -+ -+ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); -+} -+ -+/** -+ * __e1000_read_phy_reg_igp - Read igp PHY register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * @locked: semaphore has already been acquired or not -+ * -+ * Acquires semaphore, if necessary, then reads the PHY register at offset -+ * and stores the retrieved information in data. Release any acquired -+ * semaphores before exiting. 
-+ **/ -+static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, -+ bool locked) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("__e1000_read_phy_reg_igp"); -+ -+ if (!locked) { -+ if (!hw->phy.ops.acquire) -+ return E1000_SUCCESS; -+ -+ ret_val = hw->phy.ops.acquire(hw); -+ if (ret_val) -+ return ret_val; - } - -- ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, -- data); -+ if (offset > MAX_PHY_MULTI_PAGE_REG) -+ ret_val = e1000_write_phy_reg_mdic(hw, -+ IGP01E1000_PHY_PAGE_SELECT, -+ (u16)offset); -+ if (!ret_val) -+ ret_val = e1000_read_phy_reg_mdic(hw, -+ MAX_PHY_REG_ADDRESS & offset, -+ data); -+ if (!locked) -+ hw->phy.ops.release(hw); - -- hw->phy.ops.release(hw); -+ return ret_val; -+} -+ -+/** -+ * e1000_read_phy_reg_igp - Read igp PHY register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * -+ * Acquires semaphore then reads the PHY register at offset and stores the -+ * retrieved information in data. -+ * Release the acquired semaphore before exiting. -+ **/ -+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) -+{ -+ return __e1000_read_phy_reg_igp(hw, offset, data, false); -+} -+ -+/** -+ * e1000_read_phy_reg_igp_locked - Read igp PHY register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * -+ * Reads the PHY register at offset and stores the retrieved information -+ * in data. Assumes semaphore already acquired. -+ **/ -+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) -+{ -+ return __e1000_read_phy_reg_igp(hw, offset, data, true); -+} -+ -+/** -+ * e1000_write_phy_reg_igp - Write igp PHY register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to write to -+ * @data: data to write at register offset -+ * @locked: semaphore has already been acquired or not -+ * -+ * Acquires semaphore, if necessary, then writes the data to PHY register -+ * at the offset. Release any acquired semaphores before exiting. -+ **/ -+static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, -+ bool locked) -+{ -+ s32 ret_val = E1000_SUCCESS; -+ -+ DEBUGFUNC("e1000_write_phy_reg_igp"); -+ -+ if (!locked) { -+ if (!hw->phy.ops.acquire) -+ return E1000_SUCCESS; -+ -+ ret_val = hw->phy.ops.acquire(hw); -+ if (ret_val) -+ return ret_val; -+ } -+ -+ if (offset > MAX_PHY_MULTI_PAGE_REG) -+ ret_val = e1000_write_phy_reg_mdic(hw, -+ IGP01E1000_PHY_PAGE_SELECT, -+ (u16)offset); -+ if (!ret_val) -+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & -+ offset, -+ data); -+ if (!locked) -+ hw->phy.ops.release(hw); - --out: - return ret_val; - } - - /** -- * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link -+ * e1000_write_phy_reg_igp - Write igp PHY register - * @hw: pointer to the HW structure -+ * @offset: register offset to write to -+ * @data: data to write at register offset - * -- * Sets up Carrier-sense on Transmit and downshift values. -+ * Acquires semaphore then writes the data to PHY register -+ * at the offset. Release any acquired semaphores before exiting. 
- **/ --s32 igb_copper_link_setup_82580(struct e1000_hw *hw) -+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) -+{ -+ return __e1000_write_phy_reg_igp(hw, offset, data, false); -+} -+ -+/** -+ * e1000_write_phy_reg_igp_locked - Write igp PHY register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to write to -+ * @data: data to write at register offset -+ * -+ * Writes the data to PHY register at the offset. -+ * Assumes semaphore already acquired. -+ **/ -+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) -+{ -+ return __e1000_write_phy_reg_igp(hw, offset, data, true); -+} -+ -+/** -+ * __e1000_read_kmrn_reg - Read kumeran register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * @locked: semaphore has already been acquired or not -+ * -+ * Acquires semaphore, if necessary. Then reads the PHY register at offset -+ * using the kumeran interface. The information retrieved is stored in data. -+ * Release any acquired semaphores before exiting. -+ **/ -+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, -+ bool locked) -+{ -+ u32 kmrnctrlsta; -+ -+ DEBUGFUNC("__e1000_read_kmrn_reg"); -+ -+ if (!locked) { -+ s32 ret_val = E1000_SUCCESS; -+ -+ if (!hw->phy.ops.acquire) -+ return E1000_SUCCESS; -+ -+ ret_val = hw->phy.ops.acquire(hw); -+ if (ret_val) -+ return ret_val; -+ } -+ -+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & -+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; -+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); -+ E1000_WRITE_FLUSH(hw); -+ -+ usec_delay(2); -+ -+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); -+ *data = (u16)kmrnctrlsta; -+ -+ if (!locked) -+ hw->phy.ops.release(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_read_kmrn_reg_generic - Read kumeran register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * -+ * Acquires semaphore then reads the PHY register at offset using the -+ * kumeran interface. The information retrieved is stored in data. -+ * Release the acquired semaphore before exiting. -+ **/ -+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) -+{ -+ return __e1000_read_kmrn_reg(hw, offset, data, false); -+} -+ -+/** -+ * e1000_read_kmrn_reg_locked - Read kumeran register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to be read -+ * @data: pointer to the read data -+ * -+ * Reads the PHY register at offset using the kumeran interface. The -+ * information retrieved is stored in data. -+ * Assumes semaphore already acquired. -+ **/ -+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) -+{ -+ return __e1000_read_kmrn_reg(hw, offset, data, true); -+} -+ -+/** -+ * __e1000_write_kmrn_reg - Write kumeran register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to write to -+ * @data: data to write at register offset -+ * @locked: semaphore has already been acquired or not -+ * -+ * Acquires semaphore, if necessary. Then write the data to PHY register -+ * at the offset using the kumeran interface. Release any acquired semaphores -+ * before exiting. 
-+ **/ -+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, -+ bool locked) -+{ -+ u32 kmrnctrlsta; -+ -+ DEBUGFUNC("e1000_write_kmrn_reg_generic"); -+ -+ if (!locked) { -+ s32 ret_val = E1000_SUCCESS; -+ -+ if (!hw->phy.ops.acquire) -+ return E1000_SUCCESS; -+ -+ ret_val = hw->phy.ops.acquire(hw); -+ if (ret_val) -+ return ret_val; -+ } -+ -+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & -+ E1000_KMRNCTRLSTA_OFFSET) | data; -+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); -+ E1000_WRITE_FLUSH(hw); -+ -+ usec_delay(2); -+ -+ if (!locked) -+ hw->phy.ops.release(hw); -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_write_kmrn_reg_generic - Write kumeran register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to write to -+ * @data: data to write at register offset -+ * -+ * Acquires semaphore then writes the data to the PHY register at the offset -+ * using the kumeran interface. Release the acquired semaphore before exiting. -+ **/ -+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) -+{ -+ return __e1000_write_kmrn_reg(hw, offset, data, false); -+} -+ -+/** -+ * e1000_write_kmrn_reg_locked - Write kumeran register -+ * @hw: pointer to the HW structure -+ * @offset: register offset to write to -+ * @data: data to write at register offset -+ * -+ * Write the data to PHY register at the offset using the kumeran interface. -+ * Assumes semaphore already acquired. -+ **/ -+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) -+{ -+ return __e1000_write_kmrn_reg(hw, offset, data, true); -+} -+ -+/** -+ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode -+ * @hw: pointer to the HW structure -+ * -+ * Sets up Master/slave mode -+ **/ -+static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) - { -- struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - -- if (phy->reset_disable) { -- ret_val = 0; -- goto out; -+ /* Resolve Master/Slave mode */ -+ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); -+ if (ret_val) -+ return ret_val; -+ -+ /* load defaults for future use */ -+ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? -+ ((phy_data & CR_1000T_MS_VALUE) ? -+ e1000_ms_force_master : -+ e1000_ms_force_slave) : e1000_ms_auto; -+ -+ switch (hw->phy.ms_type) { -+ case e1000_ms_force_master: -+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); -+ break; -+ case e1000_ms_force_slave: -+ phy_data |= CR_1000T_MS_ENABLE; -+ phy_data &= ~(CR_1000T_MS_VALUE); -+ break; -+ case e1000_ms_auto: -+ phy_data &= ~CR_1000T_MS_ENABLE; -+ /* fall-through */ -+ default: -+ break; - } - -- if (phy->type == e1000_phy_82580) { -+ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); -+} -+ -+/** -+ * igb_e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link -+ * @hw: pointer to the HW structure -+ * -+ * Sets up Carrier-sense on Transmit and downshift values. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw) -+{ -+ s32 ret_val; -+ u16 phy_data; -+ -+ DEBUGFUNC("igb_e1000_copper_link_setup_82577"); -+ -+ if (hw->phy.reset_disable) -+ return E1000_SUCCESS; -+ -+ if (hw->phy.type == e1000_phy_82580) { - ret_val = hw->phy.ops.reset(hw); - if (ret_val) { -- hw_dbg("Error resetting the PHY.\n"); -- goto out; -+ DEBUGOUT("Error resetting the PHY.\n"); -+ return ret_val; - } - } - -- /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ -- ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); -+ /* Enable CRS on Tx. This must be set for half-duplex operation. */ -+ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; -+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; - - /* Enable downshift */ -- phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; -+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - -- ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); -+ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Set MDI/MDIX mode */ -- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); -+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); - if (ret_val) -- goto out; -- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; -+ return ret_val; -+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; - /* Options: - * 0 - Auto (default) - * 1 - MDI mode -@@ -521,41 +1056,42 @@ - case 1: - break; - case 2: -- phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; -+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; - break; - case 0: - default: -- phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; -+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; - break; - } -- ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); -+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); -+ if (ret_val) -+ return ret_val; - --out: -- return ret_val; -+ return e1000_set_master_slave_mode(hw); - } - - /** -- * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link -+ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock - * and downshift values are set also. - **/ --s32 igb_copper_link_setup_m88(struct e1000_hw *hw) -+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - -- if (phy->reset_disable) { -- ret_val = 0; -- goto out; -- } -+ DEBUGFUNC("e1000_copper_link_setup_m88"); -+ -+ if (phy->reset_disable) -+ return E1000_SUCCESS; - -- /* Enable CRS on TX. This must be set for half-duplex operation. */ -+ /* Enable CRS on Tx. This must be set for half-duplex operation. 
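The MDI/MDIX handling above boils down to clearing a two-bit field and re-encoding the requested mode (0 auto, 1 MDI, 2 MDI-X). A self-contained sketch of that mapping; the CTRL2_* values below are placeholders, not the real I82577_PHY_CTRL2_* constants.

#include <stdint.h>

#define CTRL2_MDIX_CFG_MASK   0x0060	/* illustrative values only */
#define CTRL2_MANUAL_MDIX     0x0020
#define CTRL2_AUTO_MDI_MDIX   0x0040

static uint16_t apply_mdix_setting(uint16_t phy_ctrl2, uint8_t mdix)
{
	phy_ctrl2 &= ~CTRL2_MDIX_CFG_MASK;	/* start from a clean field */

	switch (mdix) {
	case 1:					/* forced MDI: leave the field at 0 */
		break;
	case 2:
		phy_ctrl2 |= CTRL2_MANUAL_MDIX;	/* forced MDI-X */
		break;
	case 0:
	default:
		phy_ctrl2 |= CTRL2_AUTO_MDI_MDIX; /* auto crossover (default) */
		break;
	}
	return phy_ctrl2;
}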
*/ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - -@@ -591,12 +1127,12 @@ - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; -- if (phy->disable_polarity_correction == 1) -+ if (phy->disable_polarity_correction) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - if (phy->revision < E1000_REVISION_4) { - /* Force TX_CLK in the Extended PHY Specific Control Register -@@ -605,7 +1141,7 @@ - ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - phy_data |= M88E1000_EPSCR_TX_CLK_25; - -@@ -617,42 +1153,43 @@ - } else { - /* Configure Master and Slave downshift values */ - phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | -- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); -+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); - phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); - } - ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); - if (ret_val) -- goto out; -+ return ret_val; - } - - /* Commit the changes. */ -- ret_val = igb_phy_sw_reset(hw); -+ ret_val = phy->ops.commit(hw); - if (ret_val) { -- hw_dbg("Error committing the PHY changes\n"); -- goto out; -+ DEBUGOUT("Error committing the PHY changes\n"); -+ return ret_val; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link -+ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. - * Also enables and sets the downshift parameters. - **/ --s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) -+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - -+ DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); -+ - if (phy->reset_disable) -- return 0; -+ return E1000_SUCCESS; - - /* Enable CRS on Tx. This must be set for half-duplex operation. */ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); -@@ -694,7 +1231,7 @@ - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; -- if (phy->disable_polarity_correction == 1) -+ if (phy->disable_polarity_correction) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - - /* Enable downshift and setting it to X6 */ -@@ -705,9 +1242,9 @@ - if (ret_val) - return ret_val; - -- ret_val = igb_phy_sw_reset(hw); -+ ret_val = phy->ops.commit(hw); - if (ret_val) { -- hw_dbg("Error committing the PHY changes\n"); -+ DEBUGOUT("Error committing the PHY changes\n"); - return ret_val; - } - } -@@ -721,70 +1258,60 @@ - return ret_val; - - /* Commit the changes. 
*/ -- ret_val = igb_phy_sw_reset(hw); -+ ret_val = phy->ops.commit(hw); - if (ret_val) { -- hw_dbg("Error committing the PHY changes\n"); -+ DEBUGOUT("Error committing the PHY changes\n"); - return ret_val; - } -- ret_val = igb_set_master_slave_mode(hw); -+ -+ ret_val = e1000_set_master_slave_mode(hw); - if (ret_val) - return ret_val; - -- return 0; -+ return E1000_SUCCESS; - } - - /** -- * igb_copper_link_setup_igp - Setup igp PHY's for copper link -+ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for - * igp PHY's. - **/ --s32 igb_copper_link_setup_igp(struct e1000_hw *hw) -+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - -- if (phy->reset_disable) { -- ret_val = 0; -- goto out; -- } -+ DEBUGFUNC("e1000_copper_link_setup_igp"); -+ -+ if (phy->reset_disable) -+ return E1000_SUCCESS; - -- ret_val = phy->ops.reset(hw); -+ ret_val = hw->phy.ops.reset(hw); - if (ret_val) { -- hw_dbg("Error resetting the PHY.\n"); -- goto out; -+ DEBUGOUT("Error resetting the PHY.\n"); -+ return ret_val; - } - - /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid - * timeout issues when LFS is enabled. - */ -- msleep(100); -+ msec_delay(100); - -- /* The NVM settings will configure LPLU in D3 for -- * non-IGP1 PHYs. -- */ -- if (phy->type == e1000_phy_igp) { -- /* disable lplu d3 during driver init */ -- if (phy->ops.set_d3_lplu_state) -- ret_val = phy->ops.set_d3_lplu_state(hw, false); -+ /* disable lplu d0 during driver init */ -+ if (hw->phy.ops.set_d0_lplu_state) { -+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); - if (ret_val) { -- hw_dbg("Error Disabling LPLU D3\n"); -- goto out; -+ DEBUGOUT("Error Disabling LPLU D0\n"); -+ return ret_val; - } - } -- -- /* disable lplu d0 during driver init */ -- ret_val = phy->ops.set_d0_lplu_state(hw, false); -- if (ret_val) { -- hw_dbg("Error Disabling LPLU D0\n"); -- goto out; -- } - /* Configure mdi-mdix settings */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); - if (ret_val) -- goto out; -+ return ret_val; - - data &= ~IGP01E1000_PSCR_AUTO_MDIX; - -@@ -802,7 +1329,7 @@ - } - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); - if (ret_val) -- goto out; -+ return ret_val; - - /* set auto-master slave resolution settings */ - if (hw->mac.autoneg) { -@@ -816,124 +1343,34 @@ - IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) -- goto out; -+ return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Set auto Master/Slave resolution process */ - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); - if (ret_val) -- goto out; -+ return ret_val; - - data &= ~CR_1000T_MS_ENABLE; - ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); - if (ret_val) -- goto out; -- } -- -- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); -- if (ret_val) -- goto out; -- -- /* load defaults for future use */ -- phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? -- ((data & CR_1000T_MS_VALUE) ? 
-- e1000_ms_force_master : -- e1000_ms_force_slave) : -- e1000_ms_auto; -- -- switch (phy->ms_type) { -- case e1000_ms_force_master: -- data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); -- break; -- case e1000_ms_force_slave: -- data |= CR_1000T_MS_ENABLE; -- data &= ~(CR_1000T_MS_VALUE); -- break; -- case e1000_ms_auto: -- data &= ~CR_1000T_MS_ENABLE; -- default: -- break; -+ return ret_val; - } -- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); -- if (ret_val) -- goto out; -- } -- --out: -- return ret_val; --} -- --/** -- * igb_copper_link_autoneg - Setup/Enable autoneg for copper link -- * @hw: pointer to the HW structure -- * -- * Performs initial bounds checking on autoneg advertisement parameter, then -- * configure to advertise the full capability. Setup the PHY to autoneg -- * and restart the negotiation process between the link partner. If -- * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. -- **/ --static s32 igb_copper_link_autoneg(struct e1000_hw *hw) --{ -- struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val; -- u16 phy_ctrl; -- -- /* Perform some bounds checking on the autoneg advertisement -- * parameter. -- */ -- phy->autoneg_advertised &= phy->autoneg_mask; -- -- /* If autoneg_advertised is zero, we assume it was not defaulted -- * by the calling code so we set to advertise full capability. -- */ -- if (phy->autoneg_advertised == 0) -- phy->autoneg_advertised = phy->autoneg_mask; -- -- hw_dbg("Reconfiguring auto-neg advertisement params\n"); -- ret_val = igb_phy_setup_autoneg(hw); -- if (ret_val) { -- hw_dbg("Error Setting up Auto-Negotiation\n"); -- goto out; -- } -- hw_dbg("Restarting Auto-Neg\n"); -- -- /* Restart auto-negotiation by setting the Auto Neg Enable bit and -- * the Auto Neg Restart bit in the PHY control register. -- */ -- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); -- if (ret_val) -- goto out; - -- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); -- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); -- if (ret_val) -- goto out; -- -- /* Does the user want to wait for Auto-Neg to complete here, or -- * check at a later time (for example, callback routine). -- */ -- if (phy->autoneg_wait_to_complete) { -- ret_val = igb_wait_autoneg(hw); -- if (ret_val) { -- hw_dbg("Error while waiting for autoneg to complete\n"); -- goto out; -- } -+ ret_val = e1000_set_master_slave_mode(hw); - } - -- hw->mac.get_link_status = true; -- --out: - return ret_val; - } - - /** -- * igb_phy_setup_autoneg - Configure PHY for auto-negotiation -+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation - * @hw: pointer to the HW structure - * - * Reads the MII auto-neg advertisement register and/or the 1000T control -@@ -941,26 +1378,28 @@ - * return successful. Otherwise, setup advertisement and flow control to - * the appropriate values for the wanted auto-negotiation. - **/ --static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) -+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 mii_autoneg_adv_reg; - u16 mii_1000t_ctrl_reg = 0; - -+ DEBUGFUNC("e1000_phy_setup_autoneg"); -+ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); - if (ret_val) -- goto out; -+ return ret_val; - - if (phy->autoneg_mask & ADVERTISE_1000_FULL) { - /* Read the MII 1000Base-T Control Register (Address 9). 
*/ - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, - &mii_1000t_ctrl_reg); - if (ret_val) -- goto out; -+ return ret_val; - } - - /* Need to parse both autoneg_advertised and fc and set up -@@ -980,39 +1419,39 @@ - NWAY_AR_10T_HD_CAPS); - mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); - -- hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); -+ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); - - /* Do we want to advertise 10 Mb Half Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_10_HALF) { -- hw_dbg("Advertise 10mb Half duplex\n"); -+ DEBUGOUT("Advertise 10mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; - } - - /* Do we want to advertise 10 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_10_FULL) { -- hw_dbg("Advertise 10mb Full duplex\n"); -+ DEBUGOUT("Advertise 10mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; - } - - /* Do we want to advertise 100 Mb Half Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_100_HALF) { -- hw_dbg("Advertise 100mb Half duplex\n"); -+ DEBUGOUT("Advertise 100mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; - } - - /* Do we want to advertise 100 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_100_FULL) { -- hw_dbg("Advertise 100mb Full duplex\n"); -+ DEBUGOUT("Advertise 100mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; - } - - /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ - if (phy->autoneg_advertised & ADVERTISE_1000_HALF) -- hw_dbg("Advertise 1000mb Half duplex request denied!\n"); -+ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); - - /* Do we want to advertise 1000 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { -- hw_dbg("Advertise 1000mb Full duplex\n"); -+ DEBUGOUT("Advertise 1000mb Full duplex\n"); - mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; - } - -@@ -1029,68 +1468,126 @@ - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * but we do not support receiving pause frames). -- * 3: Both Rx and TX flow control (symmetric) are enabled. -+ * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: No software override. The flow control configuration - * in the EEPROM is used. - */ - switch (hw->fc.current_mode) { - case e1000_fc_none: -- /* Flow control (RX & TX) is completely disabled by a -+ /* Flow control (Rx & Tx) is completely disabled by a - * software over-ride. - */ - mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case e1000_fc_rx_pause: -- /* RX Flow control is enabled, and TX Flow control is -+ /* Rx Flow control is enabled, and Tx Flow control is - * disabled, by a software over-ride. - * - * Since there really isn't a way to advertise that we are -- * capable of RX Pause ONLY, we will advertise that we -- * support both symmetric and asymmetric RX PAUSE. Later -+ * capable of Rx Pause ONLY, we will advertise that we -+ * support both symmetric and asymmetric Rx PAUSE. Later - * (in e1000_config_fc_after_link_up) we will disable the - * hw's ability to send PAUSE frames. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case e1000_fc_tx_pause: -- /* TX Flow control is enabled, and RX Flow control is -+ /* Tx Flow control is enabled, and Rx Flow control is - * disabled, by a software over-ride. 
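The advertisement block above is a straight mapping from the driver's autoneg_advertised flag word onto capability bits in two registers: the 10/100 modes land in the MII advertisement register and 1000-full lands in the 1000T control register, while 1000-half is never advertised. A condensed sketch with placeholder flag and bit values (the real names are ADVERTISE_*, NWAY_AR_* and CR_1000T_*):

#include <stdint.h>

#define ADV_10_HALF   0x01
#define ADV_10_FULL   0x02
#define ADV_100_HALF  0x04
#define ADV_100_FULL  0x08
#define ADV_1000_FULL 0x20

#define AR_10T_HD   0x0020
#define AR_10T_FD   0x0040
#define AR_100TX_HD 0x0080
#define AR_100TX_FD 0x0100
#define CR_1000T_FD 0x0200

static void build_advertisement(uint16_t advertised,
				uint16_t *mii_adv, uint16_t *ctrl_1000t)
{
	*mii_adv &= ~(AR_10T_HD | AR_10T_FD | AR_100TX_HD | AR_100TX_FD);
	*ctrl_1000t &= ~CR_1000T_FD;

	if (advertised & ADV_10_HALF)
		*mii_adv |= AR_10T_HD;
	if (advertised & ADV_10_FULL)
		*mii_adv |= AR_10T_FD;
	if (advertised & ADV_100_HALF)
		*mii_adv |= AR_100TX_HD;
	if (advertised & ADV_100_FULL)
		*mii_adv |= AR_100TX_FD;
	if (advertised & ADV_1000_FULL)
		*ctrl_1000t |= CR_1000T_FD;	/* 1000 half-duplex is never advertised */
}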
- */ - mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; - mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; - break; - case e1000_fc_full: -- /* Flow control (both RX and TX) is enabled by a software -+ /* Flow control (both Rx and Tx) is enabled by a software - * over-ride. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - default: -- hw_dbg("Flow control param set incorrectly\n"); -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ DEBUGOUT("Flow control param set incorrectly\n"); -+ return -E1000_ERR_CONFIG; - } - - ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); - if (ret_val) -- goto out; -+ return ret_val; - -- hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); -+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - -- if (phy->autoneg_mask & ADVERTISE_1000_FULL) { -- ret_val = phy->ops.write_reg(hw, -- PHY_1000T_CTRL, -+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) -+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, - mii_1000t_ctrl_reg); -- if (ret_val) -- goto out; -+ -+ return ret_val; -+} -+ -+/** -+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link -+ * @hw: pointer to the HW structure -+ * -+ * Performs initial bounds checking on autoneg advertisement parameter, then -+ * configure to advertise the full capability. Setup the PHY to autoneg -+ * and restart the negotiation process between the link partner. If -+ * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. -+ **/ -+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val; -+ u16 phy_ctrl; -+ -+ DEBUGFUNC("e1000_copper_link_autoneg"); -+ -+ /* Perform some bounds checking on the autoneg advertisement -+ * parameter. -+ */ -+ phy->autoneg_advertised &= phy->autoneg_mask; -+ -+ /* If autoneg_advertised is zero, we assume it was not defaulted -+ * by the calling code so we set to advertise full capability. -+ */ -+ if (!phy->autoneg_advertised) -+ phy->autoneg_advertised = phy->autoneg_mask; -+ -+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); -+ ret_val = e1000_phy_setup_autoneg(hw); -+ if (ret_val) { -+ DEBUGOUT("Error Setting up Auto-Negotiation\n"); -+ return ret_val; -+ } -+ DEBUGOUT("Restarting Auto-Neg\n"); -+ -+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and -+ * the Auto Neg Restart bit in the PHY control register. -+ */ -+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); -+ if (ret_val) -+ return ret_val; -+ -+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); -+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); -+ if (ret_val) -+ return ret_val; -+ -+ /* Does the user want to wait for Auto-Neg to complete here, or -+ * check at a later time (for example, callback routine). -+ */ -+ if (phy->autoneg_wait_to_complete) { -+ ret_val = e1000_wait_autoneg(hw); -+ if (ret_val) { -+ DEBUGOUT("Error while waiting for autoneg to complete\n"); -+ return ret_val; -+ } - } - --out: -+ hw->mac.get_link_status = true; -+ - return ret_val; - } - - /** -- * igb_setup_copper_link - Configure copper link settings -+ * e1000_setup_copper_link_generic - Configure copper link settings - * @hw: pointer to the HW structure - * - * Calls the appropriate function to configure the link for auto-neg or forced -@@ -1098,129 +1595,134 @@ - * to configure collision distance and flow control are called. If link is - * not established, we return -E1000_ERR_PHY (-2). 
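The flow-control switch above encodes each mode into the PAUSE and ASM_DIR advertisement bits; rx-pause-only still advertises both bits because the standard has no rx-only code point, and the transmit side is trimmed later in e1000_config_fc_after_link_up. A rough stand-alone version of that mapping (ADV_PAUSE and ADV_ASM_DIR are placeholders for NWAY_AR_PAUSE and NWAY_AR_ASM_DIR):

#include <stdint.h>

#define ADV_PAUSE    0x0400
#define ADV_ASM_DIR  0x0800

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static int set_fc_adv_bits(uint16_t *adv, enum fc_mode mode)
{
	*adv &= ~(ADV_PAUSE | ADV_ASM_DIR);

	switch (mode) {
	case FC_NONE:				/* no pause frames in either direction */
		break;
	case FC_RX_PAUSE:			/* advertise symmetric + asymmetric, */
	case FC_FULL:				/* tx is disabled later if rx-only was wanted */
		*adv |= ADV_PAUSE | ADV_ASM_DIR;
		break;
	case FC_TX_PAUSE:
		*adv |= ADV_ASM_DIR;		/* asymmetric toward the link partner */
		break;
	default:
		return -1;			/* bad configuration */
	}
	return 0;
}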
- **/ --s32 igb_setup_copper_link(struct e1000_hw *hw) -+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) - { - s32 ret_val; - bool link; - -+ DEBUGFUNC("e1000_setup_copper_link_generic"); -+ - if (hw->mac.autoneg) { - /* Setup autoneg and flow control advertisement and perform - * autonegotiation. - */ -- ret_val = igb_copper_link_autoneg(hw); -+ ret_val = e1000_copper_link_autoneg(hw); - if (ret_val) -- goto out; -+ return ret_val; - } else { - /* PHY will be set to 10H, 10F, 100H or 100F - * depending on user settings. - */ -- hw_dbg("Forcing Speed and Duplex\n"); -+ DEBUGOUT("Forcing Speed and Duplex\n"); - ret_val = hw->phy.ops.force_speed_duplex(hw); - if (ret_val) { -- hw_dbg("Error Forcing Speed and Duplex\n"); -- goto out; -+ DEBUGOUT("Error Forcing Speed and Duplex\n"); -+ return ret_val; - } - } - - /* Check link status. Wait up to 100 microseconds for link to become - * valid. - */ -- ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); -+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, -+ &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (link) { -- hw_dbg("Valid link established!!!\n"); -- igb_config_collision_dist(hw); -- ret_val = igb_config_fc_after_link_up(hw); -+ DEBUGOUT("Valid link established!!!\n"); -+ hw->mac.ops.config_collision_dist(hw); -+ ret_val = e1000_config_fc_after_link_up_generic(hw); - } else { -- hw_dbg("Unable to establish link!!!\n"); -+ DEBUGOUT("Unable to establish link!!!\n"); - } - --out: - return ret_val; - } - - /** -- * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY -+ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Waits for link and returns - * successful if link up is successful, else -E1000_ERR_PHY (-2). - **/ --s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) -+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - -+ DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); -+ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- igb_phy_force_speed_duplex_setup(hw, &phy_data); -+ e1000_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Clear Auto-Crossover to force MDI manually. IGP requires MDI - * forced whenever speed and duplex are forced. 
- */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- hw_dbg("IGP PSCR: %X\n", phy_data); -+ DEBUGOUT1("IGP PSCR: %X\n", phy_data); - -- udelay(1); -+ usec_delay(1); - - if (phy->autoneg_wait_to_complete) { -- hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); -+ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); - -- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) -- hw_dbg("Link taking longer than expected.\n"); -+ DEBUGOUT("Link taking longer than expected.\n"); - - /* Try once more */ -- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); -- if (ret_val) -- goto out; -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); - } - --out: - return ret_val; - } - - /** -- * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY -+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Resets the PHY to commit the - * changes. If time expires while waiting for link up, we reset the DSP. -- * After reset, TX_CLK and CRS on TX must be set. Return successful upon -+ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon - * successful completion, else return corresponding error code. - **/ --s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) -+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - -+ DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); -+ - /* I210 and I211 devices support Auto-Crossover in forced operation. */ - if (phy->type != e1000_phy_i210) { - /* Clear Auto-Crossover to force MDI manually. M88E1000 -@@ -1229,45 +1731,49 @@ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, - &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, - phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- hw_dbg("M88E1000 PSCR: %X\n", phy_data); -+ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); - } - - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- igb_phy_force_speed_duplex_setup(hw, &phy_data); -+ e1000_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Reset the phy to commit changes. 
*/ -- ret_val = igb_phy_sw_reset(hw); -+ ret_val = hw->phy.ops.commit(hw); - if (ret_val) -- goto out; -+ return ret_val; - - if (phy->autoneg_wait_to_complete) { -- hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); -+ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); - -- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) { - bool reset_dsp = true; - - switch (hw->phy.id) { - case I347AT4_E_PHY_ID: -+ case M88E1340M_E_PHY_ID: - case M88E1112_E_PHY_ID: -+ case M88E1543_E_PHY_ID: -+ case M88E1512_E_PHY_ID: - case I210_I_PHY_ID: - reset_dsp = false; - break; -@@ -1276,9 +1782,10 @@ - reset_dsp = false; - break; - } -- if (!reset_dsp) -- hw_dbg("Link taking longer than expected.\n"); -- else { -+ -+ if (!reset_dsp) { -+ DEBUGOUT("Link taking longer than expected.\n"); -+ } else { - /* We didn't get link. - * Reset the DSP and cross our fingers. - */ -@@ -1286,29 +1793,35 @@ - M88E1000_PHY_PAGE_SELECT, - 0x001d); - if (ret_val) -- goto out; -- ret_val = igb_phy_reset_dsp(hw); -+ return ret_val; -+ ret_val = e1000_phy_reset_dsp_generic(hw); - if (ret_val) -- goto out; -+ return ret_val; - } - } - - /* Try once more */ -- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, -- 100000, &link); -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); - if (ret_val) -- goto out; -+ return ret_val; - } - -- if (hw->phy.type != e1000_phy_m88 || -- hw->phy.id == I347AT4_E_PHY_ID || -- hw->phy.id == M88E1112_E_PHY_ID || -- hw->phy.id == I210_I_PHY_ID) -- goto out; -+ if (hw->phy.type != e1000_phy_m88) -+ return E1000_SUCCESS; - -+ if (hw->phy.id == I347AT4_E_PHY_ID || -+ hw->phy.id == M88E1340M_E_PHY_ID || -+ hw->phy.id == M88E1112_E_PHY_ID) -+ return E1000_SUCCESS; -+ if (hw->phy.id == I210_I_PHY_ID) -+ return E1000_SUCCESS; -+ if ((hw->phy.id == M88E1543_E_PHY_ID) || -+ (hw->phy.id == M88E1512_E_PHY_ID)) -+ return E1000_SUCCESS; - ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Resetting the phy means we need to re-force TX_CLK in the - * Extended PHY Specific Control Register to 25MHz clock from -@@ -1317,24 +1830,88 @@ - phy_data |= M88E1000_EPSCR_TX_CLK_25; - ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* In addition, we must re-enable CRS on Tx for both half and full - * duplex. - */ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - --out: - return ret_val; - } - - /** -- * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex -+ * igb_e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex -+ * @hw: pointer to the HW structure -+ * -+ * Forces the speed and duplex settings of the PHY. -+ * This is a function pointer entry point only called by -+ * PHY setup routines. 
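When the forced link does not come up, the m88 path above resets the PHY's DSP and polls one more time; only the PHY IDs listed in the switch skip the DSP reset. A compact sketch of that retry shape, with hypothetical helpers standing in for e1000_phy_has_link_generic and the DSP reset:

#include <stdbool.h>

extern int poll_for_link(unsigned tries, unsigned usec_between, bool *up);
extern int reset_dsp(void);

static int wait_forced_link(bool phy_supports_dsp_reset)
{
	bool up = false;
	int ret = poll_for_link(20, 100000, &up);

	if (ret)
		return ret;
	if (!up && phy_supports_dsp_reset) {
		ret = reset_dsp();	/* no link yet: reset the DSP and hope */
		if (ret)
			return ret;
	}
	/* try once more regardless, mirroring the original flow */
	return poll_for_link(20, 100000, &up);
}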
-+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val; -+ u16 data; -+ bool link; -+ -+ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_ife"); -+ -+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); -+ if (ret_val) -+ return ret_val; -+ -+ e1000_phy_force_speed_duplex_setup(hw, &data); -+ -+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); -+ if (ret_val) -+ return ret_val; -+ -+ /* Disable MDI-X support for 10/100 */ -+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); -+ if (ret_val) -+ return ret_val; -+ -+ data &= ~IFE_PMC_AUTO_MDIX; -+ data &= ~IFE_PMC_FORCE_MDIX; -+ -+ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); -+ if (ret_val) -+ return ret_val; -+ -+ DEBUGOUT1("IFE PMC: %X\n", data); -+ -+ usec_delay(1); -+ -+ if (phy->autoneg_wait_to_complete) { -+ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); -+ -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); -+ if (ret_val) -+ return ret_val; -+ -+ if (!link) -+ DEBUGOUT("Link taking longer than expected.\n"); -+ -+ /* Try once more */ -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); -+ if (ret_val) -+ return ret_val; -+ } -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex - * @hw: pointer to the HW structure - * @phy_ctrl: pointer to current value of PHY_CONTROL - * -@@ -1345,17 +1922,18 @@ - * caller must write to the PHY_CONTROL register for these settings to - * take affect. - **/ --static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, -- u16 *phy_ctrl) -+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) - { - struct e1000_mac_info *mac = &hw->mac; - u32 ctrl; - -+ DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); -+ - /* Turn off flow control when forcing speed/duplex */ - hw->fc.current_mode = e1000_fc_none; - - /* Force speed/duplex on the mac */ -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ctrl &= ~E1000_CTRL_SPD_SEL; - -@@ -1369,33 +1947,32 @@ - if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { - ctrl &= ~E1000_CTRL_FD; - *phy_ctrl &= ~MII_CR_FULL_DUPLEX; -- hw_dbg("Half Duplex\n"); -+ DEBUGOUT("Half Duplex\n"); - } else { - ctrl |= E1000_CTRL_FD; - *phy_ctrl |= MII_CR_FULL_DUPLEX; -- hw_dbg("Full Duplex\n"); -+ DEBUGOUT("Full Duplex\n"); - } - - /* Forcing 10mb or 100mb? 
*/ - if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { - ctrl |= E1000_CTRL_SPD_100; - *phy_ctrl |= MII_CR_SPEED_100; -- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); -- hw_dbg("Forcing 100mb\n"); -+ *phy_ctrl &= ~MII_CR_SPEED_1000; -+ DEBUGOUT("Forcing 100mb\n"); - } else { - ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); -- *phy_ctrl |= MII_CR_SPEED_10; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); -- hw_dbg("Forcing 10mb\n"); -+ DEBUGOUT("Forcing 10mb\n"); - } - -- igb_config_collision_dist(hw); -+ hw->mac.ops.config_collision_dist(hw); - -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - } - - /** -- * igb_set_d3_lplu_state - Sets low power link up state for D3 -+ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * -@@ -1408,25 +1985,27 @@ - * During driver activity, SmartSpeed should be enabled so performance is - * maintained. - **/ --s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) -+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -+ s32 ret_val; - u16 data; - -- if (!(hw->phy.ops.read_reg)) -- goto out; -+ DEBUGFUNC("e1000_set_d3_lplu_state_generic"); -+ -+ if (!hw->phy.ops.read_reg) -+ return E1000_SUCCESS; - - ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); - if (ret_val) -- goto out; -+ return ret_val; - - if (!active) { - data &= ~IGP02E1000_PM_D3_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); - if (ret_val) -- goto out; -+ return ret_val; - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable -@@ -1437,176 +2016,219 @@ - IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) -- goto out; -+ return ret_val; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) -- goto out; -+ return ret_val; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = phy->ops.read_reg(hw, -- IGP01E1000_PHY_PORT_CONFIG, -- &data); -+ IGP01E1000_PHY_PORT_CONFIG, -+ &data); - if (ret_val) -- goto out; -+ return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) -- goto out; -+ return ret_val; - } - } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || - (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || - (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { - data |= IGP02E1000_PM_D3_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, -- data); -+ data); - if (ret_val) -- goto out; -+ return ret_val; - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) -- goto out; -+ return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - } - --out: - return ret_val; - } - - /** -- * igb_check_downshift - Checks whether a downshift in speed occurred -+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns 1 - * - * A downshift is detected by querying the PHY link health. 
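The D3 LPLU handler above treats LPLU and SmartSpeed as mutually exclusive: when LPLU is turned off for normal operation it restores the configured SmartSpeed setting, and when LPLU is turned on for low-power states it clears SmartSpeed. A schematic version of that decision with placeholder bit names, ignoring the advertised-speed checks the real code also applies:

#include <stdbool.h>
#include <stdint.h>

#define PM_D3_LPLU      0x0004	/* placeholder for IGP02E1000_PM_D3_LPLU */
#define PSCFR_SMART_SPD 0x0080	/* placeholder for IGP01E1000_PSCFR_SMART_SPEED */

static void resolve_d3_lplu(bool lplu_active, bool smart_speed_cfg_on,
			    uint16_t *power_mgmt, uint16_t *port_config)
{
	if (!lplu_active) {
		*power_mgmt &= ~PM_D3_LPLU;		/* full performance while the driver is active */
		if (smart_speed_cfg_on)
			*port_config |= PSCFR_SMART_SPD;
		else
			*port_config &= ~PSCFR_SMART_SPD;
	} else {
		*power_mgmt |= PM_D3_LPLU;		/* save power in Dx states */
		*port_config &= ~PSCFR_SMART_SPD;	/* LPLU excludes SmartSpeed */
	}
}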
- **/ --s32 igb_check_downshift(struct e1000_hw *hw) -+s32 e1000_check_downshift_generic(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, offset, mask; - -+ DEBUGFUNC("e1000_check_downshift_generic"); -+ - switch (phy->type) { - case e1000_phy_i210: - case e1000_phy_m88: - case e1000_phy_gg82563: -- offset = M88E1000_PHY_SPEC_STATUS; -- mask = M88E1000_PSSR_DOWNSHIFT; -+ offset = M88E1000_PHY_SPEC_STATUS; -+ mask = M88E1000_PSSR_DOWNSHIFT; - break; - case e1000_phy_igp_2: -- case e1000_phy_igp: - case e1000_phy_igp_3: -- offset = IGP01E1000_PHY_LINK_HEALTH; -- mask = IGP01E1000_PLHR_SS_DOWNGRADE; -+ offset = IGP01E1000_PHY_LINK_HEALTH; -+ mask = IGP01E1000_PLHR_SS_DOWNGRADE; - break; - default: - /* speed downshift not supported */ - phy->speed_downgraded = false; -- ret_val = 0; -- goto out; -+ return E1000_SUCCESS; - } - - ret_val = phy->ops.read_reg(hw, offset, &phy_data); - - if (!ret_val) -- phy->speed_downgraded = (phy_data & mask) ? true : false; -+ phy->speed_downgraded = !!(phy_data & mask); - --out: - return ret_val; - } - - /** -- * igb_check_polarity_m88 - Checks the polarity. -+ * igb_e1000_check_polarity_m88 - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY specific status register. - **/ --s32 igb_check_polarity_m88(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - -+ DEBUGFUNC("igb_e1000_check_polarity_m88"); -+ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); - - if (!ret_val) -- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) -- ? e1000_rev_polarity_reversed -- : e1000_rev_polarity_normal; -+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) -+ ? e1000_rev_polarity_reversed -+ : e1000_rev_polarity_normal); -+ -+ return ret_val; -+} -+ -+/** -+ * igb_e1000_check_polarity_igp - Checks the polarity. -+ * @hw: pointer to the HW structure -+ * -+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2) -+ * -+ * Polarity is determined based on the PHY port status register, and the -+ * current speed (since there is no polarity at 100Mbps). -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val; -+ u16 data, offset, mask; -+ -+ DEBUGFUNC("igb_e1000_check_polarity_igp"); -+ -+ /* Polarity is determined based on the speed of -+ * our connection. -+ */ -+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); -+ if (ret_val) -+ return ret_val; -+ -+ if ((data & IGP01E1000_PSSR_SPEED_MASK) == -+ IGP01E1000_PSSR_SPEED_1000MBPS) { -+ offset = IGP01E1000_PHY_PCS_INIT_REG; -+ mask = IGP01E1000_PHY_POLARITY_MASK; -+ } else { -+ /* This really only applies to 10Mbps since -+ * there is no polarity for 100Mbps (always 0). -+ */ -+ offset = IGP01E1000_PHY_PORT_STATUS; -+ mask = IGP01E1000_PSSR_POLARITY_REVERSED; -+ } -+ -+ ret_val = phy->ops.read_reg(hw, offset, &data); -+ -+ if (!ret_val) -+ phy->cable_polarity = ((data & mask) -+ ? e1000_rev_polarity_reversed -+ : e1000_rev_polarity_normal); - - return ret_val; - } - - /** -- * igb_check_polarity_igp - Checks the polarity. 
-+ * igb_e1000_check_polarity_ife - Check cable polarity for IFE PHY - * @hw: pointer to the HW structure - * -- * Success returns 0, Failure returns -E1000_ERR_PHY (-2) -- * -- * Polarity is determined based on the PHY port status register, and the -- * current speed (since there is no polarity at 100Mbps). -+ * Polarity is determined on the polarity reversal feature being enabled. - **/ --static s32 igb_check_polarity_igp(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; -- u16 data, offset, mask; -+ u16 phy_data, offset, mask; - -- /* Polarity is determined based on the speed of -- * our connection. -- */ -- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); -- if (ret_val) -- goto out; -+ DEBUGFUNC("igb_e1000_check_polarity_ife"); - -- if ((data & IGP01E1000_PSSR_SPEED_MASK) == -- IGP01E1000_PSSR_SPEED_1000MBPS) { -- offset = IGP01E1000_PHY_PCS_INIT_REG; -- mask = IGP01E1000_PHY_POLARITY_MASK; -+ /* Polarity is determined based on the reversal feature being enabled. -+ */ -+ if (phy->polarity_correction) { -+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL; -+ mask = IFE_PESC_POLARITY_REVERSED; - } else { -- /* This really only applies to 10Mbps since -- * there is no polarity for 100Mbps (always 0). -- */ -- offset = IGP01E1000_PHY_PORT_STATUS; -- mask = IGP01E1000_PSSR_POLARITY_REVERSED; -+ offset = IFE_PHY_SPECIAL_CONTROL; -+ mask = IFE_PSC_FORCE_POLARITY; - } - -- ret_val = phy->ops.read_reg(hw, offset, &data); -+ ret_val = phy->ops.read_reg(hw, offset, &phy_data); - - if (!ret_val) -- phy->cable_polarity = (data & mask) -- ? e1000_rev_polarity_reversed -- : e1000_rev_polarity_normal; -+ phy->cable_polarity = ((phy_data & mask) -+ ? e1000_rev_polarity_reversed -+ : e1000_rev_polarity_normal); - --out: - return ret_val; - } - - /** -- * igb_wait_autoneg - Wait for auto-neg completion -+ * e1000_wait_autoneg - Wait for auto-neg completion - * @hw: pointer to the HW structure - * - * Waits for auto-negotiation to complete or for the auto-negotiation time - * limit to expire, which ever happens first. - **/ --static s32 igb_wait_autoneg(struct e1000_hw *hw) -+static s32 e1000_wait_autoneg(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u16 i, phy_status; - -+ DEBUGFUNC("e1000_wait_autoneg"); -+ -+ if (!hw->phy.ops.read_reg) -+ return E1000_SUCCESS; -+ - /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ - for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); -@@ -1617,7 +2239,7 @@ - break; - if (phy_status & MII_SR_AUTONEG_COMPLETE) - break; -- msleep(100); -+ msec_delay(100); - } - - /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation -@@ -1627,7 +2249,7 @@ - } - - /** -- * igb_phy_has_link - Polls PHY for link -+ * e1000_phy_has_link_generic - Polls PHY for link - * @hw: pointer to the HW structure - * @iterations: number of times to poll for link - * @usec_interval: delay between polling attempts -@@ -1635,27 +2257,32 @@ - * - * Polls the PHY status register for link, 'iterations' number of times. 
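e1000_wait_autoneg above, like the link-poll helper that follows it, is a bounded polling loop over the sticky PHY status register: read it twice per iteration, break when the bit of interest is set, sleep between attempts, and report whether the loop ended early. A generic sketch of that loop with hypothetical status-read and delay helpers:

#include <stdbool.h>
#include <stdint.h>

#define SR_LINK_UP 0x0004	/* placeholder for MII_SR_LINK_STATUS */

extern int read_status(uint16_t *status);
extern void sleep_usec(unsigned usec);

static int poll_link(unsigned iterations, unsigned usec_interval, bool *up)
{
	uint16_t status = 0;
	unsigned i;
	int ret = 0;

	for (i = 0; i < iterations; i++) {
		/* first read clears the sticky bit, second reflects reality */
		ret = read_status(&status);
		if (ret)
			sleep_usec(usec_interval);	/* another owner may hold the bus */
		ret = read_status(&status);
		if (ret)
			break;
		if (status & SR_LINK_UP)
			break;
		sleep_usec(usec_interval);
	}

	/* true only if we broke out early; the caller must also check ret */
	*up = (i < iterations);
	return ret;
}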
- **/ --s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, -- u32 usec_interval, bool *success) -+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, -+ u32 usec_interval, bool *success) - { -- s32 ret_val = 0; -+ s32 ret_val = E1000_SUCCESS; - u16 i, phy_status; - -+ DEBUGFUNC("e1000_phy_has_link_generic"); -+ -+ if (!hw->phy.ops.read_reg) -+ return E1000_SUCCESS; -+ - for (i = 0; i < iterations; i++) { - /* Some PHYs require the PHY_STATUS register to be read - * twice due to the link bit being sticky. No harm doing - * it across the board. - */ - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); -- if (ret_val && usec_interval > 0) { -+ if (ret_val) { - /* If the first read fails, another entity may have - * ownership of the resources, wait and try again to - * see if they have relinquished the resources yet. - */ - if (usec_interval >= 1000) -- mdelay(usec_interval/1000); -+ msec_delay(usec_interval/1000); - else -- udelay(usec_interval); -+ usec_delay(usec_interval); - } - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) -@@ -1663,18 +2290,18 @@ - if (phy_status & MII_SR_LINK_STATUS) - break; - if (usec_interval >= 1000) -- mdelay(usec_interval/1000); -+ msec_delay(usec_interval/1000); - else -- udelay(usec_interval); -+ usec_delay(usec_interval); - } - -- *success = (i < iterations) ? true : false; -+ *success = (i < iterations); - - return ret_val; - } - - /** -- * igb_get_cable_length_m88 - Determine cable length for m88 PHY -+ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY - * @hw: pointer to the HW structure - * - * Reads the PHY specific status register to retrieve the cable length -@@ -1688,37 +2315,40 @@ - * 3 110 - 140 meters - * 4 > 140 meters - **/ --s32 igb_get_cable_length_m88(struct e1000_hw *hw) -+s32 e1000_get_cable_length_m88(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, index; - -+ DEBUGFUNC("e1000_get_cable_length_m88"); -+ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> -- M88E1000_PSSR_CABLE_LENGTH_SHIFT; -- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { -- ret_val = -E1000_ERR_PHY; -- goto out; -- } -+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> -+ M88E1000_PSSR_CABLE_LENGTH_SHIFT); -+ -+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) -+ return -E1000_ERR_PHY; - - phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - --s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) -+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; -- u16 phy_data, phy_data2, index, default_page, is_cm; -+ u16 phy_data, phy_data2, is_cm; -+ u16 index, default_page; -+ -+ DEBUGFUNC("e1000_get_cable_length_m88_gen2"); - - switch (hw->phy.id) { - case I210_I_PHY_ID: -@@ -1743,27 +2373,29 @@ - phy->cable_length = phy_data / (is_cm ? 
100 : 1); - break; - case M88E1543_E_PHY_ID: -+ case M88E1512_E_PHY_ID: -+ case M88E1340M_E_PHY_ID: - case I347AT4_E_PHY_ID: - /* Remember the original page select and set it to 7 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); - if (ret_val) -- goto out; -+ return ret_val; - - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), - &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); - if (ret_val) -- goto out; -+ return ret_val; - - is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); - -@@ -1772,34 +2404,34 @@ - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); - -- /* Reset the page selec to its original value */ -+ /* Reset the page select to its original value */ - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, - default_page); - if (ret_val) -- goto out; -+ return ret_val; - break; -+ - case M88E1112_E_PHY_ID: - /* Remember the original page select and set it to 5 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, - &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; -- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { -- ret_val = -E1000_ERR_PHY; -- goto out; -- } -+ -+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) -+ return -E1000_ERR_PHY; - - phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; -@@ -1811,20 +2443,18 @@ - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, - default_page); - if (ret_val) -- goto out; -+ return ret_val; - - break; - default: -- ret_val = -E1000_ERR_PHY; -- goto out; -+ return -E1000_ERR_PHY; - } - --out: - return ret_val; - } - - /** -- * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY -+ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY - * @hw: pointer to the HW structure - * - * The automatic gain control (agc) normalizes the amplitude of the -@@ -1834,10 +2464,10 @@ - * into a lookup table to obtain the approximate cable length - * for each channel. - **/ --s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) -+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val = 0; -+ s32 ret_val; - u16 phy_data, i, agc_value = 0; - u16 cur_agc_index, max_agc_index = 0; - u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; -@@ -1848,26 +2478,26 @@ - IGP02E1000_PHY_AGC_D - }; - -+ DEBUGFUNC("e1000_get_cable_length_igp_2"); -+ - /* Read the AGC registers for all channels */ - for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { - ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - /* Getting bits 15:9, which represent the combination of - * coarse and fine gain values. The result is a number - * that can be put into the lookup table to obtain the - * approximate cable length. 
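The m88 cable-length lookup above is a masked field extraction followed by a bounded table lookup: the register field indexes a small table whose adjacent entries give the min/max estimate, and the reported length is their midpoint. A self-contained sketch with an illustrative table (the driver's e1000_m88_cable_length_table and the real mask/shift differ):

#include <stdint.h>

static const uint16_t cable_len_tbl[] = { 0, 50, 80, 110, 140, 140 };
#define CABLE_TBL_SIZE  (sizeof(cable_len_tbl) / sizeof(cable_len_tbl[0]))

#define LEN_FIELD_MASK  0x0380
#define LEN_FIELD_SHIFT 7

static int cable_length_from_status(uint16_t spec_status,
				    uint16_t *min_m, uint16_t *max_m, uint16_t *len_m)
{
	uint16_t index = (spec_status & LEN_FIELD_MASK) >> LEN_FIELD_SHIFT;

	if (index >= CABLE_TBL_SIZE - 1)
		return -2;			/* out of range, like -E1000_ERR_PHY */

	*min_m = cable_len_tbl[index];
	*max_m = cable_len_tbl[index + 1];
	*len_m = (*min_m + *max_m) / 2;		/* report the midpoint */
	return 0;
}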
- */ -- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & -- IGP02E1000_AGC_LENGTH_MASK; -+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & -+ IGP02E1000_AGC_LENGTH_MASK); - - /* Array index bound check. */ - if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || -- (cur_agc_index == 0)) { -- ret_val = -E1000_ERR_PHY; -- goto out; -- } -+ (cur_agc_index == 0)) -+ return -E1000_ERR_PHY; - - /* Remove min & max AGC values from calculation. */ - if (e1000_igp_2_cable_length_table[min_agc_index] > -@@ -1885,18 +2515,17 @@ - agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); - - /* Calculate cable length with the error range of +/- 10 meters. */ -- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? -- (agc_value - IGP02E1000_AGC_RANGE) : 0; -+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? -+ (agc_value - IGP02E1000_AGC_RANGE) : 0); - phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_get_phy_info_m88 - Retrieve PHY information -+ * e1000_get_phy_info_m88 - Retrieve PHY information - * @hw: pointer to the HW structure - * - * Valid for only copper links. Read the PHY status register (sticky read) -@@ -1905,54 +2534,54 @@ - * special status register to determine MDI/MDIx and current speed. If - * speed is 1000, then determine cable length, local and remote receiver. - **/ --s32 igb_get_phy_info_m88(struct e1000_hw *hw) -+s32 e1000_get_phy_info_m88(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - -+ DEBUGFUNC("e1000_get_phy_info_m88"); -+ - if (phy->media_type != e1000_media_type_copper) { -- hw_dbg("Phy info is only valid for copper media\n"); -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ DEBUGOUT("Phy info is only valid for copper media\n"); -+ return -E1000_ERR_CONFIG; - } - -- ret_val = igb_phy_has_link(hw, 1, 0, &link); -+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) { -- hw_dbg("Phy info is only valid if link is up\n"); -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ DEBUGOUT("Phy info is only valid if link is up\n"); -+ return -E1000_ERR_CONFIG; - } - - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) -- ? true : false; -+ phy->polarity_correction = !!(phy_data & -+ M88E1000_PSCR_POLARITY_REVERSAL); - -- ret_val = igb_check_polarity_m88(hw); -+ ret_val = igb_e1000_check_polarity_m88(hw); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; -+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); - - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { -- ret_val = phy->ops.get_cable_length(hw); -+ ret_val = hw->phy.ops.get_cable_length(hw); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - - phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) - ? 
e1000_1000t_rx_status_ok -@@ -1968,12 +2597,11 @@ - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - --out: - return ret_val; - } - - /** -- * igb_get_phy_info_igp - Retrieve igp PHY information -+ * e1000_get_phy_info_igp - Retrieve igp PHY information - * @hw: pointer to the HW structure - * - * Read PHY status to determine if link is up. If link is up, then -@@ -1981,44 +2609,45 @@ - * PHY port status to determine MDI/MDIx and speed. Based on the speed, - * determine on the cable length, local and remote receiver. - **/ --s32 igb_get_phy_info_igp(struct e1000_hw *hw) -+s32 e1000_get_phy_info_igp(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - -- ret_val = igb_phy_has_link(hw, 1, 0, &link); -+ DEBUGFUNC("e1000_get_phy_info_igp"); -+ -+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) { -- hw_dbg("Phy info is only valid if link is up\n"); -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ DEBUGOUT("Phy info is only valid if link is up\n"); -+ return -E1000_ERR_CONFIG; - } - - phy->polarity_correction = true; - -- ret_val = igb_check_polarity_igp(hw); -+ ret_val = igb_e1000_check_polarity_igp(hw); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); - if (ret_val) -- goto out; -+ return ret_val; - -- phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; -+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); - - if ((data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) { - ret_val = phy->ops.get_cable_length(hw); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); - if (ret_val) -- goto out; -+ return ret_val; - - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok -@@ -2033,42 +2662,97 @@ - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - --out: - return ret_val; - } - - /** -- * igb_phy_sw_reset - PHY software reset -+ * igb_e1000_get_phy_info_ife - Retrieves various IFE PHY states -+ * @hw: pointer to the HW structure -+ * -+ * Populates "phy" structure with various feature states. -+ **/ -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val; -+ u16 data; -+ bool link; -+ -+ DEBUGFUNC("igb_e1000_get_phy_info_ife"); -+ -+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); -+ if (ret_val) -+ return ret_val; -+ -+ if (!link) { -+ DEBUGOUT("Phy info is only valid if link is up\n"); -+ return -E1000_ERR_CONFIG; -+ } -+ -+ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); -+ if (ret_val) -+ return ret_val; -+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); -+ -+ if (phy->polarity_correction) { -+ ret_val = igb_e1000_check_polarity_ife(hw); -+ if (ret_val) -+ return ret_val; -+ } else { -+ /* Polarity is forced */ -+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) -+ ? e1000_rev_polarity_reversed -+ : e1000_rev_polarity_normal); -+ } -+ -+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); -+ if (ret_val) -+ return ret_val; -+ -+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); -+ -+ /* The following parameters are undefined for 10/100 operation. 
*/ -+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; -+ phy->local_rx = e1000_1000t_rx_status_undefined; -+ phy->remote_rx = e1000_1000t_rx_status_undefined; -+ -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_phy_sw_reset_generic - PHY software reset - * @hw: pointer to the HW structure - * - * Does a software reset of the PHY by reading the PHY control register and - * setting/write the control register reset bit to the PHY. - **/ --s32 igb_phy_sw_reset(struct e1000_hw *hw) -+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) - { -- s32 ret_val = 0; -+ s32 ret_val; - u16 phy_ctrl; - -- if (!(hw->phy.ops.read_reg)) -- goto out; -+ DEBUGFUNC("e1000_phy_sw_reset_generic"); -+ -+ if (!hw->phy.ops.read_reg) -+ return E1000_SUCCESS; - - ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) -- goto out; -+ return ret_val; - - phy_ctrl |= MII_CR_RESET; - ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) -- goto out; -+ return ret_val; - -- udelay(1); -+ usec_delay(1); - --out: - return ret_val; - } - - /** -- * igb_phy_hw_reset - PHY hardware reset -+ * e1000_phy_hw_reset_generic - PHY hardware reset - * @hw: pointer to the HW structure - * - * Verify the reset block is not blocking us from resetting. Acquire -@@ -2076,50 +2760,65 @@ - * bit in the PHY. Wait the appropriate delay time for the device to - * reset and release the semaphore (if necessary). - **/ --s32 igb_phy_hw_reset(struct e1000_hw *hw) -+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; -- s32 ret_val; -+ s32 ret_val; - u32 ctrl; - -- ret_val = igb_check_reset_block(hw); -- if (ret_val) { -- ret_val = 0; -- goto out; -+ DEBUGFUNC("e1000_phy_hw_reset_generic"); -+ -+ if (phy->ops.check_reset_block) { -+ ret_val = phy->ops.check_reset_block(hw); -+ if (ret_val) -+ return E1000_SUCCESS; - } - - ret_val = phy->ops.acquire(hw); - if (ret_val) -- goto out; -+ return ret_val; - -- ctrl = rd32(E1000_CTRL); -- wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); -- wrfl(); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); -+ E1000_WRITE_FLUSH(hw); - -- udelay(phy->reset_delay_us); -+ usec_delay(phy->reset_delay_us); - -- wr32(E1000_CTRL, ctrl); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ E1000_WRITE_FLUSH(hw); - -- udelay(150); -+ usec_delay(150); - - phy->ops.release(hw); - -- ret_val = phy->ops.get_cfg_done(hw); -+ return phy->ops.get_cfg_done(hw); -+} - --out: -- return ret_val; -+/** -+ * e1000_get_cfg_done_generic - Generic configuration done -+ * @hw: pointer to the HW structure -+ * -+ * Generic function to wait 10 milli-seconds for configuration to complete -+ * and return success. -+ **/ -+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) -+{ -+ DEBUGFUNC("e1000_get_cfg_done_generic"); -+ -+ msec_delay_irq(10); -+ -+ return E1000_SUCCESS; - } - - /** -- * igb_phy_init_script_igp3 - Inits the IGP3 PHY -+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY - * @hw: pointer to the HW structure - * - * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
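The software reset above is a read-modify-write of the PHY control register: set the self-clearing reset bit, write it back, and give the PHY a moment before touching it again. A small sketch with hypothetical MDIO accessors:

#include <stdint.h>

#define CR_PHY_RESET 0x8000	/* placeholder for MII_CR_RESET (self-clearing) */

extern int read_phy_ctrl(uint16_t *val);
extern int write_phy_ctrl(uint16_t val);
extern void delay_usec(unsigned usec);

static int phy_soft_reset(void)
{
	uint16_t ctrl;
	int ret = read_phy_ctrl(&ctrl);

	if (ret)
		return ret;
	ret = write_phy_ctrl(ctrl | CR_PHY_RESET);	/* bit clears itself when the reset finishes */
	if (ret)
		return ret;
	delay_usec(1);					/* brief settle time */
	return 0;
}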
- **/ --s32 igb_phy_init_script_igp3(struct e1000_hw *hw) -+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) - { -- hw_dbg("Running IGP 3 PHY init script\n"); -+ DEBUGOUT("Running IGP 3 PHY init script\n"); - - /* PHY init IGP 3 */ - /* Enable rise/fall, 10-mode work in class-A */ -@@ -2130,7 +2829,7 @@ - hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); - /* Increase Hybrid poly bias */ - hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); -- /* Add 4% to TX amplitude in Giga mode */ -+ /* Add 4% to Tx amplitude in Gig mode */ - hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); - /* Disable trimming (TTT) */ - hw->phy.ops.write_reg(hw, 0x2011, 0x0000); -@@ -2191,17 +2890,106 @@ - /* Restart AN, Speed selection is 1000 */ - hw->phy.ops.write_reg(hw, 0x0000, 0x1340); - -- return 0; -+ return E1000_SUCCESS; -+} -+ -+/** -+ * e1000_get_phy_type_from_id - Get PHY type from id -+ * @phy_id: phy_id read from the phy -+ * -+ * Returns the phy type from the id. -+ **/ -+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) -+{ -+ enum e1000_phy_type phy_type = e1000_phy_unknown; -+ -+ switch (phy_id) { -+ case M88E1000_I_PHY_ID: -+ case M88E1000_E_PHY_ID: -+ case M88E1111_I_PHY_ID: -+ case M88E1011_I_PHY_ID: -+ case M88E1543_E_PHY_ID: -+ case M88E1512_E_PHY_ID: -+ case I347AT4_E_PHY_ID: -+ case M88E1112_E_PHY_ID: -+ case M88E1340M_E_PHY_ID: -+ phy_type = e1000_phy_m88; -+ break; -+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ -+ phy_type = e1000_phy_igp_2; -+ break; -+ case GG82563_E_PHY_ID: -+ phy_type = e1000_phy_gg82563; -+ break; -+ case IGP03E1000_E_PHY_ID: -+ phy_type = e1000_phy_igp_3; -+ break; -+ case IFE_E_PHY_ID: -+ case IFE_PLUS_E_PHY_ID: -+ case IFE_C_E_PHY_ID: -+ phy_type = e1000_phy_ife; -+ break; -+ case I82580_I_PHY_ID: -+ phy_type = e1000_phy_82580; -+ break; -+ case I210_I_PHY_ID: -+ phy_type = e1000_phy_i210; -+ break; -+ default: -+ phy_type = e1000_phy_unknown; -+ break; -+ } -+ return phy_type; -+} -+ -+/** -+ * e1000_determine_phy_address - Determines PHY address. -+ * @hw: pointer to the HW structure -+ * -+ * This uses a trial and error method to loop through possible PHY -+ * addresses. It tests each by reading the PHY ID registers and -+ * checking for a match. -+ **/ -+s32 e1000_determine_phy_address(struct e1000_hw *hw) -+{ -+ u32 phy_addr = 0; -+ u32 i; -+ enum e1000_phy_type phy_type = e1000_phy_unknown; -+ -+ hw->phy.id = phy_type; -+ -+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { -+ hw->phy.addr = phy_addr; -+ i = 0; -+ -+ do { -+ e1000_get_phy_id(hw); -+ phy_type = e1000_get_phy_type_from_id(hw->phy.id); -+ -+ /* If phy_type is valid, break - we found our -+ * PHY address -+ */ -+ if (phy_type != e1000_phy_unknown) -+ return E1000_SUCCESS; -+ -+ msec_delay(1); -+ i++; -+ } while (i < 10); -+ } -+ -+ return -E1000_ERR_PHY_TYPE; - } - - /** -- * igb_power_up_phy_copper - Restore copper link in case of PHY power down -+ * igb_e1000_power_up_phy_copper - Restore copper link in case of PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a -- * driver unload, restore the link to previous settings. -+ * driver unload, or wake on lan is not enabled, restore the link to previous -+ * settings. 
- **/ --void igb_power_up_phy_copper(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_power_up_phy_copper(struct e1000_hw *hw) - { - u16 mii_reg = 0; - -@@ -2212,13 +3000,15 @@ - } - - /** -- * igb_power_down_phy_copper - Power down copper PHY -+ * igb_e1000_power_down_phy_copper - Restore copper link in case of PHY power down - * @hw: pointer to the HW structure - * -- * Power down PHY to save power when interface is down and wake on lan -- * is not enabled. -+ * In the case of a PHY power down to save power, or to turn off link during a -+ * driver unload, or wake on lan is not enabled, restore the link to previous -+ * settings. - **/ --void igb_power_down_phy_copper(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+void igb_e1000_power_down_phy_copper(struct e1000_hw *hw) - { - u16 mii_reg = 0; - -@@ -2226,98 +3016,85 @@ - hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); - mii_reg |= MII_CR_POWER_DOWN; - hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); -- usleep_range(1000, 2000); -+ msec_delay(1); - } - - /** -- * igb_check_polarity_82580 - Checks the polarity. -+ * igb_e1000_check_polarity_82577 - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY specific status register. - **/ --static s32 igb_check_polarity_82580(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - -+ DEBUGFUNC("igb_e1000_check_polarity_82577"); - -- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); -+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); - - if (!ret_val) -- phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) -- ? e1000_rev_polarity_reversed -- : e1000_rev_polarity_normal; -+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) -+ ? e1000_rev_polarity_reversed -+ : e1000_rev_polarity_normal); - - return ret_val; - } - - /** -- * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY -+ * igb_e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY - * @hw: pointer to the HW structure - * -- * Calls the PHY setup function to force speed and duplex. Clears the -- * auto-crossover to force MDI manually. Waits for link and returns -- * successful if link up is successful, else -E1000_ERR_PHY (-2). -+ * Calls the PHY setup function to force speed and duplex. - **/ --s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - -+ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_82577"); -+ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- igb_phy_force_speed_duplex_setup(hw, &phy_data); -+ e1000_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); - if (ret_val) -- goto out; -- -- /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI -- * forced whenever speed and duplex are forced. 
-- */ -- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); -- if (ret_val) -- goto out; -- -- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; -- -- ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); -- if (ret_val) -- goto out; -- -- hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); -+ return ret_val; - -- udelay(1); -+ usec_delay(1); - - if (phy->autoneg_wait_to_complete) { -- hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); -+ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); - -- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) -- hw_dbg("Link taking longer than expected.\n"); -+ DEBUGOUT("Link taking longer than expected.\n"); - - /* Try once more */ -- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); -- if (ret_val) -- goto out; -+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, -+ 100000, &link); - } - --out: - return ret_val; - } - - /** -- * igb_get_phy_info_82580 - Retrieve I82580 PHY information -+ * igb_e1000_get_phy_info_82577 - Retrieve I82577 PHY information - * @hw: pointer to the HW structure - * - * Read PHY status to determine if link is up. If link is up, then -@@ -2325,44 +3102,46 @@ - * PHY port status to determine MDI/MDIx and speed. Based on the speed, - * determine on the cable length, local and remote receiver. - **/ --s32 igb_get_phy_info_82580(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - -- ret_val = igb_phy_has_link(hw, 1, 0, &link); -+ DEBUGFUNC("igb_e1000_get_phy_info_82577"); -+ -+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) -- goto out; -+ return ret_val; - - if (!link) { -- hw_dbg("Phy info is only valid if link is up\n"); -- ret_val = -E1000_ERR_CONFIG; -- goto out; -+ DEBUGOUT("Phy info is only valid if link is up\n"); -+ return -E1000_ERR_CONFIG; - } - - phy->polarity_correction = true; - -- ret_val = igb_check_polarity_82580(hw); -+ ret_val = igb_e1000_check_polarity_82577(hw); - if (ret_val) -- goto out; -+ return ret_val; - -- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); -+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); - if (ret_val) -- goto out; -+ return ret_val; - -- phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; -+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); - -- if ((data & I82580_PHY_STATUS2_SPEED_MASK) == -- I82580_PHY_STATUS2_SPEED_1000MBPS) { -+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) == -+ I82577_PHY_STATUS2_SPEED_1000MBPS) { - ret_val = hw->phy.ops.get_cable_length(hw); - if (ret_val) -- goto out; -+ return ret_val; - - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); - if (ret_val) -- goto out; -+ return ret_val; - - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok -@@ -2377,63 +3156,65 @@ - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_get_cable_length_82580 - Determine cable length for 82580 PHY -+ * igb_e1000_get_cable_length_82577 - Determine cable length for 82577 PHY - * @hw: pointer to the HW structure - * - * Reads the diagnostic status register and verifies result is valid before - * placing it in the phy_cable_length field. 
- **/ --s32 igb_get_cable_length_82580(struct e1000_hw *hw) -+/* Changed name, duplicated with e1000 */ -+s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw) - { - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, length; - -- ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); -+ DEBUGFUNC("igb_e1000_get_cable_length_82577"); -+ -+ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); - if (ret_val) -- goto out; -+ return ret_val; - -- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> -- I82580_DSTATUS_CABLE_LENGTH_SHIFT; -+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> -+ I82577_DSTATUS_CABLE_LENGTH_SHIFT); - - if (length == E1000_CABLE_LENGTH_UNDEFINED) -- ret_val = -E1000_ERR_PHY; -+ return -E1000_ERR_PHY; - - phy->cable_length = length; - --out: -- return ret_val; -+ return E1000_SUCCESS; - } - - /** -- * igb_write_phy_reg_gs40g - Write GS40G PHY register -+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register - * @hw: pointer to the HW structure -- * @offset: lower half is register offset to write to -- * upper half is page to use. -+ * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ --s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) -+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) - { - s32 ret_val; - u16 page = offset >> GS40G_PAGE_SHIFT; - -+ DEBUGFUNC("e1000_write_phy_reg_gs40g"); -+ - offset = offset & GS40G_OFFSET_MASK; - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - -- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); -+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); - if (ret_val) - goto release; -- ret_val = igb_write_phy_reg_mdic(hw, offset, data); -+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data); - - release: - hw->phy.ops.release(hw); -@@ -2441,7 +3222,7 @@ - } - - /** -- * igb_read_phy_reg_gs40g - Read GS40G PHY register -+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register - * @hw: pointer to the HW structure - * @offset: lower half is register offset to read to - * upper half is page to use. -@@ -2450,20 +3231,22 @@ - * Acquires semaphore, if necessary, then reads the data in the PHY register - * at the offset. Release any acquired semaphores before exiting. 
- **/
--s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
-+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
- {
- s32 ret_val;
- u16 page = offset >> GS40G_PAGE_SHIFT;
-
-+ DEBUGFUNC("e1000_read_phy_reg_gs40g");
-+
- offset = offset & GS40G_OFFSET_MASK;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
-- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
-+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
- if (ret_val)
- goto release;
-- ret_val = igb_read_phy_reg_mdic(hw, offset, data);
-+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
-
- release:
- hw->phy.ops.release(hw);
-@@ -2471,41 +3254,156 @@
- }
-
- /**
-- * igb_set_master_slave_mode - Setup PHY for Master/slave mode
-+ * e1000_read_phy_reg_mphy - Read mPHY control register
- * @hw: pointer to the HW structure
-+ * @address: address to be read
-+ * @data: pointer to the read data
- *
-- * Sets up Master/slave mode
-+ * Reads the mPHY control register in the PHY at offset and stores the
-+ * information read to data.
- **/
--static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
-+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
- {
-- s32 ret_val;
-- u16 phy_data;
-+ u32 mphy_ctrl = 0;
-+ bool locked = false;
-+ bool ready;
-
-- /* Resolve Master/Slave mode */
-- ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
-- if (ret_val)
-- return ret_val;
-+ DEBUGFUNC("e1000_read_phy_reg_mphy");
-
-- /* load defaults for future use */
-- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
-- ((phy_data & CR_1000T_MS_VALUE) ?
-- e1000_ms_force_master :
-- e1000_ms_force_slave) : e1000_ms_auto;
-+ /* Check if mPHY is ready to read/write operations */
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-
-- switch (hw->phy.ms_type) {
-- case e1000_ms_force_master:
-- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
-- break;
-- case e1000_ms_force_slave:
-- phy_data |= CR_1000T_MS_ENABLE;
-- phy_data &= ~(CR_1000T_MS_VALUE);
-- break;
-- case e1000_ms_auto:
-- phy_data &= ~CR_1000T_MS_ENABLE;
-- /* fall-through */
-- default:
-+ /* Check if mPHY access is disabled and enable it if so */
-+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
-+ locked = true;
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
-+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-+ }
-+
-+ /* Set the address that we want to read */
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+
-+ /* We mask address, because we want to use only current lane */
-+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
-+ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
-+ (address & E1000_MPHY_ADDRESS_MASK);
-+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-+
-+ /* Read data from the address */
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
-+
-+ /* Disable access to mPHY if it was originally disabled */
-+ if (locked)
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
-+ E1000_MPHY_DIS_ACCESS);
-+
-+ return E1000_SUCCESS;
-+}
-+
-+/**
-+ * e1000_write_phy_reg_mphy - Write mPHY control register
-+ * @hw: pointer to the HW structure
-+ * @address: address to write to
-+ * @data: data to write to register at offset
-+ * @line_override: used when we want to use different line than default one
-+ *
-+ * Writes data to mPHY control register.
-+ **/
-+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
-+ bool line_override)
-+{
-+ u32 mphy_ctrl = 0;
-+ bool locked = false;
-+ bool ready;
-+
-+ DEBUGFUNC("e1000_write_phy_reg_mphy");
-+
-+ /* Check if mPHY is ready to read/write operations */
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+
-+ /* Check if mPHY access is disabled and enable it if so */
-+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
-+ locked = true;
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
-+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-+ }
-+
-+ /* Set the address that we want to read */
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+
-+ /* We mask address, because we want to use only current lane */
-+ if (line_override)
-+ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
-+ else
-+ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
-+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
-+ (address & E1000_MPHY_ADDRESS_MASK);
-+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
-+
-+ /* Read data from the address */
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
-+
-+ /* Disable access to mPHY if it was originally disabled */
-+ if (locked)
-+ ready = e1000_is_mphy_ready(hw);
-+ if (!ready)
-+ return -E1000_ERR_PHY;
-+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
-+ E1000_MPHY_DIS_ACCESS);
-+
-+ return E1000_SUCCESS;
-+}
-+
-+/**
-+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
-+ * @hw: pointer to the HW structure
-+ *
-+ * Returns mPHY control register status.
-+ **/
-+bool e1000_is_mphy_ready(struct e1000_hw *hw)
-+{
-+ u16 retry_count = 0;
-+ u32 mphy_ctrl = 0;
-+ bool ready = false;
-+
-+ while (retry_count < 2) {
-+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
-+ if (mphy_ctrl & E1000_MPHY_BUSY) {
-+ usec_delay(20);
-+ retry_count++;
-+ continue;
-+ }
-+ ready = true;
- break;
- }
-
-- return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
-+ if (!ready)
-+ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
-+
-+ return ready;
- }
-diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
---- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-13 09:20:24.790171605 +0000
-+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 14:32:08.579567168 +0000
-@@ -1,146 +1,115 @@
--/* Intel(R) Gigabit Ethernet Linux driver
-- * Copyright(c) 2007-2014 Intel Corporation.
-- *
-- * This program is free software; you can redistribute it and/or modify it
-- * under the terms and conditions of the GNU General Public License,
-- * version 2, as published by the Free Software Foundation.
-- *
-- * This program is distributed in the hope it will be useful, but WITHOUT
-- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-- * more details.
-- *
-- * You should have received a copy of the GNU General Public License along with
-- * this program; if not, see .
-- *
-- * The full GNU General Public License is included in this distribution in
-- * the file called "COPYING".
-- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* - --#ifndef _E1000_PHY_H_ --#define _E1000_PHY_H_ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. - --enum e1000_ms_type { -- e1000_ms_hw_default = 0, -- e1000_ms_force_master, -- e1000_ms_force_slave, -- e1000_ms_auto --}; -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. - --enum e1000_smart_speed { -- e1000_smart_speed_default = 0, -- e1000_smart_speed_on, -- e1000_smart_speed_off --}; -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. - --s32 igb_check_downshift(struct e1000_hw *hw); --s32 igb_check_reset_block(struct e1000_hw *hw); --s32 igb_copper_link_setup_igp(struct e1000_hw *hw); --s32 igb_copper_link_setup_m88(struct e1000_hw *hw); --s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); --s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); --s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); --s32 igb_get_cable_length_m88(struct e1000_hw *hw); --s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); --s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); --s32 igb_get_phy_id(struct e1000_hw *hw); --s32 igb_get_phy_info_igp(struct e1000_hw *hw); --s32 igb_get_phy_info_m88(struct e1000_hw *hw); --s32 igb_phy_sw_reset(struct e1000_hw *hw); --s32 igb_phy_hw_reset(struct e1000_hw *hw); --s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); --s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); --s32 igb_setup_copper_link(struct e1000_hw *hw); --s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); --s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, -- u32 usec_interval, bool *success); --void igb_power_up_phy_copper(struct e1000_hw *hw); --void igb_power_down_phy_copper(struct e1000_hw *hw); --s32 igb_phy_init_script_igp3(struct e1000_hw *hw); --s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); --s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); --s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); --s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); --s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); --s32 igb_copper_link_setup_82580(struct e1000_hw *hw); --s32 igb_get_phy_info_82580(struct e1000_hw *hw); --s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); --s32 igb_get_cable_length_82580(struct e1000_hw *hw); --s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); --s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); --s32 igb_check_polarity_m88(struct e1000_hw *hw); -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". 
- --/* IGP01E1000 Specific Registers */ --#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ --#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ --#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ --#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ --#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ --#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ --#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 --#define IGP01E1000_PHY_POLARITY_MASK 0x0078 --#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 --#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ --#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 -- --#define I82580_ADDR_REG 16 --#define I82580_CFG_REG 22 --#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) --#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ --#define I82580_CTRL_REG 23 --#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) -- --/* 82580 specific PHY registers */ --#define I82580_PHY_CTRL_2 18 --#define I82580_PHY_LBK_CTRL 19 --#define I82580_PHY_STATUS_2 26 --#define I82580_PHY_DIAG_STATUS 31 -- --/* I82580 PHY Status 2 */ --#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 --#define I82580_PHY_STATUS2_MDIX 0x0800 --#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 --#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 --#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 -- --/* I82580 PHY Control 2 */ --#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 --#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 --#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 -- --/* I82580 PHY Diagnostics Status */ --#define I82580_DSTATUS_CABLE_LENGTH 0x03FC --#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - --/* 82580 PHY Power Management */ --#define E1000_82580_PHY_POWER_MGMT 0xE14 --#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ --#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ --#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ --#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ -+*******************************************************************************/ -+ -+#ifndef _E1000_PHY_H_ -+#define _E1000_PHY_H_ -+ -+void e1000_init_phy_ops_generic(struct e1000_hw *hw); -+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); -+void e1000_null_phy_generic(struct e1000_hw *hw); -+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); -+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); -+s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, -+ u8 dev_addr, u8 *data); -+s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, -+ u8 dev_addr, u8 data); -+s32 e1000_check_downshift_generic(struct e1000_hw *hw); -+s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw); -+s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw); -+s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw); -+s32 e1000_check_reset_block_generic(struct e1000_hw *hw); -+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); -+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); -+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); -+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); -+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); -+s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); -+s32 e1000_get_cable_length_m88(struct e1000_hw *hw); -+s32 
e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); -+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); -+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); -+s32 e1000_get_phy_id(struct e1000_hw *hw); -+s32 e1000_get_phy_info_igp(struct e1000_hw *hw); -+s32 e1000_get_phy_info_m88(struct e1000_hw *hw); -+s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw); -+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); -+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); -+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); -+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); -+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page); -+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); -+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); -+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, -+ u32 usec_interval, bool *success); -+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); -+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); -+s32 e1000_determine_phy_address(struct e1000_hw *hw); -+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); -+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); -+void igb_e1000_power_up_phy_copper(struct e1000_hw *hw); -+void igb_e1000_power_down_phy_copper(struct e1000_hw *hw); -+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); -+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); -+s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw); -+s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw); -+s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw); -+s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); -+s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw); -+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); -+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); -+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); -+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, -+ bool line_override); -+bool e1000_is_mphy_ready(struct e1000_hw *hw); - --/* Enable flexible speed on link-up */ --#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ --#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ --#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 --#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 --#define 
IGP01E1000_PSSR_MDIX 0x0800 --#define IGP01E1000_PSSR_SPEED_MASK 0xC000 --#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 --#define IGP02E1000_PHY_CHANNEL_NUM 4 --#define IGP02E1000_PHY_AGC_A 0x11B1 --#define IGP02E1000_PHY_AGC_B 0x12B1 --#define IGP02E1000_PHY_AGC_C 0x14B1 --#define IGP02E1000_PHY_AGC_D 0x18B1 --#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ --#define IGP02E1000_AGC_LENGTH_MASK 0x7F --#define IGP02E1000_AGC_RANGE 15 -+#define E1000_MAX_PHY_ADDR 8 - --#define E1000_CABLE_LENGTH_UNDEFINED 0xFF -+/* IGP01E1000 Specific Registers */ -+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ -+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ -+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ -+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ -+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ -+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ -+#define IGP_PAGE_SHIFT 5 -+#define PHY_REG_MASK 0x1F - - /* GS40G - I210 PHY defines */ - #define GS40G_PAGE_SELECT 0x16 -@@ -151,7 +120,110 @@ - #define GS40G_MAC_LB 0x4140 - #define GS40G_MAC_SPEED_1G 0X0006 - #define GS40G_COPPER_SPEC 0x0010 --#define GS40G_LINE_LB 0x4000 -+ -+#define HV_INTC_FC_PAGE_START 768 -+#define I82578_ADDR_REG 29 -+#define I82577_ADDR_REG 16 -+#define I82577_CFG_REG 22 -+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ -+#define I82577_CTRL_REG 23 -+ -+/* 82577 specific PHY registers */ -+#define I82577_PHY_CTRL_2 18 -+#define I82577_PHY_LBK_CTRL 19 -+#define I82577_PHY_STATUS_2 26 -+#define I82577_PHY_DIAG_STATUS 31 -+ -+/* I82577 PHY Status 2 */ -+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 -+#define I82577_PHY_STATUS2_MDIX 0x0800 -+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 -+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 -+ -+/* I82577 PHY Control 2 */ -+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 -+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 -+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 -+ -+/* I82577 PHY Diagnostics Status */ -+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC -+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 -+ -+/* 82580 PHY Power Management */ -+#define E1000_82580_PHY_POWER_MGMT 0xE14 -+#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ -+#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ -+#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ -+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ -+ -+#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ -+#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ -+#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ -+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ -+#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ -+ -+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -+#define IGP01E1000_PHY_POLARITY_MASK 0x0078 -+ -+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 -+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ -+ -+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 -+ -+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ -+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ -+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ -+ -+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 -+ -+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -+#define IGP01E1000_PSSR_MDIX 0x0800 -+#define IGP01E1000_PSSR_SPEED_MASK 0xC000 -+#define 
IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 -+ -+#define IGP02E1000_PHY_CHANNEL_NUM 4 -+#define IGP02E1000_PHY_AGC_A 0x11B1 -+#define IGP02E1000_PHY_AGC_B 0x12B1 -+#define IGP02E1000_PHY_AGC_C 0x14B1 -+#define IGP02E1000_PHY_AGC_D 0x18B1 -+ -+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ -+#define IGP02E1000_AGC_LENGTH_MASK 0x7F -+#define IGP02E1000_AGC_RANGE 15 -+ -+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF -+ -+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 -+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 -+#define E1000_KMRNCTRLSTA_REN 0x00200000 -+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ -+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ -+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ -+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ -+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ -+ -+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 -+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ -+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ -+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ -+ -+/* IFE PHY Extended Status Control */ -+#define IFE_PESC_POLARITY_REVERSED 0x0100 -+ -+/* IFE PHY Special Control */ -+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 -+#define IFE_PSC_FORCE_POLARITY 0x0020 -+ -+/* IFE PHY Special Control and LED Control */ -+#define IFE_PSCL_PROBE_MODE 0x0020 -+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ -+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ -+ -+/* IFE PHY MDIX Control */ -+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ -+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ -+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ - - /* SFP modules ID memory locations */ - #define E1000_SFF_IDENTIFIER_OFFSET 0x00 -@@ -160,7 +232,7 @@ - - #define E1000_SFF_ETH_FLAGS_OFFSET 0x06 - /* Flags for SFP modules compatible with ETH up to 1Gb */ --struct e1000_sfp_flags { -+struct sfp_e1000_flags { - u8 e1000_base_sx:1; - u8 e1000_base_lx:1; - u8 e1000_base_cx:1; -@@ -171,4 +243,10 @@ - u8 e10_base_px:1; - }; - -+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ -+#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 -+#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 -+#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 -+#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 -+ - #endif -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h ---- a/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,154 +1,196 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . 
-- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #ifndef _E1000_REGS_H_ - #define _E1000_REGS_H_ - --#define E1000_CTRL 0x00000 /* Device Control - RW */ --#define E1000_STATUS 0x00008 /* Device Status - RO */ --#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ --#define E1000_EERD 0x00014 /* EEPROM Read - RW */ --#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ --#define E1000_MDIC 0x00020 /* MDI Control - RW */ --#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ --#define E1000_SCTL 0x00024 /* SerDes Control - RW */ --#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ --#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ --#define E1000_FCT 0x00030 /* Flow Control Type - RW */ --#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ --#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ --#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ --#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ --#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ --#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ --#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ --#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ --#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ --#define E1000_RCTL 0x00100 /* RX Control - RW */ --#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ --#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ --#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ --#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) --#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ --#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ --#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ --#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ --#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ --#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ --#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ --#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ --#define E1000_TCTL 0x00400 /* TX Control - RW */ --#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ --#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ --#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ --#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ --#define E1000_LEDMUX 0x08130 /* LED MUX Control */ --#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ --#define E1000_PBS 0x01008 /* Packet Buffer Size */ --#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ --#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ --#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ --#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ --#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ --#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ --#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ --#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ --#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ --#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ --#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ --#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ --#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ --#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ --#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ --#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ --#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ -+#define E1000_CTRL 0x00000 /* Device Control - RW */ -+#define E1000_STATUS 0x00008 /* Device Status - RO */ -+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ -+#define E1000_EERD 0x00014 /* EEPROM Read - RW */ -+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ -+#define E1000_FLA 0x0001C /* Flash Access - RW */ -+#define E1000_MDIC 0x00020 /* MDI Control - RW */ -+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ -+#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ -+#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ -+#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ -+#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ -+#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ -+#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ - #define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ - #define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ - #define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ -+#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ -+#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ -+#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ -+#define E1000_SCTL 0x00024 /* SerDes Control - RW */ -+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ -+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ -+#define E1000_FCT 0x00030 /* Flow Control Type - RW */ -+#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ -+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ -+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -+#define E1000_ICS 
0x000C8 /* Interrupt Cause Set - WO */ -+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ -+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ -+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ -+#define E1000_RCTL 0x00100 /* Rx Control - RW */ -+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ -+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ -+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ -+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ -+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ -+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -+#define E1000_TCTL 0x00400 /* Tx Control - RW */ -+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ -+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ -+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ -+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ -+#define E1000_LEDMUX 0x08130 /* LED MUX Control */ -+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ -+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ -+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ -+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ -+#define E1000_PBS 0x01008 /* Packet Buffer Size */ -+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ -+#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ -+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ -+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ -+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ -+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ -+#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ -+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ -+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ -+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ -+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ -+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ -+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ -+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ -+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ -+#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ -+#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ -+#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ -+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ -+#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ -+#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ -+#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ -+#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ -+#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ -+#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ -+#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ -+#define E1000_ERT 
0x02008 /* Early Rx Threshold - RW */ -+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ -+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ -+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ -+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ -+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ -+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ -+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ -+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ -+#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ -+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ -+/* Split and Replication Rx Control - RW */ -+#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ -+#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ -+#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ -+#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ -+#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ -+#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ -+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ -+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ -+#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ -+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ -+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ -+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ -+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ -+#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ -+#define E1000_I210_FLMNGCTL 0x12038 -+#define E1000_I210_FLMNGDATA 0x1203C -+#define E1000_I210_FLMNGCNT 0x12040 - --/* IEEE 1588 TIMESYNCH */ --#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ --#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ --#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ --#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ --#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ --#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ --#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ --#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ --#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ --#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ --#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ --#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ --#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ --#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ --#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ --#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ --#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ --#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ --#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ --#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ --#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ --#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ --#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ --#define 
E1000_TSIM 0x0B674 /* Interrupt Mask Register */ -+#define E1000_I210_FLSWCTL 0x12048 -+#define E1000_I210_FLSWDATA 0x1204C -+#define E1000_I210_FLSWCNT 0x12050 - --/* Filtering Registers */ --#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) --#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) --#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) --#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) --#define E1000_SAQF0 E1000_SAQF(0) --#define E1000_DAQF0 E1000_DAQF(0) --#define E1000_SPQF0 E1000_SPQF(0) --#define E1000_FTQF0 E1000_FTQF(0) --#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ --#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ -+#define E1000_I210_FLA 0x1201C - --#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) -+#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) -+#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ - --/* DMA Coalescing registers */ --#define E1000_DMACR 0x02508 /* Control Register */ --#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ --#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ --#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ --#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ --#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ --#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ -+/* QAV Tx mode control register */ -+#define E1000_I210_TQAVCTRL 0x3570 - --/* TX Rate Limit Registers */ --#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ --#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ --#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ -+/* QAV Tx mode control register bitfields masks */ -+/* QAV enable */ -+#define E1000_TQAVCTRL_MODE (1 << 0) -+/* Fetching arbitration type */ -+#define E1000_TQAVCTRL_FETCH_ARB (1 << 4) -+/* Fetching timer enable */ -+#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) -+/* Launch arbitration type */ -+#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) -+/* Launch timer enable */ -+#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) -+/* SP waits for SR enable */ -+#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) -+/* Fetching timer correction */ -+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 -+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ -+ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) -+ -+/* High credit registers where _n can be 0 or 1. */ -+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) -+ -+/* Queues fetch arbitration priority control register */ -+#define E1000_I210_TQAVARBCTRL 0x3574 -+/* Queues priority masks where _n and _p can be 0-3. */ -+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) -+/* QAV Tx mode control registers where _n can be 0 or 1. */ -+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) -+ -+/* QAV Tx mode control register bitfields masks */ -+#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ -+#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ -+#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. 
SR Tx mode */ - --/* Split and Replication RX Control - RW */ --#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ -+/* Good transmitted packets counter registers */ -+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) - --/* Thermal sensor configuration and status registers */ --#define E1000_THMJT 0x08100 /* Junction Temperature */ --#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ --#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ --#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ --#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ -+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ -+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) -+ -+#define E1000_MMDAC 13 /* MMD Access Control */ -+#define E1000_MMDAAD 14 /* MMD Access Address/Data */ - - /* Convenience macros - * -@@ -157,269 +199,442 @@ - * Example usage: - * E1000_RDBAL_REG(current_rx_queue) - */ --#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ -- : (0x0C000 + ((_n) * 0x40))) --#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ -- : (0x0C004 + ((_n) * 0x40))) --#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ -- : (0x0C008 + ((_n) * 0x40))) --#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ -- : (0x0C00C + ((_n) * 0x40))) --#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ -- : (0x0C010 + ((_n) * 0x40))) --#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ -- : (0x0C018 + ((_n) * 0x40))) --#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ -- : (0x0C028 + ((_n) * 0x40))) --#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ -- : (0x0E000 + ((_n) * 0x40))) --#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ -- : (0x0E004 + ((_n) * 0x40))) --#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ -- : (0x0E008 + ((_n) * 0x40))) --#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ -- : (0x0E010 + ((_n) * 0x40))) --#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ -- : (0x0E018 + ((_n) * 0x40))) --#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ -- : (0x0E028 + ((_n) * 0x40))) --#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ -- (0x0C014 + ((_n) * 0x40))) -+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ -+ (0x0C000 + ((_n) * 0x40))) -+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ -+ (0x0C004 + ((_n) * 0x40))) -+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ -+ (0x0C008 + ((_n) * 0x40))) -+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ -+ (0x0C00C + ((_n) * 0x40))) -+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ -+ (0x0C010 + ((_n) * 0x40))) -+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ -+ (0x0C014 + ((_n) * 0x40))) - #define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) --#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ -- (0x0E014 + ((_n) * 0x40))) -+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ -+ (0x0C018 + ((_n) * 0x40))) -+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ -+ (0x0C028 + ((_n) * 0x40))) -+#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ -+ (0x0C030 + ((_n) * 0x40))) -+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ -+ (0x0E000 + ((_n) * 0x40))) -+#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ -+ (0x0E004 + ((_n) * 0x40))) -+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ -+ (0x0E008 + ((_n) * 0x40))) -+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ -+ (0x0E010 + ((_n) * 0x40))) -+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ -+ (0x0E014 + ((_n) * 0x40))) - #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) --#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ -- : (0x0E038 + ((_n) * 0x40))) --#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ -- : (0x0E03C + ((_n) * 0x40))) -- --#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ --#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ -+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ -+ (0x0E018 + ((_n) * 0x40))) -+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ -+ (0x0E028 + ((_n) * 0x40))) -+#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ -+ (0x0E038 + ((_n) * 0x40))) -+#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ -+ (0x0E03C + ((_n) * 0x40))) -+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) -+#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ -+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ -+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ -+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) -+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ -+ (0x054E0 + ((_i - 16) * 8))) -+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ -+ (0x054E4 + ((_i - 16) * 8))) -+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) -+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) -+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) -+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) -+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) -+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) -+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) -+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) -+#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ -+#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ -+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ -+/* Same as TXPBS, renamed for newer Si - RW */ -+#define E1000_ITPBS 0x03404 -+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ -+#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ -+#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ -+#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ -+#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ -+#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ -+#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ -+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ -+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ -+/* DMA Tx Max Total Allow Size Reqs - RW */ -+#define E1000_DTXMXSZRQ 0x03540 -+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ -+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ -+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ -+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ -+#define E1000_SYMERRS 
0x04008 /* Symbol Error Count - R/clr */ -+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ -+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ -+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ -+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ -+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ -+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ -+#define E1000_COLC 0x04028 /* Collision Count - R/clr */ -+#define E1000_DC 0x04030 /* Defer Count - R/clr */ -+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ -+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ -+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ -+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ -+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ -+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ -+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ -+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ -+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ -+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ -+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ -+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ -+#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ -+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ -+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ -+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ -+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ -+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ -+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ -+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ -+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ -+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ -+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ -+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ -+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ -+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ -+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ -+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ -+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ -+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ -+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ -+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ -+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ -+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ -+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ -+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ -+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ -+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ -+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ -+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ -+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ -+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ -+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ -+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ -+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx 
Count - R/clr */ -+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ -+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ -+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ -+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ -+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ -+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ -+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ -+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ -+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ -+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ -+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ -+ -+/* Virtualization statistical counters */ -+#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) -+#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) -+#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) -+#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) -+#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) -+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) -+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) -+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) -+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) -+ -+/* LinkSec */ -+#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ -+#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ -+#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ -+#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ -+#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ -+#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ -+#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ -+#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ -+#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ -+#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ -+#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ -+#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ -+#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ -+#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ -+#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ -+#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ -+#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ -+#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ -+#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ -+#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ -+#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ -+#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ -+#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ -+#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ -+#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ -+#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ -+#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ -+#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ -+#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ -+#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ -+/* LinkSec Tx 128-bit Key 0 - WO */ -+#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) -+/* LinkSec Tx 128-bit Key 1 - WO */ -+#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) -+#define 
E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ -+#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ -+/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit -+ * key - RW. -+ */ -+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) - --#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ --#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ --#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ --#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ --#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ --#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ --#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ --#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ --#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ --#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ --#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ --#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ --#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ --#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ --#define E1000_COLC 0x04028 /* Collision Count - R/clr */ --#define E1000_DC 0x04030 /* Defer Count - R/clr */ --#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ --#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ --#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ --#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ --#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ --#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ --#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ --#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ --#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ --#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ --#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ --#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ --#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ --#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ --#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ --#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ --#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ --#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ --#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ --#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ --#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ --#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ --#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ --#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ --#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ --#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ --#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ --#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ --#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ --#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ --#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ --#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ --#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ --#define E1000_TOTL 
0x040C8 /* Total Octets TX Low - R/clr */ --#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ --#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ --#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ --#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ --#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ --#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ --#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ --#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ --#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ --#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ --#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ --#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ --#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ --#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ --/* Interrupt Cause Rx Packet Timer Expire Count */ --#define E1000_ICRXPTC 0x04104 --/* Interrupt Cause Rx Absolute Timer Expire Count */ --#define E1000_ICRXATC 0x04108 --/* Interrupt Cause Tx Packet Timer Expire Count */ --#define E1000_ICTXPTC 0x0410C --/* Interrupt Cause Tx Absolute Timer Expire Count */ --#define E1000_ICTXATC 0x04110 --/* Interrupt Cause Tx Queue Empty Count */ --#define E1000_ICTXQEC 0x04118 --/* Interrupt Cause Tx Queue Minimum Threshold Count */ --#define E1000_ICTXQMTC 0x0411C --/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ --#define E1000_ICRXDMTC 0x04120 --#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ --#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ --#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ --#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ --#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ --#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ --#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ --#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ --#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ --#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ --#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ --#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ --#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ --#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ --#define E1000_LENERRS 0x04138 /* Length Errors Count */ --#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ --#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ --#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ --#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ --#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ --#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ --#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ --#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ --#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ --#define E1000_RA 0x05400 /* Receive Address - RW Array */ --#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ --#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) --#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ -- (0x054E0 + ((_i - 16) * 8))) --#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ -- (0x054E4 + ((_i - 16) * 8))) --#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) --#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) --#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) --#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) --#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) --#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) --#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ --#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ --#define E1000_WUC 0x05800 /* Wakeup Control - RW */ --#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ --#define E1000_WUS 0x05810 /* Wakeup Status - RO */ --#define E1000_MANC 0x05820 /* Management Control - RW */ --#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ --#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ -- --#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ --#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ --#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ --#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ --#define E1000_GCR 0x05B00 /* PCI-Ex Control */ --#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ --#define E1000_SWSM 0x05B50 /* SW Semaphore */ --#define E1000_FWSM 0x05B54 /* FW Semaphore */ --#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ -+#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ -+#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ -+#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ -+#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ -+/* IPSec Rx IPv4/v6 Address - RW */ -+#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) -+/* IPSec Rx 128-bit Key - RW */ -+#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) -+#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ -+#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ -+/* IPSec Tx 128-bit Key - RW */ -+#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) -+#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ -+#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ -+#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ -+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ -+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ -+#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ -+#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ -+#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ -+#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ -+#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ -+#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ -+#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ -+#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ -+#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ -+#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ -+#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ -+#define E1000_LENERRS 0x04138 /* Length Errors Count */ -+#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ -+#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ -+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ -+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ -+#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ -+#define E1000_PCS_LPABNP 0x04224 /* 
Link Partner Ability Next Pg - RW */ -+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ -+#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ -+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ -+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ -+#define E1000_RA 0x05400 /* Receive Address - RW Array */ -+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ -+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ -+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ -+#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ -+#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ -+#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ -+#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ -+#define E1000_WUC 0x05800 /* Wakeup Control - RW */ -+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -+#define E1000_WUS 0x05810 /* Wakeup Status - RO */ -+#define E1000_MANC 0x05820 /* Management Control - RW */ -+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ -+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ -+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ -+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ -+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ -+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ -+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ -+#define E1000_HOST_IF 0x08800 /* Host Interface */ -+#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ -+/* Flexible Host Filter Table */ -+#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) -+/* Ext Flexible Host Filter Table */ -+#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) -+ -+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ -+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ -+/* Management Decision Filters */ -+#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) -+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ -+#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ -+#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ -+#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ -+#define E1000_GCR 0x05B00 /* PCI-Ex Control */ -+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ -+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ -+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ -+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ -+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ -+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ -+#define E1000_SWSM 0x05B50 /* SW Semaphore */ -+#define E1000_FWSM 0x05B54 /* FW Semaphore */ -+/* Driver-only SW semaphore (not used by BOOT agents) */ -+#define E1000_SWSM2 0x05B58 -+#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ -+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ -+#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ -+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ -+#define E1000_HICR 0x08F00 /* Host Interface Control */ -+#define E1000_FWSTS 0x08F0C /* FW Status */ - - /* RSS registers */ --#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ --#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ --#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ --#define E1000_IMIRVP 0x05AC0 /* Immediate 
Interrupt RX VLAN Priority - RW */ --/* MSI-X Allocation Register (_i) - RW */ --#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) --/* Redirection Table - RW Array */ --#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) --#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ -- -+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ -+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ -+#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ -+#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ -+#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ -+#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ -+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ -+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ -+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ -+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ - /* VT Registers */ --#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ --#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ --#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ --#define E1000_VFRE 0x00C8C /* VF Receive Enables */ --#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ --#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ --#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ --#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ --#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ --#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ --#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ --#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ -+#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ -+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ -+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ -+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ -+#define E1000_VFRE 0x00C8C /* VF Receive Enables */ -+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ -+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ -+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ -+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ -+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ -+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ -+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ -+#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ -+#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ -+#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ -+#define E1000_MDFB 0x03558 /* Malicious Driver free block */ -+#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ -+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ -+#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ -+#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ -+#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ - /* These act per VF so an array friendly macro is used */ --#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) --#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) --#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) --#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) --#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ --#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) -- --struct e1000_hw; -- --u32 igb_rd32(struct e1000_hw *hw, u32 reg); -- --/* write operations, indexed using DWORDS */ --#define wr32(reg, val) \ --do { \ -- u8 __iomem 
*hw_addr = ACCESS_ONCE((hw)->hw_addr); \ -- if (!E1000_REMOVED(hw_addr)) \ -- writel((val), &hw_addr[(reg)]); \ --} while (0) -- --#define rd32(reg) (igb_rd32(hw, reg)) -+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) -+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) -+#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) -+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) -+/* VLAN Virtual Machine Filter - RW */ -+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) -+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) -+#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ -+#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ -+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ -+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ -+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ -+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ -+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ -+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ -+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ -+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ -+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ -+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ -+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ -+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ -+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ -+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ -+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ -+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ -+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ -+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ - --#define wrfl() ((void)rd32(E1000_STATUS)) -- --#define array_wr32(reg, offset, value) \ -- wr32((reg) + ((offset) << 2), (value)) -- --#define array_rd32(reg, offset) \ -- (readl(hw->hw_addr + reg + ((offset) << 2))) -+/* Filtering Registers */ -+#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ -+#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ -+#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ -+#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ -+#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ -+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ -+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ -+ -+#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ -+#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ -+#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ -+#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ -+#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ -+/* Tx Desc plane TC Rate-scheduler config */ -+#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) -+/* Tx Packet plane TC Rate-Scheduler Config */ -+#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) -+/* Rx Packet plane TC Rate-Scheduler Config */ -+#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) -+/* Tx Desc Plane TC Rate-Scheduler Status */ -+#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) -+/* Tx Desc Plane TC Rate-Scheduler MMW 
*/ -+#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) -+/* Tx Packet plane TC Rate-Scheduler Status */ -+#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) -+/* Tx Packet plane TC Rate-scheduler MMW */ -+#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) -+/* Rx Packet plane TC Rate-Scheduler Status */ -+#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) -+/* Rx Packet plane TC Rate-Scheduler MMW */ -+#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) -+/* Tx Desc plane VM Rate-Scheduler MMW*/ -+#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) -+/* Tx BCN Rate-Scheduler MMW */ -+#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) -+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ -+#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ -+#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ -+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ -+#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ -+#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ -+#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ -+#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ -+#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ -+#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ -+#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ -+#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ -+#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ -+#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ - - /* DMA Coalescing registers */ -+#define E1000_DMACR 0x02508 /* Control Register */ -+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ -+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ -+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ -+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ -+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ - #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ - --/* Energy Efficient Ethernet "EEE" register */ --#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ --#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ --#define E1000_EEE_SU 0X0E34 /* EEE Setup */ --#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ --#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ --#define E1000_MMDAC 13 /* MMD Access Control */ --#define E1000_MMDAAD 14 /* MMD Access Address/Data */ -+/* PCIe Parity Status Register */ -+#define E1000_PCIEERRSTS 0x05BA8 - --/* Thermal Sensor Register */ -+#define E1000_PROXYS 0x5F64 /* Proxying Status */ -+#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ -+/* Thermal sensor configuration and status registers */ -+#define E1000_THMJT 0x08100 /* Junction Temperature */ -+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ -+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ -+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ - #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ - -+/* Energy Efficient Ethernet "EEE" registers */ -+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ -+#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ -+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ -+#define E1000_EEE_SU 0x0E34 /* EEE Setup */ -+#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ -+#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ -+ - /* OS2BMC Registers */ - #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ - #define E1000_B2OGPRC 
0x04158 /* BMC2OS packets received by host */ - #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ - #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ - --#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ --#define E1000_I210_FLMNGCTL 0x12038 --#define E1000_I210_FLMNGDATA 0x1203C --#define E1000_I210_FLMNGCNT 0x12040 -- --#define E1000_I210_FLSWCTL 0x12048 --#define E1000_I210_FLSWDATA 0x1204C --#define E1000_I210_FLSWCNT 0x12050 -- --#define E1000_I210_FLA 0x1201C -- --#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) --#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ -- --#define E1000_REMOVED(h) unlikely(!(h)) -- - #endif -diff -Nu a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h ---- a/drivers/net/ethernet/intel/igb/igb.h 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb.h 2016-11-14 14:32:08.579567168 +0000 -@@ -1,107 +1,149 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - /* Linux PRO/1000 Ethernet Driver main header file */ - - #ifndef _IGB_H_ - #define _IGB_H_ - --#include "e1000_mac.h" -+#include -+ -+#ifndef IGB_NO_LRO -+#include -+#endif -+ -+#include -+#include -+#include -+ -+#ifdef SIOCETHTOOL -+#include -+#endif -+ -+struct igb_adapter; -+ -+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -+#define IGB_DCA -+#endif -+#ifdef IGB_DCA -+#include -+#endif -+ -+#include "kcompat.h" -+ -+#ifdef HAVE_SCTP -+#include -+#endif -+ -+#include "e1000_api.h" - #include "e1000_82575.h" -+#include "e1000_manage.h" -+#include "e1000_mbx.h" -+ -+#define IGB_ERR(args...) 
pr_err(KERN_ERR "igb: " args) - -+#define PFX "igb: " -+#define DPRINTK(nlevel, klevel, fmt, args...) \ -+ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ -+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ -+ __func__ , ## args)) -+ -+#ifdef HAVE_PTP_1588_CLOCK -+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H -+#include -+#else - #include -+#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ - #include - #include --#include --#include -+#endif /* HAVE_PTP_1588_CLOCK */ -+ -+#ifdef HAVE_I2C_SUPPORT - #include - #include --#include --#include -- --struct igb_adapter; -- --#define E1000_PCS_CFG_IGN_SD 1 -+#endif /* HAVE_I2C_SUPPORT */ - - /* Interrupt defines */ --#define IGB_START_ITR 648 /* ~6000 ints/sec */ --#define IGB_4K_ITR 980 --#define IGB_20K_ITR 196 --#define IGB_70K_ITR 56 -+#define IGB_START_ITR 648 /* ~6000 ints/sec */ -+#define IGB_4K_ITR 980 -+#define IGB_20K_ITR 196 -+#define IGB_70K_ITR 56 -+ -+/* Interrupt modes, as used by the IntMode paramter */ -+#define IGB_INT_MODE_LEGACY 0 -+#define IGB_INT_MODE_MSI 1 -+#define IGB_INT_MODE_MSIX 2 - - /* TX/RX descriptor defines */ --#define IGB_DEFAULT_TXD 256 --#define IGB_DEFAULT_TX_WORK 128 --#define IGB_MIN_TXD 80 --#define IGB_MAX_TXD 4096 -- --#define IGB_DEFAULT_RXD 256 --#define IGB_MIN_RXD 80 --#define IGB_MAX_RXD 4096 -- --#define IGB_DEFAULT_ITR 3 /* dynamic */ --#define IGB_MAX_ITR_USECS 10000 --#define IGB_MIN_ITR_USECS 10 --#define NON_Q_VECTORS 1 --#define MAX_Q_VECTORS 8 --#define MAX_MSIX_ENTRIES 10 -+#define IGB_DEFAULT_TXD 256 -+#define IGB_DEFAULT_TX_WORK 128 -+#define IGB_MIN_TXD 80 -+#define IGB_MAX_TXD 4096 -+ -+#define IGB_DEFAULT_RXD 256 -+#define IGB_MIN_RXD 80 -+#define IGB_MAX_RXD 4096 -+ -+#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */ -+#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */ -+ -+#define NON_Q_VECTORS 1 -+#define MAX_Q_VECTORS 10 - - /* Transmit and receive queues */ --#define IGB_MAX_RX_QUEUES 8 --#define IGB_MAX_RX_QUEUES_82575 4 --#define IGB_MAX_RX_QUEUES_I211 2 --#define IGB_MAX_TX_QUEUES 8 --#define IGB_MAX_VF_MC_ENTRIES 30 --#define IGB_MAX_VF_FUNCTIONS 8 --#define IGB_MAX_VFTA_ENTRIES 128 --#define IGB_82576_VF_DEV_ID 0x10CA --#define IGB_I350_VF_DEV_ID 0x1520 -- --/* NVM version defines */ --#define IGB_MAJOR_MASK 0xF000 --#define IGB_MINOR_MASK 0x0FF0 --#define IGB_BUILD_MASK 0x000F --#define IGB_COMB_VER_MASK 0x00FF --#define IGB_MAJOR_SHIFT 12 --#define IGB_MINOR_SHIFT 4 --#define IGB_COMB_VER_SHFT 8 --#define IGB_NVM_VER_INVALID 0xFFFF --#define IGB_ETRACK_SHIFT 16 --#define NVM_ETRACK_WORD 0x0042 --#define NVM_COMB_VER_OFF 0x0083 --#define NVM_COMB_VER_PTR 0x003d -+#define IGB_MAX_RX_QUEUES 16 -+#define IGB_MAX_RX_QUEUES_82575 4 -+#define IGB_MAX_RX_QUEUES_I211 2 -+#define IGB_MAX_TX_QUEUES 16 -+ -+#define IGB_MAX_VF_MC_ENTRIES 30 -+#define IGB_MAX_VF_FUNCTIONS 8 -+#define IGB_82576_VF_DEV_ID 0x10CA -+#define IGB_I350_VF_DEV_ID 0x1520 -+#define IGB_MAX_UTA_ENTRIES 128 -+#define MAX_EMULATION_MAC_ADDRS 16 -+#define OUI_LEN 3 -+#define IGB_MAX_VMDQ_QUEUES 8 - - struct vf_data_storage { - unsigned char vf_mac_addresses[ETH_ALEN]; - u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; - u16 num_vf_mc_hashes; -+ u16 default_vf_vlan_id; - u16 vlans_enabled; -+ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN]; -+ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES]; - u32 flags; - unsigned long last_nack; -+#ifdef IFLA_VF_MAX - u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ - u16 pf_qos; - u16 tx_rate; -+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE - bool spoofchk_enabled; -+#endif -+#endif - }; - - #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ -@@ -125,31 +167,97 @@ - #define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) - #define IGB_TX_HTHRESH 1 - #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ -- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) --#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ -- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) -+ adapter->msix_entries) ? 1 : 4) - - /* this is the size past which hardware will drop packets when setting LPE=0 */ - #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 - -+/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we -+ * reserve 2 more, and skb_shared_info adds an additional 384 more, -+ * this adds roughly 448 bytes of extra data meaning the smallest -+ * allocation we could have is 1K. -+ * i.e. RXBUFFER_512 --> size-1024 slab -+ */ - /* Supported Rx Buffer Sizes */ --#define IGB_RXBUFFER_256 256 --#define IGB_RXBUFFER_2048 2048 --#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 --#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 -+#define IGB_RXBUFFER_256 256 -+#define IGB_RXBUFFER_2048 2048 -+#define IGB_RXBUFFER_16384 16384 -+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 -+#if MAX_SKB_FRAGS < 8 -+#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024) -+#else -+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 -+#endif -+ -+ -+/* Packet Buffer allocations */ -+#define IGB_PBA_BYTES_SHIFT 0xA -+#define IGB_TX_HEAD_ADDR_SHIFT 7 -+#define IGB_PBA_TX_MASK 0xFFFF0000 -+ -+#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */ - - /* How many Rx Buffers do we bundle into one write to the hardware ? */ --#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -+#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - --#define AUTO_ALL_MODES 0 --#define IGB_EEPROM_APME 0x0400 -+#define IGB_EEPROM_APME 0x0400 -+#define AUTO_ALL_MODES 0 - - #ifndef IGB_MASTER_SLAVE - /* Switch to override PHY master/slave setting */ - #define IGB_MASTER_SLAVE e1000_ms_hw_default - #endif - --#define IGB_MNG_VLAN_NONE -1 -+#define IGB_MNG_VLAN_NONE -1 -+ -+#ifndef IGB_NO_LRO -+#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/ -+struct igb_lro_stats { -+ u32 flushed; -+ u32 coal; -+}; -+ -+/* -+ * igb_lro_header - header format to be aggregated by LRO -+ * @iph: IP header without options -+ * @tcp: TCP header -+ * @ts: Optional TCP timestamp data in TCP options -+ * -+ * This structure relies on the check above that verifies that the header -+ * is IPv4 and does not contain any options. 
-+ */ -+struct igb_lrohdr { -+ struct iphdr iph; -+ struct tcphdr th; -+ __be32 ts[0]; -+}; -+ -+struct igb_lro_list { -+ struct sk_buff_head active; -+ struct igb_lro_stats stats; -+}; -+ -+#endif /* IGB_NO_LRO */ -+struct igb_cb { -+#ifndef IGB_NO_LRO -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ union { /* Union defining head/tail partner */ -+ struct sk_buff *head; -+ struct sk_buff *tail; -+ }; -+#endif -+ __be32 tsecr; /* timestamp echo response */ -+ u32 tsval; /* timestamp value in host order */ -+ u32 next_seq; /* next expected sequence number */ -+ u16 free; /* 65521 minus total size */ -+ u16 mss; /* size of data portion of packet */ -+ u16 append_cnt; /* number of skb's appended */ -+#endif /* IGB_NO_LRO */ -+#ifdef HAVE_VLAN_RX_REGISTER -+ u16 vid; /* VLAN tag */ -+#endif -+}; -+#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb) - - enum igb_tx_flags { - /* cmd_type flags */ -@@ -163,30 +271,28 @@ - }; - - /* VLAN info */ --#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 --#define IGB_TX_FLAGS_VLAN_SHIFT 16 -+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 -+#define IGB_TX_FLAGS_VLAN_SHIFT 16 - --/* The largest size we can write to the descriptor is 65535. In order to -+/* -+ * The largest size we can write to the descriptor is 65535. In order to - * maintain a power of two alignment we have to limit ourselves to 32K. - */ --#define IGB_MAX_TXD_PWR 15 -+#define IGB_MAX_TXD_PWR 15 - #define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) - - /* Tx Descriptors needed, worst case */ --#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) --#define DESC_NEEDED (MAX_SKB_FRAGS + 4) -- --/* EEPROM byte offsets */ --#define IGB_SFF_8472_SWAP 0x5C --#define IGB_SFF_8472_COMP 0x5E -- --/* Bitmasks */ --#define IGB_SFF_ADDRESSING_MODE 0x4 --#define IGB_SFF_8472_UNSUP 0x00 -+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) -+#ifndef MAX_SKB_FRAGS -+#define DESC_NEEDED 4 -+#elif (MAX_SKB_FRAGS < 16) -+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) -+#else -+#define DESC_NEEDED (MAX_SKB_FRAGS + 4) -+#endif - - /* wrapper around a pointer to a socket buffer, -- * so a DMA handle can be stored along with the buffer -- */ -+ * so a DMA handle can be stored along with the buffer */ - struct igb_tx_buffer { - union e1000_adv_tx_desc *next_to_watch; - unsigned long time_stamp; -@@ -202,15 +308,18 @@ - - struct igb_rx_buffer { - dma_addr_t dma; -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ struct sk_buff *skb; -+#else - struct page *page; -- unsigned int page_offset; -+ u32 page_offset; -+#endif - }; - - struct igb_tx_queue_stats { - u64 packets; - u64 bytes; - u64 restart_queue; -- u64 restart_queue2; - }; - - struct igb_rx_queue_stats { -@@ -221,6 +330,18 @@ - u64 alloc_failed; - }; - -+struct igb_rx_packet_stats { -+ u64 ipv4_packets; /* IPv4 headers processed */ -+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */ -+ u64 ipv6_packets; /* IPv6 headers processed */ -+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */ -+ u64 tcp_packets; /* TCP headers processed */ -+ u64 udp_packets; /* UDP headers processed */ -+ u64 sctp_packets; /* SCTP headers processed */ -+ u64 nfs_packets; /* NFS headers processe */ -+ u64 other_packets; -+}; -+ - struct igb_ring_container { - struct igb_ring *ring; /* pointer to linked list of rings */ - unsigned int total_bytes; /* total bytes processed this int */ -@@ -231,22 +352,22 @@ - }; - - struct igb_ring { -- struct igb_q_vector *q_vector; /* backlink to q_vector */ -- struct net_device *netdev; /* back 
pointer to net_device */ -- struct device *dev; /* device pointer for dma mapping */ -+ struct igb_q_vector *q_vector; /* backlink to q_vector */ -+ struct net_device *netdev; /* back pointer to net_device */ -+ struct device *dev; /* device for dma mapping */ - union { /* array of buffer info structs */ - struct igb_tx_buffer *tx_buffer_info; - struct igb_rx_buffer *rx_buffer_info; - }; -- void *desc; /* descriptor ring memory */ -- unsigned long flags; /* ring specific flags */ -- void __iomem *tail; /* pointer to ring tail register */ -+ void *desc; /* descriptor ring memory */ -+ unsigned long flags; /* ring specific flags */ -+ void __iomem *tail; /* pointer to ring tail register */ - dma_addr_t dma; /* phys address of the ring */ -- unsigned int size; /* length of desc. ring in bytes */ -+ unsigned int size; /* length of desc. ring in bytes */ - -- u16 count; /* number of desc. in the ring */ -- u8 queue_index; /* logical index of the ring*/ -- u8 reg_idx; /* physical index of the ring */ -+ u16 count; /* number of desc. in the ring */ -+ u8 queue_index; /* logical index of the ring*/ -+ u8 reg_idx; /* physical index of the ring */ - - /* everything past this point are written often */ - u16 next_to_clean; -@@ -257,16 +378,22 @@ - /* TX */ - struct { - struct igb_tx_queue_stats tx_stats; -- struct u64_stats_sync tx_syncp; -- struct u64_stats_sync tx_syncp2; - }; - /* RX */ - struct { -- struct sk_buff *skb; - struct igb_rx_queue_stats rx_stats; -- struct u64_stats_sync rx_syncp; -+ struct igb_rx_packet_stats pkt_stats; -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ u16 rx_buffer_len; -+#else -+ struct sk_buff *skb; -+#endif - }; - }; -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ struct net_device *vmdq_netdev; -+ int vqueue_index; /* queue index for virtual netdev */ -+#endif - } ____cacheline_internodealigned_in_smp; - - struct igb_q_vector { -@@ -281,29 +408,57 @@ - struct igb_ring_container rx, tx; - - struct napi_struct napi; -+#ifndef IGB_NO_LRO -+ struct igb_lro_list lrolist; /* LRO list for queue vector*/ -+#endif - struct rcu_head rcu; /* to avoid race with update stats on free */ - char name[IFNAMSIZ + 9]; -+#ifndef HAVE_NETDEV_NAPI_LIST -+ struct net_device poll_dev; -+#endif - - /* for dynamic allocation of rings associated with this q_vector */ - struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; - }; - - enum e1000_ring_flags_t { -+#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES) -+ IGB_RING_FLAG_RX_CSUM, -+#endif - IGB_RING_FLAG_RX_SCTP_CSUM, - IGB_RING_FLAG_RX_LB_VLAN_BSWAP, - IGB_RING_FLAG_TX_CTX_IDX, -- IGB_RING_FLAG_TX_DETECT_HANG -+ IGB_RING_FLAG_TX_DETECT_HANG, - }; - -+struct igb_mac_addr { -+ u8 addr[ETH_ALEN]; -+ u16 queue; -+ u16 state; /* bitmask */ -+}; -+#define IGB_MAC_STATE_DEFAULT 0x1 -+#define IGB_MAC_STATE_MODIFIED 0x2 -+#define IGB_MAC_STATE_IN_USE 0x4 -+ - #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) - --#define IGB_RX_DESC(R, i) \ -+#define IGB_RX_DESC(R, i) \ - (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) --#define IGB_TX_DESC(R, i) \ -+#define IGB_TX_DESC(R, i) \ - (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) --#define IGB_TX_CTXTDESC(R, i) \ -+#define IGB_TX_CTXTDESC(R, i) \ - (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) - -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+#define netdev_ring(ring) \ -+ ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev)) -+#define ring_queue_index(ring) \ -+ ((ring->vmdq_netdev ? 
ring->vqueue_index : ring->queue_index)) -+#else -+#define netdev_ring(ring) (ring->netdev) -+#define ring_queue_index(ring) (ring->queue_index) -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+ - /* igb_test_staterr - tests bits within Rx descriptor status and error fields */ - static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, - const u32 stat_err_bits) -@@ -312,16 +467,27 @@ - } - - /* igb_desc_unused - calculate if we have unused descriptors */ --static inline int igb_desc_unused(struct igb_ring *ring) -+static inline u16 igb_desc_unused(const struct igb_ring *ring) - { -- if (ring->next_to_clean > ring->next_to_use) -- return ring->next_to_clean - ring->next_to_use - 1; -+ u16 ntc = ring->next_to_clean; -+ u16 ntu = ring->next_to_use; - -- return ring->count + ring->next_to_clean - ring->next_to_use - 1; -+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; - } - --#ifdef CONFIG_IGB_HWMON -+#ifdef CONFIG_BQL -+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) -+{ -+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); -+} -+#endif /* CONFIG_BQL */ - -+struct igb_therm_proc_data { -+ struct e1000_hw *hw; -+ struct e1000_thermal_diode_data *sensor_data; -+}; -+ -+#ifdef IGB_HWMON - #define IGB_HWMON_TYPE_LOC 0 - #define IGB_HWMON_TYPE_TEMP 1 - #define IGB_HWMON_TYPE_CAUTION 2 -@@ -335,69 +501,79 @@ - }; - - struct hwmon_buff { -- struct attribute_group group; -- const struct attribute_group *groups[2]; -- struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; -- struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; -+ struct device *device; -+ struct hwmon_attr *hwmon_list; - unsigned int n_hwmon; - }; --#endif -- -+#endif /* IGB_HWMON */ -+#ifdef ETHTOOL_GRXFHINDIR - #define IGB_RETA_SIZE 128 -+#endif /* ETHTOOL_GRXFHINDIR */ - - /* board specific private data structure */ - struct igb_adapter { -+#ifdef HAVE_VLAN_RX_REGISTER -+ /* vlgrp must be first member of structure */ -+ struct vlan_group *vlgrp; -+#else - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; -- -+#endif - struct net_device *netdev; - - unsigned long state; - unsigned int flags; - - unsigned int num_q_vectors; -- struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; -+ struct msix_entry *msix_entries; - -- /* Interrupt Throttle Rate */ -- u32 rx_itr_setting; -- u32 tx_itr_setting; -- u16 tx_itr; -- u16 rx_itr; - - /* TX */ - u16 tx_work_limit; - u32 tx_timeout_count; - int num_tx_queues; -- struct igb_ring *tx_ring[16]; -+ struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES]; - - /* RX */ - int num_rx_queues; -- struct igb_ring *rx_ring[16]; -- -- u32 max_frame_size; -- u32 min_frame_size; -+ struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES]; - - struct timer_list watchdog_timer; -+ struct timer_list dma_err_timer; - struct timer_list phy_info_timer; -- - u16 mng_vlan_id; - u32 bd_number; - u32 wol; - u32 en_mng_pt; - u16 link_speed; - u16 link_duplex; -+ u8 port_num; -+ -+ u8 __iomem *io_addr; /* for iounmap */ -+ -+ /* Interrupt Throttle Rate */ -+ u32 rx_itr_setting; -+ u32 tx_itr_setting; - - struct work_struct reset_task; - struct work_struct watchdog_task; -+ struct work_struct dma_err_task; - bool fc_autoneg; - u8 tx_timeout_factor; -- struct timer_list blink_timer; -- unsigned long led_status; -+ -+#ifdef DEBUG -+ bool tx_hang_detected; -+ bool disable_hw_reset; -+#endif -+ u32 max_frame_size; - - /* OS defined structs */ - struct pci_dev *pdev; -- -- spinlock_t stats64_lock; -- struct rtnl_link_stats64 stats64; -+#ifndef HAVE_NETDEV_STATS_IN_NETDEV -+ struct 
net_device_stats net_stats; -+#endif -+#ifndef IGB_NO_LRO -+ struct igb_lro_stats lro_stats; -+#endif - - /* structs defined in e1000_hw.h */ - struct e1000_hw hw; -@@ -405,9 +581,11 @@ - struct e1000_phy_info phy_info; - struct e1000_phy_stats phy_stats; - -+#ifdef ETHTOOL_TEST - u32 test_icr; - struct igb_ring test_tx_ring; - struct igb_ring test_rx_ring; -+#endif - - int msg_enable; - -@@ -416,15 +594,48 @@ - u32 eims_other; - - /* to not mess up cache alignment, always add to the bottom */ -+ u32 *config_space; - u16 tx_ring_count; - u16 rx_ring_count; -- unsigned int vfs_allocated_count; - struct vf_data_storage *vf_data; -+#ifdef IFLA_VF_MAX - int vf_rate_link_speed; -+#endif -+ u32 lli_port; -+ u32 lli_size; -+ unsigned int vfs_allocated_count; -+ /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */ -+ bool mdd; -+ int int_mode; - u32 rss_queues; -+ u32 tss_queues; -+ u32 vmdq_pools; -+ char fw_version[32]; - u32 wvbr; -+ struct igb_mac_addr *mac_table; -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES]; -+#endif -+ int vferr_refcount; -+ int dmac; - u32 *shadow_vfta; - -+ /* External Thermal Sensor support flag */ -+ bool ets; -+#ifdef IGB_HWMON -+ struct hwmon_buff igb_hwmon_buff; -+#else /* IGB_HWMON */ -+#ifdef IGB_PROCFS -+ struct proc_dir_entry *eth_dir; -+ struct proc_dir_entry *info_dir; -+ struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS]; -+ struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS]; -+ bool old_lsc; -+#endif /* IGB_PROCFS */ -+#endif /* IGB_HWMON */ -+ u32 etrack_id; -+ -+#ifdef HAVE_PTP_1588_CLOCK - struct ptp_clock *ptp_clock; - struct ptp_clock_info ptp_caps; - struct delayed_work ptp_overflow_work; -@@ -439,39 +650,57 @@ - struct timecounter tc; - u32 tx_hwtstamp_timeouts; - u32 rx_hwtstamp_cleared; -+#endif /* HAVE_PTP_1588_CLOCK */ - -- char fw_version[32]; --#ifdef CONFIG_IGB_HWMON -- struct hwmon_buff *igb_hwmon_buff; -- bool ets; --#endif -+#ifdef HAVE_I2C_SUPPORT - struct i2c_algo_bit_data i2c_algo; - struct i2c_adapter i2c_adap; - struct i2c_client *i2c_client; -- u32 rss_indir_tbl_init; -- u8 rss_indir_tbl[IGB_RETA_SIZE]; -- -+#endif /* HAVE_I2C_SUPPORT */ - unsigned long link_check_timeout; -+ -+ int devrc; -+ - int copper_tries; -- struct e1000_info ei; - u16 eee_advert; -+#ifdef ETHTOOL_GRXFHINDIR -+ u32 rss_indir_tbl_init; -+ u8 rss_indir_tbl[IGB_RETA_SIZE]; -+#endif -+}; -+ -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+struct igb_vmdq_adapter { -+#ifdef HAVE_VLAN_RX_REGISTER -+ /* vlgrp must be first member of structure */ -+ struct vlan_group *vlgrp; -+#else -+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; -+#endif -+ struct igb_adapter *real_adapter; -+ struct net_device *vnetdev; -+ struct net_device_stats net_stats; -+ struct igb_ring *tx_ring; -+ struct igb_ring *rx_ring; - }; -+#endif - - #define IGB_FLAG_HAS_MSI (1 << 0) - #define IGB_FLAG_DCA_ENABLED (1 << 1) --#define IGB_FLAG_QUAD_PORT_A (1 << 2) --#define IGB_FLAG_QUEUE_PAIRS (1 << 3) --#define IGB_FLAG_DMAC (1 << 4) --#define IGB_FLAG_PTP (1 << 5) --#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) --#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) --#define IGB_FLAG_WOL_SUPPORTED (1 << 8) --#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) --#define IGB_FLAG_MEDIA_RESET (1 << 10) --#define IGB_FLAG_MAS_CAPABLE (1 << 11) --#define IGB_FLAG_MAS_ENABLE (1 << 12) --#define IGB_FLAG_HAS_MSIX (1 << 13) --#define IGB_FLAG_EEE (1 << 14) -+#define IGB_FLAG_LLI_PUSH (1 << 2) -+#define IGB_FLAG_QUAD_PORT_A (1 << 3) -+#define IGB_FLAG_QUEUE_PAIRS (1 
<< 4) -+#define IGB_FLAG_EEE (1 << 5) -+#define IGB_FLAG_DMAC (1 << 6) -+#define IGB_FLAG_DETECT_BAD_DMA (1 << 7) -+#define IGB_FLAG_PTP (1 << 8) -+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9) -+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10) -+#define IGB_FLAG_WOL_SUPPORTED (1 << 11) -+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12) -+#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13) -+#define IGB_FLAG_MEDIA_RESET (1 << 14) -+#define IGB_FLAG_MAS_ENABLE (1 << 15) - - /* Media Auto Sense */ - #define IGB_MAS_ENABLE_0 0X0001 -@@ -479,13 +708,63 @@ - #define IGB_MAS_ENABLE_2 0X0004 - #define IGB_MAS_ENABLE_3 0X0008 - -+#define IGB_MIN_TXPBSIZE 20408 -+#define IGB_TX_BUF_4096 4096 -+ -+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ -+ - /* DMA Coalescing defines */ --#define IGB_MIN_TXPBSIZE 20408 --#define IGB_TX_BUF_4096 4096 --#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ -+#define IGB_DMAC_DISABLE 0 -+#define IGB_DMAC_MIN 250 -+#define IGB_DMAC_500 500 -+#define IGB_DMAC_EN_DEFAULT 1000 -+#define IGB_DMAC_2000 2000 -+#define IGB_DMAC_3000 3000 -+#define IGB_DMAC_4000 4000 -+#define IGB_DMAC_5000 5000 -+#define IGB_DMAC_6000 6000 -+#define IGB_DMAC_7000 7000 -+#define IGB_DMAC_8000 8000 -+#define IGB_DMAC_9000 9000 -+#define IGB_DMAC_MAX 10000 -+ -+#define IGB_82576_TSYNC_SHIFT 19 -+#define IGB_82580_TSYNC_SHIFT 24 -+#define IGB_TS_HDR_LEN 16 -+ -+/* CEM Support */ -+#define FW_HDR_LEN 0x4 -+#define FW_CMD_DRV_INFO 0xDD -+#define FW_CMD_DRV_INFO_LEN 0x5 -+#define FW_CMD_RESERVED 0X0 -+#define FW_RESP_SUCCESS 0x1 -+#define FW_UNUSED_VER 0x0 -+#define FW_MAX_RETRIES 3 -+#define FW_STATUS_SUCCESS 0x1 -+#define FW_FAMILY_DRV_VER 0Xffffffff -+ -+#define IGB_MAX_LINK_TRIES 20 -+ -+struct e1000_fw_hdr { -+ u8 cmd; -+ u8 buf_len; -+ union { -+ u8 cmd_resv; -+ u8 ret_status; -+ } cmd_or_resp; -+ u8 checksum; -+}; -+ -+#pragma pack(push, 1) -+struct e1000_fw_drv_info { -+ struct e1000_fw_hdr hdr; -+ u8 port_num; -+ u32 drv_version; -+ u16 pad; /* end spacing to ensure length is mult. of dword */ -+ u8 pad2; /* end spacing to ensure length is mult. 
of dword2 */ -+}; -+#pragma pack(pop) - --#define IGB_82576_TSYNC_SHIFT 19 --#define IGB_TS_HDR_LEN 16 - enum e1000_state_t { - __IGB_TESTING, - __IGB_RESETTING, -@@ -493,85 +772,82 @@ - __IGB_PTP_TX_IN_PROGRESS, - }; - --enum igb_boards { -- board_82575, --}; -- - extern char igb_driver_name[]; - extern char igb_driver_version[]; - --int igb_up(struct igb_adapter *); --void igb_down(struct igb_adapter *); --void igb_reinit_locked(struct igb_adapter *); --void igb_reset(struct igb_adapter *); --int igb_reinit_queues(struct igb_adapter *); --void igb_write_rss_indir_tbl(struct igb_adapter *); --int igb_set_spd_dplx(struct igb_adapter *, u32, u8); --int igb_setup_tx_resources(struct igb_ring *); --int igb_setup_rx_resources(struct igb_ring *); --void igb_free_tx_resources(struct igb_ring *); --void igb_free_rx_resources(struct igb_ring *); --void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); --void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); --void igb_setup_tctl(struct igb_adapter *); --void igb_setup_rctl(struct igb_adapter *); --netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); --void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); --void igb_alloc_rx_buffers(struct igb_ring *, u16); --void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); --bool igb_has_link(struct igb_adapter *adapter); --void igb_set_ethtool_ops(struct net_device *); --void igb_power_up_link(struct igb_adapter *); --void igb_set_fw_version(struct igb_adapter *); --void igb_ptp_init(struct igb_adapter *adapter); --void igb_ptp_stop(struct igb_adapter *adapter); --void igb_ptp_reset(struct igb_adapter *adapter); --void igb_ptp_rx_hang(struct igb_adapter *adapter); --void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); --void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, -- struct sk_buff *skb); -+extern int igb_open(struct net_device *netdev); -+extern int igb_close(struct net_device *netdev); -+extern int igb_up(struct igb_adapter *); -+extern void igb_down(struct igb_adapter *); -+extern void igb_reinit_locked(struct igb_adapter *); -+extern void igb_reset(struct igb_adapter *); -+extern int igb_reinit_queues(struct igb_adapter *); -+#ifdef ETHTOOL_SRXFHINDIR -+extern void igb_write_rss_indir_tbl(struct igb_adapter *); -+#endif -+extern int igb_set_spd_dplx(struct igb_adapter *, u16); -+extern int igb_setup_tx_resources(struct igb_ring *); -+extern int igb_setup_rx_resources(struct igb_ring *); -+extern void igb_free_tx_resources(struct igb_ring *); -+extern void igb_free_rx_resources(struct igb_ring *); -+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); -+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); -+extern void igb_setup_tctl(struct igb_adapter *); -+extern void igb_setup_rctl(struct igb_adapter *); -+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); -+extern void igb_unmap_and_free_tx_resource(struct igb_ring *, -+ struct igb_tx_buffer *); -+extern void igb_alloc_rx_buffers(struct igb_ring *, u16); -+extern void igb_clean_rx_ring(struct igb_ring *); -+extern int igb_setup_queues(struct igb_adapter *adapter); -+extern void igb_update_stats(struct igb_adapter *); -+extern bool igb_has_link(struct igb_adapter *adapter); -+extern void igb_set_ethtool_ops(struct net_device *); -+extern void igb_check_options(struct igb_adapter *); -+extern void igb_power_up_link(struct igb_adapter *); 
-+#ifdef HAVE_PTP_1588_CLOCK -+extern void igb_ptp_init(struct igb_adapter *adapter); -+extern void igb_ptp_stop(struct igb_adapter *adapter); -+extern void igb_ptp_reset(struct igb_adapter *adapter); -+extern void igb_ptp_tx_work(struct work_struct *work); -+extern void igb_ptp_rx_hang(struct igb_adapter *adapter); -+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); -+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, -+ struct sk_buff *skb); -+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, -+ unsigned char *va, -+ struct sk_buff *skb); -+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, -+ struct ifreq *ifr, int cmd); -+#endif /* HAVE_PTP_1588_CLOCK */ -+#ifdef ETHTOOL_OPS_COMPAT -+extern int ethtool_ioctl(struct ifreq *); -+#endif -+extern int igb_write_mc_addr_list(struct net_device *netdev); -+extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); -+extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue); -+extern int igb_available_rars(struct igb_adapter *adapter); -+extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32); -+extern void igb_configure_vt_default_pool(struct igb_adapter *adapter); -+extern void igb_enable_vlan_tags(struct igb_adapter *adapter); -+#ifndef HAVE_VLAN_RX_REGISTER -+extern void igb_vlan_mode(struct net_device *, u32); -+#endif -+ -+#define E1000_PCS_CFG_IGN_SD 1 -+ - int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); - int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); --void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); --#ifdef CONFIG_IGB_HWMON -+#ifdef IGB_HWMON - void igb_sysfs_exit(struct igb_adapter *adapter); - int igb_sysfs_init(struct igb_adapter *adapter); --#endif --static inline s32 igb_reset_phy(struct e1000_hw *hw) --{ -- if (hw->phy.ops.reset) -- return hw->phy.ops.reset(hw); -- -- return 0; --} -- --static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) --{ -- if (hw->phy.ops.read_reg) -- return hw->phy.ops.read_reg(hw, offset, data); -- -- return 0; --} -- --static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) --{ -- if (hw->phy.ops.write_reg) -- return hw->phy.ops.write_reg(hw, offset, data); -- -- return 0; --} -- --static inline s32 igb_get_phy_info(struct e1000_hw *hw) --{ -- if (hw->phy.ops.get_phy_info) -- return hw->phy.ops.get_phy_info(hw); -- -- return 0; --} -- --static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) --{ -- return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); --} -+#else -+#ifdef IGB_PROCFS -+int igb_procfs_init(struct igb_adapter *adapter); -+void igb_procfs_exit(struct igb_adapter *adapter); -+int igb_procfs_topdir_init(void); -+void igb_procfs_topdir_exit(void); -+#endif /* IGB_PROCFS */ -+#endif /* IGB_HWMON */ - - #endif /* _IGB_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_debugfs.c b/drivers/net/ethernet/intel/igb/igb_debugfs.c ---- a/drivers/net/ethernet/intel/igb/igb_debugfs.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_debugfs.c 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,26 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. 
-+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "igb.h" -+ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c ---- a/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,43 +1,50 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - /* ethtool support for igb */ - --#include - #include --#include --#include --#include --#include -+#include -+ -+#ifdef SIOCETHTOOL - #include --#include --#include -+#ifdef CONFIG_PM_RUNTIME - #include -+#endif /* CONFIG_PM_RUNTIME */ - #include --#include - - #include "igb.h" -+#include "igb_regtest.h" -+#include -+#ifdef ETHTOOL_GEEE -+#include -+#endif - -+#ifdef ETHTOOL_OPS_COMPAT -+#include "kcompat_ethtool.c" -+#endif -+#ifdef ETHTOOL_GSTATS - struct igb_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; -@@ -49,6 +56,7 @@ - .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ - .stat_offset = offsetof(struct igb_adapter, _stat) \ - } -+ - static const struct igb_stats igb_gstrings_stats[] = { - IGB_STAT("rx_packets", stats.gprc), - IGB_STAT("tx_packets", stats.gptc), -@@ -82,6 +90,10 @@ - IGB_STAT("tx_flow_control_xoff", stats.xofftxc), - IGB_STAT("rx_long_byte_count", stats.gorc), - IGB_STAT("tx_dma_out_of_sync", stats.doosync), -+#ifndef IGB_NO_LRO -+ IGB_STAT("lro_aggregated", lro_stats.coal), -+ IGB_STAT("lro_flushed", lro_stats.flushed), -+#endif /* IGB_LRO */ - IGB_STAT("tx_smbus", stats.mgptc), - IGB_STAT("rx_smbus", stats.mgprc), - IGB_STAT("dropped_smbus", stats.mgpdc), -@@ -89,15 +101,18 @@ - IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), - IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), - IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), -+#ifdef HAVE_PTP_1588_CLOCK - IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), - IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), -+#endif /* HAVE_PTP_1588_CLOCK */ - }; - - #define IGB_NETDEV_STAT(_net_stat) { \ -- .stat_string = __stringify(_net_stat), \ -- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ -- .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ -+ .stat_string = #_net_stat, \ -+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ -+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \ - } -+ - static const struct igb_stats igb_gstrings_net_stats[] = { - IGB_NETDEV_STAT(rx_errors), - IGB_NETDEV_STAT(tx_errors), -@@ -110,15 +125,12 @@ - IGB_NETDEV_STAT(tx_heartbeat_errors) - }; - --#define IGB_GLOBAL_STATS_LEN \ -- (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) --#define IGB_NETDEV_STATS_LEN \ -- (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) -+#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats) -+#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats) - #define IGB_RX_QUEUE_STATS_LEN \ - (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) -- --#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ -- -+#define IGB_TX_QUEUE_STATS_LEN \ -+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) - #define IGB_QUEUE_STATS_LEN \ - ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ - IGB_RX_QUEUE_STATS_LEN) + \ -@@ -127,23 +139,23 @@ - #define IGB_STATS_LEN \ - (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) - -+#endif /* ETHTOOL_GSTATS */ -+#ifdef ETHTOOL_TEST - static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" - }; -+ - #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) -+#endif /* ETHTOOL_TEST */ - - static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) - 
{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; -- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; -- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; - u32 status; -- u32 speed; - -- status = rd32(E1000_STATUS); - if (hw->phy.media_type == e1000_media_type_copper) { - - ecmd->supported = (SUPPORTED_10baseT_Half | -@@ -165,80 +177,85 @@ - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy.addr; - ecmd->transceiver = XCVR_INTERNAL; -+ - } else { -- ecmd->supported = (SUPPORTED_FIBRE | -- SUPPORTED_1000baseKX_Full | -+ ecmd->supported = (SUPPORTED_1000baseT_Full | -+ SUPPORTED_100baseT_Full | -+ SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause); -- ecmd->advertising = (ADVERTISED_FIBRE | -- ADVERTISED_1000baseKX_Full); -- if (hw->mac.type == e1000_i354) { -- if ((hw->device_id == -- E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && -- !(status & E1000_STATUS_2P5_SKU_OVER)) { -- ecmd->supported |= SUPPORTED_2500baseX_Full; -- ecmd->supported &= -- ~SUPPORTED_1000baseKX_Full; -- ecmd->advertising |= ADVERTISED_2500baseX_Full; -- ecmd->advertising &= -- ~ADVERTISED_1000baseKX_Full; -- } -- } -- if (eth_flags->e100_base_fx) { -- ecmd->supported |= SUPPORTED_100baseT_Full; -- ecmd->advertising |= ADVERTISED_100baseT_Full; -+ if (hw->mac.type == e1000_i354) -+ ecmd->supported |= (SUPPORTED_2500baseX_Full); -+ -+ ecmd->advertising = ADVERTISED_FIBRE; -+ -+ switch (adapter->link_speed) { -+ case SPEED_2500: -+ ecmd->advertising = ADVERTISED_2500baseX_Full; -+ break; -+ case SPEED_1000: -+ ecmd->advertising = ADVERTISED_1000baseT_Full; -+ break; -+ case SPEED_100: -+ ecmd->advertising = ADVERTISED_100baseT_Full; -+ break; -+ default: -+ break; - } -+ - if (hw->mac.autoneg == 1) - ecmd->advertising |= ADVERTISED_Autoneg; - - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - } -+ - if (hw->mac.autoneg != 1) - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - -- switch (hw->fc.requested_mode) { -- case e1000_fc_full: -+ if (hw->fc.requested_mode == e1000_fc_full) - ecmd->advertising |= ADVERTISED_Pause; -- break; -- case e1000_fc_rx_pause: -+ else if (hw->fc.requested_mode == e1000_fc_rx_pause) - ecmd->advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); -- break; -- case e1000_fc_tx_pause: -+ else if (hw->fc.requested_mode == e1000_fc_tx_pause) - ecmd->advertising |= ADVERTISED_Asym_Pause; -- break; -- default: -+ else - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); -- } -+ -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ - if (status & E1000_STATUS_LU) { -- if ((status & E1000_STATUS_2P5_SKU) && -- !(status & E1000_STATUS_2P5_SKU_OVER)) { -- speed = SPEED_2500; -- } else if (status & E1000_STATUS_SPEED_1000) { -- speed = SPEED_1000; -- } else if (status & E1000_STATUS_SPEED_100) { -- speed = SPEED_100; -- } else { -- speed = SPEED_10; -- } -+ if ((hw->mac.type == e1000_i354) && -+ (status & E1000_STATUS_2P5_SKU) && -+ !(status & E1000_STATUS_2P5_SKU_OVER)) -+ ethtool_cmd_speed_set(ecmd, SPEED_2500); -+ else if (status & E1000_STATUS_SPEED_1000) -+ ethtool_cmd_speed_set(ecmd, SPEED_1000); -+ else if (status & E1000_STATUS_SPEED_100) -+ ethtool_cmd_speed_set(ecmd, SPEED_100); -+ else -+ ethtool_cmd_speed_set(ecmd, SPEED_10); -+ - if ((status & E1000_STATUS_FD) || - hw->phy.media_type != e1000_media_type_copper) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; -+ - } else { -- speed = SPEED_UNKNOWN; -- ecmd->duplex = DUPLEX_UNKNOWN; -+ 
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); -+ ecmd->duplex = -1; - } -- ethtool_cmd_speed_set(ecmd, speed); -+ - if ((hw->phy.media_type == e1000_media_type_fiber) || - hw->mac.autoneg) - ecmd->autoneg = AUTONEG_ENABLE; - else - ecmd->autoneg = AUTONEG_DISABLE; -+#ifdef ETH_TP_MDI_X - - /* MDI-X => 2; MDI =>1; Invalid =>0 */ - if (hw->phy.media_type == e1000_media_type_copper) -@@ -247,11 +264,14 @@ - else - ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; - -+#ifdef ETH_TP_MDI_AUTO - if (hw->phy.mdix == AUTO_ALL_MODES) - ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; - else - ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; - -+#endif -+#endif /* ETH_TP_MDI_X */ - return 0; - } - -@@ -260,16 +280,26 @@ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - -+ if (ecmd->duplex == DUPLEX_HALF) { -+ if (!hw->dev_spec._82575.eee_disable) -+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n"); -+ hw->dev_spec._82575.eee_disable = true; -+ } else { -+ if (hw->dev_spec._82575.eee_disable) -+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n"); -+ hw->dev_spec._82575.eee_disable = false; -+ } -+ - /* When SoL/IDER sessions are active, autoneg/speed/duplex -- * cannot be changed -- */ -- if (igb_check_reset_block(hw)) { -- dev_err(&adapter->pdev->dev, -- "Cannot change link characteristics when SoL/IDER is active.\n"); -+ * cannot be changed */ -+ if (e1000_check_reset_block(hw)) { -+ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link characteristics when SoL/IDER is active.\n"); - return -EINVAL; - } - -- /* MDI setting is only allowed when autoneg enabled because -+#ifdef ETH_TP_MDI_AUTO -+ /* -+ * MDI setting is only allowed when autoneg enabled because - * some hardware doesn't allow MDI setting when speed or - * duplex is forced. - */ -@@ -284,6 +314,7 @@ - } - } - -+#endif /* ETH_TP_MDI_AUTO */ - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - -@@ -318,14 +349,13 @@ - if (adapter->fc_autoneg) - hw->fc.requested_mode = e1000_fc_default; - } else { -- u32 speed = ethtool_cmd_speed(ecmd); -- /* calling this overrides forced MDI setting */ -- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { -+ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { - clear_bit(__IGB_RESETTING, &adapter->state); - return -EINVAL; - } - } - -+#ifdef ETH_TP_MDI_AUTO - /* MDI-X => 2; MDI => 1; Auto => 3 */ - if (ecmd->eth_tp_mdix_ctrl) { - /* fix up the value for auto (3 => 0) as zero is mapped -@@ -337,6 +367,7 @@ - hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; - } - -+#endif /* ETH_TP_MDI_AUTO */ - /* reset the link */ - if (netif_running(adapter->netdev)) { - igb_down(adapter); -@@ -353,7 +384,8 @@ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_mac_info *mac = &adapter->hw.mac; - -- /* If the link is not reported up to netdev, interrupts are disabled, -+ /* -+ * If the link is not reported up to netdev, interrupts are disabled, - * and so the physical link state may have changed since we last - * looked. 
Set get_link_status to make sure that the true link - * state is interrogated, rather than pulling a cached and possibly -@@ -391,10 +423,6 @@ - struct e1000_hw *hw = &adapter->hw; - int retval = 0; - -- /* 100basefx does not support setting link flow control */ -- if (hw->dev_spec._82575.eth_flags.e100_base_fx) -- return -EINVAL; -- - adapter->fc_autoneg = pause->autoneg; - - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) -@@ -420,10 +448,18 @@ - - hw->fc.current_mode = hw->fc.requested_mode; - -- retval = ((hw->phy.media_type == e1000_media_type_copper) ? -- igb_force_mac_fc(hw) : igb_setup_link(hw)); -+ if (hw->phy.media_type == e1000_media_type_fiber) { -+ retval = hw->mac.ops.setup_link(hw); -+ /* implicit goto out */ -+ } else { -+ retval = igb_e1000_force_mac_fc(hw); -+ if (retval) -+ goto out; -+ e1000_set_fc_watermarks_generic(hw); -+ } - } - -+out: - clear_bit(__IGB_RESETTING, &adapter->state); - return retval; - } -@@ -442,7 +478,7 @@ - - static int igb_get_regs_len(struct net_device *netdev) - { --#define IGB_REGS_LEN 739 -+#define IGB_REGS_LEN 555 - return IGB_REGS_LEN * sizeof(u32); - } - -@@ -459,80 +495,78 @@ - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; - - /* General Registers */ -- regs_buff[0] = rd32(E1000_CTRL); -- regs_buff[1] = rd32(E1000_STATUS); -- regs_buff[2] = rd32(E1000_CTRL_EXT); -- regs_buff[3] = rd32(E1000_MDIC); -- regs_buff[4] = rd32(E1000_SCTL); -- regs_buff[5] = rd32(E1000_CONNSW); -- regs_buff[6] = rd32(E1000_VET); -- regs_buff[7] = rd32(E1000_LEDCTL); -- regs_buff[8] = rd32(E1000_PBA); -- regs_buff[9] = rd32(E1000_PBS); -- regs_buff[10] = rd32(E1000_FRTIMER); -- regs_buff[11] = rd32(E1000_TCPTIMER); -+ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL); -+ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS); -+ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC); -+ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL); -+ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW); -+ regs_buff[6] = E1000_READ_REG(hw, E1000_VET); -+ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL); -+ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA); -+ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS); -+ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER); -+ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER); - - /* NVM Register */ -- regs_buff[12] = rd32(E1000_EECD); -+ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD); - - /* Interrupt */ - /* Reading EICS for EICR because they read the -- * same but EICS does not clear on read -- */ -- regs_buff[13] = rd32(E1000_EICS); -- regs_buff[14] = rd32(E1000_EICS); -- regs_buff[15] = rd32(E1000_EIMS); -- regs_buff[16] = rd32(E1000_EIMC); -- regs_buff[17] = rd32(E1000_EIAC); -- regs_buff[18] = rd32(E1000_EIAM); -+ * same but EICS does not clear on read */ -+ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS); -+ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS); -+ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS); -+ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC); -+ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC); -+ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM); - /* Reading ICS for ICR because they read the -- * same but ICS does not clear on read -- */ -- regs_buff[19] = rd32(E1000_ICS); -- regs_buff[20] = rd32(E1000_ICS); -- regs_buff[21] = rd32(E1000_IMS); -- regs_buff[22] = rd32(E1000_IMC); -- regs_buff[23] = rd32(E1000_IAC); -- regs_buff[24] = rd32(E1000_IAM); -- regs_buff[25] = rd32(E1000_IMIRVP); -+ * same but ICS does not clear on read */ -+ regs_buff[19] = E1000_READ_REG(hw, 
E1000_ICS); -+ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS); -+ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS); -+ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC); -+ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC); -+ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM); -+ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP); - - /* Flow Control */ -- regs_buff[26] = rd32(E1000_FCAL); -- regs_buff[27] = rd32(E1000_FCAH); -- regs_buff[28] = rd32(E1000_FCTTV); -- regs_buff[29] = rd32(E1000_FCRTL); -- regs_buff[30] = rd32(E1000_FCRTH); -- regs_buff[31] = rd32(E1000_FCRTV); -+ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL); -+ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH); -+ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV); -+ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL); -+ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH); -+ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV); - - /* Receive */ -- regs_buff[32] = rd32(E1000_RCTL); -- regs_buff[33] = rd32(E1000_RXCSUM); -- regs_buff[34] = rd32(E1000_RLPML); -- regs_buff[35] = rd32(E1000_RFCTL); -- regs_buff[36] = rd32(E1000_MRQC); -- regs_buff[37] = rd32(E1000_VT_CTL); -+ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL); -+ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM); -+ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML); -+ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL); -+ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC); -+ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL); - - /* Transmit */ -- regs_buff[38] = rd32(E1000_TCTL); -- regs_buff[39] = rd32(E1000_TCTL_EXT); -- regs_buff[40] = rd32(E1000_TIPG); -- regs_buff[41] = rd32(E1000_DTXCTL); -+ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL); -+ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT); -+ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG); -+ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL); - - /* Wake Up */ -- regs_buff[42] = rd32(E1000_WUC); -- regs_buff[43] = rd32(E1000_WUFC); -- regs_buff[44] = rd32(E1000_WUS); -- regs_buff[45] = rd32(E1000_IPAV); -- regs_buff[46] = rd32(E1000_WUPL); -+ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC); -+ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC); -+ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS); -+ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV); -+ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL); - - /* MAC */ -- regs_buff[47] = rd32(E1000_PCS_CFG0); -- regs_buff[48] = rd32(E1000_PCS_LCTL); -- regs_buff[49] = rd32(E1000_PCS_LSTAT); -- regs_buff[50] = rd32(E1000_PCS_ANADV); -- regs_buff[51] = rd32(E1000_PCS_LPAB); -- regs_buff[52] = rd32(E1000_PCS_NPTX); -- regs_buff[53] = rd32(E1000_PCS_LPABNP); -+ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0); -+ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL); -+ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT); -+ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV); -+ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB); -+ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX); -+ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP); - - /* Statistics */ - regs_buff[54] = adapter->stats.crcerrs; -@@ -598,112 +632,75 @@ - regs_buff[120] = adapter->stats.hrmpc; - - for (i = 0; i < 4; i++) -- regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); -+ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i)); - for (i = 0; i < 4; i++) -- regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); -+ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i)); - for (i = 0; i < 4; i++) -- regs_buff[129 + i] = rd32(E1000_RDBAL(i)); -+ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i)); - for (i = 0; i < 4; i++) -- regs_buff[133 + i] = 
rd32(E1000_RDBAH(i)); -+ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i)); - for (i = 0; i < 4; i++) -- regs_buff[137 + i] = rd32(E1000_RDLEN(i)); -+ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i)); - for (i = 0; i < 4; i++) -- regs_buff[141 + i] = rd32(E1000_RDH(i)); -+ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i)); - for (i = 0; i < 4; i++) -- regs_buff[145 + i] = rd32(E1000_RDT(i)); -+ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i)); - for (i = 0; i < 4; i++) -- regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); -+ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); - - for (i = 0; i < 10; i++) -- regs_buff[153 + i] = rd32(E1000_EITR(i)); -+ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i)); - for (i = 0; i < 8; i++) -- regs_buff[163 + i] = rd32(E1000_IMIR(i)); -+ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i)); - for (i = 0; i < 8; i++) -- regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); -+ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i)); - for (i = 0; i < 16; i++) -- regs_buff[179 + i] = rd32(E1000_RAL(i)); -+ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i)); - for (i = 0; i < 16; i++) -- regs_buff[195 + i] = rd32(E1000_RAH(i)); -+ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i)); - - for (i = 0; i < 4; i++) -- regs_buff[211 + i] = rd32(E1000_TDBAL(i)); -+ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i)); - for (i = 0; i < 4; i++) -- regs_buff[215 + i] = rd32(E1000_TDBAH(i)); -+ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i)); - for (i = 0; i < 4; i++) -- regs_buff[219 + i] = rd32(E1000_TDLEN(i)); -+ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i)); - for (i = 0; i < 4; i++) -- regs_buff[223 + i] = rd32(E1000_TDH(i)); -+ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i)); - for (i = 0; i < 4; i++) -- regs_buff[227 + i] = rd32(E1000_TDT(i)); -+ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i)); - for (i = 0; i < 4; i++) -- regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); -+ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i)); - for (i = 0; i < 4; i++) -- regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); -+ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i)); - for (i = 0; i < 4; i++) -- regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); -+ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i)); - for (i = 0; i < 4; i++) -- regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); -+ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); - - for (i = 0; i < 4; i++) -- regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); -+ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i)); - for (i = 0; i < 4; i++) -- regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); -+ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i)); - for (i = 0; i < 32; i++) -- regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); -+ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i)); - for (i = 0; i < 128; i++) -- regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); -+ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i)); - for (i = 0; i < 128; i++) -- regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); -+ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i)); - for (i = 0; i < 4; i++) -- regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); -- -- regs_buff[547] = rd32(E1000_TDFH); -- regs_buff[548] = rd32(E1000_TDFT); -- regs_buff[549] = rd32(E1000_TDFHS); -- regs_buff[550] = rd32(E1000_TDFPC); -+ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i)); - -+ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH); -+ regs_buff[548] = E1000_READ_REG(hw, 
E1000_TDFT); -+ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS); -+ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC); - if (hw->mac.type > e1000_82580) { - regs_buff[551] = adapter->stats.o2bgptc; - regs_buff[552] = adapter->stats.b2ospc; - regs_buff[553] = adapter->stats.o2bspc; - regs_buff[554] = adapter->stats.b2ogprc; - } -- -- if (hw->mac.type != e1000_82576) -- return; -- for (i = 0; i < 12; i++) -- regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); -- for (i = 0; i < 4; i++) -- regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); -- -- for (i = 0; i < 12; i++) -- regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); -- for (i = 0; i < 12; i++) -- regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); - } - - static int igb_get_eeprom_len(struct net_device *netdev) -@@ -736,13 +733,13 @@ - return -ENOMEM; - - if (hw->nvm.type == e1000_nvm_eeprom_spi) -- ret_val = hw->nvm.ops.read(hw, first_word, -- last_word - first_word + 1, -- eeprom_buff); -+ ret_val = e1000_read_nvm(hw, first_word, -+ last_word - first_word + 1, -+ eeprom_buff); - else { - for (i = 0; i < last_word - first_word + 1; i++) { -- ret_val = hw->nvm.ops.read(hw, first_word + i, 1, -- &eeprom_buff[i]); -+ ret_val = e1000_read_nvm(hw, first_word + i, 1, -+ &eeprom_buff[i]); - if (ret_val) - break; - } -@@ -750,7 +747,7 @@ - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) -- le16_to_cpus(&eeprom_buff[i]); -+ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), - eeprom->len); -@@ -772,11 +769,6 @@ - if (eeprom->len == 0) - return -EOPNOTSUPP; - -- if ((hw->mac.type >= e1000_i210) && -- !igb_get_flash_presence_i210(hw)) { -- return -EOPNOTSUPP; -- } -- - if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) - return -EFAULT; - -@@ -791,19 +783,17 @@ - ptr = (void *)eeprom_buff; - - if (eeprom->offset & 1) { -- /* need read/modify/write of first changed EEPROM word -- * only the second byte of the word is being modified -- */ -- ret_val = hw->nvm.ops.read(hw, first_word, 1, -+ /* need read/modify/write of first changed EEPROM word */ -+ /* only the second byte of the word is being modified */ -+ ret_val = e1000_read_nvm(hw, first_word, 1, - &eeprom_buff[0]); - ptr++; - } - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { -- /* need read/modify/write of last changed EEPROM word -- * only the first byte of the word is being modified -- */ -- ret_val = hw->nvm.ops.read(hw, last_word, 1, -- &eeprom_buff[last_word - first_word]); -+ /* need read/modify/write of last changed EEPROM word */ -+ /* 
only the first byte of the word is being modified */ -+ ret_val = e1000_read_nvm(hw, last_word, 1, -+ &eeprom_buff[last_word - first_word]); - } - - /* Device's eeprom is always little-endian, word addressable */ -@@ -813,16 +803,16 @@ - memcpy(ptr, bytes, eeprom->len); - - for (i = 0; i < last_word - first_word + 1; i++) -- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); -+ cpu_to_le16s(&eeprom_buff[i]); - -- ret_val = hw->nvm.ops.write(hw, first_word, -- last_word - first_word + 1, eeprom_buff); -+ ret_val = e1000_write_nvm(hw, first_word, -+ last_word - first_word + 1, eeprom_buff); - -- /* Update the checksum if nvm write succeeded */ -+ /* Update the checksum if write succeeded. -+ * and flush shadow RAM for 82573 controllers */ - if (ret_val == 0) -- hw->nvm.ops.update(hw); -+ e1000_update_nvm_checksum(hw); - -- igb_set_fw_version(adapter); - kfree(eeprom_buff); - return ret_val; - } -@@ -832,16 +822,14 @@ - { - struct igb_adapter *adapter = netdev_priv(netdev); - -- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); -- strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); -- -- /* EEPROM image version # is reported as firmware version # for -- * 82575 controllers -- */ -- strlcpy(drvinfo->fw_version, adapter->fw_version, -- sizeof(drvinfo->fw_version)); -- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), -- sizeof(drvinfo->bus_info)); -+ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); -+ strncpy(drvinfo->version, igb_driver_version, -+ sizeof(drvinfo->version) - 1); -+ -+ strncpy(drvinfo->fw_version, adapter->fw_version, -+ sizeof(drvinfo->fw_version) - 1); -+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), -+ sizeof(drvinfo->bus_info) - 1); - drvinfo->n_stats = IGB_STATS_LEN; - drvinfo->testinfo_len = IGB_TEST_LEN; - drvinfo->regdump_len = igb_get_regs_len(netdev); -@@ -855,8 +843,12 @@ - - ring->rx_max_pending = IGB_MAX_RXD; - ring->tx_max_pending = IGB_MAX_TXD; -+ ring->rx_mini_max_pending = 0; -+ ring->rx_jumbo_max_pending = 0; - ring->rx_pending = adapter->rx_ring_count; - ring->tx_pending = adapter->tx_ring_count; -+ ring->rx_mini_pending = 0; -+ ring->rx_jumbo_pending = 0; - } - - static int igb_set_ringparam(struct net_device *netdev, -@@ -870,12 +862,12 @@ - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - -- new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); -- new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); -+ new_rx_count = min_t(u16, ring->rx_pending, (u32)IGB_MAX_RXD); -+ new_rx_count = max_t(u16, new_rx_count, (u16)IGB_MIN_RXD); - new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - -- new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); -- new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); -+ new_tx_count = min_t(u16, ring->tx_pending, (u32)IGB_MAX_TXD); -+ new_tx_count = max_t(u16, new_tx_count, (u16)IGB_MIN_TXD); - new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); - - if ((new_tx_count == adapter->tx_ring_count) && -@@ -898,11 +890,11 @@ - } - - if (adapter->num_tx_queues > adapter->num_rx_queues) -- temp_ring = vmalloc(adapter->num_tx_queues * -- sizeof(struct igb_ring)); -+ temp_ring = vmalloc(adapter->num_tx_queues -+ * sizeof(struct igb_ring)); - else -- temp_ring = vmalloc(adapter->num_rx_queues * -- sizeof(struct igb_ring)); -+ temp_ring = vmalloc(adapter->num_rx_queues -+ * sizeof(struct igb_ring)); - - if (!temp_ring) { - err = -ENOMEM; -@@ -911,9 +903,10 @@ - - igb_down(adapter); - -- /* We can't just free everything 
and then setup again, -+ /* -+ * We can't just free everything and then setup again, - * because the ISRs in MSI-X mode get passed pointers -- * to the Tx and Rx ring structs. -+ * to the tx and rx ring structs. - */ - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { -@@ -975,224 +968,6 @@ - return err; - } - --/* ethtool register test data */ --struct igb_reg_test { -- u16 reg; -- u16 reg_offset; -- u16 array_len; -- u16 test_type; -- u32 mask; -- u32 write; --}; -- --/* In the hardware, registers are laid out either singly, in arrays -- * spaced 0x100 bytes apart, or in contiguous tables. We assume -- * most tests take place on arrays or single registers (handled -- * as a single-element array) and special-case the tables. -- * Table tests are always pattern tests. -- * -- * We also make provision for some required setup steps by specifying -- * registers to be written without any read-back testing. -- */ -- --#define PATTERN_TEST 1 --#define SET_READ_TEST 2 --#define WRITE_NO_TEST 3 --#define TABLE32_TEST 4 --#define TABLE64_TEST_LO 5 --#define TABLE64_TEST_HI 6 -- --/* i210 reg test */ --static struct igb_reg_test reg_test_i210[] = { -- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- /* RDH is read-only for i210, only test RDT. */ -- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RA, 0, 16, TABLE64_TEST_LO, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA, 0, 16, TABLE64_TEST_HI, -- 0x900FFFFF, 0xFFFFFFFF }, -- { E1000_MTA, 0, 128, TABLE32_TEST, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { 0, 0, 0, 0, 0 } --}; -- --/* i350 reg test */ --static struct igb_reg_test reg_test_i350[] = { -- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, -- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- /* RDH is read-only for i350, only test RDT. 
*/ -- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RA, 0, 16, TABLE64_TEST_LO, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA, 0, 16, TABLE64_TEST_HI, -- 0xC3FFFFFF, 0xFFFFFFFF }, -- { E1000_RA2, 0, 16, TABLE64_TEST_LO, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA2, 0, 16, TABLE64_TEST_HI, -- 0xC3FFFFFF, 0xFFFFFFFF }, -- { E1000_MTA, 0, 128, TABLE32_TEST, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { 0, 0, 0, 0 } --}; -- --/* 82580 reg test */ --static struct igb_reg_test reg_test_82580[] = { -- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- /* RDH is read-only for 82580, only test RDT. 
*/ -- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RA, 0, 16, TABLE64_TEST_LO, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA, 0, 16, TABLE64_TEST_HI, -- 0x83FFFFFF, 0xFFFFFFFF }, -- { E1000_RA2, 0, 8, TABLE64_TEST_LO, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA2, 0, 8, TABLE64_TEST_HI, -- 0x83FFFFFF, 0xFFFFFFFF }, -- { E1000_MTA, 0, 128, TABLE32_TEST, -- 0xFFFFFFFF, 0xFFFFFFFF }, -- { 0, 0, 0, 0 } --}; -- --/* 82576 reg test */ --static struct igb_reg_test reg_test_82576[] = { -- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- /* Enable all RX queues before testing. */ -- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, -- E1000_RXDCTL_QUEUE_ENABLE }, -- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, -- E1000_RXDCTL_QUEUE_ENABLE }, -- /* RDH is read-only for 82576, only test RDT. 
*/ -- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, -- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, -- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, -- { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, -- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { 0, 0, 0, 0 } --}; -- --/* 82575 register test */ --static struct igb_reg_test reg_test_82575[] = { -- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- /* Enable all four RX queues before testing. */ -- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, -- E1000_RXDCTL_QUEUE_ENABLE }, -- /* RDH is read-only for 82575, only test RDT. 
*/ -- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, -- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, -- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, -- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, -- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, -- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, -- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -- { 0, 0, 0, 0 } --}; -- - static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, - int reg, u32 mask, u32 write) - { -@@ -1201,13 +976,14 @@ - static const u32 _test[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; - for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { -- wr32(reg, (_test[pat] & write)); -- val = rd32(reg) & mask; -+ E1000_WRITE_REG(hw, reg, (_test[pat] & write)); -+ val = E1000_READ_REG(hw, reg) & mask; - if (val != (_test[pat] & write & mask)) { -- dev_err(&adapter->pdev->dev, -+ dev_err(pci_dev_to_dev(adapter->pdev), - "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", -- reg, val, (_test[pat] & write & mask)); -- *data = reg; -+ E1000_REGISTER(hw, reg), val, (_test[pat] -+ & write & mask)); -+ *data = E1000_REGISTER(hw, reg); - return true; - } - } -@@ -1220,14 +996,13 @@ - { - struct e1000_hw *hw = &adapter->hw; - u32 val; -- -- wr32(reg, write & mask); -- val = rd32(reg); -+ E1000_WRITE_REG(hw, reg, write & mask); -+ val = E1000_READ_REG(hw, reg); - if ((write & mask) != (val & mask)) { -- dev_err(&adapter->pdev->dev, -- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", -+ dev_err(pci_dev_to_dev(adapter->pdev), -+ "set/check reg %04X test failed:got 0x%08X expected 0x%08X\n", - reg, (val & mask), (write & mask)); -- *data = reg; -+ *data = E1000_REGISTER(hw, reg); - return true; - } - -@@ -1283,19 +1058,19 @@ - * tests. Some bits are read-only, some toggle, and some - * are writable on newer MACs. - */ -- before = rd32(E1000_STATUS); -- value = (rd32(E1000_STATUS) & toggle); -- wr32(E1000_STATUS, toggle); -- after = rd32(E1000_STATUS) & toggle; -+ before = E1000_READ_REG(hw, E1000_STATUS); -+ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle); -+ E1000_WRITE_REG(hw, E1000_STATUS, toggle); -+ after = E1000_READ_REG(hw, E1000_STATUS) & toggle; - if (value != after) { -- dev_err(&adapter->pdev->dev, -+ dev_err(pci_dev_to_dev(adapter->pdev), - "failed STATUS register test got: 0x%08X expected: 0x%08X\n", - after, value); - *data = 1; - return 1; - } - /* restore previous status */ -- wr32(E1000_STATUS, before); -+ E1000_WRITE_REG(hw, E1000_STATUS, before); - - /* Perform the remainder of the register test, looping through - * the test table until we either fail or reach the null entry. 
-@@ -1317,7 +1092,7 @@ - break; - case WRITE_NO_TEST: - writel(test->write, -- (adapter->hw.hw_addr + test->reg) -+ (adapter->hw.hw_addr + test->reg) - + (i * test->reg_offset)); - break; - case TABLE32_TEST: -@@ -1346,24 +1121,11 @@ - - static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) - { -- struct e1000_hw *hw = &adapter->hw; -- - *data = 0; - -- /* Validate eeprom on all parts but flashless */ -- switch (hw->mac.type) { -- case e1000_i210: -- case e1000_i211: -- if (igb_get_flash_presence_i210(hw)) { -- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) -- *data = 2; -- } -- break; -- default: -- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) -- *data = 2; -- break; -- } -+ /* Validate NVM checksum */ -+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) -+ *data = 2; - - return *data; - } -@@ -1373,7 +1135,7 @@ - struct igb_adapter *adapter = (struct igb_adapter *) data; - struct e1000_hw *hw = &adapter->hw; - -- adapter->test_icr |= rd32(E1000_ICR); -+ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR); - - return IRQ_HANDLED; - } -@@ -1382,20 +1144,20 @@ - { - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; -- u32 mask, ics_mask, i = 0, shared_int = true; -+ u32 mask, ics_mask, i = 0, shared_int = TRUE; - u32 irq = adapter->pdev->irq; - - *data = 0; - - /* Hook up test interrupt handler just for this test */ -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -+ if (adapter->msix_entries) { - if (request_irq(adapter->msix_entries[0].vector, -- igb_test_intr, 0, netdev->name, adapter)) { -+ &igb_test_intr, 0, netdev->name, adapter)) { - *data = 1; - return -1; - } - } else if (adapter->flags & IGB_FLAG_HAS_MSI) { -- shared_int = false; -+ shared_int = FALSE; - if (request_irq(irq, - igb_test_intr, 0, netdev->name, adapter)) { - *data = 1; -@@ -1403,19 +1165,19 @@ - } - } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, - netdev->name, adapter)) { -- shared_int = false; -- } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, -+ shared_int = FALSE; -+ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, - netdev->name, adapter)) { - *data = 1; - return -1; - } -- dev_info(&adapter->pdev->dev, "testing %s interrupt\n", -- (shared_int ? "shared" : "unshared")); -+ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n", -+ (shared_int ? 
"shared" : "unshared")); - - /* Disable all the interrupts */ -- wr32(E1000_IMC, ~0); -- wrfl(); -- usleep_range(10000, 11000); -+ E1000_WRITE_REG(hw, E1000_IMC, ~0); -+ E1000_WRITE_FLUSH(hw); -+ usleep_range(10000, 20000); - - /* Define all writable bits for ICS */ - switch (hw->mac.type) { -@@ -1430,9 +1192,11 @@ - break; - case e1000_i350: - case e1000_i354: -+ ics_mask = 0x77DCFED5; -+ break; - case e1000_i210: - case e1000_i211: -- ics_mask = 0x77DCFED5; -+ ics_mask = 0x774CFED5; - break; - default: - ics_mask = 0x7FFFFFFF; -@@ -1457,12 +1221,12 @@ - adapter->test_icr = 0; - - /* Flush any pending interrupts */ -- wr32(E1000_ICR, ~0); -+ E1000_WRITE_REG(hw, E1000_ICR, ~0); - -- wr32(E1000_IMC, mask); -- wr32(E1000_ICS, mask); -- wrfl(); -- usleep_range(10000, 11000); -+ E1000_WRITE_REG(hw, E1000_IMC, mask); -+ E1000_WRITE_REG(hw, E1000_ICS, mask); -+ E1000_WRITE_FLUSH(hw); -+ usleep_range(10000, 20000); - - if (adapter->test_icr & mask) { - *data = 3; -@@ -1479,12 +1243,12 @@ - adapter->test_icr = 0; - - /* Flush any pending interrupts */ -- wr32(E1000_ICR, ~0); -+ E1000_WRITE_REG(hw, E1000_ICR, ~0); - -- wr32(E1000_IMS, mask); -- wr32(E1000_ICS, mask); -- wrfl(); -- usleep_range(10000, 11000); -+ E1000_WRITE_REG(hw, E1000_IMS, mask); -+ E1000_WRITE_REG(hw, E1000_ICS, mask); -+ E1000_WRITE_FLUSH(hw); -+ usleep_range(10000, 20000); - - if (!(adapter->test_icr & mask)) { - *data = 4; -@@ -1501,12 +1265,12 @@ - adapter->test_icr = 0; - - /* Flush any pending interrupts */ -- wr32(E1000_ICR, ~0); -+ E1000_WRITE_REG(hw, E1000_ICR, ~0); - -- wr32(E1000_IMC, ~mask); -- wr32(E1000_ICS, ~mask); -- wrfl(); -- usleep_range(10000, 11000); -+ E1000_WRITE_REG(hw, E1000_IMC, ~mask); -+ E1000_WRITE_REG(hw, E1000_ICS, ~mask); -+ E1000_WRITE_FLUSH(hw); -+ usleep_range(10000, 20000); - - if (adapter->test_icr & mask) { - *data = 5; -@@ -1516,12 +1280,12 @@ - } - - /* Disable all the interrupts */ -- wr32(E1000_IMC, ~0); -- wrfl(); -- usleep_range(10000, 11000); -+ E1000_WRITE_REG(hw, E1000_IMC, ~0); -+ E1000_WRITE_FLUSH(hw); -+ usleep_range(10000, 20000); - - /* Unhook test interrupt handler */ -- if (adapter->flags & IGB_FLAG_HAS_MSIX) -+ if (adapter->msix_entries) - free_irq(adapter->msix_entries[0].vector, adapter); - else - free_irq(irq, adapter); -@@ -1544,7 +1308,7 @@ - - /* Setup Tx descriptor ring and Tx buffers */ - tx_ring->count = IGB_DEFAULT_TXD; -- tx_ring->dev = &adapter->pdev->dev; -+ tx_ring->dev = pci_dev_to_dev(adapter->pdev); - tx_ring->netdev = adapter->netdev; - tx_ring->reg_idx = adapter->vfs_allocated_count; - -@@ -1558,17 +1322,20 @@ - - /* Setup Rx descriptor ring and Rx buffers */ - rx_ring->count = IGB_DEFAULT_RXD; -- rx_ring->dev = &adapter->pdev->dev; -+ rx_ring->dev = pci_dev_to_dev(adapter->pdev); - rx_ring->netdev = adapter->netdev; -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN; -+#endif - rx_ring->reg_idx = adapter->vfs_allocated_count; - - if (igb_setup_rx_resources(rx_ring)) { -- ret_val = 3; -+ ret_val = 2; - goto err_nomem; - } - - /* set the default queue to queue 0 of PF */ -- wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); -+ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3); - - /* enable receive ring */ - igb_setup_rctl(adapter); -@@ -1588,10 +1355,10 @@ - struct e1000_hw *hw = &adapter->hw; - - /* Write out to PHY registers 29 and 30 to disable the Receiver. 
*/ -- igb_write_phy_reg(hw, 29, 0x001F); -- igb_write_phy_reg(hw, 30, 0x8FFC); -- igb_write_phy_reg(hw, 29, 0x001A); -- igb_write_phy_reg(hw, 30, 0x8FF0); -+ igb_e1000_write_phy_reg(hw, 29, 0x001F); -+ igb_e1000_write_phy_reg(hw, 30, 0x8FFC); -+ igb_e1000_write_phy_reg(hw, 29, 0x001A); -+ igb_e1000_write_phy_reg(hw, 30, 0x8FF0); - } - - static int igb_integrated_phy_loopback(struct igb_adapter *adapter) -@@ -1599,34 +1366,32 @@ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_reg = 0; - -- hw->mac.autoneg = false; -+ hw->mac.autoneg = FALSE; - - if (hw->phy.type == e1000_phy_m88) { - if (hw->phy.id != I210_I_PHY_ID) { - /* Auto-MDI/MDIX Off */ -- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); -+ igb_e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ -- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); -+ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ -- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); -+ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140); - } else { - /* force 1000, set loopback */ -- igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); -- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); -+ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); -+ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); - } -- } else if (hw->phy.type == e1000_phy_82580) { -+ } else { - /* enable MII loopback */ -- igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); -+ if (hw->phy.type == e1000_phy_82580) -+ igb_e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041); - } - -- /* add small delay to avoid loopback test failure */ -- msleep(50); -- -- /* force 1000, set loopback */ -- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); -+ /* force 1000, set loopback */ -+ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140); - - /* Now set up the MAC to the same speed/duplex as the PHY. */ -- ctrl_reg = rd32(E1000_CTRL); -+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); - ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ - ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ -@@ -1637,7 +1402,7 @@ - if (hw->phy.type == e1000_phy_m88) - ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - -- wr32(E1000_CTRL, ctrl_reg); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); - - /* Disable the receiver on the PHY so when a cable is plugged in, the - * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
-@@ -1659,64 +1424,64 @@ - struct e1000_hw *hw = &adapter->hw; - u32 reg; - -- reg = rd32(E1000_CTRL_EXT); -+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); - - /* use CTRL_EXT to identify link type as SGMII can appear as copper */ - if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { - if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || -- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || -- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || -- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || -- (hw->device_id == E1000_DEV_ID_I354_SGMII) || -- (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { -+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || -+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || -+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || -+ (hw->device_id == E1000_DEV_ID_I354_SGMII) || -+ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { - /* Enable DH89xxCC MPHY for near end loopback */ -- reg = rd32(E1000_MPHY_ADDR_CTL); -+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); - reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | -- E1000_MPHY_PCS_CLK_REG_OFFSET; -- wr32(E1000_MPHY_ADDR_CTL, reg); -+ E1000_MPHY_PCS_CLK_REG_OFFSET; -+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); - -- reg = rd32(E1000_MPHY_DATA); -+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA); - reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; -- wr32(E1000_MPHY_DATA, reg); -+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); - } - -- reg = rd32(E1000_RCTL); -+ reg = E1000_READ_REG(hw, E1000_RCTL); - reg |= E1000_RCTL_LBM_TCVR; -- wr32(E1000_RCTL, reg); -+ E1000_WRITE_REG(hw, E1000_RCTL, reg); - -- wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); -+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); - -- reg = rd32(E1000_CTRL); -+ reg = E1000_READ_REG(hw, E1000_CTRL); - reg &= ~(E1000_CTRL_RFCE | - E1000_CTRL_TFCE | - E1000_CTRL_LRST); - reg |= E1000_CTRL_SLU | - E1000_CTRL_FD; -- wr32(E1000_CTRL, reg); -+ E1000_WRITE_REG(hw, E1000_CTRL, reg); - - /* Unset switch control to serdes energy detect */ -- reg = rd32(E1000_CONNSW); -+ reg = E1000_READ_REG(hw, E1000_CONNSW); - reg &= ~E1000_CONNSW_ENRGSRC; -- wr32(E1000_CONNSW, reg); -+ E1000_WRITE_REG(hw, E1000_CONNSW, reg); - - /* Unset sigdetect for SERDES loopback on -- * 82580 and newer devices. 
-+ * 82580 and newer devices - */ - if (hw->mac.type >= e1000_82580) { -- reg = rd32(E1000_PCS_CFG0); -+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); - reg |= E1000_PCS_CFG_IGN_SD; -- wr32(E1000_PCS_CFG0, reg); -+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); - } - - /* Set PCS register for forced speed */ -- reg = rd32(E1000_PCS_LCTL); -+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL); - reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ - reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ - E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ - E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ - E1000_PCS_LCTL_FSD | /* Force Speed */ - E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ -- wr32(E1000_PCS_LCTL, reg); -+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); - - return 0; - } -@@ -1731,36 +1496,37 @@ - u16 phy_reg; - - if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || -- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || -- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || -- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || -- (hw->device_id == E1000_DEV_ID_I354_SGMII)) { -+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || -+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || -+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || -+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) { - u32 reg; - - /* Disable near end loopback on DH89xxCC */ -- reg = rd32(E1000_MPHY_ADDR_CTL); -+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL); - reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | -- E1000_MPHY_PCS_CLK_REG_OFFSET; -- wr32(E1000_MPHY_ADDR_CTL, reg); -+ E1000_MPHY_PCS_CLK_REG_OFFSET; -+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg); - -- reg = rd32(E1000_MPHY_DATA); -+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA); - reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; -- wr32(E1000_MPHY_DATA, reg); -+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg); - } - -- rctl = rd32(E1000_RCTL); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); -- wr32(E1000_RCTL, rctl); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); - -- hw->mac.autoneg = true; -- igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); -+ hw->mac.autoneg = TRUE; -+ igb_e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg); - if (phy_reg & MII_CR_LOOPBACK) { - phy_reg &= ~MII_CR_LOOPBACK; -- igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); -- igb_phy_sw_reset(hw); -+ if (hw->phy.type == I210_I_PHY_ID) -+ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); -+ igb_e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg); -+ e1000_phy_commit(hw); - } - } -- - static void igb_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) - { -@@ -1779,19 +1545,25 @@ - - frame_size >>= 1; - -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ data = rx_buffer->skb->data; -+#else - data = kmap(rx_buffer->page); -+#endif - - if (data[3] != 0xFF || - data[frame_size + 10] != 0xBE || - data[frame_size + 12] != 0xAF) - match = false; - -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - kunmap(rx_buffer->page); - -+#endif - return match; - } - --static int igb_clean_test_rings(struct igb_ring *rx_ring, -+static u16 igb_clean_test_rings(struct igb_ring *rx_ring, - struct igb_ring *tx_ring, - unsigned int size) - { -@@ -1806,13 +1578,17 @@ - rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); - - while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { -- /* check Rx buffer */ -+ /* check rx buffer */ - rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; - - /* sync Rx buffer for CPU read */ - dma_sync_single_for_cpu(rx_ring->dev, - rx_buffer_info->dma, -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ 
IGB_RX_HDR_LEN, -+#else - IGB_RX_BUFSZ, -+#endif - DMA_FROM_DEVICE); - - /* verify contents of skb */ -@@ -1822,14 +1598,18 @@ - /* sync Rx buffer for device write */ - dma_sync_single_for_device(rx_ring->dev, - rx_buffer_info->dma, -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ IGB_RX_HDR_LEN, -+#else - IGB_RX_BUFSZ, -+#endif - DMA_FROM_DEVICE); - -- /* unmap buffer on Tx side */ -+ /* unmap buffer on tx side */ - tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; - igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - -- /* increment Rx/Tx next to clean counters */ -+ /* increment rx/tx next to clean counters */ - rx_ntc++; - if (rx_ntc == rx_ring->count) - rx_ntc = 0; -@@ -1841,8 +1621,6 @@ - rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); - } - -- netdev_tx_reset_queue(txring_txq(tx_ring)); -- - /* re-map buffers to ring, store next to clean values */ - igb_alloc_rx_buffers(rx_ring, count); - rx_ring->next_to_clean = rx_ntc; -@@ -1870,7 +1648,8 @@ - igb_create_lbtest_frame(skb, size); - skb_put(skb, size); - -- /* Calculate the loop count based on the largest descriptor ring -+ /* -+ * Calculate the loop count based on the largest descriptor ring - * The idea is to wrap the largest ring a number of times using 64 - * send/receive pairs during each loop - */ -@@ -1897,7 +1676,7 @@ - break; - } - -- /* allow 200 milliseconds for packets to go from Tx to Rx */ -+ /* allow 200 milliseconds for packets to go from tx to rx */ - msleep(200); - - good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); -@@ -1916,21 +1695,14 @@ - static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) - { - /* PHY loopback cannot be performed if SoL/IDER -- * sessions are active -- */ -- if (igb_check_reset_block(&adapter->hw)) { -- dev_err(&adapter->pdev->dev, -+ * sessions are active */ -+ if (e1000_check_reset_block(&adapter->hw)) { -+ dev_err(pci_dev_to_dev(adapter->pdev), - "Cannot do PHY loopback test when SoL/IDER is active.\n"); - *data = 0; - goto out; - } - -- if (adapter->hw.mac.type == e1000_i354) { -- dev_info(&adapter->pdev->dev, -- "Loopback test not supported on i354.\n"); -- *data = 0; -- goto out; -- } - *data = igb_setup_desc_rings(adapter); - if (*data) - goto out; -@@ -1938,6 +1710,7 @@ - if (*data) - goto err_loopback; - *data = igb_run_loopback_test(adapter); -+ - igb_loopback_cleanup(adapter); - - err_loopback: -@@ -1948,32 +1721,39 @@ - - static int igb_link_test(struct igb_adapter *adapter, u64 *data) - { -- struct e1000_hw *hw = &adapter->hw; -+ u32 link; -+ int i, time; -+ - *data = 0; -- if (hw->phy.media_type == e1000_media_type_internal_serdes) { -+ time = 0; -+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { - int i = 0; -- -- hw->mac.serdes_has_link = false; -+ adapter->hw.mac.serdes_has_link = FALSE; - - /* On some blade server designs, link establishment -- * could take as long as 2-3 minutes -- */ -+ * could take as long as 2-3 minutes */ - do { -- hw->mac.ops.check_for_link(&adapter->hw); -- if (hw->mac.serdes_has_link) -- return *data; -+ igb_e1000_check_for_link(&adapter->hw); -+ if (adapter->hw.mac.serdes_has_link) -+ goto out; - msleep(20); - } while (i++ < 3750); - - *data = 1; - } else { -- hw->mac.ops.check_for_link(&adapter->hw); -- if (hw->mac.autoneg) -- msleep(5000); -- -- if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) -+ for (i = 0; i < IGB_MAX_LINK_TRIES; i++) { -+ link = igb_has_link(adapter); -+ if (link) { -+ goto out; -+ } else { -+ time++; -+ msleep(1000); -+ } -+ } -+ if (!link) - *data = 1; - } -+out: - return *data; - } - -@@ 
-1986,10 +1766,6 @@ - bool if_running = netif_running(netdev); - - set_bit(__IGB_TESTING, &adapter->state); -- -- /* can't do offline tests on media switching devices */ -- if (adapter->hw.dev_spec._82575.mas_capable) -- eth_test->flags &= ~ETH_TEST_FL_OFFLINE; - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - -@@ -1998,20 +1774,19 @@ - forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; - autoneg = adapter->hw.mac.autoneg; - -- dev_info(&adapter->pdev->dev, "offline testing starting\n"); -+ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n"); - - /* power up link for link test */ - igb_power_up_link(adapter); - - /* Link test performed before hardware reset so autoneg doesn't -- * interfere with test result -- */ -+ * interfere with test result */ - if (igb_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (if_running) - /* indicate we're in test mode */ -- dev_close(netdev); -+ igb_close(netdev); - else - igb_reset(adapter); - -@@ -2027,8 +1802,10 @@ - eth_test->flags |= ETH_TEST_FL_FAILED; - - igb_reset(adapter); -+ - /* power up link for loopback test */ - igb_power_up_link(adapter); -+ - if (igb_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; - -@@ -2038,15 +1815,15 @@ - adapter->hw.mac.autoneg = autoneg; - - /* force this routine to wait until autoneg complete/timeout */ -- adapter->hw.phy.autoneg_wait_to_complete = true; -+ adapter->hw.phy.autoneg_wait_to_complete = TRUE; - igb_reset(adapter); -- adapter->hw.phy.autoneg_wait_to_complete = false; -+ adapter->hw.phy.autoneg_wait_to_complete = FALSE; - - clear_bit(__IGB_TESTING, &adapter->state); - if (if_running) -- dev_open(netdev); -+ igb_open(netdev); - } else { -- dev_info(&adapter->pdev->dev, "online testing starting\n"); -+ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n"); - - /* PHY is powered down when interface is down */ - if (if_running && igb_link_test(adapter, &data[4])) -@@ -2125,8 +1902,7 @@ - } - - /* bit defines for adapter->led_status */ --#define IGB_LED_ON 0 -- -+#ifdef HAVE_ETHTOOL_SET_PHYS_ID - static int igb_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) - { -@@ -2135,23 +1911,47 @@ - - switch (state) { - case ETHTOOL_ID_ACTIVE: -- igb_blink_led(hw); -+ e1000_blink_led(hw); - return 2; - case ETHTOOL_ID_ON: -- igb_blink_led(hw); -+ igb_e1000_led_on(hw); - break; - case ETHTOOL_ID_OFF: -- igb_led_off(hw); -+ igb_e1000_led_off(hw); - break; - case ETHTOOL_ID_INACTIVE: -- igb_led_off(hw); -- clear_bit(IGB_LED_ON, &adapter->led_status); -- igb_cleanup_led(hw); -+ igb_e1000_led_off(hw); -+ igb_e1000_cleanup_led(hw); - break; - } - - return 0; - } -+#else -+static int igb_phys_id(struct net_device *netdev, u32 data) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ struct e1000_hw *hw = &adapter->hw; -+ unsigned long timeout; -+ -+ timeout = data * 1000; -+ -+ /* -+ * msleep_interruptable only accepts unsigned int so we are limited -+ * in how long a duration we can wait -+ */ -+ if (!timeout || timeout > UINT_MAX) -+ timeout = UINT_MAX; -+ -+ e1000_blink_led(hw); -+ msleep_interruptible(timeout); -+ -+ igb_e1000_led_off(hw); -+ igb_e1000_cleanup_led(hw); -+ -+ return 0; -+} -+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ - - static int igb_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -@@ -2159,11 +1959,36 @@ - struct igb_adapter *adapter = netdev_priv(netdev); - int i; - -+ if (ec->rx_max_coalesced_frames || -+ ec->rx_coalesce_usecs_irq || 
-+ ec->rx_max_coalesced_frames_irq || -+ ec->tx_max_coalesced_frames || -+ ec->tx_coalesce_usecs_irq || -+ ec->stats_block_coalesce_usecs || -+ ec->use_adaptive_rx_coalesce || -+ ec->use_adaptive_tx_coalesce || -+ ec->pkt_rate_low || -+ ec->rx_coalesce_usecs_low || -+ ec->rx_max_coalesced_frames_low || -+ ec->tx_coalesce_usecs_low || -+ ec->tx_max_coalesced_frames_low || -+ ec->pkt_rate_high || -+ ec->rx_coalesce_usecs_high || -+ ec->rx_max_coalesced_frames_high || -+ ec->tx_coalesce_usecs_high || -+ ec->tx_max_coalesced_frames_high || -+ ec->rate_sample_interval) { -+ netdev_err(netdev, "set_coalesce: invalid parameter"); -+ return -ENOTSUPP; -+ } -+ - if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs > 3) && - (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || -- (ec->rx_coalesce_usecs == 2)) -+ (ec->rx_coalesce_usecs == 2)) { -+ netdev_err(netdev, "set_coalesce: invalid setting"); - return -EINVAL; -+ } - - if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || - ((ec->tx_coalesce_usecs > 3) && -@@ -2174,11 +1999,12 @@ - if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) - return -EINVAL; - -+ if (ec->tx_max_coalesced_frames_irq) -+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; -+ - /* If ITR is disabled, disable DMAC */ -- if (ec->rx_coalesce_usecs == 0) { -- if (adapter->flags & IGB_FLAG_DMAC) -- adapter->flags &= ~IGB_FLAG_DMAC; -- } -+ if (ec->rx_coalesce_usecs == 0) -+ adapter->dmac = IGB_DMAC_DISABLE; - - /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) -@@ -2219,6 +2045,8 @@ - else - ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - -+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; -+ - if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { - if (adapter->tx_itr_setting <= 3) - ec->tx_coalesce_usecs = adapter->tx_itr_setting; -@@ -2237,6 +2065,7 @@ - return 0; - } - -+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT - static int igb_get_sset_count(struct net_device *netdev, int sset) - { - switch (sset) { -@@ -2248,19 +2077,32 @@ - return -ENOTSUPP; - } - } -+#else -+static int igb_get_stats_count(struct net_device *netdev) -+{ -+ return IGB_STATS_LEN; -+} -+ -+static int igb_diag_test_count(struct net_device *netdev) -+{ -+ return IGB_TEST_LEN; -+} -+#endif - - static void igb_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) - { - struct igb_adapter *adapter = netdev_priv(netdev); -- struct rtnl_link_stats64 *net_stats = &adapter->stats64; -- unsigned int start; -- struct igb_ring *ring; -- int i, j; -+#ifdef HAVE_NETDEV_STATS_IN_NETDEV -+ struct net_device_stats *net_stats = &netdev->stats; -+#else -+ struct net_device_stats *net_stats = &adapter->net_stats; -+#endif -+ u64 *queue_stat; -+ int i, j, k; - char *p; - -- spin_lock(&adapter->stats64_lock); -- igb_update_stats(adapter, net_stats); -+ igb_update_stats(adapter); - - for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + igb_gstrings_stats[i].stat_offset; -@@ -2273,36 +2115,15 @@ - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } - for (j = 0; j < adapter->num_tx_queues; j++) { -- u64 restart2; -- -- ring = adapter->tx_ring[j]; -- do { -- start = u64_stats_fetch_begin_irq(&ring->tx_syncp); -- data[i] = ring->tx_stats.packets; -- data[i+1] = ring->tx_stats.bytes; -- data[i+2] = ring->tx_stats.restart_queue; -- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); -- do { -- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); -- restart2 = ring->tx_stats.restart_queue2; -- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); -- data[i+2] += restart2; -- -- i += IGB_TX_QUEUE_STATS_LEN; -+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; -+ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) -+ data[i] = queue_stat[k]; - } - for (j = 0; j < adapter->num_rx_queues; j++) { -- ring = adapter->rx_ring[j]; -- do { -- start = u64_stats_fetch_begin_irq(&ring->rx_syncp); -- data[i] = ring->rx_stats.packets; -- data[i+1] = ring->rx_stats.bytes; -- data[i+2] = ring->rx_stats.drops; -- data[i+3] = ring->rx_stats.csum_err; -- data[i+4] = ring->rx_stats.alloc_failed; -- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); -- i += IGB_RX_QUEUE_STATS_LEN; -+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; -+ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) -+ data[i] = queue_stat[k]; - } -- spin_unlock(&adapter->stats64_lock); - } - - static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -@@ -2347,22 +2168,19 @@ - sprintf(p, "rx_queue_%u_alloc_failed", i); - p += ETH_GSTRING_LEN; - } -- /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ -+/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ - break; - } - } - -+#ifdef HAVE_ETHTOOL_GET_TS_INFO - static int igb_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) - { - struct igb_adapter *adapter = netdev_priv(dev); - -- if (adapter->ptp_clock) -- info->phc_index = ptp_clock_index(adapter->ptp_clock); -- else -- info->phc_index = -1; -- - switch (adapter->hw.mac.type) { -+#ifdef HAVE_PTP_1588_CLOCK - case e1000_82575: - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | -@@ -2383,6 +2201,11 @@ - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - -+ if (adapter->ptp_clock) -+ info->phc_index = ptp_clock_index(adapter->ptp_clock); -+ else -+ info->phc_index = -1; -+ - info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); -@@ -2396,201 +2219,217 @@ - info->rx_filters |= - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | -- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | -- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | -- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | -- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); - - return 0; -+#endif /* HAVE_PTP_1588_CLOCK */ - default: - return -EOPNOTSUPP; - } - } -+#endif /* HAVE_ETHTOOL_GET_TS_INFO */ - --static int igb_get_rss_hash_opts(struct igb_adapter *adapter, -- struct ethtool_rxnfc *cmd) -+#ifdef CONFIG_PM_RUNTIME -+static int igb_ethtool_begin(struct net_device *netdev) - { -- cmd->data = 0; -+ struct igb_adapter *adapter = netdev_priv(netdev); - -- /* Report default options for RSS on igb */ -- switch (cmd->flow_type) { -- case TCP_V4_FLOW: -- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -- /* Fall through */ -- case UDP_V4_FLOW: -- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) -- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -- /* Fall through */ -- case SCTP_V4_FLOW: -- case AH_ESP_V4_FLOW: -- case AH_V4_FLOW: -- case ESP_V4_FLOW: -- 
case IPV4_FLOW: -- cmd->data |= RXH_IP_SRC | RXH_IP_DST; -- break; -- case TCP_V6_FLOW: -- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -- /* Fall through */ -- case UDP_V6_FLOW: -- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) -- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -- /* Fall through */ -- case SCTP_V6_FLOW: -- case AH_ESP_V6_FLOW: -- case AH_V6_FLOW: -- case ESP_V6_FLOW: -- case IPV6_FLOW: -- cmd->data |= RXH_IP_SRC | RXH_IP_DST; -- break; -- default: -- return -EINVAL; -- } -+ pm_runtime_get_sync(&adapter->pdev->dev); - - return 0; - } - --static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, -- u32 *rule_locs) -+static void igb_ethtool_complete(struct net_device *netdev) - { -- struct igb_adapter *adapter = netdev_priv(dev); -- int ret = -EOPNOTSUPP; -+ struct igb_adapter *adapter = netdev_priv(netdev); - -- switch (cmd->cmd) { -- case ETHTOOL_GRXRINGS: -- cmd->data = adapter->num_rx_queues; -- ret = 0; -- break; -- case ETHTOOL_GRXFH: -- ret = igb_get_rss_hash_opts(adapter, cmd); -- break; -- default: -- break; -- } -+ pm_runtime_put(&adapter->pdev->dev); -+} -+#endif /* CONFIG_PM_RUNTIME */ - -- return ret; -+#ifndef HAVE_NDO_SET_FEATURES -+static u32 igb_get_rx_csum(struct net_device *netdev) -+{ -+ return !!(netdev->features & NETIF_F_RXCSUM); - } - --#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ -- IGB_FLAG_RSS_FIELD_IPV6_UDP) --static int igb_set_rss_hash_opt(struct igb_adapter *adapter, -- struct ethtool_rxnfc *nfc) -+static int igb_set_rx_csum(struct net_device *netdev, u32 data) - { -- u32 flags = adapter->flags; -+ const u32 feature_list = NETIF_F_RXCSUM; - -- /* RSS does not support anything other than hashing -- * to queues on src and dst IPs and ports -+ if (data) -+ netdev->features |= feature_list; -+ else -+ netdev->features &= ~feature_list; -+ -+ return 0; -+} -+ -+static int igb_set_tx_csum(struct net_device *netdev, u32 data) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+#ifdef NETIF_F_IPV6_CSUM -+ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; -+#else -+ u32 feature_list = NETIF_F_IP_CSUM; -+#endif -+ -+ if (adapter->hw.mac.type >= e1000_82576) -+ feature_list |= NETIF_F_SCTP_CSUM; -+ -+ if (data) -+ netdev->features |= feature_list; -+ else -+ netdev->features &= ~feature_list; -+ -+ return 0; -+} -+ -+#ifdef NETIF_F_TSO -+static int igb_set_tso(struct net_device *netdev, u32 data) -+{ -+#ifdef NETIF_F_TSO6 -+ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; -+#else -+ const u32 feature_list = NETIF_F_TSO; -+#endif -+ -+ if (data) -+ netdev->features |= feature_list; -+ else -+ netdev->features &= ~feature_list; -+ -+#ifndef HAVE_NETDEV_VLAN_FEATURES -+ if (!data) { -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ struct net_device *v_netdev; -+ int i; -+ -+ /* disable TSO on all VLANs if they're present */ -+ if (!adapter->vlgrp) -+ goto tso_out; -+ -+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { -+ v_netdev = vlan_group_get_device(adapter->vlgrp, i); -+ if (!v_netdev) -+ continue; -+ -+ v_netdev->features &= ~feature_list; -+ vlan_group_set_device(adapter->vlgrp, i, v_netdev); -+ } -+ } -+ -+tso_out: -+ -+#endif /* HAVE_NETDEV_VLAN_FEATURES */ -+ return 0; -+} -+ -+#endif /* NETIF_F_TSO */ -+#ifdef ETHTOOL_GFLAGS -+static int igb_set_flags(struct net_device *netdev, u32 data) -+{ -+ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | -+ ETH_FLAG_RXHASH; -+#ifndef HAVE_VLAN_RX_REGISTER -+ u32 changed = netdev->features ^ data; -+#endif -+ int rc; -+#ifndef IGB_NO_LRO -+ -+ 
supported_flags |= ETH_FLAG_LRO; -+#endif -+ /* -+ * Since there is no support for separate tx vlan accel -+ * enabled make sure tx flag is cleared if rx is. - */ -- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | -- RXH_L4_B_0_1 | RXH_L4_B_2_3)) -- return -EINVAL; -+ if (!(data & ETH_FLAG_RXVLAN)) -+ data &= ~ETH_FLAG_TXVLAN; - -- switch (nfc->flow_type) { -- case TCP_V4_FLOW: -- case TCP_V6_FLOW: -- if (!(nfc->data & RXH_IP_SRC) || -- !(nfc->data & RXH_IP_DST) || -- !(nfc->data & RXH_L4_B_0_1) || -- !(nfc->data & RXH_L4_B_2_3)) -- return -EINVAL; -- break; -- case UDP_V4_FLOW: -- if (!(nfc->data & RXH_IP_SRC) || -- !(nfc->data & RXH_IP_DST)) -- return -EINVAL; -- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { -- case 0: -- flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; -- break; -- case (RXH_L4_B_0_1 | RXH_L4_B_2_3): -- flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; -- break; -- default: -- return -EINVAL; -- } -- break; -- case UDP_V6_FLOW: -- if (!(nfc->data & RXH_IP_SRC) || -- !(nfc->data & RXH_IP_DST)) -- return -EINVAL; -- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { -- case 0: -- flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; -- break; -- case (RXH_L4_B_0_1 | RXH_L4_B_2_3): -- flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; -- break; -- default: -- return -EINVAL; -- } -- break; -- case AH_ESP_V4_FLOW: -- case AH_V4_FLOW: -- case ESP_V4_FLOW: -- case SCTP_V4_FLOW: -- case AH_ESP_V6_FLOW: -- case AH_V6_FLOW: -- case ESP_V6_FLOW: -- case SCTP_V6_FLOW: -- if (!(nfc->data & RXH_IP_SRC) || -- !(nfc->data & RXH_IP_DST) || -- (nfc->data & RXH_L4_B_0_1) || -- (nfc->data & RXH_L4_B_2_3)) -- return -EINVAL; -- break; -- default: -- return -EINVAL; -- } -- -- /* if we changed something we need to update flags */ -- if (flags != adapter->flags) { -- struct e1000_hw *hw = &adapter->hw; -- u32 mrqc = rd32(E1000_MRQC); -- -- if ((flags & UDP_RSS_FLAGS) && -- !(adapter->flags & UDP_RSS_FLAGS)) -- dev_err(&adapter->pdev->dev, -- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); -- -- adapter->flags = flags; -- -- /* Perform hash on these packet types */ -- mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | -- E1000_MRQC_RSS_FIELD_IPV4_TCP | -- E1000_MRQC_RSS_FIELD_IPV6 | -- E1000_MRQC_RSS_FIELD_IPV6_TCP; -- -- mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | -- E1000_MRQC_RSS_FIELD_IPV6_UDP); -- -- if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) -- mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; -- -- if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) -- mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; -- -- wr32(E1000_MRQC, mrqc); -- } -+ rc = ethtool_op_set_flags(netdev, data, supported_flags); -+ if (rc) -+ return rc; -+#ifndef HAVE_VLAN_RX_REGISTER -+ -+ if (changed & ETH_FLAG_RXVLAN) -+ igb_vlan_mode(netdev, data); -+#endif - - return 0; - } - --static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -+#endif /* ETHTOOL_GFLAGS */ -+#endif /* HAVE_NDO_SET_FEATURES */ -+#ifdef ETHTOOL_SADV_COAL -+static int igb_set_adv_coal(struct net_device *netdev, -+ struct ethtool_value *edata) - { -- struct igb_adapter *adapter = netdev_priv(dev); -- int ret = -EOPNOTSUPP; -+ struct igb_adapter *adapter = netdev_priv(netdev); - -- switch (cmd->cmd) { -- case ETHTOOL_SRXFH: -- ret = igb_set_rss_hash_opt(adapter, cmd); -+ switch (edata->data) { -+ case IGB_DMAC_DISABLE: -+ adapter->dmac = edata->data; - break; -- default: -+ case IGB_DMAC_MIN: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_500: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_EN_DEFAULT: -+ adapter->dmac = edata->data; -+ break; -+ case 
IGB_DMAC_2000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_3000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_4000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_5000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_6000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_7000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_8000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_9000: -+ adapter->dmac = edata->data; -+ break; -+ case IGB_DMAC_MAX: -+ adapter->dmac = edata->data; - break; -+ default: -+ adapter->dmac = IGB_DMAC_DISABLE; -+ netdev_info(netdev, -+ "set_dmac: invalid setting, setting DMAC to %d\n", -+ adapter->dmac); - } -+ netdev_info(netdev, "%s: setting DMAC to %d\n", -+ netdev->name, adapter->dmac); -+ return 0; -+} - -- return ret; -+#endif /* ETHTOOL_SADV_COAL */ -+#ifdef ETHTOOL_GADV_COAL -+static void igb_get_dmac(struct net_device *netdev, -+ struct ethtool_value *edata) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ edata->data = adapter->dmac; -+ -+ return; - } -+#endif - -+#ifdef ETHTOOL_GEEE - static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) - { - struct igb_adapter *adapter = netdev_priv(netdev); -@@ -2604,17 +2443,18 @@ - - edata->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full); -+ - if (!hw->dev_spec._82575.eee_disable) - edata->advertised = - mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); - - /* The IPCNFG and EEER registers are not supported on I354. */ - if (hw->mac.type == e1000_i354) { -- igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); -+ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active); - } else { - u32 eeer; - -- eeer = rd32(E1000_EEER); -+ eeer = E1000_READ_REG(hw, E1000_EEER); - - /* EEE status on negotiated link */ - if (eeer & E1000_EEER_EEE_NEG) -@@ -2627,19 +2467,20 @@ - /* EEE Link Partner Advertised */ - switch (hw->mac.type) { - case e1000_i350: -- ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, -- &phy_data); -+ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, -+ &phy_data); - if (ret_val) - return -ENODATA; - - edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); -+ - break; - case e1000_i354: - case e1000_i210: - case e1000_i211: -- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, -- E1000_EEE_LP_ADV_DEV_I210, -- &phy_data); -+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, -+ E1000_EEE_LP_ADV_DEV_I210, -+ &phy_data); - if (ret_val) - return -ENODATA; - -@@ -2656,7 +2497,8 @@ - (edata->eee_enabled)) - edata->tx_lpi_enabled = true; - -- /* Report correct negotiated EEE status for devices that -+ /* -+ * report correct negotiated EEE status for devices that - * wrongly report EEE at half-duplex - */ - if (adapter->link_duplex == HALF_DUPLEX) { -@@ -2668,60 +2510,59 @@ - - return 0; - } -+#endif - -+#ifdef ETHTOOL_SEEE - static int igb_set_eee(struct net_device *netdev, - struct ethtool_eee *edata) - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct ethtool_eee eee_curr; -+ bool adv1g_eee = true, adv100m_eee = true; - s32 ret_val; - - if ((hw->mac.type < e1000_i350) || - (hw->phy.media_type != e1000_media_type_copper)) - return -EOPNOTSUPP; - -- memset(&eee_curr, 0, sizeof(struct ethtool_eee)); -- - ret_val = igb_get_eee(netdev, &eee_curr); - if (ret_val) - return ret_val; - - if (eee_curr.eee_enabled) { - if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { -- 
dev_err(&adapter->pdev->dev, -+ dev_err(pci_dev_to_dev(adapter->pdev), - "Setting EEE tx-lpi is not supported\n"); - return -EINVAL; - } - -- /* Tx LPI timer is not implemented currently */ -+ /* Tx LPI time is not implemented currently */ - if (edata->tx_lpi_timer) { -- dev_err(&adapter->pdev->dev, -+ dev_err(pci_dev_to_dev(adapter->pdev), - "Setting EEE Tx LPI timer is not supported\n"); - return -EINVAL; - } - -- if (edata->advertised & -- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { -- dev_err(&adapter->pdev->dev, -- "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); -+ if (!edata->advertised || (edata->advertised & -+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { -+ dev_err(pci_dev_to_dev(adapter->pdev), -+ "EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n"); - return -EINVAL; - } -+ adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); -+ adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); - - } else if (!edata->eee_enabled) { -- dev_err(&adapter->pdev->dev, -- "Setting EEE options are not supported with EEE disabled\n"); -+ dev_err(pci_dev_to_dev(adapter->pdev), -+ "Setting EEE options is not supported with EEE disabled\n"); - return -EINVAL; - } - - adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); -+ - if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { - hw->dev_spec._82575.eee_disable = !edata->eee_enabled; -- adapter->flags |= IGB_FLAG_EEE; -- if (hw->mac.type == e1000_i350) -- igb_set_eee_i350(hw); -- else -- igb_set_eee_i354(hw); - - /* reset link */ - if (netif_running(netdev)) -@@ -2730,109 +2571,232 @@ - igb_reset(adapter); - } - -+ if (hw->mac.type == e1000_i354) -+ ret_val = e1000_set_eee_i354(hw, adv1g_eee, adv100m_eee); -+ else -+ ret_val = e1000_set_eee_i350(hw, adv1g_eee, adv100m_eee); -+ -+ if (ret_val) { -+ dev_err(pci_dev_to_dev(adapter->pdev), -+ "Problem setting EEE advertisement options\n"); -+ return -EINVAL; -+ } -+ - return 0; - } -+#endif /* ETHTOOL_SEEE */ -+#ifdef ETHTOOL_GRXFH -+#ifdef ETHTOOL_GRXFHINDIR - --static int igb_get_module_info(struct net_device *netdev, -- struct ethtool_modinfo *modinfo) -+static int igb_get_rss_hash_opts(struct igb_adapter *adapter, -+ struct ethtool_rxnfc *cmd) - { -- struct igb_adapter *adapter = netdev_priv(netdev); -- struct e1000_hw *hw = &adapter->hw; -- u32 status = 0; -- u16 sff8472_rev, addr_mode; -- bool page_swap = false; -- -- if ((hw->phy.media_type == e1000_media_type_copper) || -- (hw->phy.media_type == e1000_media_type_unknown)) -- return -EOPNOTSUPP; -+ cmd->data = 0; - -- /* Check whether we support SFF-8472 or not */ -- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); -- if (status) -- return -EIO; -- -- /* addressing mode is not supported */ -- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); -- if (status) -- return -EIO; -- -- /* addressing mode is not supported */ -- if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { -- hw_dbg("Address change required to access page 0xA2, but not supported. 
Please report the module type to the driver maintainers.\n"); -- page_swap = true; -- } -- -- if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { -- /* We have an SFP, but it does not support SFF-8472 */ -- modinfo->type = ETH_MODULE_SFF_8079; -- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; -- } else { -- /* We have an SFP which supports a revision of SFF-8472 */ -- modinfo->type = ETH_MODULE_SFF_8472; -- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; -+ /* Report default options for RSS on igb */ -+ switch (cmd->flow_type) { -+ case TCP_V4_FLOW: -+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -+ /* Fall through */ -+ case UDP_V4_FLOW: -+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) -+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -+ /* Fall through */ -+ case SCTP_V4_FLOW: -+ case AH_ESP_V4_FLOW: -+ case AH_V4_FLOW: -+ case ESP_V4_FLOW: -+ case IPV4_FLOW: -+ cmd->data |= RXH_IP_SRC | RXH_IP_DST; -+ break; -+ case TCP_V6_FLOW: -+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -+ /* Fall through */ -+ case UDP_V6_FLOW: -+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) -+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -+ /* Fall through */ -+ case SCTP_V6_FLOW: -+ case AH_ESP_V6_FLOW: -+ case AH_V6_FLOW: -+ case ESP_V6_FLOW: -+ case IPV6_FLOW: -+ cmd->data |= RXH_IP_SRC | RXH_IP_DST; -+ break; -+ default: -+ return -EINVAL; - } - - return 0; - } - --static int igb_get_module_eeprom(struct net_device *netdev, -- struct ethtool_eeprom *ee, u8 *data) -+#endif /* ETHTOOL_GRXFHINDIR */ -+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, -+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS -+ void *rule_locs) -+#else -+ u32 *rule_locs) -+#endif - { -- struct igb_adapter *adapter = netdev_priv(netdev); -- struct e1000_hw *hw = &adapter->hw; -- u32 status = 0; -- u16 *dataword; -- u16 first_word, last_word; -- int i = 0; -+ struct igb_adapter *adapter = netdev_priv(dev); -+ int ret = -EOPNOTSUPP; - -- if (ee->len == 0) -- return -EINVAL; -+ switch (cmd->cmd) { -+ case ETHTOOL_GRXRINGS: -+ cmd->data = adapter->num_rx_queues; -+ ret = 0; -+ break; -+#ifdef ETHTOOL_GRXFHINDIR -+ case ETHTOOL_GRXFHINDIR: -+ ret = igb_get_rss_hash_opts(adapter, cmd); -+ break; -+#endif /* ETHTOOL_GRXFHINDIR */ -+ default: -+ break; -+ } - -- first_word = ee->offset >> 1; -- last_word = (ee->offset + ee->len - 1) >> 1; -+ return ret; -+} - -- dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1), -- GFP_KERNEL); -- if (!dataword) -- return -ENOMEM; -+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ -+ IGB_FLAG_RSS_FIELD_IPV6_UDP) -+static int igb_set_rss_hash_opt(struct igb_adapter *adapter, -+ struct ethtool_rxnfc *nfc) -+{ -+ u32 flags = adapter->flags; - -- /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ -- for (i = 0; i < last_word - first_word + 1; i++) { -- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); -- if (status) { -- /* Error occurred while reading module */ -- kfree(dataword); -- return -EIO; -- } -+ /* -+ * RSS does not support anything other than hashing -+ * to queues on src and dst IPs and ports -+ */ -+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | -+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) -+ return -EINVAL; - -- be16_to_cpus(&dataword[i]); -+ switch (nfc->flow_type) { -+ case TCP_V4_FLOW: -+ case TCP_V6_FLOW: -+ if (!(nfc->data & RXH_IP_SRC) || -+ !(nfc->data & RXH_IP_DST) || -+ !(nfc->data & RXH_L4_B_0_1) || -+ !(nfc->data & RXH_L4_B_2_3)) -+ return -EINVAL; -+ break; -+ case UDP_V4_FLOW: -+ if (!(nfc->data & RXH_IP_SRC) || -+ !(nfc->data & 
RXH_IP_DST)) -+ return -EINVAL; -+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { -+ case 0: -+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; -+ break; -+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3): -+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; -+ break; -+ default: -+ return -EINVAL; -+ } -+ break; -+ case UDP_V6_FLOW: -+ if (!(nfc->data & RXH_IP_SRC) || -+ !(nfc->data & RXH_IP_DST)) -+ return -EINVAL; -+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { -+ case 0: -+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; -+ break; -+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3): -+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; -+ break; -+ default: -+ return -EINVAL; -+ } -+ break; -+ case AH_ESP_V4_FLOW: -+ case AH_V4_FLOW: -+ case ESP_V4_FLOW: -+ case SCTP_V4_FLOW: -+ case AH_ESP_V6_FLOW: -+ case AH_V6_FLOW: -+ case ESP_V6_FLOW: -+ case SCTP_V6_FLOW: -+ if (!(nfc->data & RXH_IP_SRC) || -+ !(nfc->data & RXH_IP_DST) || -+ (nfc->data & RXH_L4_B_0_1) || -+ (nfc->data & RXH_L4_B_2_3)) -+ return -EINVAL; -+ break; -+ default: -+ return -EINVAL; - } - -- memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); -- kfree(dataword); -+ /* if we changed something we need to update flags */ -+ if (flags != adapter->flags) { -+ struct e1000_hw *hw = &adapter->hw; -+ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC); - -- return 0; --} -+ if ((flags & UDP_RSS_FLAGS) && -+ !(adapter->flags & UDP_RSS_FLAGS)) -+ DPRINTK(DRV, WARNING, -+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); -+ -+ adapter->flags = flags; -+ -+ /* Perform hash on these packet types */ -+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | -+ E1000_MRQC_RSS_FIELD_IPV4_TCP | -+ E1000_MRQC_RSS_FIELD_IPV6 | -+ E1000_MRQC_RSS_FIELD_IPV6_TCP; -+ -+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | -+ E1000_MRQC_RSS_FIELD_IPV6_UDP); -+ -+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) -+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; -+ -+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) -+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; -+ -+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc); -+ } - --static int igb_ethtool_begin(struct net_device *netdev) --{ -- struct igb_adapter *adapter = netdev_priv(netdev); -- pm_runtime_get_sync(&adapter->pdev->dev); - return 0; - } - --static void igb_ethtool_complete(struct net_device *netdev) -+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) - { -- struct igb_adapter *adapter = netdev_priv(netdev); -- pm_runtime_put(&adapter->pdev->dev); -+ struct igb_adapter *adapter = netdev_priv(dev); -+ int ret = -EOPNOTSUPP; -+ -+ switch (cmd->cmd) { -+ case ETHTOOL_SRXFH: -+ ret = igb_set_rss_hash_opt(adapter, cmd); -+ break; -+ default: -+ break; -+ } -+ -+ return ret; - } - -+#endif /* ETHTOOL_GRXFH */ -+#ifdef ETHTOOL_GRXFHINDIR -+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE - static u32 igb_get_rxfh_indir_size(struct net_device *netdev) - { - return IGB_RETA_SIZE; - } - -+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) -+#ifdef HAVE_RXFH_HASHFUNC -+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, -+ u8 *hfunc) -+#else - static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) -+#endif /* HAVE_RXFH_HASHFUNC */ -+#else -+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir) -+#endif /* HAVE_ETHTOOL_GSRSSH */ - { - struct igb_adapter *adapter = netdev_priv(netdev); - int i; -@@ -2843,6 +2807,22 @@ - return 0; - } - -+#else -+static int igb_get_rxfh_indir(struct net_device *netdev, -+ struct ethtool_rxfh_indir *indir) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ size_t copy_size = 
-+ min_t(size_t, indir->size, ARRAY_SIZE(adapter->rss_indir_tbl)); -+ -+ indir->size = ARRAY_SIZE(adapter->rss_indir_tbl); -+ memcpy(indir->ring_index, adapter->rss_indir_tbl, -+ copy_size * sizeof(indir->ring_index[0])); -+ return 0; -+} -+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ -+#endif /* ETHTOOL_GRXFHINDIR */ -+#ifdef ETHTOOL_SRXFHINDIR - void igb_write_rss_indir_tbl(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; -@@ -2872,14 +2852,24 @@ - val |= adapter->rss_indir_tbl[i + j]; - } - -- wr32(reg, val << shift); -+ E1000_WRITE_REG(hw, reg, val << shift); - reg += 4; - i += 4; - } - } - -+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE -+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) -+#ifdef HAVE_RXFH_HASHFUNC - static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, -- const u8 *key) -+ const u8 *key, const u8 hfunc) -+#else -+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, -+ const u8 *key) -+#endif /* HAVE_RXFH_HASHFUNC */ -+#else -+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) -+#endif /* HAVE_ETHTOOL_GSRSSH */ - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; -@@ -2911,135 +2901,314 @@ - - return 0; - } -+#else -+static int igb_set_rxfh_indir(struct net_device *netdev, -+ const struct ethtool_rxfh_indir *indir) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ size_t i; -+ -+ if (indir->size != ARRAY_SIZE(adapter->rss_indir_tbl)) -+ return -EINVAL; -+ for (i = 0; i < ARRAY_SIZE(adapter->rss_indir_tbl); i++) -+ if (indir->ring_index[i] >= adapter->rss_queues) -+ return -EINVAL; - --static unsigned int igb_max_channels(struct igb_adapter *adapter) -+ memcpy(adapter->rss_indir_tbl, indir->ring_index, -+ sizeof(adapter->rss_indir_tbl)); -+ igb_write_rss_indir_tbl(adapter); -+ return 0; -+} -+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ -+#endif /* ETHTOOL_SRXFHINDIR */ -+#ifdef ETHTOOL_GCHANNELS -+ -+static unsigned int igb_max_rss_queues(struct igb_adapter *adapter) - { -- struct e1000_hw *hw = &adapter->hw; -- unsigned int max_combined = 0; -+ unsigned int max_rss_queues; - -- switch (hw->mac.type) { -+ /* Determine the maximum number of RSS queues supported. 
*/ -+ switch (adapter->hw.mac.type) { - case e1000_i211: -- max_combined = IGB_MAX_RX_QUEUES_I211; -+ max_rss_queues = IGB_MAX_RX_QUEUES_I211; - break; - case e1000_82575: - case e1000_i210: -- max_combined = IGB_MAX_RX_QUEUES_82575; -+ max_rss_queues = IGB_MAX_RX_QUEUES_82575; - break; - case e1000_i350: -- if (!!adapter->vfs_allocated_count) { -- max_combined = 1; -+ /* I350 cannot do RSS and SR-IOV at the same time */ -+ if (adapter->vfs_allocated_count) { -+ max_rss_queues = 1; - break; - } - /* fall through */ - case e1000_82576: -- if (!!adapter->vfs_allocated_count) { -- max_combined = 2; -+ if (adapter->vfs_allocated_count) { -+ max_rss_queues = 2; - break; - } - /* fall through */ - case e1000_82580: -- case e1000_i354: - default: -- max_combined = IGB_MAX_RX_QUEUES; -+ max_rss_queues = IGB_MAX_RX_QUEUES; - break; - } - -- return max_combined; -+ return max_rss_queues; - } - --static void igb_get_channels(struct net_device *netdev, -+static void igb_get_channels(struct net_device *dev, - struct ethtool_channels *ch) - { -- struct igb_adapter *adapter = netdev_priv(netdev); -+ struct igb_adapter *adapter = netdev_priv(dev); - -- /* Report maximum channels */ -- ch->max_combined = igb_max_channels(adapter); -+ /* report maximum channels */ -+ ch->max_combined = igb_max_rss_queues(adapter); -+ ch->max_rx = ch->max_combined; -+ if (adapter->vfs_allocated_count) -+ ch->max_tx = 1; -+ else -+ ch->max_tx = ch->max_combined; - -- /* Report info for other vector */ -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -+ /* report info for other vector */ -+ if (adapter->msix_entries) { - ch->max_other = NON_Q_VECTORS; - ch->other_count = NON_Q_VECTORS; - } - -- ch->combined_count = adapter->rss_queues; -+ /* record RSS/TSS queues */ -+ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) { -+ if (adapter->num_rx_queues > adapter->num_tx_queues) { -+ ch->combined_count = adapter->num_tx_queues; -+ ch->rx_count = adapter->num_rx_queues - -+ adapter->num_tx_queues; -+ } else if (adapter->num_rx_queues < adapter->num_tx_queues) { -+ ch->combined_count = adapter->num_rx_queues; -+ ch->tx_count = adapter->num_tx_queues - -+ adapter->num_rx_queues; -+ } else { -+ ch->combined_count = adapter->num_rx_queues; -+ } -+ } else { -+ ch->rx_count = adapter->num_rx_queues; -+ ch->tx_count = adapter->num_tx_queues; -+ } - } -+#endif /* ETHTOOL_GCHANNELS */ -+#ifdef ETHTOOL_SCHANNELS - --static int igb_set_channels(struct net_device *netdev, -- struct ethtool_channels *ch) -+static int igb_set_channels(struct net_device *dev, -+ struct ethtool_channels *ch) - { -- struct igb_adapter *adapter = netdev_priv(netdev); -- unsigned int count = ch->combined_count; -- unsigned int max_combined = 0; -+ struct igb_adapter *adapter = netdev_priv(dev); -+ unsigned int max_rss_queues; - -- /* Verify they are not requesting separate vectors */ -- if (!count || ch->rx_count || ch->tx_count) -+ /* we cannot support combined, Rx, and Tx vectors simultaneously */ -+ if (ch->combined_count && ch->rx_count && ch->tx_count) - return -EINVAL; - -- /* Verify other_count is valid and has not been changed */ -- if (ch->other_count != NON_Q_VECTORS) -+ /* ignore other_count since it is not changeable */ -+ -+ /* verify we have at least one channel in each direction */ -+ if (!ch->combined_count && (!ch->rx_count || !ch->tx_count)) - return -EINVAL; - -- /* Verify the number of channels doesn't exceed hw limits */ -- max_combined = igb_max_channels(adapter); -- if (count > max_combined) -+ /* verify number of Tx queues does not exceed 1 if SR-IOV is 
enabled */ -+ if (adapter->vfs_allocated_count && -+ ((ch->combined_count + ch->tx_count) > 1)) - return -EINVAL; - -- if (count != adapter->rss_queues) { -- adapter->rss_queues = count; -- igb_set_flag_queue_pairs(adapter, max_combined); -+ /* verify the number of channels does not exceed hardware limits */ -+ max_rss_queues = igb_max_rss_queues(adapter); -+ if (((ch->combined_count + ch->rx_count) > max_rss_queues) || -+ ((ch->combined_count + ch->tx_count) > max_rss_queues)) -+ return -EINVAL; - -- /* Hardware has to reinitialize queues and interrupts to -- * match the new configuration. -+ /* Determine if we need to pair queues. */ -+ switch (adapter->hw.mac.type) { -+ case e1000_82575: -+ case e1000_i211: -+ /* Device supports enough interrupts without queue pairing. */ -+ break; -+ case e1000_i350: -+ /* The PF has 3 interrupts and 1 queue pair w/ SR-IOV */ -+ if (adapter->vfs_allocated_count) -+ break; -+ case e1000_82576: -+ /* -+ * The PF has access to 6 interrupt vectors if the number of -+ * VFs is less than 7. If that is the case we don't have -+ * to pair up the queues. - */ -- return igb_reinit_queues(adapter); -+ if ((adapter->vfs_allocated_count > 0) && -+ (adapter->vfs_allocated_count < 7)) -+ break; -+ /* fall through */ -+ case e1000_82580: -+ case e1000_i210: -+ default: -+ /* verify we can support as many queues as requested */ -+ if ((ch->combined_count + -+ ch->rx_count + ch->tx_count) > MAX_Q_VECTORS) -+ return -EINVAL; -+ break; - } - -- return 0; -+ /* update configuration values */ -+ adapter->rss_queues = ch->combined_count + ch->rx_count; -+ if (ch->rx_count == ch->tx_count || adapter->vfs_allocated_count) -+ adapter->tss_queues = 0; -+ else -+ adapter->tss_queues = ch->combined_count + ch->tx_count; -+ -+ if (ch->combined_count) -+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS; -+ else -+ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; -+ -+ /* update queue configuration for adapter */ -+ return igb_setup_queues(adapter); - } - -+#endif /* ETHTOOL_SCHANNELS */ - static const struct ethtool_ops igb_ethtool_ops = { -- .get_settings = igb_get_settings, -- .set_settings = igb_set_settings, -- .get_drvinfo = igb_get_drvinfo, -- .get_regs_len = igb_get_regs_len, -- .get_regs = igb_get_regs, -- .get_wol = igb_get_wol, -- .set_wol = igb_set_wol, -- .get_msglevel = igb_get_msglevel, -- .set_msglevel = igb_set_msglevel, -- .nway_reset = igb_nway_reset, -- .get_link = igb_get_link, -- .get_eeprom_len = igb_get_eeprom_len, -- .get_eeprom = igb_get_eeprom, -- .set_eeprom = igb_set_eeprom, -- .get_ringparam = igb_get_ringparam, -- .set_ringparam = igb_set_ringparam, -- .get_pauseparam = igb_get_pauseparam, -- .set_pauseparam = igb_set_pauseparam, -- .self_test = igb_diag_test, -- .get_strings = igb_get_strings, -- .set_phys_id = igb_set_phys_id, -- .get_sset_count = igb_get_sset_count, -- .get_ethtool_stats = igb_get_ethtool_stats, -- .get_coalesce = igb_get_coalesce, -- .set_coalesce = igb_set_coalesce, -- .get_ts_info = igb_get_ts_info, -- .get_rxnfc = igb_get_rxnfc, -- .set_rxnfc = igb_set_rxnfc, -+ .get_settings = igb_get_settings, -+ .set_settings = igb_set_settings, -+ .get_drvinfo = igb_get_drvinfo, -+ .get_regs_len = igb_get_regs_len, -+ .get_regs = igb_get_regs, -+ .get_wol = igb_get_wol, -+ .set_wol = igb_set_wol, -+ .get_msglevel = igb_get_msglevel, -+ .set_msglevel = igb_set_msglevel, -+ .nway_reset = igb_nway_reset, -+ .get_link = igb_get_link, -+ .get_eeprom_len = igb_get_eeprom_len, -+ .get_eeprom = igb_get_eeprom, -+ .set_eeprom = igb_set_eeprom, -+ .get_ringparam = 
igb_get_ringparam, -+ .set_ringparam = igb_set_ringparam, -+ .get_pauseparam = igb_get_pauseparam, -+ .set_pauseparam = igb_set_pauseparam, -+ .self_test = igb_diag_test, -+ .get_strings = igb_get_strings, -+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -+#ifdef HAVE_ETHTOOL_SET_PHYS_ID -+ .set_phys_id = igb_set_phys_id, -+#else -+ .phys_id = igb_phys_id, -+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ -+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT -+ .get_sset_count = igb_get_sset_count, -+#else -+ .get_stats_count = igb_get_stats_count, -+ .self_test_count = igb_diag_test_count, -+#endif -+ .get_ethtool_stats = igb_get_ethtool_stats, -+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR -+ .get_perm_addr = ethtool_op_get_perm_addr, -+#endif -+ .get_coalesce = igb_get_coalesce, -+ .set_coalesce = igb_set_coalesce, -+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -+#ifdef HAVE_ETHTOOL_GET_TS_INFO -+ .get_ts_info = igb_get_ts_info, -+#endif /* HAVE_ETHTOOL_GET_TS_INFO */ -+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -+#ifdef CONFIG_PM_RUNTIME -+ .begin = igb_ethtool_begin, -+ .complete = igb_ethtool_complete, -+#endif /* CONFIG_PM_RUNTIME */ -+#ifndef HAVE_NDO_SET_FEATURES -+ .get_rx_csum = igb_get_rx_csum, -+ .set_rx_csum = igb_set_rx_csum, -+ .get_tx_csum = ethtool_op_get_tx_csum, -+ .set_tx_csum = igb_set_tx_csum, -+ .get_sg = ethtool_op_get_sg, -+ .set_sg = ethtool_op_set_sg, -+#ifdef NETIF_F_TSO -+ .get_tso = ethtool_op_get_tso, -+ .set_tso = igb_set_tso, -+#endif -+#ifdef ETHTOOL_GFLAGS -+ .get_flags = ethtool_op_get_flags, -+ .set_flags = igb_set_flags, -+#endif /* ETHTOOL_GFLAGS */ -+#endif /* HAVE_NDO_SET_FEATURES */ -+#ifdef ETHTOOL_GADV_COAL -+ .get_advcoal = igb_get_adv_coal, -+ .set_advcoal = igb_set_dmac_coal, -+#endif /* ETHTOOL_GADV_COAL */ -+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -+#ifdef ETHTOOL_GEEE - .get_eee = igb_get_eee, -+#endif -+#ifdef ETHTOOL_SEEE - .set_eee = igb_set_eee, -- .get_module_info = igb_get_module_info, -- .get_module_eeprom = igb_get_module_eeprom, -+#endif -+#ifdef ETHTOOL_GRXFHINDIR -+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE - .get_rxfh_indir_size = igb_get_rxfh_indir_size, -+#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ -+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) - .get_rxfh = igb_get_rxfh, -+#else -+ .get_rxfh_indir = igb_get_rxfh_indir, -+#endif /* HAVE_ETHTOOL_GSRSSH */ -+#endif /* ETHTOOL_GRXFHINDIR */ -+#ifdef ETHTOOL_SRXFHINDIR -+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) - .set_rxfh = igb_set_rxfh, -- .get_channels = igb_get_channels, -- .set_channels = igb_set_channels, -- .begin = igb_ethtool_begin, -- .complete = igb_ethtool_complete, -+#else -+ .set_rxfh_indir = igb_set_rxfh_indir, -+#endif /* HAVE_ETHTOOL_GSRSSH */ -+#endif /* ETHTOOL_SRXFHINDIR */ -+#ifdef ETHTOOL_GCHANNELS -+ .get_channels = igb_get_channels, -+#endif /* ETHTOOL_GCHANNELS */ -+#ifdef ETHTOOL_SCHANNELS -+ .set_channels = igb_set_channels, -+#endif /* ETHTOOL_SCHANNELS */ -+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -+#ifdef ETHTOOL_GRXFH -+ .get_rxnfc = igb_get_rxnfc, -+ .set_rxnfc = igb_set_rxnfc, -+#endif -+}; -+ -+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -+static const struct ethtool_ops_ext igb_ethtool_ops_ext = { -+ .size = sizeof(struct ethtool_ops_ext), -+ .get_ts_info = igb_get_ts_info, -+ .set_phys_id = igb_set_phys_id, -+ .get_eee = igb_get_eee, -+ .set_eee = igb_set_eee, -+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE -+ .get_rxfh_indir_size = igb_get_rxfh_indir_size, -+#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ -+ 
.get_rxfh_indir = igb_get_rxfh_indir, -+ .set_rxfh_indir = igb_set_rxfh_indir, -+ .get_channels = igb_get_channels, -+ .set_channels = igb_set_channels, - }; - - void igb_set_ethtool_ops(struct net_device *netdev) - { -- netdev->ethtool_ops = &igb_ethtool_ops; -+ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); -+ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext); - } -+#else -+void igb_set_ethtool_ops(struct net_device *netdev) -+{ -+ /* have to "undeclare" const on this struct to remove warnings */ -+#ifndef ETHTOOL_OPS_COMPAT -+ netdev->ethtool_ops = (struct ethtool_ops *)&igb_ethtool_ops; -+#else -+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops); -+#endif /* SET_ETHTOOL_OPS */ -+} -+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ -+#endif /* SIOCETHTOOL */ -+ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c ---- a/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,30 +1,31 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #include "igb.h" - #include "e1000_82575.h" - #include "e1000_hw.h" -- -+#ifdef IGB_HWMON - #include - #include - #include -@@ -34,28 +35,29 @@ - #include - #include - --#ifdef CONFIG_IGB_HWMON -+#ifdef HAVE_I2C_SUPPORT - static struct i2c_board_info i350_sensor_info = { - I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), - }; -+#endif /* HAVE_I2C_SUPPORT */ - - /* hwmon callback functions */ - static ssize_t igb_hwmon_show_location(struct device *dev, -- struct device_attribute *attr, -- char *buf) -+ struct device_attribute *attr, -+ char *buf) - { - struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -- dev_attr); -+ dev_attr); - return sprintf(buf, "loc%u\n", - igb_attr->sensor->location); - } - - static ssize_t igb_hwmon_show_temp(struct device *dev, -- struct device_attribute *attr, -- char *buf) -+ struct device_attribute *attr, -+ char *buf) - { - struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -- dev_attr); -+ dev_attr); - unsigned int value; - - /* reset the temp field */ -@@ -70,11 +72,11 @@ - } - - static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, -- struct device_attribute *attr, -- char *buf) -+ struct device_attribute *attr, -+ char *buf) - { - struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -- dev_attr); -+ dev_attr); - unsigned int value = igb_attr->sensor->caution_thresh; - - /* display millidegree */ -@@ -84,11 +86,11 @@ - } - - static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, -- struct device_attribute *attr, -- char *buf) -+ struct device_attribute *attr, -+ char *buf) - { - struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, -- dev_attr); -+ dev_attr); - unsigned int value = igb_attr->sensor->max_op_thresh; - - /* display millidegree */ -@@ -107,35 +109,34 @@ - * the data structures we need to get the data to display. 
- */ - static int igb_add_hwmon_attr(struct igb_adapter *adapter, -- unsigned int offset, int type) --{ -+ unsigned int offset, int type) { - int rc; - unsigned int n_attr; - struct hwmon_attr *igb_attr; - -- n_attr = adapter->igb_hwmon_buff->n_hwmon; -- igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; -+ n_attr = adapter->igb_hwmon_buff.n_hwmon; -+ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr]; - - switch (type) { - case IGB_HWMON_TYPE_LOC: - igb_attr->dev_attr.show = igb_hwmon_show_location; - snprintf(igb_attr->name, sizeof(igb_attr->name), -- "temp%u_label", offset + 1); -+ "temp%u_label", offset); - break; - case IGB_HWMON_TYPE_TEMP: - igb_attr->dev_attr.show = igb_hwmon_show_temp; - snprintf(igb_attr->name, sizeof(igb_attr->name), -- "temp%u_input", offset + 1); -+ "temp%u_input", offset); - break; - case IGB_HWMON_TYPE_CAUTION: - igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; - snprintf(igb_attr->name, sizeof(igb_attr->name), -- "temp%u_max", offset + 1); -+ "temp%u_max", offset); - break; - case IGB_HWMON_TYPE_MAX: - igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; - snprintf(igb_attr->name, sizeof(igb_attr->name), -- "temp%u_crit", offset + 1); -+ "temp%u_crit", offset); - break; - default: - rc = -EPERM; -@@ -150,16 +151,30 @@ - igb_attr->dev_attr.attr.mode = S_IRUGO; - igb_attr->dev_attr.attr.name = igb_attr->name; - sysfs_attr_init(&igb_attr->dev_attr.attr); -+ rc = device_create_file(&adapter->pdev->dev, -+ &igb_attr->dev_attr); -+ if (rc == 0) -+ ++adapter->igb_hwmon_buff.n_hwmon; - -- adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; -- -- ++adapter->igb_hwmon_buff->n_hwmon; -- -- return 0; -+ return rc; - } - - static void igb_sysfs_del_adapter(struct igb_adapter *adapter) - { -+ int i; -+ -+ if (adapter == NULL) -+ return; -+ -+ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) { -+ device_remove_file(&adapter->pdev->dev, -+ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr); -+ } -+ -+ kfree(adapter->igb_hwmon_buff.hwmon_list); -+ -+ if (adapter->igb_hwmon_buff.device) -+ hwmon_device_unregister(adapter->igb_hwmon_buff.device); - } - - /* called from igb_main.c */ -@@ -171,11 +186,13 @@ - /* called from igb_main.c */ - int igb_sysfs_init(struct igb_adapter *adapter) - { -- struct hwmon_buff *igb_hwmon; -- struct i2c_client *client; -- struct device *hwmon_dev; -+ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff; - unsigned int i; -+ int n_attrs; - int rc = 0; -+#ifdef HAVE_I2C_SUPPORT -+ struct i2c_client *client = NULL; -+#endif /* HAVE_I2C_SUPPORT */ - - /* If this method isn't defined we don't support thermals */ - if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) -@@ -183,16 +200,35 @@ - - /* Don't create thermal hwmon interface if no sensors present */ - rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); -- if (rc) -+ if (rc) -+ goto exit; -+#ifdef HAVE_I2C_SUPPORT -+ /* init i2c_client */ -+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); -+ if (client == NULL) { -+ dev_info(&adapter->pdev->dev, -+ "Failed to create new i2c device..\n"); - goto exit; -+ } -+ adapter->i2c_client = client; -+#endif /* HAVE_I2C_SUPPORT */ - -- igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), -- GFP_KERNEL); -- if (!igb_hwmon) { -+ /* Allocation space for max attributes -+ * max num sensors * values (loc, temp, max, caution) -+ */ -+ n_attrs = E1000_MAX_SENSORS * 4; -+ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), -+ GFP_KERNEL); -+ if 
(!igb_hwmon->hwmon_list) { - rc = -ENOMEM; -- goto exit; -+ goto err; -+ } -+ -+ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev); -+ if (IS_ERR(igb_hwmon->device)) { -+ rc = PTR_ERR(igb_hwmon->device); -+ goto err; - } -- adapter->igb_hwmon_buff = igb_hwmon; - - for (i = 0; i < E1000_MAX_SENSORS; i++) { - -@@ -204,39 +240,11 @@ - - /* Bail if any hwmon attr struct fails to initialize */ - rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); -+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); -+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); -+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); - if (rc) -- goto exit; -- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); -- if (rc) -- goto exit; -- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); -- if (rc) -- goto exit; -- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); -- if (rc) -- goto exit; -- } -- -- /* init i2c_client */ -- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); -- if (client == NULL) { -- dev_info(&adapter->pdev->dev, -- "Failed to create new i2c device.\n"); -- rc = -ENODEV; -- goto exit; -- } -- adapter->i2c_client = client; -- -- igb_hwmon->groups[0] = &igb_hwmon->group; -- igb_hwmon->group.attrs = igb_hwmon->attrs; -- -- hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, -- client->name, -- igb_hwmon, -- igb_hwmon->groups); -- if (IS_ERR(hwmon_dev)) { -- rc = PTR_ERR(hwmon_dev); -- goto err; -+ goto err; - } - - goto exit; -@@ -246,4 +254,4 @@ - exit: - return rc; - } --#endif -+#endif /* IGB_HWMON */ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c ---- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,113 +1,114 @@ --/* Intel(R) Gigabit Ethernet Linux driver -- * Copyright(c) 2007-2014 Intel Corporation. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms and conditions of the GNU General Public License, -- * version 2, as published by the Free Software Foundation. -- * -- * This program is distributed in the hope it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -- * more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- * -- * The full GNU General Public License is included in this distribution in -- * the file called "COPYING". -- * -- * Contact Information: -- * e1000-devel Mailing List -- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. - --#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. 
-+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ - - #include - #include - #include --#include - #include - #include - #include --#include --#include -+#include -+#ifdef NETIF_F_TSO - #include -+#ifdef NETIF_F_TSO6 -+#include - #include --#include -+#endif -+#endif -+#ifdef SIOCGMIIPHY - #include -+#endif -+#ifdef SIOCETHTOOL - #include --#include -+#endif - #include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -+#ifdef CONFIG_PM_RUNTIME - #include --#ifdef CONFIG_IGB_DCA --#include --#endif --#include -+#endif /* CONFIG_PM_RUNTIME */ -+ -+#include - #include "igb.h" -+#include "igb_vmdq.h" -+ -+#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) \ -+ || defined(DEBUG_ITR) -+#define DRV_DEBUG "_debug" -+#else -+#define DRV_DEBUG -+#endif -+#define DRV_HW_PERF -+#define VERSION_SUFFIX - - #define MAJ 5 --#define MIN 0 --#define BUILD 5 --#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ --__stringify(BUILD) "-k" -+#define MIN 3 -+#define BUILD 5.4 -+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "."\ -+ __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF -+ - char igb_driver_name[] = "igb"; - char igb_driver_version[] = DRV_VERSION; - static const char igb_driver_string[] = - "Intel(R) Gigabit Ethernet Network Driver"; - static const char igb_copyright[] = -- "Copyright (c) 2007-2014 Intel Corporation."; -- --static const struct e1000_info *igb_info_tbl[] = { -- [board_82575] = &e1000_82575_info, --}; -+ "Copyright (c) 2007-2015 Intel Corporation."; - - static const struct pci_device_id igb_pci_tbl[] = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, -- { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_82576), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, -- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) }, - /* required last entry */ - {0, } - }; -@@ -122,84 +123,114 @@ - static int igb_probe(struct pci_dev *, const struct pci_device_id *); - static void igb_remove(struct pci_dev *pdev); - static int igb_sw_init(struct igb_adapter *); --static int igb_open(struct net_device *); --static int igb_close(struct net_device *); - static void igb_configure(struct igb_adapter *); - static void igb_configure_tx(struct igb_adapter *); - static void igb_configure_rx(struct igb_adapter *); - static void igb_clean_all_tx_rings(struct igb_adapter *); - static void igb_clean_all_rx_rings(struct igb_adapter *); - static void igb_clean_tx_ring(struct igb_ring *); --static void igb_clean_rx_ring(struct igb_ring *); - static void igb_set_rx_mode(struct net_device *); - static void igb_update_phy_info(unsigned long); - static void igb_watchdog(unsigned long); - static void igb_watchdog_task(struct work_struct *); -+static void igb_dma_err_task(struct work_struct *); -+static void igb_dma_err_timer(unsigned long data); - static netdev_tx_t 
igb_xmit_frame(struct sk_buff *skb, struct net_device *); --static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, -- struct rtnl_link_stats64 *stats); -+static struct net_device_stats *igb_get_stats(struct net_device *); - static int igb_change_mtu(struct net_device *, int); -+/* void igb_full_sync_mac_table(struct igb_adapter *adapter); */ - static int igb_set_mac(struct net_device *, void *); - static void igb_set_uta(struct igb_adapter *adapter); - static irqreturn_t igb_intr(int irq, void *); - static irqreturn_t igb_intr_msi(int irq, void *); - static irqreturn_t igb_msix_other(int irq, void *); -+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8); - static irqreturn_t igb_msix_ring(int irq, void *); --#ifdef CONFIG_IGB_DCA -+#ifdef IGB_DCA - static void igb_update_dca(struct igb_q_vector *); - static void igb_setup_dca(struct igb_adapter *); --#endif /* CONFIG_IGB_DCA */ -+#endif /* IGB_DCA */ - static int igb_poll(struct napi_struct *, int); - static bool igb_clean_tx_irq(struct igb_q_vector *); - static bool igb_clean_rx_irq(struct igb_q_vector *, int); - static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); - static void igb_tx_timeout(struct net_device *); - static void igb_reset_task(struct work_struct *); --static void igb_vlan_mode(struct net_device *netdev, -- netdev_features_t features); --static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); --static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); -+#ifdef HAVE_VLAN_RX_REGISTER -+static void igb_vlan_mode(struct net_device *, struct vlan_group *); -+#endif -+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+static int igb_vlan_rx_add_vid(struct net_device *, -+ __always_unused __be16 proto, u16); -+static int igb_vlan_rx_kill_vid(struct net_device *, -+ __always_unused __be16 proto, u16); -+#else -+static int igb_vlan_rx_add_vid(struct net_device *, u16); -+static int igb_vlan_rx_kill_vid(struct net_device *, u16); -+#endif -+#else -+static void igb_vlan_rx_add_vid(struct net_device *, u16); -+static void igb_vlan_rx_kill_vid(struct net_device *, u16); -+#endif - static void igb_restore_vlan(struct igb_adapter *); --static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); - static void igb_ping_all_vfs(struct igb_adapter *); - static void igb_msg_task(struct igb_adapter *); - static void igb_vmm_control(struct igb_adapter *); - static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); - static void igb_restore_vf_multicasts(struct igb_adapter *adapter); -+static void igb_process_mdd_event(struct igb_adapter *); -+#ifdef IFLA_VF_MAX - static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); - static int igb_ndo_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos); --static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); -+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE - static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, -- bool setting); -+ bool setting); -+#endif -+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, -+ int min_tx_rate, int tx_rate); -+#else -+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); -+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, - struct ifla_vf_info *ivi); - static void igb_check_vf_rate_limit(struct igb_adapter *); -- --#ifdef CONFIG_PCI_IOV --static int igb_vf_configure(struct 
igb_adapter *adapter, int vf); --static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); - #endif -- -+static int igb_vf_configure(struct igb_adapter *adapter, int vf); - #ifdef CONFIG_PM --#ifdef CONFIG_PM_SLEEP --static int igb_suspend(struct device *); --#endif --static int igb_resume(struct device *); -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS -+static int igb_suspend(struct device *dev); -+static int igb_resume(struct device *dev); - #ifdef CONFIG_PM_RUNTIME - static int igb_runtime_suspend(struct device *dev); - static int igb_runtime_resume(struct device *dev); - static int igb_runtime_idle(struct device *dev); --#endif -+#endif /* CONFIG_PM_RUNTIME */ - static const struct dev_pm_ops igb_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) -+#ifdef CONFIG_PM_RUNTIME - SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, - igb_runtime_idle) -+#endif /* CONFIG_PM_RUNTIME */ - }; --#endif -+#else -+static int igb_suspend(struct pci_dev *pdev, pm_message_t state); -+static int igb_resume(struct pci_dev *pdev); -+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ -+#endif /* CONFIG_PM */ -+#ifndef USE_REBOOT_NOTIFIER - static void igb_shutdown(struct pci_dev *); --static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); --#ifdef CONFIG_IGB_DCA -+#else -+static int igb_notify_reboot(struct notifier_block *, unsigned long, void *); -+static struct notifier_block igb_notifier_reboot = { -+ .notifier_call = igb_notify_reboot, -+ .next = NULL, -+ .priority = 0 -+}; -+#endif -+#ifdef IGB_DCA - static int igb_notify_dca(struct notifier_block *, unsigned long, void *); - static struct notifier_block dca_notifier = { - .notifier_call = igb_notify_dca, -@@ -211,462 +242,87 @@ - /* for netdump / net console */ - static void igb_netpoll(struct net_device *); - #endif --#ifdef CONFIG_PCI_IOV --static unsigned int max_vfs; --module_param(max_vfs, uint, 0); --MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); --#endif /* CONFIG_PCI_IOV */ - -+#ifdef HAVE_PCI_ERS - static pci_ers_result_t igb_io_error_detected(struct pci_dev *, - pci_channel_state_t); - static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); - static void igb_io_resume(struct pci_dev *); - --static const struct pci_error_handlers igb_err_handler = { -+static struct pci_error_handlers igb_err_handler = { - .error_detected = igb_io_error_detected, - .slot_reset = igb_io_slot_reset, - .resume = igb_io_resume, - }; -+#endif - -+static void igb_init_fw(struct igb_adapter *adapter); - static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); - - static struct pci_driver igb_driver = { - .name = igb_driver_name, - .id_table = igb_pci_tbl, - .probe = igb_probe, -- .remove = igb_remove, -+ .remove = __devexit_p(igb_remove), - #ifdef CONFIG_PM -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS - .driver.pm = &igb_pm_ops, --#endif -+#else -+ .suspend = igb_suspend, -+ .resume = igb_resume, -+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ -+#endif /* CONFIG_PM */ -+#ifndef USE_REBOOT_NOTIFIER - .shutdown = igb_shutdown, -- .sriov_configure = igb_pci_sriov_configure, -+#endif -+#ifdef HAVE_PCI_ERS - .err_handler = &igb_err_handler -+#endif - }; - -+/* u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); */ -+ - MODULE_AUTHOR("Intel Corporation, "); - MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); - MODULE_LICENSE("GPL"); - MODULE_VERSION(DRV_VERSION); - --#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) --static int debug = -1; --module_param(debug, int, 0); 
--MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -- --struct igb_reg_info { -- u32 ofs; -- char *name; --}; -- --static const struct igb_reg_info igb_reg_info_tbl[] = { -- -- /* General Registers */ -- {E1000_CTRL, "CTRL"}, -- {E1000_STATUS, "STATUS"}, -- {E1000_CTRL_EXT, "CTRL_EXT"}, -- -- /* Interrupt Registers */ -- {E1000_ICR, "ICR"}, -- -- /* RX Registers */ -- {E1000_RCTL, "RCTL"}, -- {E1000_RDLEN(0), "RDLEN"}, -- {E1000_RDH(0), "RDH"}, -- {E1000_RDT(0), "RDT"}, -- {E1000_RXDCTL(0), "RXDCTL"}, -- {E1000_RDBAL(0), "RDBAL"}, -- {E1000_RDBAH(0), "RDBAH"}, -- -- /* TX Registers */ -- {E1000_TCTL, "TCTL"}, -- {E1000_TDBAL(0), "TDBAL"}, -- {E1000_TDBAH(0), "TDBAH"}, -- {E1000_TDLEN(0), "TDLEN"}, -- {E1000_TDH(0), "TDH"}, -- {E1000_TDT(0), "TDT"}, -- {E1000_TXDCTL(0), "TXDCTL"}, -- {E1000_TDFH, "TDFH"}, -- {E1000_TDFT, "TDFT"}, -- {E1000_TDFHS, "TDFHS"}, -- {E1000_TDFPC, "TDFPC"}, -- -- /* List Terminator */ -- {} --}; -- --/* igb_regdump - register printout routine */ --static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) --{ -- int n = 0; -- char rname[16]; -- u32 regs[8]; -- -- switch (reginfo->ofs) { -- case E1000_RDLEN(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RDLEN(n)); -- break; -- case E1000_RDH(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RDH(n)); -- break; -- case E1000_RDT(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RDT(n)); -- break; -- case E1000_RXDCTL(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RXDCTL(n)); -- break; -- case E1000_RDBAL(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RDBAL(n)); -- break; -- case E1000_RDBAH(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RDBAH(n)); -- break; -- case E1000_TDBAL(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_RDBAL(n)); -- break; -- case E1000_TDBAH(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_TDBAH(n)); -- break; -- case E1000_TDLEN(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_TDLEN(n)); -- break; -- case E1000_TDH(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_TDH(n)); -- break; -- case E1000_TDT(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_TDT(n)); -- break; -- case E1000_TXDCTL(0): -- for (n = 0; n < 4; n++) -- regs[n] = rd32(E1000_TXDCTL(n)); -- break; -- default: -- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); -- return; -- } -- -- snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); -- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], -- regs[2], regs[3]); --} -- --/* igb_dump - Print registers, Tx-rings and Rx-rings */ --static void igb_dump(struct igb_adapter *adapter) --{ -- struct net_device *netdev = adapter->netdev; -- struct e1000_hw *hw = &adapter->hw; -- struct igb_reg_info *reginfo; -- struct igb_ring *tx_ring; -- union e1000_adv_tx_desc *tx_desc; -- struct my_u0 { u64 a; u64 b; } *u0; -- struct igb_ring *rx_ring; -- union e1000_adv_rx_desc *rx_desc; -- u32 staterr; -- u16 i, n; -- -- if (!netif_msg_hw(adapter)) -- return; -- -- /* Print netdevice Info */ -- if (netdev) { -- dev_info(&adapter->pdev->dev, "Net device Info\n"); -- pr_info("Device Name state trans_start last_rx\n"); -- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, -- netdev->state, netdev->trans_start, netdev->last_rx); -- } -- -- /* Print Registers */ -- dev_info(&adapter->pdev->dev, "Register Dump\n"); -- pr_info(" Register Name Value\n"); -- for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; -- reginfo->name; reginfo++) { -- igb_regdump(hw, reginfo); -- } -- -- /* 
Print TX Ring Summary */ -- if (!netdev || !netif_running(netdev)) -- goto exit; -- -- dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); -- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); -- for (n = 0; n < adapter->num_tx_queues; n++) { -- struct igb_tx_buffer *buffer_info; -- tx_ring = adapter->tx_ring[n]; -- buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; -- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", -- n, tx_ring->next_to_use, tx_ring->next_to_clean, -- (u64)dma_unmap_addr(buffer_info, dma), -- dma_unmap_len(buffer_info, len), -- buffer_info->next_to_watch, -- (u64)buffer_info->time_stamp); -- } -- -- /* Print TX Rings */ -- if (!netif_msg_tx_done(adapter)) -- goto rx_ring_summary; -- -- dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); -- -- /* Transmit Descriptor Formats -- * -- * Advanced Transmit Descriptor -- * +--------------------------------------------------------------+ -- * 0 | Buffer Address [63:0] | -- * +--------------------------------------------------------------+ -- * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | -- * +--------------------------------------------------------------+ -- * 63 46 45 40 39 38 36 35 32 31 24 15 0 -- */ -- -- for (n = 0; n < adapter->num_tx_queues; n++) { -- tx_ring = adapter->tx_ring[n]; -- pr_info("------------------------------------\n"); -- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); -- pr_info("------------------------------------\n"); -- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); -- -- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { -- const char *next_desc; -- struct igb_tx_buffer *buffer_info; -- tx_desc = IGB_TX_DESC(tx_ring, i); -- buffer_info = &tx_ring->tx_buffer_info[i]; -- u0 = (struct my_u0 *)tx_desc; -- if (i == tx_ring->next_to_use && -- i == tx_ring->next_to_clean) -- next_desc = " NTC/U"; -- else if (i == tx_ring->next_to_use) -- next_desc = " NTU"; -- else if (i == tx_ring->next_to_clean) -- next_desc = " NTC"; -- else -- next_desc = ""; -- -- pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", -- i, le64_to_cpu(u0->a), -- le64_to_cpu(u0->b), -- (u64)dma_unmap_addr(buffer_info, dma), -- dma_unmap_len(buffer_info, len), -- buffer_info->next_to_watch, -- (u64)buffer_info->time_stamp, -- buffer_info->skb, next_desc); -- -- if (netif_msg_pktdata(adapter) && buffer_info->skb) -- print_hex_dump(KERN_INFO, "", -- DUMP_PREFIX_ADDRESS, -- 16, 1, buffer_info->skb->data, -- dma_unmap_len(buffer_info, len), -- true); -- } -- } -- -- /* Print RX Rings Summary */ --rx_ring_summary: -- dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); -- pr_info("Queue [NTU] [NTC]\n"); -- for (n = 0; n < adapter->num_rx_queues; n++) { -- rx_ring = adapter->rx_ring[n]; -- pr_info(" %5d %5X %5X\n", -- n, rx_ring->next_to_use, rx_ring->next_to_clean); -- } -- -- /* Print RX Rings */ -- if (!netif_msg_rx_status(adapter)) -- goto exit; -- -- dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); -- -- /* Advanced Receive Descriptor (Read) Format -- * 63 1 0 -- * +-----------------------------------------------------+ -- * 0 | Packet Buffer Address [63:1] |A0/NSE| -- * +----------------------------------------------+------+ -- * 8 | Header Buffer Address [63:1] | DD | -- * +-----------------------------------------------------+ -- * -- * -- * Advanced Receive Descriptor (Write-Back) Format -- * -- * 63 48 47 32 31 30 21 20 17 16 4 3 0 -- * +------------------------------------------------------+ -- * 0 | Packet 
IP |SPH| HDR_LEN | RSV|Packet| RSS | -- * | Checksum Ident | | | | Type | Type | -- * +------------------------------------------------------+ -- * 8 | VLAN Tag | Length | Extended Error | Extended Status | -- * +------------------------------------------------------+ -- * 63 48 47 32 31 20 19 0 -- */ -- -- for (n = 0; n < adapter->num_rx_queues; n++) { -- rx_ring = adapter->rx_ring[n]; -- pr_info("------------------------------------\n"); -- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); -- pr_info("------------------------------------\n"); -- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); -- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); -- -- for (i = 0; i < rx_ring->count; i++) { -- const char *next_desc; -- struct igb_rx_buffer *buffer_info; -- buffer_info = &rx_ring->rx_buffer_info[i]; -- rx_desc = IGB_RX_DESC(rx_ring, i); -- u0 = (struct my_u0 *)rx_desc; -- staterr = le32_to_cpu(rx_desc->wb.upper.status_error); -- -- if (i == rx_ring->next_to_use) -- next_desc = " NTU"; -- else if (i == rx_ring->next_to_clean) -- next_desc = " NTC"; -- else -- next_desc = ""; -- -- if (staterr & E1000_RXD_STAT_DD) { -- /* Descriptor Done */ -- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", -- "RWB", i, -- le64_to_cpu(u0->a), -- le64_to_cpu(u0->b), -- next_desc); -- } else { -- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", -- "R ", i, -- le64_to_cpu(u0->a), -- le64_to_cpu(u0->b), -- (u64)buffer_info->dma, -- next_desc); -- -- if (netif_msg_pktdata(adapter) && -- buffer_info->dma && buffer_info->page) { -- print_hex_dump(KERN_INFO, "", -- DUMP_PREFIX_ADDRESS, -- 16, 1, -- page_address(buffer_info->page) + -- buffer_info->page_offset, -- IGB_RX_BUFSZ, true); -- } -- } -- } -- } -- --exit: -- return; --} -- --/** -- * igb_get_i2c_data - Reads the I2C SDA data bit -- * @hw: pointer to hardware structure -- * @i2cctl: Current value of I2CCTL register -- * -- * Returns the I2C data bit value -- **/ --static int igb_get_i2c_data(void *data) -+static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add) - { -- struct igb_adapter *adapter = (struct igb_adapter *)data; - struct e1000_hw *hw = &adapter->hw; -- s32 i2cctl = rd32(E1000_I2CPARAMS); -+ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie; -+ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; -+ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); -+ u32 vfta; - -- return !!(i2cctl & E1000_I2C_DATA_IN); --} -+ /* -+ * if this is the management vlan the only option is to add it in so -+ * that the management pass through will continue to work -+ */ -+ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && -+ (vid == mng_cookie->vlan_id)) -+ add = TRUE; - --/** -- * igb_set_i2c_data - Sets the I2C data bit -- * @data: pointer to hardware structure -- * @state: I2C data value (0 or 1) to set -- * -- * Sets the I2C data bit -- **/ --static void igb_set_i2c_data(void *data, int state) --{ -- struct igb_adapter *adapter = (struct igb_adapter *)data; -- struct e1000_hw *hw = &adapter->hw; -- s32 i2cctl = rd32(E1000_I2CPARAMS); -+ vfta = adapter->shadow_vfta[index]; - -- if (state) -- i2cctl |= E1000_I2C_DATA_OUT; -+ if (add) -+ vfta |= mask; - else -- i2cctl &= ~E1000_I2C_DATA_OUT; -+ vfta &= ~mask; - -- i2cctl &= ~E1000_I2C_DATA_OE_N; -- i2cctl |= E1000_I2C_CLK_OE_N; -- wr32(E1000_I2CPARAMS, i2cctl); -- wrfl(); -- --} -- --/** -- * igb_set_i2c_clk - Sets the I2C 
SCL clock -- * @data: pointer to hardware structure -- * @state: state to set clock -- * -- * Sets the I2C clock line to state -- **/ --static void igb_set_i2c_clk(void *data, int state) --{ -- struct igb_adapter *adapter = (struct igb_adapter *)data; -- struct e1000_hw *hw = &adapter->hw; -- s32 i2cctl = rd32(E1000_I2CPARAMS); -- -- if (state) { -- i2cctl |= E1000_I2C_CLK_OUT; -- i2cctl &= ~E1000_I2C_CLK_OE_N; -- } else { -- i2cctl &= ~E1000_I2C_CLK_OUT; -- i2cctl &= ~E1000_I2C_CLK_OE_N; -- } -- wr32(E1000_I2CPARAMS, i2cctl); -- wrfl(); --} -- --/** -- * igb_get_i2c_clk - Gets the I2C SCL clock state -- * @data: pointer to hardware structure -- * -- * Gets the I2C clock state -- **/ --static int igb_get_i2c_clk(void *data) --{ -- struct igb_adapter *adapter = (struct igb_adapter *)data; -- struct e1000_hw *hw = &adapter->hw; -- s32 i2cctl = rd32(E1000_I2CPARAMS); -- -- return !!(i2cctl & E1000_I2C_CLK_IN); -+ igb_e1000_write_vfta(hw, index, vfta); -+ adapter->shadow_vfta[index] = vfta; - } - --static const struct i2c_algo_bit_data igb_i2c_algo = { -- .setsda = igb_set_i2c_data, -- .setscl = igb_set_i2c_clk, -- .getsda = igb_get_i2c_data, -- .getscl = igb_get_i2c_clk, -- .udelay = 5, -- .timeout = 20, --}; -- --/** -- * igb_get_hw_dev - return device -- * @hw: pointer to hardware structure -- * -- * used by hardware layer to print debugging information -- **/ --struct net_device *igb_get_hw_dev(struct e1000_hw *hw) --{ -- struct igb_adapter *adapter = hw->back; -- return adapter->netdev; --} -+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; -+module_param(debug, int, 0); -+MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)"); - - /** -- * igb_init_module - Driver Registration Routine -+ * igb_init_module - Driver Registration Routine - * -- * igb_init_module is the first routine called when the driver is -- * loaded. All it does is register with the PCI subsystem. -+ * igb_init_module is the first routine called when the driver is -+ * loaded. All it does is register with the PCI subsystem. - **/ - static int __init igb_init_module(void) - { -@@ -674,76 +330,89 @@ - - pr_info("%s - version %s\n", - igb_driver_string, igb_driver_version); -+ - pr_info("%s\n", igb_copyright); -+#ifdef IGB_HWMON -+/* only use IGB_PROCFS if IGB_HWMON is not defined */ -+#else -+#ifdef IGB_PROCFS -+ if (igb_procfs_topdir_init()) -+ pr_info("Procfs failed to initialize topdir\n"); -+#endif /* IGB_PROCFS */ -+#endif /* IGB_HWMON */ - --#ifdef CONFIG_IGB_DCA -+#ifdef IGB_DCA - dca_register_notify(&dca_notifier); - #endif - ret = pci_register_driver(&igb_driver); -+#ifdef USE_REBOOT_NOTIFIER -+ if (ret >= 0) -+ register_reboot_notifier(&igb_notifier_reboot); -+#endif - return ret; - } - - module_init(igb_init_module); - - /** -- * igb_exit_module - Driver Exit Cleanup Routine -+ * igb_exit_module - Driver Exit Cleanup Routine - * -- * igb_exit_module is called just before the driver is removed -- * from memory. -+ * igb_exit_module is called just before the driver is removed -+ * from memory. 
- **/ - static void __exit igb_exit_module(void) - { --#ifdef CONFIG_IGB_DCA -+#ifdef IGB_DCA - dca_unregister_notify(&dca_notifier); - #endif -+#ifdef USE_REBOOT_NOTIFIER -+ unregister_reboot_notifier(&igb_notifier_reboot); -+#endif - pci_unregister_driver(&igb_driver); -+ -+#ifdef IGB_HWMON -+/* only compile IGB_PROCFS if IGB_HWMON is not defined */ -+#else -+#ifdef IGB_PROCFS -+ igb_procfs_topdir_exit(); -+#endif /* IGB_PROCFS */ -+#endif /* IGB_HWMON */ - } - - module_exit(igb_exit_module); - - #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) - /** -- * igb_cache_ring_register - Descriptor ring to register mapping -- * @adapter: board private structure to initialize -+ * igb_cache_ring_register - Descriptor ring to register mapping -+ * @adapter: board private structure to initialize - * -- * Once we know the feature-set enabled for the device, we'll cache -- * the register offset the descriptor ring is assigned to. -+ * Once we know the feature-set enabled for the device, we'll cache -+ * the register offset the descriptor ring is assigned to. - **/ - static void igb_cache_ring_register(struct igb_adapter *adapter) - { - int i = 0, j = 0; - u32 rbase_offset = adapter->vfs_allocated_count; - -- switch (adapter->hw.mac.type) { -- case e1000_82576: -+ if (adapter->hw.mac.type == e1000_82576) { - /* The queues are allocated for virtualization such that VF 0 - * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. - * In order to avoid collision we start at the first free queue - * and continue consuming queues in the same sequence - */ -- if (adapter->vfs_allocated_count) { -+ if ((adapter->rss_queues > 1) && adapter->vmdq_pools) { - for (; i < adapter->rss_queues; i++) - adapter->rx_ring[i]->reg_idx = rbase_offset + -- Q_IDX_82576(i); -+ Q_IDX_82576(i); - } -- /* Fall through */ -- case e1000_82575: -- case e1000_82580: -- case e1000_i350: -- case e1000_i354: -- case e1000_i210: -- case e1000_i211: -- /* Fall through */ -- default: -- for (; i < adapter->num_rx_queues; i++) -- adapter->rx_ring[i]->reg_idx = rbase_offset + i; -- for (; j < adapter->num_tx_queues; j++) -- adapter->tx_ring[j]->reg_idx = rbase_offset + j; -- break; - } -+ for (; i < adapter->num_rx_queues; i++) -+ adapter->rx_ring[i]->reg_idx = rbase_offset + i; -+ for (; j < adapter->num_tx_queues; j++) -+ adapter->tx_ring[j]->reg_idx = rbase_offset + j; - } - --u32 igb_rd32(struct e1000_hw *hw, u32 reg) -+u32 e1000_read_reg(struct e1000_hw *hw, u32 reg) - { - struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); - u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); -@@ -757,6 +426,7 @@ - /* reads should not return all F's */ - if (!(~value) && (!reg || !(~readl(hw_addr)))) { - struct net_device *netdev = igb->netdev; -+ - hw->hw_addr = NULL; - netif_device_detach(netdev); - netdev_err(netdev, "PCIe link lost, device now detached\n"); -@@ -765,6 +435,42 @@ - return value; - } - -+static void igb_configure_lli(struct igb_adapter *adapter) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ u16 port; -+ -+ /* LLI should only be enabled for MSI-X or MSI interrupts */ -+ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI)) -+ return; -+ -+ if (adapter->lli_port) { -+ /* use filter 0 for port */ -+ port = htons((u16)adapter->lli_port); -+ E1000_WRITE_REG(hw, E1000_IMIR(0), -+ (port | E1000_IMIR_PORT_IM_EN)); -+ E1000_WRITE_REG(hw, E1000_IMIREXT(0), -+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); -+ } -+ -+ if (adapter->flags & IGB_FLAG_LLI_PUSH) { -+ /* use filter 1 for push flag */ -+ 
E1000_WRITE_REG(hw, E1000_IMIR(1), -+ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); -+ E1000_WRITE_REG(hw, E1000_IMIREXT(1), -+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH)); -+ } -+ -+ if (adapter->lli_size) { -+ /* use filter 2 for size */ -+ E1000_WRITE_REG(hw, E1000_IMIR(2), -+ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN)); -+ E1000_WRITE_REG(hw, E1000_IMIREXT(2), -+ (adapter->lli_size | E1000_IMIREXT_CTRL_BP)); -+ } -+ -+} -+ - /** - * igb_write_ivar - configure ivar for given MSI-X vector - * @hw: pointer to the HW structure -@@ -780,7 +486,7 @@ - static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, - int index, int offset) - { -- u32 ivar = array_rd32(E1000_IVAR0, index); -+ u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); - - /* clear any bits that are currently set */ - ivar &= ~((u32)0xFF << offset); -@@ -788,7 +494,7 @@ - /* write vector and valid bit */ - ivar |= (msix_vector | E1000_IVAR_VALID) << offset; - -- array_wr32(E1000_IVAR0, index, ivar); -+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); - } - - #define IGB_N0_QUEUE -1 -@@ -816,13 +522,14 @@ - msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; - if (tx_queue > IGB_N0_QUEUE) - msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; -- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) -+ if (!adapter->msix_entries && msix_vector == 0) - msixbm |= E1000_EIMS_OTHER; -- array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); -+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm); - q_vector->eims_value = msixbm; - break; - case e1000_82576: -- /* 82576 uses a table that essentially consists of 2 columns -+ /* -+ * 82576 uses a table that essentially consists of 2 columns - * with 8 rows. The ordering is column-major so we use the - * lower 3 bits as the row index, and the 4th bit as the - * column offset. -@@ -842,7 +549,8 @@ - case e1000_i354: - case e1000_i210: - case e1000_i211: -- /* On 82580 and newer adapters the scheme is similar to 82576 -+ /* -+ * On 82580 and newer adapters the scheme is similar to 82576 - * however instead of ordering column-major we have things - * ordered row-major. So we traverse the table by using - * bit 0 as the column offset, and the remaining bits as the -@@ -871,11 +579,10 @@ - } - - /** -- * igb_configure_msix - Configure MSI-X hardware -- * @adapter: board private structure to initialize -+ * igb_configure_msix - Configure MSI-X hardware - * -- * igb_configure_msix sets up the hardware to properly -- * generate MSI-X interrupts. -+ * igb_configure_msix sets up the hardware to properly -+ * generate MSI-X interrupts. - **/ - static void igb_configure_msix(struct igb_adapter *adapter) - { -@@ -888,7 +595,7 @@ - /* set vector for other causes, i.e. link changes */ - switch (hw->mac.type) { - case e1000_82575: -- tmp = rd32(E1000_CTRL_EXT); -+ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); - /* enable MSI-X PBA support*/ - tmp |= E1000_CTRL_EXT_PBA_CLR; - -@@ -896,10 +603,11 @@ - tmp |= E1000_CTRL_EXT_EIAME; - tmp |= E1000_CTRL_EXT_IRCA; - -- wr32(E1000_CTRL_EXT, tmp); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); - - /* enable msix_other interrupt */ -- array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); -+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++, -+ E1000_EIMS_OTHER); - adapter->eims_other = E1000_EIMS_OTHER; - - break; -@@ -913,15 +621,15 @@ - /* Turn on MSI-X capability first, or our settings - * won't stick. And it will take days to debug. 
- */ -- wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | -- E1000_GPIE_PBA | E1000_GPIE_EIAME | -- E1000_GPIE_NSICR); -+ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | -+ E1000_GPIE_PBA | E1000_GPIE_EIAME | -+ E1000_GPIE_NSICR); - - /* enable msix_other interrupt */ - adapter->eims_other = 1 << vector; - tmp = (vector++ | E1000_IVAR_VALID) << 8; - -- wr32(E1000_IVAR_MISC, tmp); -+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp); - break; - default: - /* do nothing, since nothing else supports MSI-X */ -@@ -933,24 +641,22 @@ - for (i = 0; i < adapter->num_q_vectors; i++) - igb_assign_vector(adapter->q_vector[i], vector++); - -- wrfl(); -+ E1000_WRITE_FLUSH(hw); - } - - /** -- * igb_request_msix - Initialize MSI-X interrupts -- * @adapter: board private structure to initialize -+ * igb_request_msix - Initialize MSI-X interrupts - * -- * igb_request_msix allocates MSI-X vectors and requests interrupts from the -- * kernel. -+ * igb_request_msix allocates MSI-X vectors and requests interrupts from the -+ * kernel. - **/ - static int igb_request_msix(struct igb_adapter *adapter) - { - struct net_device *netdev = adapter->netdev; -- struct e1000_hw *hw = &adapter->hw; - int i, err = 0, vector = 0, free_vector = 0; - - err = request_irq(adapter->msix_entries[vector].vector, -- igb_msix_other, 0, netdev->name, adapter); -+ &igb_msix_other, 0, netdev->name, adapter); - if (err) - goto err_out; - -@@ -959,7 +665,7 @@ - - vector++; - -- q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); -+ q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); - - if (q_vector->rx.ring && q_vector->tx.ring) - sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, -@@ -997,11 +703,11 @@ - } - - /** -- * igb_free_q_vector - Free memory allocated for specific interrupt vector -- * @adapter: board private structure to initialize -- * @v_idx: Index of vector to be freed -+ * igb_free_q_vector - Free memory allocated for specific interrupt vector -+ * @adapter: board private structure to initialize -+ * @v_idx: Index of vector to be freed - * -- * This function frees the memory allocated to the q_vector. -+ * This function frees the memory allocated to the q_vector. - **/ - static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) - { -@@ -1013,6 +719,10 @@ - * we must wait a grace period before freeing it. - */ - kfree_rcu(q_vector, rcu); -+ -+#ifndef IGB_NO_LRO -+ __skb_queue_purge(&q_vector->lrolist.active); -+#endif - } - - /** -@@ -1027,8 +737,8 @@ - { - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; - -- /* Coming from igb_set_interrupt_capability, the vectors are not yet -- * allocated. So, q_vector is NULL so we should stop here. 
-+ /* if we're coming from igb_set_interrupt_capability, the vectors are -+ * not yet allocated - */ - if (!q_vector) - return; -@@ -1047,22 +757,25 @@ - { - int v_idx = adapter->num_q_vectors; - -- if (adapter->flags & IGB_FLAG_HAS_MSIX) -+ if (adapter->msix_entries) { - pci_disable_msix(adapter->pdev); -- else if (adapter->flags & IGB_FLAG_HAS_MSI) -+ kfree(adapter->msix_entries); -+ adapter->msix_entries = NULL; -+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) { - pci_disable_msi(adapter->pdev); -+ } - - while (v_idx--) - igb_reset_q_vector(adapter, v_idx); - } - - /** -- * igb_free_q_vectors - Free memory allocated for interrupt vectors -- * @adapter: board private structure to initialize -+ * igb_free_q_vectors - Free memory allocated for interrupt vectors -+ * @adapter: board private structure to initialize - * -- * This function frees the memory allocated to the q_vectors. In addition if -- * NAPI is enabled it will delete any references to the NAPI struct prior -- * to freeing the q_vector. -+ * This function frees the memory allocated to the q_vectors. In addition if -+ * NAPI is enabled it will delete any references to the NAPI struct prior -+ * to freeing the q_vector. - **/ - static void igb_free_q_vectors(struct igb_adapter *adapter) - { -@@ -1079,11 +792,10 @@ - } - - /** -- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts -- * @adapter: board private structure to initialize -+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts - * -- * This function resets the device so that it has 0 Rx queues, Tx queues, and -- * MSI-X interrupts allocated. -+ * This function resets the device so that it has 0 rx queues, tx queues, and -+ * MSI-X interrupts allocated. - */ - static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) - { -@@ -1092,108 +804,306 @@ - } - - /** -- * igb_set_interrupt_capability - set MSI or MSI-X if supported -- * @adapter: board private structure to initialize -- * @msix: boolean value of MSIX capability -+ * igb_process_mdd_event -+ * @adapter - board private structure - * -- * Attempt to configure interrupts using the best available -- * capabilities of the hardware and kernel. -- **/ --static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) -+ * Identify a malicious VF, disable the VF TX/RX queues and log a message. -+ */ -+static void igb_process_mdd_event(struct igb_adapter *adapter) - { -- int err; -- int numvecs, i; -- -- if (!msix) -- goto msi_only; -- adapter->flags |= IGB_FLAG_HAS_MSIX; -- -- /* Number of supported queues. 
*/ -- adapter->num_rx_queues = adapter->rss_queues; -- if (adapter->vfs_allocated_count) -- adapter->num_tx_queues = 1; -- else -- adapter->num_tx_queues = adapter->rss_queues; -+ struct e1000_hw *hw = &adapter->hw; -+ u32 lvmmc, vfte, vfre, mdfb; -+ u8 vf_queue; - -- /* start with one vector for every Rx queue */ -- numvecs = adapter->num_rx_queues; -+ lvmmc = E1000_READ_REG(hw, E1000_LVMMC); -+ vf_queue = lvmmc >> 29; - -- /* if Tx handler is separate add 1 for every Tx queue */ -- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) -- numvecs += adapter->num_tx_queues; -- -- /* store the number of vectors reserved for queues */ -- adapter->num_q_vectors = numvecs; -- -- /* add 1 vector for link status interrupts */ -- numvecs++; -- for (i = 0; i < numvecs; i++) -- adapter->msix_entries[i].entry = i; -- -- err = pci_enable_msix_range(adapter->pdev, -- adapter->msix_entries, -- numvecs, -- numvecs); -- if (err > 0) -+ /* VF index cannot be bigger or equal to VFs allocated */ -+ if (vf_queue >= adapter->vfs_allocated_count) - return; - -- igb_reset_interrupt_capability(adapter); -+ netdev_info(adapter->netdev, -+ "VF %d misbehaved. VF queues are disabled. VM misbehavior code is 0x%x\n", -+ vf_queue, lvmmc); - -- /* If we can't do MSI-X, try MSI */ --msi_only: -- adapter->flags &= ~IGB_FLAG_HAS_MSIX; --#ifdef CONFIG_PCI_IOV -- /* disable SR-IOV for non MSI-X configurations */ -- if (adapter->vf_data) { -- struct e1000_hw *hw = &adapter->hw; -- /* disable iov and allow time for transactions to clear */ -- pci_disable_sriov(adapter->pdev); -- msleep(500); -+ /* Disable VFTE and VFRE related bits */ -+ vfte = E1000_READ_REG(hw, E1000_VFTE); -+ vfte &= ~(1 << vf_queue); -+ E1000_WRITE_REG(hw, E1000_VFTE, vfte); - -- kfree(adapter->vf_data); -- adapter->vf_data = NULL; -- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); -- wrfl(); -- msleep(100); -- dev_info(&adapter->pdev->dev, "IOV Disabled\n"); -- } --#endif -- adapter->vfs_allocated_count = 0; -- adapter->rss_queues = 1; -- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; -- adapter->num_rx_queues = 1; -- adapter->num_tx_queues = 1; -- adapter->num_q_vectors = 1; -- if (!pci_enable_msi(adapter->pdev)) -- adapter->flags |= IGB_FLAG_HAS_MSI; --} -+ vfre = E1000_READ_REG(hw, E1000_VFRE); -+ vfre &= ~(1 << vf_queue); -+ E1000_WRITE_REG(hw, E1000_VFRE, vfre); - --static void igb_add_ring(struct igb_ring *ring, -- struct igb_ring_container *head) --{ -- head->ring = ring; -- head->count++; -+ /* Disable MDFB related bit. Clear on write */ -+ mdfb = E1000_READ_REG(hw, E1000_MDFB); -+ mdfb |= (1 << vf_queue); -+ E1000_WRITE_REG(hw, E1000_MDFB, mdfb); -+ -+ /* Reset the specific VF */ -+ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST); - } - - /** -- * igb_alloc_q_vector - Allocate memory for a single interrupt vector -- * @adapter: board private structure to initialize -- * @v_count: q_vectors allocated on adapter, used for ring interleaving -- * @v_idx: index of vector in adapter struct -- * @txr_count: total number of Tx rings to allocate -- * @txr_idx: index of first Tx ring to allocate -- * @rxr_count: total number of Rx rings to allocate -- * @rxr_idx: index of first Rx ring to allocate -+ * igb_disable_mdd -+ * @adapter - board private structure - * -- * We allocate one q_vector. If allocation fails we return -ENOMEM. 
-+ * Disable MDD behavior in the HW - **/ --static int igb_alloc_q_vector(struct igb_adapter *adapter, -- int v_count, int v_idx, -- int txr_count, int txr_idx, -- int rxr_count, int rxr_idx) -+static void igb_disable_mdd(struct igb_adapter *adapter) - { -- struct igb_q_vector *q_vector; -- struct igb_ring *ring; -+ struct e1000_hw *hw = &adapter->hw; -+ u32 reg; -+ -+ if ((hw->mac.type != e1000_i350) && -+ (hw->mac.type != e1000_i354)) -+ return; -+ -+ reg = E1000_READ_REG(hw, E1000_DTXCTL); -+ reg &= (~E1000_DTXCTL_MDP_EN); -+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); -+} -+ -+/** -+ * igb_enable_mdd -+ * @adapter - board private structure -+ * -+ * Enable the HW to detect malicious driver and sends an interrupt to -+ * the driver. -+ **/ -+static void igb_enable_mdd(struct igb_adapter *adapter) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ u32 reg; -+ -+ /* Only available on i350 device */ -+ if (hw->mac.type != e1000_i350) -+ return; -+ -+ reg = E1000_READ_REG(hw, E1000_DTXCTL); -+ reg |= E1000_DTXCTL_MDP_EN; -+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); -+} -+ -+/** -+ * igb_reset_sriov_capability - disable SR-IOV if enabled -+ * -+ * Attempt to disable single root IO virtualization capabilites present in the -+ * kernel. -+ **/ -+static void igb_reset_sriov_capability(struct igb_adapter *adapter) -+{ -+ struct pci_dev *pdev = adapter->pdev; -+ struct e1000_hw *hw = &adapter->hw; -+ -+ /* reclaim resources allocated to VFs */ -+ if (adapter->vf_data) { -+ if (!pci_vfs_assigned(pdev)) { -+ /* -+ * disable iov and allow time for transactions to -+ * clear -+ */ -+ pci_disable_sriov(pdev); -+ msleep(500); -+ -+ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n"); -+ } else { -+ dev_info(pci_dev_to_dev(pdev), -+ "IOV Not Disabled\n VF(s) are assigned to guests!\n"); -+ } -+ /* Disable Malicious Driver Detection */ -+ igb_disable_mdd(adapter); -+ -+ /* free vf data storage */ -+ kfree(adapter->vf_data); -+ adapter->vf_data = NULL; -+ -+ /* switch rings back to PF ownership */ -+ E1000_WRITE_REG(hw, E1000_IOVCTL, -+ E1000_IOVCTL_REUSE_VFQ); -+ E1000_WRITE_FLUSH(hw); -+ msleep(100); -+ } -+ -+ adapter->vfs_allocated_count = 0; -+} -+ -+/** -+ * igb_set_sriov_capability - setup SR-IOV if supported -+ * -+ * Attempt to enable single root IO virtualization capabilites present in the -+ * kernel. 
-+ **/ -+static void igb_set_sriov_capability(struct igb_adapter *adapter) -+{ -+ struct pci_dev *pdev = adapter->pdev; -+ int old_vfs = 0; -+ int i; -+ -+ old_vfs = pci_num_vf(pdev); -+ if (old_vfs) { -+ dev_info(pci_dev_to_dev(pdev), -+ "%d pre-allocated VFs found - override max_vfs setting of %d\n", -+ old_vfs, adapter->vfs_allocated_count); -+ adapter->vfs_allocated_count = old_vfs; -+ } -+ /* no VFs requested, do nothing */ -+ if (!adapter->vfs_allocated_count) -+ return; -+ -+ /* allocate vf data storage */ -+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count, -+ sizeof(struct vf_data_storage), -+ GFP_KERNEL); -+ -+ if (adapter->vf_data) { -+ if (!old_vfs) { -+ if (pci_enable_sriov(pdev, -+ adapter->vfs_allocated_count)) -+ goto err_out; -+ dev_warn(pci_dev_to_dev(pdev), -+ "SR-IOV has been enabled: configure port VLANs to keep your VFs secure\n"); -+ } -+ for (i = 0; i < adapter->vfs_allocated_count; i++) -+ igb_vf_configure(adapter, i); -+ -+ switch (adapter->hw.mac.type) { -+ case e1000_82576: -+ case e1000_i350: -+ /* Enable VM to VM loopback by default */ -+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; -+ break; -+ default: -+ /* Currently no other hardware supports loopback */ -+ break; -+ } -+ -+ /* DMA Coalescing is not supported in IOV mode. */ -+ if (adapter->hw.mac.type >= e1000_i350) -+ adapter->dmac = IGB_DMAC_DISABLE; -+ if (adapter->hw.mac.type < e1000_i350) -+ adapter->flags |= IGB_FLAG_DETECT_BAD_DMA; -+ return; -+ -+ } -+ -+err_out: -+ kfree(adapter->vf_data); -+ adapter->vf_data = NULL; -+ adapter->vfs_allocated_count = 0; -+ dev_warn(pci_dev_to_dev(pdev), -+ "Failed to initialize SR-IOV virtualization\n"); -+} -+ -+/** -+ * igb_set_interrupt_capability - set MSI or MSI-X if supported -+ * -+ * Attempt to configure interrupts using the best available -+ * capabilities of the hardware and kernel. -+ **/ -+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) -+{ -+ struct pci_dev *pdev = adapter->pdev; -+ int err; -+ int numvecs, i; -+ -+ if (!msix) -+ adapter->int_mode = IGB_INT_MODE_MSI; -+ -+ /* Number of supported queues. */ -+ adapter->num_rx_queues = adapter->rss_queues; -+ -+ if (adapter->vmdq_pools > 1) -+ adapter->num_rx_queues += adapter->vmdq_pools - 1; -+ -+#ifdef HAVE_TX_MQ -+ if (adapter->vmdq_pools) -+ adapter->num_tx_queues = adapter->vmdq_pools; -+ else -+ adapter->num_tx_queues = adapter->num_rx_queues; -+#else -+ adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools); -+#endif -+ -+ switch (adapter->int_mode) { -+ case IGB_INT_MODE_MSIX: -+ /* start with one vector for every Tx/Rx queue */ -+ numvecs = max_t(int, adapter->num_tx_queues, -+ adapter->num_rx_queues); -+ -+ /* if tx handler is separate make it 1 for every queue */ -+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) -+ numvecs = adapter->num_tx_queues + -+ adapter->num_rx_queues; -+ -+ /* store the number of vectors reserved for queues */ -+ adapter->num_q_vectors = numvecs; -+ -+ /* add 1 vector for link status interrupts */ -+ numvecs++; -+ adapter->msix_entries = kcalloc(numvecs, -+ sizeof(struct msix_entry), -+ GFP_KERNEL); -+ if (adapter->msix_entries) { -+ for (i = 0; i < numvecs; i++) -+ adapter->msix_entries[i].entry = i; -+ -+ err = pci_enable_msix(pdev, -+ adapter->msix_entries, numvecs); -+ if (err == 0) -+ break; -+ } -+ /* MSI-X failed, so fall through and try MSI */ -+ dev_warn(pci_dev_to_dev(pdev), -+ "Failed to initialize MSI-X interrupts. 
Falling back to MSI interrupts.\n"); -+ igb_reset_interrupt_capability(adapter); -+ case IGB_INT_MODE_MSI: -+ if (!pci_enable_msi(pdev)) -+ adapter->flags |= IGB_FLAG_HAS_MSI; -+ else -+ dev_warn(pci_dev_to_dev(pdev), -+ "Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); -+ /* Fall through */ -+ case IGB_INT_MODE_LEGACY: -+ /* disable advanced features and set number of queues to 1 */ -+ igb_reset_sriov_capability(adapter); -+ adapter->vmdq_pools = 0; -+ adapter->rss_queues = 1; -+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS; -+ adapter->num_rx_queues = 1; -+ adapter->num_tx_queues = 1; -+ adapter->num_q_vectors = 1; -+ /* Don't do anything; this is system default */ -+ break; -+ } -+} -+ -+static void igb_add_ring(struct igb_ring *ring, -+ struct igb_ring_container *head) -+{ -+ head->ring = ring; -+ head->count++; -+} -+ -+/** -+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector -+ * @adapter: board private structure to initialize -+ * @v_count: q_vectors allocated on adapter, used for ring interleaving -+ * @v_idx: index of vector in adapter struct -+ * @txr_count: total number of Tx rings to allocate -+ * @txr_idx: index of first Tx ring to allocate -+ * @rxr_count: total number of Rx rings to allocate -+ * @rxr_idx: index of first Rx ring to allocate -+ * -+ * We allocate one q_vector. If allocation fails we return -ENOMEM. -+ **/ -+static int igb_alloc_q_vector(struct igb_adapter *adapter, -+ unsigned int v_count, unsigned int v_idx, -+ unsigned int txr_count, unsigned int txr_idx, -+ unsigned int rxr_count, unsigned int rxr_idx) -+{ -+ struct igb_q_vector *q_vector; -+ struct igb_ring *ring; - int ring_count, size; - - /* igb only supports 1 Tx and/or 1 Rx queue per vector */ -@@ -1206,17 +1116,18 @@ - - /* allocate q_vector and rings */ - q_vector = adapter->q_vector[v_idx]; -- if (!q_vector) { -- q_vector = kzalloc(size, GFP_KERNEL); -- } else if (size > ksize(q_vector)) { -- kfree_rcu(q_vector, rcu); -+ if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); -- } else { -+ else - memset(q_vector, 0, size); -- } - if (!q_vector) - return -ENOMEM; - -+#ifndef IGB_NO_LRO -+ /* initialize LRO */ -+ __skb_queue_head_init(&q_vector->lrolist.active); -+ -+#endif - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); -@@ -1229,7 +1140,7 @@ - q_vector->tx.work_limit = adapter->tx_work_limit; - - /* initialize ITR configuration */ -- q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); -+ q_vector->itr_register = adapter->io_addr + E1000_EITR(0); - q_vector->itr_val = IGB_START_ITR; - - /* initialize pointer to rings */ -@@ -1265,9 +1176,6 @@ - ring->count = adapter->tx_ring_count; - ring->queue_index = txr_idx; - -- u64_stats_init(&ring->tx_syncp); -- u64_stats_init(&ring->tx_syncp2); -- - /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; - -@@ -1286,22 +1194,23 @@ - /* update q_vector Rx values */ - igb_add_ring(ring, &q_vector->rx); - -+#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES) -+ /* enable rx checksum */ -+ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags); -+ -+#endif - /* set flag indicating ring supports SCTP checksum offload */ - if (adapter->hw.mac.type >= e1000_82576) - set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); - -- /* On i350, i354, i210, and i211, loopback VLAN packets -- * have the tag byte-swapped. 
-- */ -- if (adapter->hw.mac.type >= e1000_i350) -+ if ((adapter->hw.mac.type == e1000_i350) || -+ (adapter->hw.mac.type == e1000_i354)) - set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); - - /* apply Rx specific ring traits */ - ring->count = adapter->rx_ring_count; - ring->queue_index = rxr_idx; - -- u64_stats_init(&ring->rx_syncp); -- - /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; - } -@@ -1309,13 +1218,12 @@ - return 0; - } - -- - /** -- * igb_alloc_q_vectors - Allocate memory for interrupt vectors -- * @adapter: board private structure to initialize -+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors -+ * @adapter: board private structure to initialize - * -- * We allocate one q_vector per queue interrupt. If allocation fails we -- * return -ENOMEM. -+ * We allocate one q_vector per queue interrupt. If allocation fails we -+ * return -ENOMEM. - **/ - static int igb_alloc_q_vectors(struct igb_adapter *adapter) - { -@@ -1370,11 +1278,9 @@ - } - - /** -- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors -- * @adapter: board private structure to initialize -- * @msix: boolean value of MSIX capability -+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors - * -- * This function initializes the interrupts and allocates all of the queues. -+ * This function initializes the interrupts and allocates all of the queues. - **/ - static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) - { -@@ -1385,7 +1291,7 @@ - - err = igb_alloc_q_vectors(adapter); - if (err) { -- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); -+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n"); - goto err_alloc_q_vectors; - } - -@@ -1399,11 +1305,10 @@ - } - - /** -- * igb_request_irq - initialize interrupts -- * @adapter: board private structure to initialize -+ * igb_request_irq - initialize interrupts - * -- * Attempts to configure interrupts using the best available -- * capabilities of the hardware and kernel. -+ * Attempts to configure interrupts using the best available -+ * capabilities of the hardware and kernel. 
- **/ - static int igb_request_irq(struct igb_adapter *adapter) - { -@@ -1411,7 +1316,7 @@ - struct pci_dev *pdev = adapter->pdev; - int err = 0; - -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -+ if (adapter->msix_entries) { - err = igb_request_msix(adapter); - if (!err) - goto request_done; -@@ -1420,10 +1325,10 @@ - igb_free_all_rx_resources(adapter); - - igb_clear_interrupt_scheme(adapter); -+ igb_reset_sriov_capability(adapter); - err = igb_init_interrupt_scheme(adapter, false); - if (err) - goto request_done; -- - igb_setup_all_tx_resources(adapter); - igb_setup_all_rx_resources(adapter); - igb_configure(adapter); -@@ -1432,7 +1337,7 @@ - igb_assign_vector(adapter->q_vector[0], 0); - - if (adapter->flags & IGB_FLAG_HAS_MSI) { -- err = request_irq(pdev->irq, igb_intr_msi, 0, -+ err = request_irq(pdev->irq, &igb_intr_msi, 0, - netdev->name, adapter); - if (!err) - goto request_done; -@@ -1442,11 +1347,11 @@ - adapter->flags &= ~IGB_FLAG_HAS_MSI; - } - -- err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, -+ err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED, - netdev->name, adapter); - - if (err) -- dev_err(&pdev->dev, "Error %d getting interrupt\n", -+ dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n", - err); - - request_done: -@@ -1455,7 +1360,7 @@ - - static void igb_free_irq(struct igb_adapter *adapter) - { -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -+ if (adapter->msix_entries) { - int vector = 0, i; - - free_irq(adapter->msix_entries[vector++].vector, adapter); -@@ -1469,64 +1374,76 @@ - } - - /** -- * igb_irq_disable - Mask off interrupt generation on the NIC -- * @adapter: board private structure -+ * igb_irq_disable - Mask off interrupt generation on the NIC -+ * @adapter: board private structure - **/ - static void igb_irq_disable(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - -- /* we need to be careful when disabling interrupts. The VFs are also -+ /* -+ * we need to be careful when disabling interrupts. 
The VFs are also - * mapped into these registers and so clearing the bits can cause - * issues on the VF drivers so we only need to clear what we set - */ -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -- u32 regval = rd32(E1000_EIAM); -+ if (adapter->msix_entries) { -+ u32 regval = E1000_READ_REG(hw, E1000_EIAM); - -- wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); -- wr32(E1000_EIMC, adapter->eims_enable_mask); -- regval = rd32(E1000_EIAC); -- wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); -- } -+ E1000_WRITE_REG(hw, E1000_EIAM, regval -+ & ~adapter->eims_enable_mask); -+ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask); -+ regval = E1000_READ_REG(hw, E1000_EIAC); -+ E1000_WRITE_REG(hw, E1000_EIAC, regval -+ & ~adapter->eims_enable_mask); -+ } -+ -+ E1000_WRITE_REG(hw, E1000_IAM, 0); -+ E1000_WRITE_REG(hw, E1000_IMC, ~0); -+ E1000_WRITE_FLUSH(hw); - -- wr32(E1000_IAM, 0); -- wr32(E1000_IMC, ~0); -- wrfl(); -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -- int i; -+ if (adapter->msix_entries) { -+ int vector = 0, i; -+ -+ synchronize_irq(adapter->msix_entries[vector++].vector); - - for (i = 0; i < adapter->num_q_vectors; i++) -- synchronize_irq(adapter->msix_entries[i].vector); -+ synchronize_irq(adapter->msix_entries[vector++].vector); - } else { - synchronize_irq(adapter->pdev->irq); - } - } - - /** -- * igb_irq_enable - Enable default interrupt generation settings -- * @adapter: board private structure -+ * igb_irq_enable - Enable default interrupt generation settings -+ * @adapter: board private structure - **/ - static void igb_irq_enable(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -+ if (adapter->msix_entries) { - u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; -- u32 regval = rd32(E1000_EIAC); -+ u32 regval = E1000_READ_REG(hw, E1000_EIAC); - -- wr32(E1000_EIAC, regval | adapter->eims_enable_mask); -- regval = rd32(E1000_EIAM); -- wr32(E1000_EIAM, regval | adapter->eims_enable_mask); -- wr32(E1000_EIMS, adapter->eims_enable_mask); -+ E1000_WRITE_REG(hw, E1000_EIAC, regval -+ | adapter->eims_enable_mask); -+ regval = E1000_READ_REG(hw, E1000_EIAM); -+ E1000_WRITE_REG(hw, E1000_EIAM, regval -+ | adapter->eims_enable_mask); -+ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask); - if (adapter->vfs_allocated_count) { -- wr32(E1000_MBVFIMR, 0xFF); -+ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF); - ims |= E1000_IMS_VMMB; -+ if (adapter->mdd) -+ if ((adapter->hw.mac.type == e1000_i350) || -+ (adapter->hw.mac.type == e1000_i354)) -+ ims |= E1000_IMS_MDDET; - } -- wr32(E1000_IMS, ims); -+ E1000_WRITE_REG(hw, E1000_IMS, ims); - } else { -- wr32(E1000_IMS, IMS_ENABLE_MASK | -+ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK | - E1000_IMS_DRSTA); -- wr32(E1000_IAM, IMS_ENABLE_MASK | -+ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK | - E1000_IMS_DRSTA); - } - } -@@ -1539,7 +1456,7 @@ - - if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { - /* add VID to filter table */ -- igb_vfta_set(hw, vid, true); -+ igb_vfta_set(adapter, vid, TRUE); - adapter->mng_vlan_id = vid; - } else { - adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; -@@ -1547,19 +1464,24 @@ - - if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && - (vid != old_vid) && -+#ifdef HAVE_VLAN_RX_REGISTER -+ !vlan_group_get_device(adapter->vlgrp, old_vid)) { -+#else - !test_bit(old_vid, adapter->active_vlans)) { -+#endif - /* remove VID from filter table */ -- igb_vfta_set(hw, old_vid, false); -+ igb_vfta_set(adapter, old_vid, 
FALSE); - } - } - - /** -- * igb_release_hw_control - release control of the h/w to f/w -- * @adapter: address of board private structure -+ * igb_release_hw_control - release control of the h/w to f/w -+ * @adapter: address of board private structure -+ * -+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. -+ * For ASF and Pass Through versions of f/w this means that the -+ * driver is no longer loaded. - * -- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. -- * For ASF and Pass Through versions of f/w this means that the -- * driver is no longer loaded. - **/ - static void igb_release_hw_control(struct igb_adapter *adapter) - { -@@ -1567,18 +1489,19 @@ - u32 ctrl_ext; - - /* Let firmware take over control of h/w */ -- ctrl_ext = rd32(E1000_CTRL_EXT); -- wr32(E1000_CTRL_EXT, -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); - } - - /** -- * igb_get_hw_control - get control of the h/w from f/w -- * @adapter: address of board private structure -+ * igb_get_hw_control - get control of the h/w from f/w -+ * @adapter: address of board private structure -+ * -+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. -+ * For ASF and Pass Through versions of f/w this means that -+ * the driver is loaded. - * -- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. -- * For ASF and Pass Through versions of f/w this means that -- * the driver is loaded. - **/ - static void igb_get_hw_control(struct igb_adapter *adapter) - { -@@ -1586,14 +1509,14 @@ - u32 ctrl_ext; - - /* Let firmware know the driver has taken over */ -- ctrl_ext = rd32(E1000_CTRL_EXT); -- wr32(E1000_CTRL_EXT, -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); - } - - /** -- * igb_configure - configure the hardware for RX and TX -- * @adapter: private board structure -+ * igb_configure - configure the hardware for RX and TX -+ * @adapter: private board structure - **/ - static void igb_configure(struct igb_adapter *adapter) - { -@@ -1612,7 +1535,13 @@ - igb_configure_tx(adapter); - igb_configure_rx(adapter); - -- igb_rx_fifo_flush_82575(&adapter->hw); -+ e1000_rx_fifo_flush_82575(&adapter->hw); -+#ifdef CONFIG_NETDEVICES_MULTIQUEUE -+ if (adapter->num_tx_queues > 1) -+ netdev->features |= NETIF_F_MULTI_QUEUE; -+ else -+ netdev->features &= ~NETIF_F_MULTI_QUEUE; -+#endif - - /* call igb_desc_unused which always leaves - * at least 1 descriptor unused to make sure -@@ -1625,45 +1554,42 @@ - } - - /** -- * igb_power_up_link - Power up the phy/serdes link -- * @adapter: address of board private structure -+ * igb_power_up_link - Power up the phy/serdes link -+ * @adapter: address of board private structure - **/ - void igb_power_up_link(struct igb_adapter *adapter) - { -- igb_reset_phy(&adapter->hw); -+ igb_e1000_phy_hw_reset(&adapter->hw); - - if (adapter->hw.phy.media_type == e1000_media_type_copper) -- igb_power_up_phy_copper(&adapter->hw); -+ igb_e1000_power_up_phy(&adapter->hw); - else -- igb_power_up_serdes_link_82575(&adapter->hw); -- -- igb_setup_link(&adapter->hw); -+ e1000_power_up_fiber_serdes_link(&adapter->hw); - } - - /** -- * igb_power_down_link - Power down the phy/serdes link -- * @adapter: address of board private structure -+ * igb_power_down_link - Power down the phy/serdes link -+ * @adapter: address of board private structure - */ - static void igb_power_down_link(struct igb_adapter *adapter) - { - if (adapter->hw.phy.media_type == e1000_media_type_copper) -- 
igb_power_down_phy_copper_82575(&adapter->hw); -+ e1000_power_down_phy(&adapter->hw); - else -- igb_shutdown_serdes_link_82575(&adapter->hw); -+ e1000_shutdown_fiber_serdes_link(&adapter->hw); - } - --/** -- * Detect and switch function for Media Auto Sense -- * @adapter: address of the board private structure -- **/ -+/* Detect and switch function for Media Auto Sense */ - static void igb_check_swap_media(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_ext, connsw; - bool swap_now = false; -+ bool link; - -- ctrl_ext = rd32(E1000_CTRL_EXT); -- connsw = rd32(E1000_CONNSW); -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ connsw = E1000_READ_REG(hw, E1000_CONNSW); -+ link = igb_has_link(adapter); - - /* need to live swap if current media is copper and we have fiber/serdes - * to go to. -@@ -1674,10 +1600,10 @@ - swap_now = true; - } else if (!(connsw & E1000_CONNSW_SERDESD)) { - /* copper signal takes time to appear */ -- if (adapter->copper_tries < 4) { -+ if (adapter->copper_tries < 3) { - adapter->copper_tries++; - connsw |= E1000_CONNSW_AUTOSENSE_CONF; -- wr32(E1000_CONNSW, connsw); -+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); - return; - } else { - adapter->copper_tries = 0; -@@ -1685,143 +1611,263 @@ - (!(connsw & E1000_CONNSW_PHY_PDN))) { - swap_now = true; - connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; -- wr32(E1000_CONNSW, connsw); -+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); - } - } - } - -- if (!swap_now) -- return; -- -- switch (hw->phy.media_type) { -- case e1000_media_type_copper: -- netdev_info(adapter->netdev, -- "MAS: changing media to fiber/serdes\n"); -- ctrl_ext |= -- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; -- adapter->flags |= IGB_FLAG_MEDIA_RESET; -- adapter->copper_tries = 0; -- break; -- case e1000_media_type_internal_serdes: -- case e1000_media_type_fiber: -- netdev_info(adapter->netdev, -- "MAS: changing media to copper\n"); -- ctrl_ext &= -- ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; -- adapter->flags |= IGB_FLAG_MEDIA_RESET; -- break; -- default: -- /* shouldn't get here during regular operation */ -- netdev_err(adapter->netdev, -- "AMS: Invalid media type found, returning\n"); -- break; -+ if (swap_now) { -+ switch (hw->phy.media_type) { -+ case e1000_media_type_copper: -+ dev_info(pci_dev_to_dev(adapter->pdev), -+ "%s:MAS: changing media to fiber/serdes\n", -+ adapter->netdev->name); -+ ctrl_ext |= -+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; -+ adapter->flags |= IGB_FLAG_MEDIA_RESET; -+ adapter->copper_tries = 0; -+ break; -+ case e1000_media_type_internal_serdes: -+ case e1000_media_type_fiber: -+ dev_info(pci_dev_to_dev(adapter->pdev), -+ "%s:MAS: changing media to copper\n", -+ adapter->netdev->name); -+ ctrl_ext &= -+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; -+ adapter->flags |= IGB_FLAG_MEDIA_RESET; -+ break; -+ default: -+ /* shouldn't get here during regular operation */ -+ dev_err(pci_dev_to_dev(adapter->pdev), -+ "%s:AMS: Invalid media type found, returning\n", -+ adapter->netdev->name); -+ break; -+ } -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); - } -- wr32(E1000_CTRL_EXT, ctrl_ext); - } - --/** -- * igb_up - Open the interface and prepare it to handle traffic -- * @adapter: board private structure -- **/ --int igb_up(struct igb_adapter *adapter) -+#ifdef HAVE_I2C_SUPPORT -+/* igb_get_i2c_data - Reads the I2C SDA data bit -+ * @hw: pointer to hardware structure -+ * @i2cctl: Current value of I2CCTL register -+ * -+ * Returns the I2C data bit value -+ */ -+static int igb_get_i2c_data(void *data) - { -+ struct igb_adapter 
*adapter = (struct igb_adapter *)data; - struct e1000_hw *hw = &adapter->hw; -- int i; -- -- /* hardware has been reset, we need to reload some things */ -- igb_configure(adapter); -+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- clear_bit(__IGB_DOWN, &adapter->state); -+ return !!(i2cctl & E1000_I2C_DATA_IN); -+} - -- for (i = 0; i < adapter->num_q_vectors; i++) -- napi_enable(&(adapter->q_vector[i]->napi)); -+/* igb_set_i2c_data - Sets the I2C data bit -+ * @data: pointer to hardware structure -+ * @state: I2C data value (0 or 1) to set -+ * -+ * Sets the I2C data bit -+ */ -+static void igb_set_i2c_data(void *data, int state) -+{ -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ struct e1000_hw *hw = &adapter->hw; -+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- if (adapter->flags & IGB_FLAG_HAS_MSIX) -- igb_configure_msix(adapter); -+ if (state) -+ i2cctl |= E1000_I2C_DATA_OUT; - else -- igb_assign_vector(adapter->q_vector[0], 0); -- -- /* Clear any pending interrupts. */ -- rd32(E1000_ICR); -- igb_irq_enable(adapter); -- -- /* notify VFs that reset has been completed */ -- if (adapter->vfs_allocated_count) { -- u32 reg_data = rd32(E1000_CTRL_EXT); -+ i2cctl &= ~E1000_I2C_DATA_OUT; - -- reg_data |= E1000_CTRL_EXT_PFRSTD; -- wr32(E1000_CTRL_EXT, reg_data); -- } -+ i2cctl &= ~E1000_I2C_DATA_OE_N; -+ i2cctl |= E1000_I2C_CLK_OE_N; - -- netif_tx_start_all_queues(adapter->netdev); -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); -+ E1000_WRITE_FLUSH(hw); - -- /* start the watchdog. */ -- hw->mac.get_link_status = 1; -- schedule_work(&adapter->watchdog_task); -+} - -- if ((adapter->flags & IGB_FLAG_EEE) && -- (!hw->dev_spec._82575.eee_disable)) -- adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; -+/* igb_set_i2c_clk - Sets the I2C SCL clock -+ * @data: pointer to hardware structure -+ * @state: state to set clock -+ * -+ * Sets the I2C clock line to state -+ */ -+static void igb_set_i2c_clk(void *data, int state) -+{ -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ struct e1000_hw *hw = &adapter->hw; -+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- return 0; -+ if (state) { -+ i2cctl |= E1000_I2C_CLK_OUT; -+ i2cctl &= ~E1000_I2C_CLK_OE_N; -+ } else { -+ i2cctl &= ~E1000_I2C_CLK_OUT; -+ i2cctl &= ~E1000_I2C_CLK_OE_N; -+ } -+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); -+ E1000_WRITE_FLUSH(hw); - } - --void igb_down(struct igb_adapter *adapter) -+/* igb_get_i2c_clk - Gets the I2C SCL clock state -+ * @data: pointer to hardware structure -+ * -+ * Gets the I2C clock state -+ */ -+static int igb_get_i2c_clk(void *data) - { -- struct net_device *netdev = adapter->netdev; -+ struct igb_adapter *adapter = (struct igb_adapter *)data; - struct e1000_hw *hw = &adapter->hw; -- u32 tctl, rctl; -- int i; -+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); - -- /* signal that we're down so the interrupt handler does not -- * reschedule our watchdog timer -- */ -- set_bit(__IGB_DOWN, &adapter->state); -+ return !!(i2cctl & E1000_I2C_CLK_IN); -+} -+ -+static const struct i2c_algo_bit_data igb_i2c_algo = { -+ .setsda = igb_set_i2c_data, -+ .setscl = igb_set_i2c_clk, -+ .getsda = igb_get_i2c_data, -+ .getscl = igb_get_i2c_clk, -+ .udelay = 5, -+ .timeout = 20, -+}; -+ -+/* igb_init_i2c - Init I2C interface -+ * @adapter: pointer to adapter structure -+ * -+ */ -+static s32 igb_init_i2c(struct igb_adapter *adapter) -+{ -+ s32 status = E1000_SUCCESS; -+ -+ /* I2C interface supported on i350 devices */ -+ if (adapter->hw.mac.type != e1000_i350) -+ 
return E1000_SUCCESS; -+ -+ /* Initialize the i2c bus which is controlled by the registers. -+ * This bus will use the i2c_algo_bit structue that implements -+ * the protocol through toggling of the 4 bits in the register. -+ */ -+ adapter->i2c_adap.owner = THIS_MODULE; -+ adapter->i2c_algo = igb_i2c_algo; -+ adapter->i2c_algo.data = adapter; -+ adapter->i2c_adap.algo_data = &adapter->i2c_algo; -+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev; -+ strlcpy(adapter->i2c_adap.name, "igb BB", -+ sizeof(adapter->i2c_adap.name)); -+ status = i2c_bit_add_bus(&adapter->i2c_adap); -+ return status; -+} -+ -+#endif /* HAVE_I2C_SUPPORT */ -+/** -+ * igb_up - Open the interface and prepare it to handle traffic -+ * @adapter: board private structure -+ **/ -+int igb_up(struct igb_adapter *adapter) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ int i; -+ -+ /* hardware has been reset, we need to reload some things */ -+ igb_configure(adapter); -+ -+ clear_bit(__IGB_DOWN, &adapter->state); -+ -+ for (i = 0; i < adapter->num_q_vectors; i++) -+ napi_enable(&(adapter->q_vector[i]->napi)); -+ -+ if (adapter->msix_entries) -+ igb_configure_msix(adapter); -+ else -+ igb_assign_vector(adapter->q_vector[0], 0); -+ -+ igb_configure_lli(adapter); -+ -+ /* Clear any pending interrupts. */ -+ E1000_READ_REG(hw, E1000_ICR); -+ igb_irq_enable(adapter); -+ -+ /* notify VFs that reset has been completed */ -+ if (adapter->vfs_allocated_count) { -+ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ -+ reg_data |= E1000_CTRL_EXT_PFRSTD; -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); -+ } -+ -+ netif_tx_start_all_queues(adapter->netdev); -+ -+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) -+ schedule_work(&adapter->dma_err_task); -+ /* start the watchdog. */ -+ hw->mac.get_link_status = 1; -+ schedule_work(&adapter->watchdog_task); -+ -+ if ((adapter->flags & IGB_FLAG_EEE) && -+ (!hw->dev_spec._82575.eee_disable)) -+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; -+ -+ return 0; -+} -+ -+void igb_down(struct igb_adapter *adapter) -+{ -+ struct net_device *netdev = adapter->netdev; -+ struct e1000_hw *hw = &adapter->hw; -+ u32 tctl, rctl; -+ int i; -+ -+ /* signal that we're down so the interrupt handler does not -+ * reschedule our watchdog timer -+ */ -+ set_bit(__IGB_DOWN, &adapter->state); - - /* disable receives in the hardware */ -- rctl = rd32(E1000_RCTL); -- wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); - /* flush and sleep below */ - -+ netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - - /* disable transmits in the hardware */ -- tctl = rd32(E1000_TCTL); -+ tctl = E1000_READ_REG(hw, E1000_TCTL); - tctl &= ~E1000_TCTL_EN; -- wr32(E1000_TCTL, tctl); -+ E1000_WRITE_REG(hw, E1000_TCTL, tctl); - /* flush both disables and wait for them to finish */ -- wrfl(); -- usleep_range(10000, 11000); -+ E1000_WRITE_FLUSH(hw); -+ usleep_range(10000, 20000); -+ -+ for (i = 0; i < adapter->num_q_vectors; i++) -+ napi_disable(&(adapter->q_vector[i]->napi)); - - igb_irq_disable(adapter); - - adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; - -- for (i = 0; i < adapter->num_q_vectors; i++) { -- napi_synchronize(&(adapter->q_vector[i]->napi)); -- napi_disable(&(adapter->q_vector[i]->napi)); -- } -- -- - del_timer_sync(&adapter->watchdog_timer); -+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) -+ del_timer_sync(&adapter->dma_err_timer); - del_timer_sync(&adapter->phy_info_timer); - -- netif_carrier_off(netdev); -- - 
/* record the stats before reset*/ -- spin_lock(&adapter->stats64_lock); -- igb_update_stats(adapter, &adapter->stats64); -- spin_unlock(&adapter->stats64_lock); -+ igb_update_stats(adapter); - - adapter->link_speed = 0; - adapter->link_duplex = 0; - -+#ifdef HAVE_PCI_ERS - if (!pci_channel_offline(adapter->pdev)) - igb_reset(adapter); -+#else -+ igb_reset(adapter); -+#endif - igb_clean_all_tx_rings(adapter); - igb_clean_all_rx_rings(adapter); --#ifdef CONFIG_IGB_DCA -- -+#ifdef IGB_DCA - /* since we reset the hardware DCA settings were cleared */ - igb_setup_dca(adapter); - #endif -@@ -1837,35 +1883,26 @@ - clear_bit(__IGB_RESETTING, &adapter->state); - } - --/** igb_enable_mas - Media Autosense re-enable after swap -+/** -+ * igb_enable_mas - Media Autosense re-enable after swap - * - * @adapter: adapter struct - **/ --static s32 igb_enable_mas(struct igb_adapter *adapter) -+void igb_enable_mas(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - u32 connsw; -- s32 ret_val = 0; - -- connsw = rd32(E1000_CONNSW); -- if (!(hw->phy.media_type == e1000_media_type_copper)) -- return ret_val; -+ connsw = E1000_READ_REG(hw, E1000_CONNSW); - - /* configure for SerDes media detect */ -- if (!(connsw & E1000_CONNSW_SERDESD)) { -+ if ((hw->phy.media_type == e1000_media_type_copper) && -+ (!(connsw & E1000_CONNSW_SERDESD))) { - connsw |= E1000_CONNSW_ENRGSRC; - connsw |= E1000_CONNSW_AUTOSENSE_EN; -- wr32(E1000_CONNSW, connsw); -- wrfl(); -- } else if (connsw & E1000_CONNSW_SERDESD) { -- /* already SerDes, no need to enable anything */ -- return ret_val; -- } else { -- netdev_info(adapter->netdev, -- "MAS: Unable to configure feature, disabling..\n"); -- adapter->flags &= ~IGB_FLAG_MAS_ENABLE; -+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw); -+ E1000_WRITE_FLUSH(hw); - } -- return ret_val; - } - - void igb_reset(struct igb_adapter *adapter) -@@ -1881,13 +1918,13 @@ - */ - switch (mac->type) { - case e1000_i350: -- case e1000_i354: - case e1000_82580: -- pba = rd32(E1000_RXPBS); -- pba = igb_rxpbs_adjust_82580(pba); -+ case e1000_i354: -+ pba = E1000_READ_REG(hw, E1000_RXPBS); -+ pba = e1000_rxpbs_adjust_82580(pba); - break; - case e1000_82576: -- pba = rd32(E1000_RXPBS); -+ pba = E1000_READ_REG(hw, E1000_RXPBS); - pba &= E1000_RXPBS_SIZE_MASK_82576; - break; - case e1000_82575: -@@ -1901,7 +1938,7 @@ - if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && - (mac->type < e1000_82576)) { - /* adjust PBA for jumbo frames */ -- wr32(E1000_PBA, pba); -+ E1000_WRITE_REG(hw, E1000_PBA, pba); - - /* To maintain wire speed transmits, the Tx FIFO should be - * large enough to accommodate two full transmit packets, -@@ -1910,12 +1947,12 @@ - * one full receive packet and is similarly rounded up and - * expressed in KB. 
- */ -- pba = rd32(E1000_PBA); -+ pba = E1000_READ_REG(hw, E1000_PBA); - /* upper 16 bits has Tx packet buffer allocation size in KB */ - tx_space = pba >> 16; - /* lower 16 bits has Rx packet buffer allocation size in KB */ - pba &= 0xffff; -- /* the Tx fifo also stores 16 bytes of information about the Tx -+ /* the tx fifo also stores 16 bytes of information about the tx - * but don't include ethernet FCS because hardware appends it - */ - min_tx_space = (adapter->max_frame_size + -@@ -1936,13 +1973,13 @@ - ((min_tx_space - tx_space) < pba)) { - pba = pba - (min_tx_space - tx_space); - -- /* if short on Rx space, Rx wins and must trump Tx -+ /* if short on rx space, rx wins and must trump tx - * adjustment - */ - if (pba < min_rx_space) - pba = min_rx_space; - } -- wr32(E1000_PBA, pba); -+ E1000_WRITE_REG(hw, E1000_PBA, pba); - } - - /* flow control settings */ -@@ -1965,6 +2002,10 @@ - if (adapter->vfs_allocated_count) { - int i; - -+ /* -+ * Clear all flags except indication that the PF has set -+ * the VF MAC addresses administratively -+ */ - for (i = 0 ; i < adapter->vfs_allocated_count; i++) - adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; - -@@ -1972,116 +2013,334 @@ - igb_ping_all_vfs(adapter); - - /* disable transmits and receives */ -- wr32(E1000_VFRE, 0); -- wr32(E1000_VFTE, 0); -+ E1000_WRITE_REG(hw, E1000_VFRE, 0); -+ E1000_WRITE_REG(hw, E1000_VFTE, 0); - } - - /* Allow time for pending master requests to run */ -- hw->mac.ops.reset_hw(hw); -- wr32(E1000_WUC, 0); -+ igb_e1000_reset_hw(hw); -+ E1000_WRITE_REG(hw, E1000_WUC, 0); - - if (adapter->flags & IGB_FLAG_MEDIA_RESET) { -- /* need to resetup here after media swap */ -- adapter->ei.get_invariants(hw); -+ e1000_setup_init_funcs(hw, TRUE); -+ igb_check_options(adapter); -+ igb_e1000_get_bus_info(hw); - adapter->flags &= ~IGB_FLAG_MEDIA_RESET; - } -- if (adapter->flags & IGB_FLAG_MAS_ENABLE) { -- if (igb_enable_mas(adapter)) -- dev_err(&pdev->dev, -- "Error enabling Media Auto Sense\n"); -+ if ((mac->type == e1000_82575) && -+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) { -+ igb_enable_mas(adapter); - } -- if (hw->mac.ops.init_hw(hw)) -- dev_err(&pdev->dev, "Hardware Error\n"); -+ if (igb_e1000_init_hw(hw)) -+ dev_err(pci_dev_to_dev(pdev), "Hardware Error\n"); - -- /* Flow control settings reset on hardware reset, so guarantee flow -+ /* -+ * Flow control settings reset on hardware reset, so guarantee flow - * control is off when forcing speed. - */ - if (!hw->mac.autoneg) -- igb_force_mac_fc(hw); -+ igb_e1000_force_mac_fc(hw); - - igb_init_dmac(adapter, pba); --#ifdef CONFIG_IGB_HWMON - /* Re-initialize the thermal sensor on i350 devices. */ -- if (!test_bit(__IGB_DOWN, &adapter->state)) { -- if (mac->type == e1000_i350 && hw->bus.func == 0) { -- /* If present, re-initialize the external thermal sensor -- * interface. -- */ -- if (adapter->ets) -- mac->ops.init_thermal_sensor_thresh(hw); -- } -+ if (mac->type == e1000_i350 && hw->bus.func == 0) { -+ /* -+ * If present, re-initialize the external thermal sensor -+ * interface. 
-+ */ -+ if (adapter->ets) -+ e1000_set_i2c_bb(hw); -+ e1000_init_thermal_sensor_thresh(hw); - } --#endif -- /* Re-establish EEE setting */ -+ -+ /*Re-establish EEE setting */ - if (hw->phy.media_type == e1000_media_type_copper) { - switch (mac->type) { - case e1000_i350: - case e1000_i210: - case e1000_i211: -- igb_set_eee_i350(hw); -+ e1000_set_eee_i350(hw, true, true); - break; - case e1000_i354: -- igb_set_eee_i354(hw); -+ e1000_set_eee_i354(hw, true, true); - break; - default: - break; - } - } -+ - if (!netif_running(adapter->netdev)) - igb_power_down_link(adapter); - - igb_update_mng_vlan(adapter); - - /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ -- wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); -+ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE); - -+#ifdef HAVE_PTP_1588_CLOCK - /* Re-enable PTP, where applicable. */ - igb_ptp_reset(adapter); -+#endif /* HAVE_PTP_1588_CLOCK */ - -- igb_get_phy_info(hw); -+ e1000_get_phy_info(hw); -+ -+ adapter->devrc++; - } - -+#ifdef HAVE_NDO_SET_FEATURES -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+static u32 igb_fix_features(struct net_device *netdev, -+ u32 features) -+#else - static netdev_features_t igb_fix_features(struct net_device *netdev, -- netdev_features_t features) -+ netdev_features_t features) -+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ - { -- /* Since there is no support for separate Rx/Tx vlan accel -- * enable/disable make sure Tx flag is always in same state as Rx. -+ /* -+ * Since there is no support for separate tx vlan accel -+ * enabled make sure tx flag is cleared if rx is. - */ -- if (features & NETIF_F_HW_VLAN_CTAG_RX) -- features |= NETIF_F_HW_VLAN_CTAG_TX; -- else -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) - features &= ~NETIF_F_HW_VLAN_CTAG_TX; -+#else -+ if (!(features & NETIF_F_HW_VLAN_RX)) -+ features &= ~NETIF_F_HW_VLAN_TX; -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+ -+#ifndef IGB_NO_LRO -+ /* If Rx checksum is disabled, then LRO should also be disabled */ -+ if (!(features & NETIF_F_RXCSUM)) -+ features &= ~NETIF_F_LRO; - -+#endif - return features; - } - - static int igb_set_features(struct net_device *netdev, -- netdev_features_t features) -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+ u32 features) -+#else -+ netdev_features_t features) -+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ - { - netdev_features_t changed = netdev->features ^ features; -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT - struct igb_adapter *adapter = netdev_priv(netdev); -+#endif - -+#ifdef NETIF_F_HW_VLAN_CTAG_RX - if (changed & NETIF_F_HW_VLAN_CTAG_RX) -+#else -+ if (changed & NETIF_F_HW_VLAN_RX) -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+ netdev->features = features; -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_vlan_mode(netdev, adapter->vlgrp); -+#else - igb_vlan_mode(netdev, features); -+#endif - -- if (!(changed & NETIF_F_RXALL)) -+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) - return 0; - - netdev->features = features; - -- if (netif_running(netdev)) -- igb_reinit_locked(adapter); -- else -- igb_reset(adapter); -+ return 0; -+} -+#endif /* HAVE_NDO_SET_FEATURES */ -+ -+#ifdef HAVE_FDB_OPS -+#ifdef USE_CONST_DEV_UC_CHAR -+static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *dev, -+ const unsigned char *addr, -+#ifdef HAVE_NDO_FDB_ADD_VID -+ u16 vid, -+#endif /* HAVE_NDO_FDB_ADD_VID */ -+ u16 flags) -+#else /* USE_CONST_DEV_UC_CHAR */ -+static int igb_ndo_fdb_add(struct ndmsg *ndm, -+ struct net_device *dev, -+ unsigned char *addr, -+ u16 flags) -+#endif /* 
USE_CONST_DEV_UC_CHAR */ -+{ -+ struct igb_adapter *adapter = netdev_priv(dev); -+ struct e1000_hw *hw = &adapter->hw; -+ int err; -+ -+ if (!(adapter->vfs_allocated_count)) -+ return -EOPNOTSUPP; -+ -+ /* Hardware does not support aging addresses so if a -+ * ndm_state is given only allow permanent addresses -+ */ -+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { -+ pr_info("%s: FDB only supports static addresses\n", -+ igb_driver_name); -+ return -EINVAL; -+ } -+ -+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { -+ u32 rar_uc_entries = hw->mac.rar_entry_count - -+ (adapter->vfs_allocated_count + 1); -+ -+ if (netdev_uc_count(dev) < rar_uc_entries) -+ err = dev_uc_add_excl(dev, addr); -+ else -+ err = -ENOMEM; -+ } else if (is_multicast_ether_addr(addr)) { -+ err = dev_mc_add_excl(dev, addr); -+ } else { -+ err = -EINVAL; -+ } -+ -+ /* Only return duplicate errors if NLM_F_EXCL is set */ -+ if (err == -EEXIST && !(flags & NLM_F_EXCL)) -+ err = 0; -+ -+ return err; -+} -+ -+#ifndef USE_DEFAULT_FDB_DEL_DUMP -+#ifdef USE_CONST_DEV_UC_CHAR -+static int igb_ndo_fdb_del(struct ndmsg *ndm, -+ struct net_device *dev, -+ const unsigned char *addr) -+#else -+static int igb_ndo_fdb_del(struct ndmsg *ndm, -+ struct net_device *dev, -+ unsigned char *addr) -+#endif /* USE_CONST_DEV_UC_CHAR */ -+{ -+ struct igb_adapter *adapter = netdev_priv(dev); -+ int err = -EOPNOTSUPP; -+ -+ if (ndm->ndm_state & NUD_PERMANENT) { -+ pr_info("%s: FDB only supports static addresses\n", -+ igb_driver_name); -+ return -EINVAL; -+ } -+ -+ if (adapter->vfs_allocated_count) { -+ if (is_unicast_ether_addr(addr)) -+ err = dev_uc_del(dev, addr); -+ else if (is_multicast_ether_addr(addr)) -+ err = dev_mc_del(dev, addr); -+ else -+ err = -EINVAL; -+ } -+ -+ return err; -+} -+ -+static int igb_ndo_fdb_dump(struct sk_buff *skb, -+ struct netlink_callback *cb, -+ struct net_device *dev, -+ int idx) -+{ -+ struct igb_adapter *adapter = netdev_priv(dev); -+ -+ if (adapter->vfs_allocated_count) -+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); -+ -+ return idx; -+} -+#endif /* USE_DEFAULT_FDB_DEL_DUMP */ -+#ifdef HAVE_BRIDGE_ATTRIBS -+#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS -+static int igb_ndo_bridge_setlink(struct net_device *dev, -+ struct nlmsghdr *nlh, -+ u16 flags) -+#else -+static int igb_ndo_bridge_setlink(struct net_device *dev, -+ struct nlmsghdr *nlh) -+#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ -+{ -+ struct igb_adapter *adapter = netdev_priv(dev); -+ struct e1000_hw *hw = &adapter->hw; -+ struct nlattr *attr, *br_spec; -+ int rem; -+ -+ if (!(adapter->vfs_allocated_count)) -+ return -EOPNOTSUPP; -+ -+ switch (adapter->hw.mac.type) { -+ case e1000_82576: -+ case e1000_i350: -+ case e1000_i354: -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); -+ -+ nla_for_each_nested(attr, br_spec, rem) { -+ __u16 mode; -+ -+ if (nla_type(attr) != IFLA_BRIDGE_MODE) -+ continue; -+ -+ mode = nla_get_u16(attr); -+ if (mode == BRIDGE_MODE_VEPA) { -+ e1000_vmdq_set_loopback_pf(hw, 0); -+ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE; -+ } else if (mode == BRIDGE_MODE_VEB) { -+ e1000_vmdq_set_loopback_pf(hw, 1); -+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE; -+ } else -+ return -EINVAL; -+ -+ netdev_info(adapter->netdev, "enabling bridge mode: %s\n", -+ mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); -+ } - - return 0; - } - -+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, -+ struct net_device *dev, u32 filter_mask, -+ int nlflags) -+#elif defined(HAVE_BRIDGE_FILTER) -+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, -+ struct net_device *dev, u32 filter_mask) -+#else -+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, -+ struct net_device *dev) -+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ -+{ -+ struct igb_adapter *adapter = netdev_priv(dev); -+ u16 mode; -+ -+ if (!(adapter->vfs_allocated_count)) -+ return -EOPNOTSUPP; -+ -+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE) -+ mode = BRIDGE_MODE_VEB; -+ else -+ mode = BRIDGE_MODE_VEPA; -+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT -+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, -+ filter_mask, NULL); -+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) -+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); -+#elif defined(NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS) -+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); -+#else -+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); -+#endif /* NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS */ -+} -+#endif /* HAVE_BRIDGE_ATTRIBS */ -+#endif /* HAVE_FDB_OPS */ -+#ifdef HAVE_NET_DEVICE_OPS - static const struct net_device_ops igb_netdev_ops = { - .ndo_open = igb_open, - .ndo_stop = igb_close, - .ndo_start_xmit = igb_xmit_frame, -- .ndo_get_stats64 = igb_get_stats64, -+ .ndo_get_stats = igb_get_stats, - .ndo_set_rx_mode = igb_set_rx_mode, - .ndo_set_mac_address = igb_set_mac, - .ndo_change_mtu = igb_change_mtu, -@@ -2090,60 +2349,190 @@ - .ndo_validate_addr = eth_validate_addr, - .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, -+#ifdef IFLA_VF_MAX - .ndo_set_vf_mac = igb_ndo_set_vf_mac, - .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, -+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - .ndo_set_vf_rate = igb_ndo_set_vf_bw, -- .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, -+#else -+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, -+#endif /*HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - .ndo_get_vf_config = igb_ndo_get_vf_config, -+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE -+ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, -+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ -+#endif /* IFLA_VF_MAX */ - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igb_netpoll, - #endif -+#ifdef HAVE_VLAN_RX_REGISTER -+ .ndo_vlan_rx_register = igb_vlan_mode, -+#endif -+#ifdef HAVE_FDB_OPS -+ .ndo_fdb_add = igb_ndo_fdb_add, -+#ifndef USE_DEFAULT_FDB_DEL_DUMP -+ .ndo_fdb_del = igb_ndo_fdb_del, -+ .ndo_fdb_dump = igb_ndo_fdb_dump, -+#endif -+#ifdef HAVE_BRIDGE_ATTRIBS -+ .ndo_bridge_setlink = igb_ndo_bridge_setlink, -+ .ndo_bridge_getlink = igb_ndo_bridge_getlink, -+#endif /* HAVE_BRIDGE_ATTRIBS */ -+#endif -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+}; -+ -+/* RHEL6 keeps these operations in a separate structure */ -+static const struct net_device_ops_ext igb_netdev_ops_ext = { -+ .size = sizeof(struct net_device_ops_ext), -+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ -+#ifdef HAVE_NDO_SET_FEATURES - .ndo_fix_features = igb_fix_features, - .ndo_set_features = igb_set_features, -+#endif /* HAVE_NDO_SET_FEATURES */ - }; - -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+static const struct net_device_ops igb_vmdq_ops = { -+ .ndo_open = &igb_vmdq_open, -+ .ndo_stop = &igb_vmdq_close, -+ .ndo_start_xmit = &igb_vmdq_xmit_frame, -+ .ndo_get_stats = 
&igb_vmdq_get_stats, -+ .ndo_set_rx_mode = &igb_vmdq_set_rx_mode, -+ .ndo_validate_addr = eth_validate_addr, -+ .ndo_set_mac_address = &igb_vmdq_set_mac, -+ .ndo_change_mtu = &igb_vmdq_change_mtu, -+ .ndo_tx_timeout = &igb_vmdq_tx_timeout, -+ .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register, -+ .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid, -+ .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid, -+}; -+ -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+#endif /* HAVE_NET_DEVICE_OPS */ -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev) -+{ -+#ifdef HAVE_NET_DEVICE_OPS -+ vnetdev->netdev_ops = &igb_vmdq_ops; -+#else -+ dev->open = &igb_vmdq_open; -+ dev->stop = &igb_vmdq_close; -+ dev->hard_start_xmit = &igb_vmdq_xmit_frame; -+ dev->get_stats = &igb_vmdq_get_stats; -+#ifdef HAVE_SET_RX_MODE -+ dev->set_rx_mode = &igb_vmdq_set_rx_mode; -+#endif -+ dev->set_multicast_list = &igb_vmdq_set_rx_mode; -+ dev->set_mac_address = &igb_vmdq_set_mac; -+ dev->change_mtu = &igb_vmdq_change_mtu; -+#ifdef HAVE_TX_TIMEOUT -+ dev->tx_timeout = &igb_vmdq_tx_timeout; -+#endif -+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) -+ dev->vlan_rx_register = &igb_vmdq_vlan_rx_register; -+ dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid; -+ dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid; -+#endif -+#endif /* HAVE_NET_DEVICE_OPS */ -+ igb_vmdq_set_ethtool_ops(vnetdev); -+ vnetdev->watchdog_timeo = 5 * HZ; -+ -+} -+ -+int igb_init_vmdq_netdevs(struct igb_adapter *adapter) -+{ -+ int pool, err = 0, base_queue; -+ struct net_device *vnetdev; -+ struct igb_vmdq_adapter *vmdq_adapter; -+ -+ for (pool = 1; pool < adapter->vmdq_pools; pool++) { -+ int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues); -+ -+ base_queue = pool * qpp; -+ vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter)); -+ if (!vnetdev) { -+ err = -ENOMEM; -+ break; -+ } -+ -+ vmdq_adapter = netdev_priv(vnetdev); -+ vmdq_adapter->vnetdev = vnetdev; -+ vmdq_adapter->real_adapter = adapter; -+ vmdq_adapter->rx_ring = adapter->rx_ring[base_queue]; -+ vmdq_adapter->tx_ring = adapter->tx_ring[base_queue]; -+ igb_assign_vmdq_netdev_ops(vnetdev); -+ snprintf(vnetdev->name, IFNAMSIZ, "%sv%d", -+ adapter->netdev->name, pool); -+ vnetdev->features = adapter->netdev->features; -+#ifdef HAVE_NETDEV_VLAN_FEATURES -+ vnetdev->vlan_features = adapter->netdev->vlan_features; -+#endif /* HAVE_NETDEV_VLAN_FEATURES */ -+ adapter->vmdq_netdev[pool-1] = vnetdev; -+ err = register_netdev(vnetdev); -+ if (err) -+ break; -+ } -+ return err; -+} -+ -+int igb_remove_vmdq_netdevs(struct igb_adapter *adapter) -+{ -+ int pool, err = 0; -+ -+ for (pool = 1; pool < adapter->vmdq_pools; pool++) { -+ unregister_netdev(adapter->vmdq_netdev[pool-1]); -+ free_netdev(adapter->vmdq_netdev[pool-1]); -+ adapter->vmdq_netdev[pool-1] = NULL; -+ } -+ return err; -+} -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+ - /** - * igb_set_fw_version - Configure version string for ethtool - * @adapter: adapter struct -+ * - **/ --void igb_set_fw_version(struct igb_adapter *adapter) -+static void igb_set_fw_version(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - struct e1000_fw_version fw; - -- igb_get_fw_version(hw, &fw); -+ e1000_get_fw_version(hw, &fw); - - switch (hw->mac.type) { - case e1000_i210: - case e1000_i211: -- if (!(igb_get_flash_presence_i210(hw))) { -+ if (!(e1000_get_flash_presence_i210(hw))) { - snprintf(adapter->fw_version, -- sizeof(adapter->fw_version), -- "%2d.%2d-%d", -- fw.invm_major, fw.invm_minor, 
-- fw.invm_img_type); -+ sizeof(adapter->fw_version), -+ "%2d.%2d-%d", -+ fw.invm_major, fw.invm_minor, fw.invm_img_type); - break; - } - /* fall through */ - default: -- /* if option is rom valid, display its version too */ -+ /* if option rom is valid, display its version too*/ - if (fw.or_valid) { - snprintf(adapter->fw_version, -- sizeof(adapter->fw_version), -- "%d.%d, 0x%08x, %d.%d.%d", -- fw.eep_major, fw.eep_minor, fw.etrack_id, -- fw.or_major, fw.or_build, fw.or_patch); -+ sizeof(adapter->fw_version), -+ "%d.%d, 0x%08x, %d.%d.%d", -+ fw.eep_major, fw.eep_minor, fw.etrack_id, -+ fw.or_major, fw.or_build, fw.or_patch); - /* no option rom */ -- } else if (fw.etrack_id != 0X0000) { -+ } else { -+ if (fw.etrack_id != 0X0000) { -+ snprintf(adapter->fw_version, -+ sizeof(adapter->fw_version), -+ "%d.%d, 0x%08x", -+ fw.eep_major, fw.eep_minor, fw.etrack_id); -+ } else { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), -- "%d.%d, 0x%08x", -- fw.eep_major, fw.eep_minor, fw.etrack_id); -- } else { -- snprintf(adapter->fw_version, -- sizeof(adapter->fw_version), -- "%d.%d.%d", -- fw.eep_major, fw.eep_minor, fw.eep_build); -+ "%d.%d.%d", -+ fw.eep_major, fw.eep_minor, fw.eep_build); -+ } - } - break; - } -@@ -2159,126 +2548,130 @@ - struct e1000_hw *hw = &adapter->hw; - u16 eeprom_data; - -- hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); -+ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data); - switch (hw->bus.func) { - case E1000_FUNC_0: -- if (eeprom_data & IGB_MAS_ENABLE_0) { -+ if (eeprom_data & IGB_MAS_ENABLE_0) - adapter->flags |= IGB_FLAG_MAS_ENABLE; -- netdev_info(adapter->netdev, -- "MAS: Enabling Media Autosense for port %d\n", -- hw->bus.func); -- } - break; - case E1000_FUNC_1: -- if (eeprom_data & IGB_MAS_ENABLE_1) { -+ if (eeprom_data & IGB_MAS_ENABLE_1) - adapter->flags |= IGB_FLAG_MAS_ENABLE; -- netdev_info(adapter->netdev, -- "MAS: Enabling Media Autosense for port %d\n", -- hw->bus.func); -- } - break; - case E1000_FUNC_2: -- if (eeprom_data & IGB_MAS_ENABLE_2) { -+ if (eeprom_data & IGB_MAS_ENABLE_2) - adapter->flags |= IGB_FLAG_MAS_ENABLE; -- netdev_info(adapter->netdev, -- "MAS: Enabling Media Autosense for port %d\n", -- hw->bus.func); -- } - break; - case E1000_FUNC_3: -- if (eeprom_data & IGB_MAS_ENABLE_3) { -+ if (eeprom_data & IGB_MAS_ENABLE_3) - adapter->flags |= IGB_FLAG_MAS_ENABLE; -- netdev_info(adapter->netdev, -- "MAS: Enabling Media Autosense for port %d\n", -- hw->bus.func); -- } - break; - default: - /* Shouldn't get here */ -- netdev_err(adapter->netdev, -- "MAS: Invalid port configuration, returning\n"); -+ dev_err(pci_dev_to_dev(adapter->pdev), -+ "%s:AMS: Invalid port configuration, returning\n", -+ adapter->netdev->name); - break; - } - } - --/** -- * igb_init_i2c - Init I2C interface -- * @adapter: pointer to adapter structure -- **/ --static s32 igb_init_i2c(struct igb_adapter *adapter) -+void igb_rar_set(struct igb_adapter *adapter, u32 index) - { -- s32 status = 0; -+ u32 rar_low, rar_high; -+ struct e1000_hw *hw = &adapter->hw; -+ u8 *addr = adapter->mac_table[index].addr; -+ /* HW expects these in little endian so we reverse the byte order -+ * from network order (big endian) to little endian -+ */ -+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | -+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); -+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - -- /* I2C interface supported on i350 devices */ -- if (adapter->hw.mac.type != e1000_i350) -- return 0; -+ /* Indicate to hardware the Address is Valid. 
*/ -+ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) -+ rar_high |= E1000_RAH_AV; - -- /* Initialize the i2c bus which is controlled by the registers. -- * This bus will use the i2c_algo_bit structue that implements -- * the protocol through toggling of the 4 bits in the register. -- */ -- adapter->i2c_adap.owner = THIS_MODULE; -- adapter->i2c_algo = igb_i2c_algo; -- adapter->i2c_algo.data = adapter; -- adapter->i2c_adap.algo_data = &adapter->i2c_algo; -- adapter->i2c_adap.dev.parent = &adapter->pdev->dev; -- strlcpy(adapter->i2c_adap.name, "igb BB", -- sizeof(adapter->i2c_adap.name)); -- status = i2c_bit_add_bus(&adapter->i2c_adap); -- return status; -+ if (hw->mac.type == e1000_82575) -+ rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; -+ else -+ rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; -+ -+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); -+ E1000_WRITE_FLUSH(hw); -+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); -+ E1000_WRITE_FLUSH(hw); - } - - /** -- * igb_probe - Device Initialization Routine -- * @pdev: PCI device information struct -- * @ent: entry in igb_pci_tbl -+ * igb_probe - Device Initialization Routine -+ * @pdev: PCI device information struct -+ * @ent: entry in igb_pci_tbl - * -- * Returns 0 on success, negative on failure -+ * Returns 0 on success, negative on failure - * -- * igb_probe initializes an adapter identified by a pci_dev structure. -- * The OS initialization, configuring of the adapter private structure, -- * and a hardware reset occur. -+ * igb_probe initializes an adapter identified by a pci_dev structure. -+ * The OS initialization, configuring of the adapter private structure, -+ * and a hardware reset occur. - **/ --static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -+static int igb_probe(struct pci_dev *pdev, -+ const struct pci_device_id *ent) - { - struct net_device *netdev; - struct igb_adapter *adapter; - struct e1000_hw *hw; - u16 eeprom_data = 0; -+ u8 pba_str[E1000_PBANUM_LENGTH]; - s32 ret_val; - static int global_quad_port_a; /* global quad port a indication */ -- const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; - int err, pci_using_dac; -- u8 part_str[E1000_PBANUM_LENGTH]; -- -- /* Catch broken hardware that put the wrong VF device ID in -- * the PCIe SR-IOV capability. 
-- */ -- if (pdev->is_virtfn) { -- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", -- pci_name(pdev), pdev->vendor, pdev->device); -- return -EINVAL; -- } -+ static int cards_found; -+#ifdef HAVE_NDO_SET_FEATURES -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+ u32 hw_features; -+#else -+ netdev_features_t hw_features; -+#endif -+#endif - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - pci_using_dac = 0; -- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); -+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)); - if (!err) { -- pci_using_dac = 1; -+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), -+ DMA_BIT_MASK(64)); -+ if (!err) -+ pci_using_dac = 1; - } else { -- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); -+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); - if (err) { -- dev_err(&pdev->dev, -- "No usable DMA configuration, aborting\n"); -- goto err_dma; -+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), -+ DMA_BIT_MASK(32)); -+ if (err) { -+ IGB_ERR( -+ "No usable DMA configuration, aborting\n"); -+ goto err_dma; -+ } - } - } - -- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, -- IORESOURCE_MEM), -+#ifndef HAVE_ASPM_QUIRKS -+ /* 82575 requires that the pci-e link partner disable the L0s state */ -+ switch (pdev->device) { -+ case E1000_DEV_ID_82575EB_COPPER: -+ case E1000_DEV_ID_82575EB_FIBER_SERDES: -+ case E1000_DEV_ID_82575GB_QUAD_COPPER: -+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); -+ default: -+ break; -+ } -+ -+#endif /* HAVE_ASPM_QUIRKS */ -+ err = pci_request_selected_regions(pdev, -+ pci_select_bars(pdev, -+ IORESOURCE_MEM), - igb_driver_name); - if (err) - goto err_pci_reg; -@@ -2286,14 +2679,18 @@ - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); -- pci_save_state(pdev); - - err = -ENOMEM; -+#ifdef HAVE_TX_MQ - netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), - IGB_MAX_TX_QUEUES); -+#else -+ netdev = alloc_etherdev(sizeof(struct igb_adapter)); -+#endif /* HAVE_TX_MQ */ - if (!netdev) - goto err_alloc_etherdev; - -+ SET_MODULE_OWNER(netdev); - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); -@@ -2302,158 +2699,225 @@ - adapter->pdev = pdev; - hw = &adapter->hw; - hw->back = adapter; -- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); -+ adapter->port_num = hw->bus.func; -+ adapter->msg_enable = (1 << debug) - 1; - -+#ifdef HAVE_PCI_ERS -+ err = pci_save_state(pdev); -+ if (err) -+ goto err_ioremap; -+#endif - err = -EIO; -- hw->hw_addr = pci_iomap(pdev, 0, 0); -- if (!hw->hw_addr) -+ adapter->io_addr = ioremap(pci_resource_start(pdev, 0), -+ pci_resource_len(pdev, 0)); -+ if (!adapter->io_addr) - goto err_ioremap; -+ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ -+ hw->hw_addr = adapter->io_addr; - -+#ifdef HAVE_NET_DEVICE_OPS - netdev->netdev_ops = &igb_netdev_ops; -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+ set_netdev_ops_ext(netdev, &igb_netdev_ops_ext); -+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ -+#else /* HAVE_NET_DEVICE_OPS */ -+ netdev->open = &igb_open; -+ netdev->stop = &igb_close; -+ netdev->get_stats = &igb_get_stats; -+#ifdef HAVE_SET_RX_MODE -+ netdev->set_rx_mode = &igb_set_rx_mode; -+#endif -+ netdev->set_multicast_list = &igb_set_rx_mode; -+ netdev->set_mac_address = &igb_set_mac; -+ netdev->change_mtu = &igb_change_mtu; -+ netdev->do_ioctl = &igb_ioctl; -+#ifdef HAVE_TX_TIMEOUT -+ netdev->tx_timeout = &igb_tx_timeout; -+#endif -+ netdev->vlan_rx_register = igb_vlan_mode; -+ 
netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid; -+ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid; -+#ifdef CONFIG_NET_POLL_CONTROLLER -+ netdev->poll_controller = igb_netpoll; -+#endif -+ netdev->hard_start_xmit = &igb_xmit_frame; -+#endif /* HAVE_NET_DEVICE_OPS */ - igb_set_ethtool_ops(netdev); -+#ifdef HAVE_TX_TIMEOUT - netdev->watchdog_timeo = 5 * HZ; -+#endif - - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - -- netdev->mem_start = pci_resource_start(pdev, 0); -- netdev->mem_end = pci_resource_end(pdev, 0); -- -- /* PCI config space info */ -- hw->vendor_id = pdev->vendor; -- hw->device_id = pdev->device; -- hw->revision_id = pdev->revision; -- hw->subsystem_vendor_id = pdev->subsystem_vendor; -- hw->subsystem_device_id = pdev->subsystem_device; -- -- /* Copy the default MAC, PHY and NVM function pointers */ -- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); -- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); -- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); -- /* Initialize skew-specific constants */ -- err = ei->get_invariants(hw); -- if (err) -- goto err_sw_init; -+ adapter->bd_number = cards_found; - - /* setup the private structure */ - err = igb_sw_init(adapter); - if (err) - goto err_sw_init; - -- igb_get_bus_info_pcie(hw); -+ igb_e1000_get_bus_info(hw); - -- hw->phy.autoneg_wait_to_complete = false; -+ hw->phy.autoneg_wait_to_complete = FALSE; -+ hw->mac.adaptive_ifs = FALSE; - - /* Copper options */ - if (hw->phy.media_type == e1000_media_type_copper) { - hw->phy.mdix = AUTO_ALL_MODES; -- hw->phy.disable_polarity_correction = false; -+ hw->phy.disable_polarity_correction = FALSE; - hw->phy.ms_type = e1000_ms_hw_default; - } - -- if (igb_check_reset_block(hw)) -- dev_info(&pdev->dev, -+ if (e1000_check_reset_block(hw)) -+ dev_info(pci_dev_to_dev(pdev), - "PHY reset is blocked due to SOL/IDER session.\n"); - -- /* features is initialized to 0 in allocation, it might have bits -+ /* -+ * features is initialized to 0 in allocation, it might have bits - * set by igb_sw_init so we should use an or instead of an - * assignment. 
- */ - netdev->features |= NETIF_F_SG | - NETIF_F_IP_CSUM | -+#ifdef NETIF_F_IPV6_CSUM - NETIF_F_IPV6_CSUM | -+#endif -+#ifdef NETIF_F_TSO - NETIF_F_TSO | -+#ifdef NETIF_F_TSO6 - NETIF_F_TSO6 | -+#endif -+#endif /* NETIF_F_TSO */ -+#ifdef NETIF_F_RXHASH - NETIF_F_RXHASH | -+#endif - NETIF_F_RXCSUM | -+#ifdef NETIF_F_HW_VLAN_CTAG_RX - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; -+#else -+ NETIF_F_HW_VLAN_RX | -+ NETIF_F_HW_VLAN_TX; -+#endif -+ -+ if (hw->mac.type >= e1000_82576) -+ netdev->features |= NETIF_F_SCTP_CSUM; - -+#ifdef HAVE_NDO_SET_FEATURES - /* copy netdev features into list of user selectable features */ -- netdev->hw_features |= netdev->features; -- netdev->hw_features |= NETIF_F_RXALL; -+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+ hw_features = netdev->hw_features; -+ -+ /* give us the option of enabling LRO later */ -+ hw_features |= NETIF_F_LRO; -+ -+#else -+ hw_features = get_netdev_hw_features(netdev); -+ -+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ -+ hw_features |= netdev->features; -+ -+#else -+#ifdef NETIF_F_GRO -+ -+ /* this is only needed on kernels prior to 2.6.39 */ -+ netdev->features |= NETIF_F_GRO; -+#endif /* NETIF_F_GRO */ -+#endif /* HAVE_NDO_SET_FEATURES */ - - /* set this bit last since it cannot be part of hw_features */ -+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; -+#endif /* NETIF_F_HW_FLAN_CTAG_FILTER */ -+#ifdef NETIF_F_HW_VLAN_TX -+ netdev->features |= NETIF_F_HW_VLAN_FILTER; -+#endif /* NETIF_F_HW_VLAN_TX */ -+ -+#ifdef HAVE_NDO_SET_FEATURES -+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT -+ set_netdev_hw_features(netdev, hw_features); -+#else -+ netdev->hw_features = hw_features; -+#endif -+#endif - -+#ifdef HAVE_NETDEV_VLAN_FEATURES - netdev->vlan_features |= NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_SG; - -- netdev->priv_flags |= IFF_SUPP_NOFCS; -- -- if (pci_using_dac) { -+#endif /* HAVE_NETDEV_VLAN_FEATURES */ -+ if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; -- netdev->vlan_features |= NETIF_F_HIGHDMA; -- } - -- if (hw->mac.type >= e1000_82576) { -- netdev->hw_features |= NETIF_F_SCTP_CSUM; -- netdev->features |= NETIF_F_SCTP_CSUM; -- } -- -- netdev->priv_flags |= IFF_UNICAST_FLT; -- -- adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); -+ adapter->en_mng_pt = igb_e1000_enable_mng_pass_thru(hw); -+#ifdef DEBUG -+ if (adapter->dmac != IGB_DMAC_DISABLE) -+ netdev_info(netdev, "%s: DMA Coalescing is enabled..\n", -+ netdev->name); -+#endif - - /* before reading the NVM, reset the controller to put the device in a - * known good starting state - */ -- hw->mac.ops.reset_hw(hw); -+ igb_e1000_reset_hw(hw); - -- /* make sure the NVM is good , i211/i210 parts can have special NVM -- * that doesn't contain a checksum -- */ -- switch (hw->mac.type) { -- case e1000_i210: -- case e1000_i211: -- if (igb_get_flash_presence_i210(hw)) { -- if (hw->nvm.ops.validate(hw) < 0) { -- dev_err(&pdev->dev, -- "The NVM Checksum Is Not Valid\n"); -- err = -EIO; -- goto err_eeprom; -- } -- } -- break; -- default: -- if (hw->nvm.ops.validate(hw) < 0) { -- dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); -- err = -EIO; -- goto err_eeprom; -- } -- break; -+ /* make sure the NVM is good */ -+ if (e1000_validate_nvm_checksum(hw) < 0) { -+ dev_err(pci_dev_to_dev(pdev), -+ "The NVM Checksum Is Not Valid\n"); -+ err = -EIO; -+ goto err_eeprom; - } - - /* copy the MAC address out of the NVM */ -- if (hw->mac.ops.read_mac_addr(hw)) -- dev_err(&pdev->dev, "NVM Read Error\n"); -- -+ if 
(igb_e1000_read_mac_addr(hw)) -+ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n"); - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); -+#ifdef ETHTOOL_GPERMADDR -+ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); - -+ if (!is_valid_ether_addr(netdev->perm_addr)) { -+#else - if (!is_valid_ether_addr(netdev->dev_addr)) { -- dev_err(&pdev->dev, "Invalid MAC Address\n"); -+#endif -+ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n"); - err = -EIO; - goto err_eeprom; - } - -+ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len); -+ adapter->mac_table[0].queue = adapter->vfs_allocated_count; -+ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT -+ | IGB_MAC_STATE_IN_USE); -+ igb_rar_set(adapter, 0); -+ - /* get firmware version for ethtool -i */ - igb_set_fw_version(adapter); - - /* configure RXPBSIZE and TXPBSIZE */ - if (hw->mac.type == e1000_i210) { -- wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); -- wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); -+ E1000_WRITE_REG(hw, E1000_RXPBS, I210_RXPBSIZE_DEFAULT); -+ E1000_WRITE_REG(hw, E1000_TXPBS, I210_TXPBSIZE_DEFAULT); - } - -- setup_timer(&adapter->watchdog_timer, igb_watchdog, -+ /* Check if Media Autosense is enabled */ -+ if (hw->mac.type == e1000_82580) -+ igb_init_mas(adapter); -+ setup_timer(&adapter->watchdog_timer, &igb_watchdog, - (unsigned long) adapter); -- setup_timer(&adapter->phy_info_timer, igb_update_phy_info, -+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) -+ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer, -+ (unsigned long) adapter); -+ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, - (unsigned long) adapter); - - INIT_WORK(&adapter->reset_task, igb_reset_task); - INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); -+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) -+ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task); - - /* Initialize link properties that are user-changeable */ - adapter->fc_autoneg = true; -@@ -2463,19 +2927,19 @@ - hw->fc.requested_mode = e1000_fc_default; - hw->fc.current_mode = e1000_fc_default; - -- igb_validate_mdi_setting(hw); -+ igb_e1000_validate_mdi_setting(hw); - - /* By default, support wake on port A */ - if (hw->bus.func == 0) - adapter->flags |= IGB_FLAG_WOL_SUPPORTED; - -- /* Check the NVM for wake support on non-port A ports */ -+ /* Check the NVM for wake support for non-port A ports */ - if (hw->mac.type >= e1000_82580) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + - NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, - &eeprom_data); - else if (hw->bus.func == 1) -- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); -+ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); - - if (eeprom_data & IGB_EEPROM_APME) - adapter->flags |= IGB_FLAG_WOL_SUPPORTED; -@@ -2494,7 +2958,7 @@ - /* Wake events only supported on port A for dual fiber - * regardless of eeprom setting - */ -- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) -+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) - adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; - break; - case E1000_DEV_ID_82576_QUAD_COPPER: -@@ -2509,9 +2973,7 @@ - global_quad_port_a = 0; - break; - default: -- /* If the device can't wake, don't set software support */ -- if (!device_can_wakeup(&adapter->pdev->dev)) -- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; -+ break; - } - - /* initialize the wol settings based on the eeprom settings */ -@@ -2525,145 +2987,185 @@ - adapter->wol = 0; - } - -- device_set_wakeup_enable(&adapter->pdev->dev, -+ /* Some vendors want the ability to Use the 
EEPROM setting as -+ * enable/disable only, and not for capability -+ */ -+ if (((hw->mac.type == e1000_i350) || -+ (hw->mac.type == e1000_i354)) && -+ (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { -+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; -+ adapter->wol = 0; -+ } -+ if (hw->mac.type == e1000_i350) { -+ if (((pdev->subsystem_device == 0x5001) || -+ (pdev->subsystem_device == 0x5002)) && -+ (hw->bus.func == 0)) { -+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; -+ adapter->wol = 0; -+ } -+ if (pdev->subsystem_device == 0x1F52) -+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED; -+ } -+ -+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), - adapter->flags & IGB_FLAG_WOL_SUPPORTED); - - /* reset the hardware with the new settings */ - igb_reset(adapter); -+ adapter->devrc = 0; - -+#ifdef HAVE_I2C_SUPPORT - /* Init the I2C interface */ - err = igb_init_i2c(adapter); - if (err) { - dev_err(&pdev->dev, "failed to init i2c interface\n"); - goto err_eeprom; - } -+#endif /* HAVE_I2C_SUPPORT */ - - /* let the f/w know that the h/w is now under the control of the - * driver. - */ - igb_get_hw_control(adapter); - -- strcpy(netdev->name, "eth%d"); -+ strncpy(netdev->name, "eth%d", IFNAMSIZ); - err = register_netdev(netdev); - if (err) - goto err_register; - -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ err = igb_init_vmdq_netdevs(adapter); -+ if (err) -+ goto err_register; -+#endif - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - --#ifdef CONFIG_IGB_DCA -- if (dca_add_requester(&pdev->dev) == 0) { -+#ifdef IGB_DCA -+ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) { - adapter->flags |= IGB_FLAG_DCA_ENABLED; -- dev_info(&pdev->dev, "DCA enabled\n"); -+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); - igb_setup_dca(adapter); - } - - #endif --#ifdef CONFIG_IGB_HWMON -+#ifdef HAVE_PTP_1588_CLOCK -+ /* do hw tstamp init after resetting */ -+ igb_ptp_init(adapter); -+#endif /* HAVE_PTP_1588_CLOCK */ -+ -+ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n"); -+ /* print bus type/speed/width info */ -+ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ", -+ netdev->name, -+ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" : -+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" : -+ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"), -+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : -+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : -+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : -+ (hw->mac.type == e1000_i354) ? "integrated" : "unknown")); -+ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); -+ -+ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH); -+ if (ret_val) -+ strcpy(pba_str, "Unknown"); -+ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name, -+ pba_str); -+ - /* Initialize the thermal sensor on i350 devices. */ -- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { -- u16 ets_word; -+ if (hw->mac.type == e1000_i350) { -+ if (hw->bus.func == 0) { -+ u16 ets_word; - -- /* Read the NVM to determine if this i350 device supports an -- * external thermal sensor. 
-- */ -- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); -- if (ets_word != 0x0000 && ets_word != 0xFFFF) -- adapter->ets = true; -- else -- adapter->ets = false; -- if (igb_sysfs_init(adapter)) -- dev_err(&pdev->dev, -- "failed to allocate sysfs resources\n"); -- } else { -- adapter->ets = false; -- } --#endif -- /* Check if Media Autosense is enabled */ -- adapter->ei = *ei; -- if (hw->dev_spec._82575.mas_capable) -- igb_init_mas(adapter); -+ /* -+ * Read the NVM to determine if this i350 device -+ * supports an external thermal sensor. -+ */ -+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word); -+ if (ets_word != 0x0000 && ets_word != 0xFFFF) -+ adapter->ets = true; -+ else -+ adapter->ets = false; -+ } -+#ifdef IGB_HWMON - -- /* do hw tstamp init after resetting */ -- igb_ptp_init(adapter); -+ igb_sysfs_init(adapter); -+#else -+#ifdef IGB_PROCFS - -- dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); -- /* print bus type/speed/width info, not applicable to i354 */ -- if (hw->mac.type != e1000_i354) { -- dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", -- netdev->name, -- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : -- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : -- "unknown"), -- ((hw->bus.width == e1000_bus_width_pcie_x4) ? -- "Width x4" : -- (hw->bus.width == e1000_bus_width_pcie_x2) ? -- "Width x2" : -- (hw->bus.width == e1000_bus_width_pcie_x1) ? -- "Width x1" : "unknown"), netdev->dev_addr); -- } -- -- if ((hw->mac.type >= e1000_i210 || -- igb_get_flash_presence_i210(hw))) { -- ret_val = igb_read_part_string(hw, part_str, -- E1000_PBANUM_LENGTH); -+ igb_procfs_init(adapter); -+#endif /* IGB_PROCFS */ -+#endif /* IGB_HWMON */ - } else { -- ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; -+ adapter->ets = false; - } - -- if (ret_val) -- strcpy(part_str, "Unknown"); -- dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); -- dev_info(&pdev->dev, -- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", -- (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : -- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", -- adapter->num_rx_queues, adapter->num_tx_queues); - if (hw->phy.media_type == e1000_media_type_copper) { - switch (hw->mac.type) { - case e1000_i350: - case e1000_i210: - case e1000_i211: - /* Enable EEE for internal copper PHY devices */ -- err = igb_set_eee_i350(hw); -+ err = e1000_set_eee_i350(hw, true, true); - if ((!err) && -- (!hw->dev_spec._82575.eee_disable)) { -+ (adapter->flags & IGB_FLAG_EEE)) - adapter->eee_advert = - MDIO_EEE_100TX | MDIO_EEE_1000T; -- adapter->flags |= IGB_FLAG_EEE; -- } - break; - case e1000_i354: -- if ((rd32(E1000_CTRL_EXT) & -- E1000_CTRL_EXT_LINK_MODE_SGMII)) { -- err = igb_set_eee_i354(hw); -+ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) & -+ (E1000_CTRL_EXT_LINK_MODE_SGMII)) { -+ err = e1000_set_eee_i354(hw, true, true); - if ((!err) && -- (!hw->dev_spec._82575.eee_disable)) { -+ (adapter->flags & IGB_FLAG_EEE)) - adapter->eee_advert = - MDIO_EEE_100TX | MDIO_EEE_1000T; -- adapter->flags |= IGB_FLAG_EEE; -- } - } - break; - default: - break; - } - } -+ -+ /* send driver version info to firmware */ -+ if ((hw->mac.type >= e1000_i350) && -+ (e1000_get_flash_presence_i210(hw))) -+ igb_init_fw(adapter); -+ -+#ifndef IGB_NO_LRO -+ if (netdev->features & NETIF_F_LRO) -+ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled\n"); -+ else -+ dev_info(pci_dev_to_dev(pdev), "LRO is disabled\n"); -+#endif -+ dev_info(pci_dev_to_dev(pdev), -+ "Using %s interrupts. 
%d rx queue(s), %d tx queue(s)\n", -+ adapter->msix_entries ? "MSI-X" : -+ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", -+ adapter->num_rx_queues, adapter->num_tx_queues); -+ -+ cards_found++; -+ - pm_runtime_put_noidle(&pdev->dev); - return 0; - - err_register: - igb_release_hw_control(adapter); -+#ifdef HAVE_I2C_SUPPORT - memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); -+#endif /* HAVE_I2C_SUPPORT */ - err_eeprom: -- if (!igb_check_reset_block(hw)) -- igb_reset_phy(hw); -+ if (!e1000_check_reset_block(hw)) -+ igb_e1000_phy_hw_reset(hw); - - if (hw->flash_address) - iounmap(hw->flash_address); - err_sw_init: -+ kfree(adapter->shadow_vfta); - igb_clear_interrupt_scheme(adapter); -- pci_iounmap(pdev, hw->hw_addr); -+ igb_reset_sriov_capability(adapter); -+ iounmap(adapter->io_addr); - err_ioremap: - free_netdev(netdev); - err_alloc_etherdev: -@@ -2674,117 +3176,28 @@ - pci_disable_device(pdev); - return err; - } -- --#ifdef CONFIG_PCI_IOV --static int igb_disable_sriov(struct pci_dev *pdev) --{ -- struct net_device *netdev = pci_get_drvdata(pdev); -- struct igb_adapter *adapter = netdev_priv(netdev); -- struct e1000_hw *hw = &adapter->hw; -- -- /* reclaim resources allocated to VFs */ -- if (adapter->vf_data) { -- /* disable iov and allow time for transactions to clear */ -- if (pci_vfs_assigned(pdev)) { -- dev_warn(&pdev->dev, -- "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); -- return -EPERM; -- } else { -- pci_disable_sriov(pdev); -- msleep(500); -- } -- -- kfree(adapter->vf_data); -- adapter->vf_data = NULL; -- adapter->vfs_allocated_count = 0; -- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); -- wrfl(); -- msleep(100); -- dev_info(&pdev->dev, "IOV Disabled\n"); -- -- /* Re-enable DMA Coalescing flag since IOV is turned off */ -- adapter->flags |= IGB_FLAG_DMAC; -- } -- -- return 0; --} -- --static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) --{ -- struct net_device *netdev = pci_get_drvdata(pdev); -- struct igb_adapter *adapter = netdev_priv(netdev); -- int old_vfs = pci_num_vf(pdev); -- int err = 0; -- int i; -- -- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { -- err = -EPERM; -- goto out; -- } -- if (!num_vfs) -- goto out; -- -- if (old_vfs) { -- dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", -- old_vfs, max_vfs); -- adapter->vfs_allocated_count = old_vfs; -- } else -- adapter->vfs_allocated_count = num_vfs; -- -- adapter->vf_data = kcalloc(adapter->vfs_allocated_count, -- sizeof(struct vf_data_storage), GFP_KERNEL); -- -- /* if allocation failed then we do not support SR-IOV */ -- if (!adapter->vf_data) { -- adapter->vfs_allocated_count = 0; -- dev_err(&pdev->dev, -- "Unable to allocate memory for VF Data Storage\n"); -- err = -ENOMEM; -- goto out; -- } -- -- /* only call pci_enable_sriov() if no VFs are allocated already */ -- if (!old_vfs) { -- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); -- if (err) -- goto err_out; -- } -- dev_info(&pdev->dev, "%d VFs allocated\n", -- adapter->vfs_allocated_count); -- for (i = 0; i < adapter->vfs_allocated_count; i++) -- igb_vf_configure(adapter, i); -- -- /* DMA Coalescing is not supported in IOV mode. 
*/ -- adapter->flags &= ~IGB_FLAG_DMAC; -- goto out; -- --err_out: -- kfree(adapter->vf_data); -- adapter->vf_data = NULL; -- adapter->vfs_allocated_count = 0; --out: -- return err; --} -- --#endif --/** -+#ifdef HAVE_I2C_SUPPORT -+/* - * igb_remove_i2c - Cleanup I2C interface - * @adapter: pointer to adapter structure -- **/ -+ * -+ */ - static void igb_remove_i2c(struct igb_adapter *adapter) - { -+ - /* free the adapter bus structure */ - i2c_del_adapter(&adapter->i2c_adap); - } -+#endif /* HAVE_I2C_SUPPORT */ - - /** -- * igb_remove - Device Removal Routine -- * @pdev: PCI device information struct -+ * igb_remove - Device Removal Routine -+ * @pdev: PCI device information struct - * -- * igb_remove is called by the PCI subsystem to alert the driver -- * that it should release a PCI device. The could be caused by a -- * Hot-Plug event, or because the driver is going to be removed from -- * memory. -+ * igb_remove is called by the PCI subsystem to alert the driver -+ * that it should release a PCI device. The could be caused by a -+ * Hot-Plug event, or because the driver is going to be removed from -+ * memory. - **/ - static void igb_remove(struct pci_dev *pdev) - { -@@ -2793,30 +3206,39 @@ - struct e1000_hw *hw = &adapter->hw; - - pm_runtime_get_noresume(&pdev->dev); --#ifdef CONFIG_IGB_HWMON -- igb_sysfs_exit(adapter); --#endif -+#ifdef HAVE_I2C_SUPPORT - igb_remove_i2c(adapter); -+#endif /* HAVE_I2C_SUPPORT */ -+#ifdef HAVE_PTP_1588_CLOCK - igb_ptp_stop(adapter); -- /* The watchdog timer may be rescheduled, so explicitly -- * disable watchdog from being rescheduled. -+#endif /* HAVE_PTP_1588_CLOCK */ -+ -+ /* flush_scheduled work may reschedule our watchdog task, so -+ * explicitly disable watchdog tasks from being rescheduled - */ - set_bit(__IGB_DOWN, &adapter->state); - del_timer_sync(&adapter->watchdog_timer); -+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) -+ del_timer_sync(&adapter->dma_err_timer); - del_timer_sync(&adapter->phy_info_timer); - -- cancel_work_sync(&adapter->reset_task); -- cancel_work_sync(&adapter->watchdog_task); -+ flush_scheduled_work(); - --#ifdef CONFIG_IGB_DCA -+#ifdef IGB_DCA - if (adapter->flags & IGB_FLAG_DCA_ENABLED) { -- dev_info(&pdev->dev, "DCA disabled\n"); -+ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); - dca_remove_requester(&pdev->dev); - adapter->flags &= ~IGB_FLAG_DCA_ENABLED; -- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); -+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE); - } - #endif - -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ igb_remove_vmdq_netdevs(adapter); -+#endif -+ -+ igb_reset_sriov_capability(adapter); -+ - /* Release control of h/w to f/w. If f/w is AMT enabled, this - * would have already happened in close and is redundant. 
- */ -@@ -2826,16 +3248,21 @@ - - igb_clear_interrupt_scheme(adapter); - --#ifdef CONFIG_PCI_IOV -- igb_disable_sriov(pdev); --#endif -- -- pci_iounmap(pdev, hw->hw_addr); -+ if (adapter->io_addr) -+ iounmap(adapter->io_addr); - if (hw->flash_address) - iounmap(hw->flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); - -+#ifdef IGB_HWMON -+ igb_sysfs_exit(adapter); -+#else -+#ifdef IGB_PROCFS -+ igb_procfs_exit(adapter); -+#endif /* IGB_PROCFS */ -+#endif /* IGB_HWMON */ -+ kfree(adapter->mac_table); - kfree(adapter->shadow_vfta); - free_netdev(netdev); - -@@ -2845,110 +3272,12 @@ - } - - /** -- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space -- * @adapter: board private structure to initialize -- * -- * This function initializes the vf specific data storage and then attempts to -- * allocate the VFs. The reason for ordering it this way is because it is much -- * mor expensive time wise to disable SR-IOV than it is to allocate and free -- * the memory for the VFs. -- **/ --static void igb_probe_vfs(struct igb_adapter *adapter) --{ --#ifdef CONFIG_PCI_IOV -- struct pci_dev *pdev = adapter->pdev; -- struct e1000_hw *hw = &adapter->hw; -- -- /* Virtualization features not supported on i210 family. */ -- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) -- return; -- -- pci_sriov_set_totalvfs(pdev, 7); -- igb_pci_enable_sriov(pdev, max_vfs); -- --#endif /* CONFIG_PCI_IOV */ --} -- --static void igb_init_queue_configuration(struct igb_adapter *adapter) --{ -- struct e1000_hw *hw = &adapter->hw; -- u32 max_rss_queues; -- -- /* Determine the maximum number of RSS queues supported. */ -- switch (hw->mac.type) { -- case e1000_i211: -- max_rss_queues = IGB_MAX_RX_QUEUES_I211; -- break; -- case e1000_82575: -- case e1000_i210: -- max_rss_queues = IGB_MAX_RX_QUEUES_82575; -- break; -- case e1000_i350: -- /* I350 cannot do RSS and SR-IOV at the same time */ -- if (!!adapter->vfs_allocated_count) { -- max_rss_queues = 1; -- break; -- } -- /* fall through */ -- case e1000_82576: -- if (!!adapter->vfs_allocated_count) { -- max_rss_queues = 2; -- break; -- } -- /* fall through */ -- case e1000_82580: -- case e1000_i354: -- default: -- max_rss_queues = IGB_MAX_RX_QUEUES; -- break; -- } -- -- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); -- -- igb_set_flag_queue_pairs(adapter, max_rss_queues); --} -- --void igb_set_flag_queue_pairs(struct igb_adapter *adapter, -- const u32 max_rss_queues) --{ -- struct e1000_hw *hw = &adapter->hw; -- -- /* Determine if we need to pair queues. */ -- switch (hw->mac.type) { -- case e1000_82575: -- case e1000_i211: -- /* Device supports enough interrupts without queue pairing. */ -- break; -- case e1000_82576: -- /* If VFs are going to be allocated with RSS queues then we -- * should pair the queues in order to conserve interrupts due -- * to limited supply. -- */ -- if ((adapter->rss_queues > 1) && -- (adapter->vfs_allocated_count > 6)) -- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; -- /* fall through */ -- case e1000_82580: -- case e1000_i350: -- case e1000_i354: -- case e1000_i210: -- default: -- /* If rss_queues > half of max_rss_queues, pair the queues in -- * order to conserve interrupts due to limited supply. 
-- */ -- if (adapter->rss_queues > (max_rss_queues / 2)) -- adapter->flags |= IGB_FLAG_QUEUE_PAIRS; -- break; -- } --} -- --/** -- * igb_sw_init - Initialize general software structures (struct igb_adapter) -- * @adapter: board private structure to initialize -+ * igb_sw_init - Initialize general software structures (struct igb_adapter) -+ * @adapter: board private structure to initialize - * -- * igb_sw_init initializes the Adapter private data structure. -- * Fields are initialized based on PCI device information and -- * OS network device settings (MTU size). -+ * igb_sw_init initializes the Adapter private data structure. -+ * Fields are initialized based on PCI device information and -+ * OS network device settings (MTU size). - **/ - static int igb_sw_init(struct igb_adapter *adapter) - { -@@ -2956,84 +3285,78 @@ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - -+ /* PCI config space info */ -+ -+ hw->vendor_id = pdev->vendor; -+ hw->device_id = pdev->device; -+ hw->subsystem_vendor_id = pdev->subsystem_vendor; -+ hw->subsystem_device_id = pdev->subsystem_device; -+ -+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); -+ - pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); - - /* set default ring sizes */ - adapter->tx_ring_count = IGB_DEFAULT_TXD; - adapter->rx_ring_count = IGB_DEFAULT_RXD; - -- /* set default ITR values */ -- adapter->rx_itr_setting = IGB_DEFAULT_ITR; -- adapter->tx_itr_setting = IGB_DEFAULT_ITR; -- - /* set default work limits */ - adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; - - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + -- VLAN_HLEN; -- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; -+ VLAN_HLEN; - -- spin_lock_init(&adapter->stats64_lock); --#ifdef CONFIG_PCI_IOV -- switch (hw->mac.type) { -- case e1000_82576: -- case e1000_i350: -- if (max_vfs > 7) { -- dev_warn(&pdev->dev, -- "Maximum of 7 VFs per PF, using max\n"); -- max_vfs = adapter->vfs_allocated_count = 7; -- } else -- adapter->vfs_allocated_count = max_vfs; -- if (adapter->vfs_allocated_count) -- dev_warn(&pdev->dev, -- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); -- break; -- default: -- break; -+ /* Initialize the hardware-specific values */ -+ if (e1000_setup_init_funcs(hw, TRUE)) { -+ dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n"); -+ return -EIO; - } --#endif /* CONFIG_PCI_IOV */ - -- igb_init_queue_configuration(adapter); -+ igb_check_options(adapter); -+ -+ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) * -+ hw->mac.rar_entry_count, -+ GFP_ATOMIC); - - /* Setup and initialize a copy of the hw vlan table array */ -- adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), -- GFP_ATOMIC); -+ adapter->shadow_vfta = kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES, -+ GFP_ATOMIC); -+ -+ /* These calls may decrease the number of queues */ -+ if (hw->mac.type < e1000_i210) -+ igb_set_sriov_capability(adapter); - -- /* This call may decrease the number of queues */ - if (igb_init_interrupt_scheme(adapter, true)) { -- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); -+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - -- igb_probe_vfs(adapter); -- - /* Explicitly disable IRQ since the NIC can be in any state. 
*/ - igb_irq_disable(adapter); - -- if (hw->mac.type >= e1000_i350) -- adapter->flags &= ~IGB_FLAG_DMAC; -- - set_bit(__IGB_DOWN, &adapter->state); - return 0; - } - - /** -- * igb_open - Called when a network interface is made active -- * @netdev: network interface device structure -+ * igb_open - Called when a network interface is made active -+ * @netdev: network interface device structure - * -- * Returns 0 on success, negative value on failure -+ * Returns 0 on success, negative value on failure - * -- * The open entry point is called when a network interface is made -- * active by the system (IFF_UP). At this point all resources needed -- * for transmit and receive operations are allocated, the interrupt -- * handler is registered with the OS, the watchdog timer is started, -- * and the stack is notified that the interface is ready. -+ * The open entry point is called when a network interface is made -+ * active by the system (IFF_UP). At this point all resources needed -+ * for transmit and receive operations are allocated, the interrupt -+ * handler is registered with the OS, the watchdog timer is started, -+ * and the stack is notified that the interface is ready. - **/ - static int __igb_open(struct net_device *netdev, bool resuming) - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; -+#ifdef CONFIG_PM_RUNTIME - struct pci_dev *pdev = adapter->pdev; -+#endif /* CONFIG_PM_RUNTIME */ - int err; - int i; - -@@ -3043,8 +3366,10 @@ - return -EBUSY; - } - -+#ifdef CONFIG_PM_RUNTIME - if (!resuming) - pm_runtime_get_sync(&pdev->dev); -+#endif /* CONFIG_PM_RUNTIME */ - - netif_carrier_off(netdev); - -@@ -3072,12 +3397,12 @@ - goto err_req_irq; - - /* Notify the stack of the actual queue counts. */ -- err = netif_set_real_num_tx_queues(adapter->netdev, -- adapter->num_tx_queues); -- if (err) -- goto err_set_queues; -+ netif_set_real_num_tx_queues(netdev, -+ adapter->vmdq_pools ? 1 : -+ adapter->num_tx_queues); - -- err = netif_set_real_num_rx_queues(adapter->netdev, -+ err = netif_set_real_num_rx_queues(netdev, -+ adapter->vmdq_pools ? 1 : - adapter->num_rx_queues); - if (err) - goto err_set_queues; -@@ -3087,30 +3412,31 @@ - - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&(adapter->q_vector[i]->napi)); -+ igb_configure_lli(adapter); - - /* Clear any pending interrupts. */ -- rd32(E1000_ICR); -+ E1000_READ_REG(hw, E1000_ICR); - - igb_irq_enable(adapter); - - /* notify VFs that reset has been completed */ - if (adapter->vfs_allocated_count) { -- u32 reg_data = rd32(E1000_CTRL_EXT); -+ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT); - - reg_data |= E1000_CTRL_EXT_PFRSTD; -- wr32(E1000_CTRL_EXT, reg_data); -+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data); - } - - netif_tx_start_all_queues(netdev); - -- if (!resuming) -- pm_runtime_put(&pdev->dev); -+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA) -+ schedule_work(&adapter->dma_err_task); - - /* start the watchdog. 
*/ - hw->mac.get_link_status = 1; - schedule_work(&adapter->watchdog_task); - -- return 0; -+ return E1000_SUCCESS; - - err_set_queues: - igb_free_irq(adapter); -@@ -3122,59 +3448,72 @@ - igb_free_all_tx_resources(adapter); - err_setup_tx: - igb_reset(adapter); -+ -+#ifdef CONFIG_PM_RUNTIME - if (!resuming) - pm_runtime_put(&pdev->dev); -+#endif /* CONFIG_PM_RUNTIME */ - - return err; - } - --static int igb_open(struct net_device *netdev) -+int igb_open(struct net_device *netdev) - { - return __igb_open(netdev, false); - } - - /** -- * igb_close - Disables a network interface -- * @netdev: network interface device structure -+ * igb_close - Disables a network interface -+ * @netdev: network interface device structure - * -- * Returns 0, this is not allowed to fail -+ * Returns 0, this is not allowed to fail - * -- * The close entry point is called when an interface is de-activated -- * by the OS. The hardware is still under the driver's control, but -- * needs to be disabled. A global MAC reset is issued to stop the -- * hardware, and all transmit and receive resources are freed. -+ * The close entry point is called when an interface is de-activated -+ * by the OS. The hardware is still under the driver's control, but -+ * needs to be disabled. A global MAC reset is issued to stop the -+ * hardware, and all transmit and receive resources are freed. - **/ - static int __igb_close(struct net_device *netdev, bool suspending) - { - struct igb_adapter *adapter = netdev_priv(netdev); -+#ifdef CONFIG_PM_RUNTIME - struct pci_dev *pdev = adapter->pdev; -+#endif /* CONFIG_PM_RUNTIME */ - - WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); - -+#ifdef CONFIG_PM_RUNTIME - if (!suspending) - pm_runtime_get_sync(&pdev->dev); -+#endif /* CONFIG_PM_RUNTIME */ - - igb_down(adapter); -+ -+ igb_release_hw_control(adapter); -+ - igb_free_irq(adapter); - - igb_free_all_tx_resources(adapter); - igb_free_all_rx_resources(adapter); - -+#ifdef CONFIG_PM_RUNTIME - if (!suspending) - pm_runtime_put_sync(&pdev->dev); -+#endif /* CONFIG_PM_RUNTIME */ -+ - return 0; - } - --static int igb_close(struct net_device *netdev) -+int igb_close(struct net_device *netdev) - { - return __igb_close(netdev, false); - } - - /** -- * igb_setup_tx_resources - allocate Tx resources (Descriptors) -- * @tx_ring: tx descriptor ring (for a specific queue) to setup -+ * igb_setup_tx_resources - allocate Tx resources (Descriptors) -+ * @tx_ring: tx descriptor ring (for a specific queue) to setup - * -- * Return 0 on success, negative on failure -+ * Return 0 on success, negative on failure - **/ - int igb_setup_tx_resources(struct igb_ring *tx_ring) - { -@@ -3182,7 +3521,6 @@ - int size; - - size = sizeof(struct igb_tx_buffer) * tx_ring->count; -- - tx_ring->tx_buffer_info = vzalloc(size); - if (!tx_ring->tx_buffer_info) - goto err; -@@ -3193,6 +3531,7 @@ - - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); -+ - if (!tx_ring->desc) - goto err; - -@@ -3203,17 +3542,17 @@ - - err: - vfree(tx_ring->tx_buffer_info); -- tx_ring->tx_buffer_info = NULL; -- dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); -+ dev_err(dev, -+ "Unable to allocate memory for the transmit descriptor ring\n"); - return -ENOMEM; - } - - /** -- * igb_setup_all_tx_resources - wrapper to allocate Tx resources -- * (Descriptors) for all queues -- * @adapter: board private structure -+ * igb_setup_all_tx_resources - wrapper to allocate Tx resources -+ * (Descriptors) for all queues -+ * @adapter: board private 
structure - * -- * Return 0 on success, negative on failure -+ * Return 0 on success, negative on failure - **/ - static int igb_setup_all_tx_resources(struct igb_adapter *adapter) - { -@@ -3223,7 +3562,7 @@ - for (i = 0; i < adapter->num_tx_queues; i++) { - err = igb_setup_tx_resources(adapter->tx_ring[i]); - if (err) { -- dev_err(&pdev->dev, -+ dev_err(pci_dev_to_dev(pdev), - "Allocation for Tx Queue %u failed\n", i); - for (i--; i >= 0; i--) - igb_free_tx_resources(adapter->tx_ring[i]); -@@ -3235,8 +3574,8 @@ - } - - /** -- * igb_setup_tctl - configure the transmit control registers -- * @adapter: Board private structure -+ * igb_setup_tctl - configure the transmit control registers -+ * @adapter: Board private structure - **/ - void igb_setup_tctl(struct igb_adapter *adapter) - { -@@ -3244,28 +3583,45 @@ - u32 tctl; - - /* disable queue 0 which is enabled by default on 82575 and 82576 */ -- wr32(E1000_TXDCTL(0), 0); -+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0); - - /* Program the Transmit Control Register */ -- tctl = rd32(E1000_TCTL); -+ tctl = E1000_READ_REG(hw, E1000_TCTL); - tctl &= ~E1000_TCTL_CT; - tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | - (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); - -- igb_config_collision_dist(hw); -+ igb_e1000_config_collision_dist(hw); - - /* Enable transmits */ - tctl |= E1000_TCTL_EN; - -- wr32(E1000_TCTL, tctl); -+ E1000_WRITE_REG(hw, E1000_TCTL, tctl); -+} -+ -+static u32 igb_tx_wthresh(struct igb_adapter *adapter) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ -+ switch (hw->mac.type) { -+ case e1000_i354: -+ return 4; -+ case e1000_82576: -+ if (adapter->msix_entries) -+ return 1; -+ default: -+ break; -+ } -+ -+ return 16; - } - - /** -- * igb_configure_tx_ring - Configure transmit ring after Reset -- * @adapter: board private structure -- * @ring: tx ring to configure -+ * igb_configure_tx_ring - Configure transmit ring after Reset -+ * @adapter: board private structure -+ * @ring: tx ring to configure - * -- * Configure a transmit ring after a reset. -+ * Configure a transmit ring after a reset. 
- **/ - void igb_configure_tx_ring(struct igb_adapter *adapter, - struct igb_ring *ring) -@@ -3276,33 +3632,33 @@ - int reg_idx = ring->reg_idx; - - /* disable the queue */ -- wr32(E1000_TXDCTL(reg_idx), 0); -- wrfl(); -+ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0); -+ E1000_WRITE_FLUSH(hw); - mdelay(10); - -- wr32(E1000_TDLEN(reg_idx), -- ring->count * sizeof(union e1000_adv_tx_desc)); -- wr32(E1000_TDBAL(reg_idx), -- tdba & 0x00000000ffffffffULL); -- wr32(E1000_TDBAH(reg_idx), tdba >> 32); -+ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx), -+ ring->count * sizeof(union e1000_adv_tx_desc)); -+ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx), -+ tdba & 0x00000000ffffffffULL); -+ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32); - -- ring->tail = hw->hw_addr + E1000_TDT(reg_idx); -- wr32(E1000_TDH(reg_idx), 0); -+ ring->tail = adapter->io_addr + E1000_TDT(reg_idx); -+ E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0); - writel(0, ring->tail); - - txdctl |= IGB_TX_PTHRESH; - txdctl |= IGB_TX_HTHRESH << 8; -- txdctl |= IGB_TX_WTHRESH << 16; -+ txdctl |= igb_tx_wthresh(adapter) << 16; - - txdctl |= E1000_TXDCTL_QUEUE_ENABLE; -- wr32(E1000_TXDCTL(reg_idx), txdctl); -+ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl); - } - - /** -- * igb_configure_tx - Configure transmit Unit after Reset -- * @adapter: board private structure -+ * igb_configure_tx - Configure transmit Unit after Reset -+ * @adapter: board private structure - * -- * Configure the Tx unit of the MAC after a reset. -+ * Configure the Tx unit of the MAC after a reset. - **/ - static void igb_configure_tx(struct igb_adapter *adapter) - { -@@ -3313,28 +3669,30 @@ - } - - /** -- * igb_setup_rx_resources - allocate Rx resources (Descriptors) -- * @rx_ring: Rx descriptor ring (for a specific queue) to setup -+ * igb_setup_rx_resources - allocate Rx resources (Descriptors) -+ * @rx_ring: rx descriptor ring (for a specific queue) to setup - * -- * Returns 0 on success, negative on failure -+ * Returns 0 on success, negative on failure - **/ - int igb_setup_rx_resources(struct igb_ring *rx_ring) - { - struct device *dev = rx_ring->dev; -- int size; -+ int size, desc_len; - - size = sizeof(struct igb_rx_buffer) * rx_ring->count; -- - rx_ring->rx_buffer_info = vzalloc(size); - if (!rx_ring->rx_buffer_info) - goto err; - -+ desc_len = sizeof(union e1000_adv_rx_desc); -+ - /* Round up to nearest 4K */ -- rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); -+ rx_ring->size = rx_ring->count * desc_len; - rx_ring->size = ALIGN(rx_ring->size, 4096); - - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); -+ - if (!rx_ring->desc) - goto err; - -@@ -3347,16 +3705,17 @@ - err: - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; -- dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); -+ dev_err(dev, -+ "Unable to allocate memory for the receive descriptor ring\n"); - return -ENOMEM; - } - - /** -- * igb_setup_all_rx_resources - wrapper to allocate Rx resources -- * (Descriptors) for all queues -- * @adapter: board private structure -+ * igb_setup_all_rx_resources - wrapper to allocate Rx resources -+ * (Descriptors) for all queues -+ * @adapter: board private structure - * -- * Return 0 on success, negative on failure -+ * Return 0 on success, negative on failure - **/ - static int igb_setup_all_rx_resources(struct igb_adapter *adapter) - { -@@ -3366,7 +3725,7 @@ - for (i = 0; i < adapter->num_rx_queues; i++) { - err = igb_setup_rx_resources(adapter->rx_ring[i]); - if (err) { -- 
dev_err(&pdev->dev, -+ dev_err(pci_dev_to_dev(pdev), - "Allocation for Rx Queue %u failed\n", i); - for (i--; i >= 0; i--) - igb_free_rx_resources(adapter->rx_ring[i]); -@@ -3378,14 +3737,17 @@ - } - - /** -- * igb_setup_mrqc - configure the multiple receive queue control registers -- * @adapter: Board private structure -+ * igb_setup_mrqc - configure the multiple receive queue control registers -+ * @adapter: Board private structure - **/ - static void igb_setup_mrqc(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - u32 mrqc, rxcsum; - u32 j, num_rx_queues; -+#ifndef ETHTOOL_SRXFHINDIR -+ u32 shift = 0, shift2 = 0; -+#endif /* ETHTOOL_SRXFHINDIR */ - static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, - 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, - 0xA32DCB77, 0x0CF23080, 0x3BB7426A, -@@ -3393,33 +3755,72 @@ - - /* Fill out hash function seeds */ - for (j = 0; j < 10; j++) -- wr32(E1000_RSSRK(j), rsskey[j]); -+ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]); - - num_rx_queues = adapter->rss_queues; - -- switch (hw->mac.type) { -- case e1000_82576: -+#ifdef ETHTOOL_SRXFHINDIR -+ if (hw->mac.type == e1000_82576) { - /* 82576 supports 2 RSS queues for SR-IOV */ - if (adapter->vfs_allocated_count) - num_rx_queues = 2; -- break; -- default: -- break; - } -- - if (adapter->rss_indir_tbl_init != num_rx_queues) { - for (j = 0; j < IGB_RETA_SIZE; j++) - adapter->rss_indir_tbl[j] = -- (j * num_rx_queues) / IGB_RETA_SIZE; -+ (j * num_rx_queues) / IGB_RETA_SIZE; - adapter->rss_indir_tbl_init = num_rx_queues; - } - igb_write_rss_indir_tbl(adapter); -+#else -+ /* 82575 and 82576 supports 2 RSS queues for VMDq */ -+ switch (hw->mac.type) { -+ case e1000_82575: -+ if (adapter->vmdq_pools) { -+ shift = 2; -+ shift2 = 6; -+ } -+ shift = 6; -+ break; -+ case e1000_82576: -+ /* 82576 supports 2 RSS queues for SR-IOV */ -+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) { -+ shift = 3; -+ num_rx_queues = 2; -+ } -+ break; -+ default: -+ break; -+ } -+ -+ /* -+ * Populate the redirection table 4 entries at a time. To do this -+ * we are generating the results for n and n+2 and then interleaving -+ * those with the results with n+1 and n+3. -+ */ -+ for (j = 0; j < 32; j++) { -+ /* first pass generates n and n+2 */ -+ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; -+ u32 reta = (base & 0x07800780) >> (7 - shift); -+ -+ /* second pass generates n+1 and n+3 */ -+ base += 0x00010001 * num_rx_queues; -+ reta |= (base & 0x07800780) << (1 + shift); -+ -+ /* generate 2nd table for 82575 based parts */ -+ if (shift2) -+ reta |= (0x01010101 * num_rx_queues) << shift2; -+ -+ E1000_WRITE_REG(hw, E1000_RETA(j), reta); -+ } -+#endif /* ETHTOOL_SRXFHINDIR */ - -- /* Disable raw packet checksumming so that RSS hash is placed in -+ /* -+ * Disable raw packet checksumming so that RSS hash is placed in - * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum - * offloads as they are enabled by default - */ -- rxcsum = rd32(E1000_RXCSUM); -+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); - rxcsum |= E1000_RXCSUM_PCSD; - - if (adapter->hw.mac.type >= e1000_82576) -@@ -3427,7 +3828,7 @@ - rxcsum |= E1000_RXCSUM_CRCOFL; - - /* Don't need to set TUOFL or IPOFL, they default to 1 */ -- wr32(E1000_RXCSUM, rxcsum); -+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); - - /* Generate RSS hash based on packet types, TCP/UDP - * port numbers and/or IPv4/v6 src and dst addresses -@@ -3447,40 +3848,39 @@ - * we default to RSS so that an RSS hash is calculated per packet even - * if we are only using one queue - */ -- if (adapter->vfs_allocated_count) { -+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) { - if (hw->mac.type > e1000_82575) { - /* Set the default pool for the PF's first queue */ -- u32 vtctl = rd32(E1000_VT_CTL); -+ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL); - - vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | - E1000_VT_CTL_DISABLE_DEF_POOL); - vtctl |= adapter->vfs_allocated_count << - E1000_VT_CTL_DEFAULT_POOL_SHIFT; -- wr32(E1000_VT_CTL, vtctl); -+ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl); - } - if (adapter->rss_queues > 1) - mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; - else - mrqc |= E1000_MRQC_ENABLE_VMDQ; - } else { -- if (hw->mac.type != e1000_i211) -- mrqc |= E1000_MRQC_ENABLE_RSS_4Q; -+ mrqc |= E1000_MRQC_ENABLE_RSS_4Q; - } - igb_vmm_control(adapter); - -- wr32(E1000_MRQC, mrqc); -+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc); - } - - /** -- * igb_setup_rctl - configure the receive control registers -- * @adapter: Board private structure -+ * igb_setup_rctl - configure the receive control registers -+ * @adapter: Board private structure - **/ - void igb_setup_rctl(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - -- rctl = rd32(E1000_RCTL); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); - - rctl &= ~(3 << E1000_RCTL_MO_SHIFT); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); -@@ -3488,7 +3888,8 @@ - rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | - (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); - -- /* enable stripping of CRC. It's unlikely this will break BMC -+ /* -+ * enable stripping of CRC. It's unlikely this will break BMC - * redirection as it did with e1000. Newer features require - * that the HW strips the CRC. - */ -@@ -3501,7 +3902,7 @@ - rctl |= E1000_RCTL_LPE; - - /* disable queue 0 to prevent tail write w/o re-config */ -- wr32(E1000_RXDCTL(0), 0); -+ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0); - - /* Attention!!! For SR-IOV PF driver operations you must enable - * queue drop for all VF and PF queues to prevent head of line blocking -@@ -3509,27 +3910,10 @@ - */ - if (adapter->vfs_allocated_count) { - /* set all queue drop enable bits */ -- wr32(E1000_QDE, ALL_QUEUES); -- } -- -- /* This is useful for sniffing bad packets. */ -- if (adapter->netdev->features & NETIF_F_RXALL) { -- /* UPE and MPE will be handled by normal PROMISC logic -- * in e1000e_set_rx_mode -- */ -- rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ -- E1000_RCTL_BAM | /* RX All Bcast Pkts */ -- E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ -- -- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ -- E1000_RCTL_DPF | /* Allow filtered pause */ -- E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ -- /* Do not mess with E1000_CTRL_VME, it affects transmit as well, -- * and that breaks VLANs. 
-- */ -+ E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES); - } - -- wr32(E1000_RCTL, rctl); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); - } - - static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, -@@ -3543,21 +3927,31 @@ - */ - if (vfn < adapter->vfs_allocated_count && - adapter->vf_data[vfn].vlans_enabled) -- size += VLAN_TAG_SIZE; -+ size += VLAN_HLEN; - -- vmolr = rd32(E1000_VMOLR(vfn)); -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ if (vfn >= adapter->vfs_allocated_count) { -+ int queue = vfn - adapter->vfs_allocated_count; -+ struct igb_vmdq_adapter *vadapter; -+ -+ vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]); -+ if (vadapter->vlgrp) -+ size += VLAN_HLEN; -+ } -+#endif -+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); - vmolr &= ~E1000_VMOLR_RLPML_MASK; - vmolr |= size | E1000_VMOLR_LPE; -- wr32(E1000_VMOLR(vfn), vmolr); -+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); - - return 0; - } - - /** -- * igb_rlpml_set - set maximum receive packet size -- * @adapter: board private structure -+ * igb_rlpml_set - set maximum receive packet size -+ * @adapter: board private structure - * -- * Configure maximum receivable packet size. -+ * Configure maximum receivable packet size. - **/ - static void igb_rlpml_set(struct igb_adapter *adapter) - { -@@ -3565,9 +3959,13 @@ - struct e1000_hw *hw = &adapter->hw; - u16 pf_id = adapter->vfs_allocated_count; - -- if (pf_id) { -- igb_set_vf_rlpml(adapter, max_frame_size, pf_id); -- /* If we're in VMDQ or SR-IOV mode, then set global RLPML -+ if (adapter->vmdq_pools && hw->mac.type != e1000_82575) { -+ int i; -+ -+ for (i = 0; i < adapter->vmdq_pools; i++) -+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i); -+ /* -+ * If we're in VMDQ or SR-IOV mode, then set global RLPML - * to our max jumbo frame size, in case we need to enable - * jumbo frames on one of the rings later. - * This will not pass over-length frames into the default -@@ -3575,56 +3973,73 @@ - */ - max_frame_size = MAX_JUMBO_FRAME_SIZE; - } -+ /* Set VF RLPML for the PF device. 
*/ -+ if (adapter->vfs_allocated_count) -+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - -- wr32(E1000_RLPML, max_frame_size); -+ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size); - } - -+static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, -+ int vfn, bool enable) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ u32 val; -+ void __iomem *reg; -+ -+ if (hw->mac.type < e1000_82576) -+ return; -+ -+ if (hw->mac.type == e1000_i350) -+ reg = hw->hw_addr + E1000_DVMOLR(vfn); -+ else -+ reg = hw->hw_addr + E1000_VMOLR(vfn); -+ -+ val = readl(reg); -+ if (enable) -+ val |= E1000_VMOLR_STRVLAN; -+ else -+ val &= ~(E1000_VMOLR_STRVLAN); -+ writel(val, reg); -+} - static inline void igb_set_vmolr(struct igb_adapter *adapter, - int vfn, bool aupe) - { - struct e1000_hw *hw = &adapter->hw; - u32 vmolr; - -- /* This register exists only on 82576 and newer so if we are older then -+ /* -+ * This register exists only on 82576 and newer so if we are older then - * we should exit and do nothing - */ - if (hw->mac.type < e1000_82576) - return; - -- vmolr = rd32(E1000_VMOLR(vfn)); -- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ -- if (hw->mac.type == e1000_i350) { -- u32 dvmolr; -+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn)); - -- dvmolr = rd32(E1000_DVMOLR(vfn)); -- dvmolr |= E1000_DVMOLR_STRVLAN; -- wr32(E1000_DVMOLR(vfn), dvmolr); -- } - if (aupe) -- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ -+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ - else - vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ - - /* clear all bits that might not be set */ -- vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); -+ vmolr &= ~E1000_VMOLR_RSSE; - - if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) - vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ -- /* for VMDq only allow the VFs and pool 0 to accept broadcast and -- * multicast packets -- */ -- if (vfn <= adapter->vfs_allocated_count) -- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ - -- wr32(E1000_VMOLR(vfn), vmolr); -+ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ -+ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */ -+ -+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); - } - - /** -- * igb_configure_rx_ring - Configure a receive ring after Reset -- * @adapter: board private structure -- * @ring: receive ring to be configured -+ * igb_configure_rx_ring - Configure a receive ring after Reset -+ * @adapter: board private structure -+ * @ring: receive ring to be configured - * -- * Configure the Rx unit of the MAC after a reset. -+ * Configure the Rx unit of the MAC after a reset. - **/ - void igb_configure_rx_ring(struct igb_adapter *adapter, - struct igb_ring *ring) -@@ -3634,32 +4049,67 @@ - int reg_idx = ring->reg_idx; - u32 srrctl = 0, rxdctl = 0; - -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ /* -+ * RLPML prevents us from receiving a frame larger than max_frame so -+ * it is safe to just set the rx_buffer_len to max_frame without the -+ * risk of an skb over panic. 
-+ */ -+ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size, -+ MAXIMUM_ETHERNET_VLAN_SIZE); -+ -+#endif - /* disable the queue */ -- wr32(E1000_RXDCTL(reg_idx), 0); -+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0); - - /* Set DMA base address registers */ -- wr32(E1000_RDBAL(reg_idx), -- rdba & 0x00000000ffffffffULL); -- wr32(E1000_RDBAH(reg_idx), rdba >> 32); -- wr32(E1000_RDLEN(reg_idx), -- ring->count * sizeof(union e1000_adv_rx_desc)); -+ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx), -+ rdba & 0x00000000ffffffffULL); -+ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32); -+ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx), -+ ring->count * sizeof(union e1000_adv_rx_desc)); - - /* initialize head and tail */ -- ring->tail = hw->hw_addr + E1000_RDT(reg_idx); -- wr32(E1000_RDH(reg_idx), 0); -+ ring->tail = adapter->io_addr + E1000_RDT(reg_idx); -+ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0); - writel(0, ring->tail); - -+ /* reset next-to- use/clean to place SW in sync with hardwdare */ -+ ring->next_to_clean = 0; -+ ring->next_to_use = 0; -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ ring->next_to_alloc = 0; -+ -+#endif - /* set descriptor configuration */ -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; -+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ -+ srrctl = ALIGN(ring->rx_buffer_len, 1024) >> -+ E1000_SRRCTL_BSIZEPKT_SHIFT; -+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; -+#ifdef HAVE_PTP_1588_CLOCK - if (hw->mac.type >= e1000_82580) - srrctl |= E1000_SRRCTL_TIMESTAMP; -- /* Only set Drop Enable if we are supporting multiple queues */ -- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) -+#endif /* HAVE_PTP_1588_CLOCK */ -+ /* -+ * We should set the drop enable bit if: -+ * SR-IOV is enabled -+ * or -+ * Flow Control is disabled and number of RX queues > 1 -+ * -+ * This allows us to avoid head of line blocking for security -+ * and performance reasons. -+ */ -+ if (adapter->vfs_allocated_count || -+ (adapter->num_rx_queues > 1 && -+ (hw->fc.requested_mode == e1000_fc_none || -+ hw->fc.requested_mode == e1000_fc_rx_pause))) - srrctl |= E1000_SRRCTL_DROP_EN; - -- wr32(E1000_SRRCTL(reg_idx), srrctl); -+ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl); - - /* set filtering for VMDQ pools */ - igb_set_vmolr(adapter, reg_idx & 0x7, true); -@@ -3670,14 +4120,14 @@ - - /* enable receive descriptor fetching */ - rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; -- wr32(E1000_RXDCTL(reg_idx), rxdctl); -+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl); - } - - /** -- * igb_configure_rx - Configure receive Unit after Reset -- * @adapter: board private structure -+ * igb_configure_rx - Configure receive Unit after Reset -+ * @adapter: board private structure - * -- * Configure the Rx unit of the MAC after a reset. -+ * Configure the Rx unit of the MAC after a reset. 
- **/ - static void igb_configure_rx(struct igb_adapter *adapter) - { -@@ -3698,10 +4148,10 @@ - } - - /** -- * igb_free_tx_resources - Free Tx Resources per Queue -- * @tx_ring: Tx descriptor ring for a specific queue -+ * igb_free_tx_resources - Free Tx Resources per Queue -+ * @tx_ring: Tx descriptor ring for a specific queue - * -- * Free all transmit software resources -+ * Free all transmit software resources - **/ - void igb_free_tx_resources(struct igb_ring *tx_ring) - { -@@ -3721,10 +4171,10 @@ - } - - /** -- * igb_free_all_tx_resources - Free Tx Resources for All Queues -- * @adapter: board private structure -+ * igb_free_all_tx_resources - Free Tx Resources for All Queues -+ * @adapter: board private structure - * -- * Free all transmit software resources -+ * Free all transmit software resources - **/ - static void igb_free_all_tx_resources(struct igb_adapter *adapter) - { -@@ -3746,9 +4196,9 @@ - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, -- dma_unmap_addr(tx_buffer, dma), -- dma_unmap_len(tx_buffer, len), -- DMA_TO_DEVICE); -+ dma_unmap_addr(tx_buffer, dma), -+ dma_unmap_len(tx_buffer, len), -+ DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; -@@ -3757,8 +4207,8 @@ - } - - /** -- * igb_clean_tx_ring - Free Tx Buffers -- * @tx_ring: ring to be cleaned -+ * igb_clean_tx_ring - Free Tx Buffers -+ * @tx_ring: ring to be cleaned - **/ - static void igb_clean_tx_ring(struct igb_ring *tx_ring) - { -@@ -3788,8 +4238,8 @@ - } - - /** -- * igb_clean_all_tx_rings - Free Tx Buffers for all queues -- * @adapter: board private structure -+ * igb_clean_all_tx_rings - Free Tx Buffers for all queues -+ * @adapter: board private structure - **/ - static void igb_clean_all_tx_rings(struct igb_adapter *adapter) - { -@@ -3800,10 +4250,10 @@ - } - - /** -- * igb_free_rx_resources - Free Rx Resources -- * @rx_ring: ring to clean the resources from -+ * igb_free_rx_resources - Free Rx Resources -+ * @rx_ring: ring to clean the resources from - * -- * Free all receive software resources -+ * Free all receive software resources - **/ - void igb_free_rx_resources(struct igb_ring *rx_ring) - { -@@ -3823,10 +4273,10 @@ - } - - /** -- * igb_free_all_rx_resources - Free Rx Resources for All Queues -- * @adapter: board private structure -+ * igb_free_all_rx_resources - Free Rx Resources for All Queues -+ * @adapter: board private structure - * -- * Free all receive software resources -+ * Free all receive software resources - **/ - static void igb_free_all_rx_resources(struct igb_adapter *adapter) - { -@@ -3837,25 +4287,40 @@ - } - - /** -- * igb_clean_rx_ring - Free Rx Buffers per Queue -- * @rx_ring: ring to free buffers from -+ * igb_clean_rx_ring - Free Rx Buffers per Queue -+ * @rx_ring: ring to free buffers from - **/ --static void igb_clean_rx_ring(struct igb_ring *rx_ring) -+void igb_clean_rx_ring(struct igb_ring *rx_ring) - { - unsigned long size; - u16 i; - -+ if (!rx_ring->rx_buffer_info) -+ return; -+ -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); - rx_ring->skb = NULL; - -- if (!rx_ring->rx_buffer_info) -- return; -- -+#endif - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ if (buffer_info->dma) { -+ dma_unmap_single(rx_ring->dev, -+ buffer_info->dma, -+ rx_ring->rx_buffer_len, -+ DMA_FROM_DEVICE); -+ buffer_info->dma = 0; -+ } - -+ 
if (buffer_info->skb) { -+ dev_kfree_skb(buffer_info->skb); -+ buffer_info->skb = NULL; -+ } -+#else - if (!buffer_info->page) - continue; - -@@ -3866,6 +4331,7 @@ - __free_page(buffer_info->page); - - buffer_info->page = NULL; -+#endif - } - - size = sizeof(struct igb_rx_buffer) * rx_ring->count; -@@ -3880,8 +4346,8 @@ - } - - /** -- * igb_clean_all_rx_rings - Free Rx Buffers for all queues -- * @adapter: board private structure -+ * igb_clean_all_rx_rings - Free Rx Buffers for all queues -+ * @adapter: board private structure - **/ - static void igb_clean_all_rx_rings(struct igb_adapter *adapter) - { -@@ -3892,11 +4358,11 @@ - } - - /** -- * igb_set_mac - Change the Ethernet Address of the NIC -- * @netdev: network interface device structure -- * @p: pointer to an address structure -+ * igb_set_mac - Change the Ethernet Address of the NIC -+ * @netdev: network interface device structure -+ * @p: pointer to an address structure - * -- * Returns 0 on success, negative on failure -+ * Returns 0 on success, negative on failure - **/ - static int igb_set_mac(struct net_device *netdev, void *p) - { -@@ -3910,60 +4376,155 @@ - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - -- /* set the correct pool for the new PF MAC address in entry 0 */ -- igb_rar_set_qsel(adapter, hw->mac.addr, 0, -- adapter->vfs_allocated_count); -+ /* set the correct pool for the new PF MAC address in entry 0 */ -+ igb_rar_set_qsel(adapter, hw->mac.addr, 0, -+ adapter->vfs_allocated_count); -+ -+ return 0; -+} -+ -+/** -+ * igb_write_mc_addr_list - write multicast addresses to MTA -+ * @netdev: network interface device structure -+ * -+ * Writes multicast address list to the MTA hash table. -+ * Returns: -ENOMEM on failure -+ * 0 on no addresses written -+ * X on writing X addresses to MTA -+ **/ -+int igb_write_mc_addr_list(struct net_device *netdev) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ struct e1000_hw *hw = &adapter->hw; -+#ifdef NETDEV_HW_ADDR_T_MULTICAST -+ struct netdev_hw_addr *ha; -+#else -+ struct dev_mc_list *ha; -+#endif -+ u8 *mta_list; -+ int i, count; -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ int vm; -+#endif -+ count = netdev_mc_count(netdev); -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ for (vm = 1; vm < adapter->vmdq_pools; vm++) { -+ if (!adapter->vmdq_netdev[vm]) -+ break; -+ if (!netif_running(adapter->vmdq_netdev[vm])) -+ continue; -+ count += netdev_mc_count(adapter->vmdq_netdev[vm]); -+ } -+#endif -+ -+ if (!count) { -+ e1000_update_mc_addr_list(hw, NULL, 0); -+ return 0; -+ } -+ mta_list = kzalloc(count * 6, GFP_ATOMIC); -+ if (!mta_list) -+ return -ENOMEM; -+ -+ /* The shared function expects a packed array of only addresses. 
*/ -+ i = 0; -+ netdev_for_each_mc_addr(ha, netdev) -+#ifdef NETDEV_HW_ADDR_T_MULTICAST -+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); -+#else -+ memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN); -+#endif -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ for (vm = 1; vm < adapter->vmdq_pools; vm++) { -+ if (!adapter->vmdq_netdev[vm]) -+ break; -+ if (!netif_running(adapter->vmdq_netdev[vm]) || -+ !netdev_mc_count(adapter->vmdq_netdev[vm])) -+ continue; -+ netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm]) -+#ifdef NETDEV_HW_ADDR_T_MULTICAST -+ memcpy(mta_list + (i++ * ETH_ALEN), -+ ha->addr, ETH_ALEN); -+#else -+ memcpy(mta_list + (i++ * ETH_ALEN), -+ ha->dmi_addr, ETH_ALEN); -+#endif -+ } -+#endif -+ e1000_update_mc_addr_list(hw, mta_list, i); -+ kfree(mta_list); -+ -+ return count; -+} -+ -+void igb_full_sync_mac_table(struct igb_adapter *adapter) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ int i; - -- return 0; -+ for (i = 0; i < hw->mac.rar_entry_count; i++) -+ igb_rar_set(adapter, i); - } - --/** -- * igb_write_mc_addr_list - write multicast addresses to MTA -- * @netdev: network interface device structure -- * -- * Writes multicast address list to the MTA hash table. -- * Returns: -ENOMEM on failure -- * 0 on no addresses written -- * X on writing X addresses to MTA -- **/ --static int igb_write_mc_addr_list(struct net_device *netdev) -+void igb_sync_mac_table(struct igb_adapter *adapter) - { -- struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; -- struct netdev_hw_addr *ha; -- u8 *mta_list; - int i; - -- if (netdev_mc_empty(netdev)) { -- /* nothing to program, so clear mc list */ -- igb_update_mc_addr_list(hw, NULL, 0); -- igb_restore_vf_multicasts(adapter); -- return 0; -+ for (i = 0; i < hw->mac.rar_entry_count; i++) { -+ if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED) -+ igb_rar_set(adapter, i); -+ adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED); - } -+} - -- mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); -- if (!mta_list) -- return -ENOMEM; -+int igb_available_rars(struct igb_adapter *adapter) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ int i, count = 0; - -- /* The shared function expects a packed array of only addresses. */ -- i = 0; -- netdev_for_each_mc_addr(ha, netdev) -- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); -+ for (i = 0; i < hw->mac.rar_entry_count; i++) { -+ if (adapter->mac_table[i].state == 0) -+ count++; -+ } -+ return count; -+} - -- igb_update_mc_addr_list(hw, mta_list, i); -- kfree(mta_list); -+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, -+ u8 qsel) -+{ -+ u32 rar_low, rar_high; -+ struct e1000_hw *hw = &adapter->hw; -+ -+ /* HW expects these in little endian so we reverse the byte order -+ * from network order (big endian) to little endian -+ */ -+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | -+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); -+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); -+ -+ /* Indicate to hardware the Address is Valid. 
*/ -+ rar_high |= E1000_RAH_AV; -+ -+ if (hw->mac.type == e1000_82575) -+ rar_high |= E1000_RAH_POOL_1 * qsel; -+ else -+ rar_high |= E1000_RAH_POOL_1 << qsel; - -- return netdev_mc_count(netdev); -+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); -+ E1000_WRITE_FLUSH(hw); -+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); -+ E1000_WRITE_FLUSH(hw); - } - -+#ifdef HAVE_SET_RX_MODE - /** -- * igb_write_uc_addr_list - write unicast addresses to RAR table -- * @netdev: network interface device structure -+ * igb_write_uc_addr_list - write unicast addresses to RAR table -+ * @netdev: network interface device structure - * -- * Writes unicast address list to the RAR table. -- * Returns: -ENOMEM on failure/insufficient address space -- * 0 on no addresses written -- * X on writing X addresses to the RAR table -+ * Writes unicast address list to the RAR table. -+ * Returns: -ENOMEM on failure/insufficient address space -+ * 0 on no addresses written -+ * X on writing X addresses to the RAR table - **/ - static int igb_write_uc_addr_list(struct net_device *netdev) - { -@@ -3974,39 +4535,48 @@ - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ -- if (netdev_uc_count(netdev) > rar_entries) -+ if (netdev_uc_count(netdev) > igb_available_rars(adapter)) - return -ENOMEM; -- - if (!netdev_uc_empty(netdev) && rar_entries) { -+#ifdef NETDEV_HW_ADDR_T_UNICAST - struct netdev_hw_addr *ha; -- -+#else -+ struct dev_mc_list *ha; -+#endif - netdev_for_each_uc_addr(ha, netdev) { -+#ifdef NETDEV_HW_ADDR_T_UNICAST - if (!rar_entries) - break; - igb_rar_set_qsel(adapter, ha->addr, - rar_entries--, - vfn); -+#else -+ igb_rar_set_qsel(adapter, ha->da_addr, -+ rar_entries--, -+ vfn); -+#endif - count++; - } - } -+ - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) { -- wr32(E1000_RAH(rar_entries), 0); -- wr32(E1000_RAL(rar_entries), 0); -+ E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0); -+ E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0); - } -- wrfl(); -- -+ E1000_WRITE_FLUSH(hw); - return count; - } - -+#endif /* HAVE_SET_RX_MODE */ - /** -- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set -- * @netdev: network interface device structure -+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set -+ * @netdev: network interface device structure - * -- * The set_rx_mode entry point is called whenever the unicast or multicast -- * address lists or the network interface flags are updated. This routine is -- * responsible for configuring the hardware for proper unicast, multicast, -- * promiscuous mode, and all-multi behavior. -+ * The set_rx_mode entry point is called whenever the unicast or multicast -+ * address lists or the network interface flags are updated. This routine is -+ * responsible for configuring the hardware for proper unicast, multicast, -+ * promiscuous mode, and all-multi behavior. 
- **/ - static void igb_set_rx_mode(struct net_device *netdev) - { -@@ -4017,23 +4587,24 @@ - int count; - - /* Check for Promiscuous and All Multicast modes */ -- rctl = rd32(E1000_RCTL); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); - - /* clear the effected bits */ - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); - - if (netdev->flags & IFF_PROMISC) { -- /* retain VLAN HW filtering if in VT mode */ -- if (adapter->vfs_allocated_count) -- rctl |= E1000_RCTL_VFE; - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); -+ /* retain VLAN HW filtering if in VT mode */ -+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) -+ rctl |= E1000_RCTL_VFE; - } else { - if (netdev->flags & IFF_ALLMULTI) { - rctl |= E1000_RCTL_MPE; - vmolr |= E1000_VMOLR_MPME; - } else { -- /* Write addresses to the MTA, if the attempt fails -+ /* -+ * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ -@@ -4045,7 +4616,9 @@ - vmolr |= E1000_VMOLR_ROMPE; - } - } -- /* Write addresses to available RAR registers, if there is not -+#ifdef HAVE_SET_RX_MODE -+ /* -+ * Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ -@@ -4054,21 +4627,23 @@ - rctl |= E1000_RCTL_UPE; - vmolr |= E1000_VMOLR_ROPE; - } -+#endif /* HAVE_SET_RX_MODE */ - rctl |= E1000_RCTL_VFE; - } -- wr32(E1000_RCTL, rctl); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); - -- /* In order to support SR-IOV and eventually VMDq it is necessary to set -+ /* -+ * In order to support SR-IOV and eventually VMDq it is necessary to set - * the VMOLR to enable the appropriate modes. 
Without this workaround - * we will have issues with VLAN tag stripping not being done for frames - * that are only arriving because we are the default pool - */ -- if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) -+ if (hw->mac.type < e1000_82576) - return; - -- vmolr |= rd32(E1000_VMOLR(vfn)) & -- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); -- wr32(E1000_VMOLR(vfn), vmolr); -+ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) & -+ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); -+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); - igb_restore_vf_multicasts(adapter); - } - -@@ -4080,7 +4655,7 @@ - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: -- wvbr = rd32(E1000_WVBR); -+ wvbr = E1000_READ_REG(hw, E1000_WVBR); - if (!wvbr) - return; - break; -@@ -4100,15 +4675,34 @@ - if (!adapter->wvbr) - return; - -- for (j = 0; j < adapter->vfs_allocated_count; j++) { -- if (adapter->wvbr & (1 << j) || -- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { -- dev_warn(&adapter->pdev->dev, -- "Spoof event(s) detected on VF %d\n", j); -- adapter->wvbr &= -- ~((1 << j) | -- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); -+ switch (adapter->hw.mac.type) { -+ case e1000_82576: -+ for (j = 0; j < adapter->vfs_allocated_count; j++) { -+ if (adapter->wvbr & (1 << j) || -+ adapter->wvbr & (1 << (j -+ + IGB_STAGGERED_QUEUE_OFFSET))) { -+ DPRINTK(DRV, WARNING, -+ "Spoof event(s) detected on VF %d\n", -+ j); -+ adapter->wvbr &= -+ ~((1 << j) | -+ (1 << (j + -+ IGB_STAGGERED_QUEUE_OFFSET))); -+ } -+ } -+ break; -+ case e1000_i350: -+ for (j = 0; j < adapter->vfs_allocated_count; j++) { -+ if (adapter->wvbr & (1 << j)) { -+ DPRINTK(DRV, WARNING, -+ "Spoof event(s) detected on VF %d\n", -+ j); -+ adapter->wvbr &= ~(1 << j); -+ } - } -+ break; -+ default: -+ break; - } - } - -@@ -4118,21 +4712,22 @@ - static void igb_update_phy_info(unsigned long data) - { - struct igb_adapter *adapter = (struct igb_adapter *) data; -- igb_get_phy_info(&adapter->hw); -+ -+ e1000_get_phy_info(&adapter->hw); - } - - /** -- * igb_has_link - check shared code for link and determine up/down -- * @adapter: pointer to driver private info -+ * igb_has_link - check shared code for link and determine up/down -+ * @adapter: pointer to driver private info - **/ - bool igb_has_link(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; -- bool link_active = false; -+ bool link_active = FALSE; - - /* get_link_status is set on LSC (link status) interrupt or - * rx sequence error interrupt. 
get_link_status will stay -- * false until the e1000_check_for_link establishes link -+ * false until the igb_e1000_check_for_link establishes link - * for copper adapters ONLY - */ - switch (hw->phy.media_type) { -@@ -4140,11 +4735,11 @@ - if (!hw->mac.get_link_status) - return true; - case e1000_media_type_internal_serdes: -- hw->mac.ops.check_for_link(hw); -+ igb_e1000_check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; -- default: - case e1000_media_type_unknown: -+ default: - break; - } - -@@ -4162,27 +4757,9 @@ - return link_active; - } - --static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) --{ -- bool ret = false; -- u32 ctrl_ext, thstat; -- -- /* check for thermal sensor event on i350 copper only */ -- if (hw->mac.type == e1000_i350) { -- thstat = rd32(E1000_THSTAT); -- ctrl_ext = rd32(E1000_CTRL_EXT); -- -- if ((hw->phy.media_type == e1000_media_type_copper) && -- !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) -- ret = !!(thstat & event); -- } -- -- return ret; --} -- - /** -- * igb_watchdog - Timer Call-back -- * @data: pointer to adapter cast into an unsigned long -+ * igb_watchdog - Timer Call-back -+ * @data: pointer to adapter cast into an unsigned long - **/ - static void igb_watchdog(unsigned long data) - { -@@ -4197,29 +4774,28 @@ - struct igb_adapter, - watchdog_task); - struct e1000_hw *hw = &adapter->hw; -- struct e1000_phy_info *phy = &hw->phy; - struct net_device *netdev = adapter->netdev; -- u32 link; -+ u32 thstat, ctrl_ext, link; - int i; - u32 connsw; - - link = igb_has_link(adapter); - -- if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { -- if (time_after(jiffies, (adapter->link_check_timeout + HZ))) -- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; -- else -- link = false; -- } -- - /* Force link down if we have fiber to swap to */ - if (adapter->flags & IGB_FLAG_MAS_ENABLE) { - if (hw->phy.media_type == e1000_media_type_copper) { -- connsw = rd32(E1000_CONNSW); -+ connsw = E1000_READ_REG(hw, E1000_CONNSW); - if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) - link = 0; - } - } -+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { -+ if (time_after(jiffies, (adapter->link_check_timeout + HZ))) -+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; -+ else -+ link = FALSE; -+ } -+ - if (link) { - /* Perform a reset if the media type changed. */ - if (hw->dev_spec._82575.media_changed) { -@@ -4227,48 +4803,29 @@ - adapter->flags |= IGB_FLAG_MEDIA_RESET; - igb_reset(adapter); - } -+ - /* Cancel scheduled suspend requests. */ - pm_runtime_resume(netdev->dev.parent); - - if (!netif_carrier_ok(netdev)) { - u32 ctrl; - -- hw->mac.ops.get_speed_and_duplex(hw, -- &adapter->link_speed, -- &adapter->link_duplex); -+ igb_e1000_get_speed_and_duplex(hw, -+ &adapter->link_speed, -+ &adapter->link_duplex); - -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - /* Links status message must follow this format */ - netdev_info(netdev, -- "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", -- netdev->name, -- adapter->link_speed, -- adapter->link_duplex == FULL_DUPLEX ? -- "Full" : "Half", -- (ctrl & E1000_CTRL_TFCE) && -- (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : -- (ctrl & E1000_CTRL_RFCE) ? "RX" : -- (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); -- -- /* disable EEE if enabled */ -- if ((adapter->flags & IGB_FLAG_EEE) && -- (adapter->link_duplex == HALF_DUPLEX)) { -- dev_info(&adapter->pdev->dev, -- "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex.\n"); -- adapter->hw.dev_spec._82575.eee_disable = true; -- adapter->flags &= ~IGB_FLAG_EEE; -- } -- -- /* check if SmartSpeed worked */ -- igb_check_downshift(hw); -- if (phy->speed_downgraded) -- netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); -- -- /* check for thermal sensor event */ -- if (igb_thermal_sensor_event(hw, -- E1000_THSTAT_LINK_THROTTLE)) -- netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); -- -+ "igb: %s NIC Link is Up %d Mbps %s, Flow Control: %s\n", -+ netdev->name, -+ adapter->link_speed, -+ adapter->link_duplex == FULL_DUPLEX ? -+ "Full Duplex" : "Half Duplex", -+ ((ctrl & E1000_CTRL_TFCE) && -+ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : -+ ((ctrl & E1000_CTRL_RFCE) ? "RX" : -+ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); - /* adjust timeout factor according to speed/duplex */ - adapter->tx_timeout_factor = 1; - switch (adapter->link_speed) { -@@ -4278,12 +4835,17 @@ - case SPEED_100: - /* maybe add some timeout factor ? */ - break; -+ default: -+ break; - } - - netif_carrier_on(netdev); -+ netif_tx_wake_all_queues(netdev); - - igb_ping_all_vfs(adapter); -+#ifdef IFLA_VF_MAX - igb_check_vf_rate_limit(adapter); -+#endif /* IFLA_VF_MAX */ - - /* link state has changed, schedule phy info update */ - if (!test_bit(__IGB_DOWN, &adapter->state)) -@@ -4294,17 +4856,33 @@ - if (netif_carrier_ok(netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = 0; -- -- /* check for thermal sensor event */ -- if (igb_thermal_sensor_event(hw, -- E1000_THSTAT_PWR_DOWN)) { -- netdev_err(netdev, "The network adapter was stopped because it overheated\n"); -+ /* check for thermal sensor event on i350 */ -+ if (hw->mac.type == e1000_i350) { -+ thstat = E1000_READ_REG(hw, E1000_THSTAT); -+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); -+ if ((hw->phy.media_type == -+ e1000_media_type_copper) && -+ !(ctrl_ext & -+ E1000_CTRL_EXT_LINK_MODE_SGMII)) { -+ if (thstat & E1000_THSTAT_PWR_DOWN) { -+ netdev_err(netdev, -+ "igb: %s The network adapter was stopped because it overheated.\n", -+ netdev->name); -+ } -+ if (thstat & -+ E1000_THSTAT_LINK_THROTTLE) { -+ netdev_err(netdev, -+ "igb: %s The network adapter supported link speed was downshifted because it overheated.\n", -+ netdev->name); -+ } -+ } - } - - /* Links status message must follow this format */ - netdev_info(netdev, "igb: %s NIC Link is Down\n", - netdev->name); - netif_carrier_off(netdev); -+ netif_tx_stop_all_queues(netdev); - - igb_ping_all_vfs(adapter); - -@@ -4312,7 +4890,6 @@ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); -- - /* link is down, time to check for alternate media */ - if (adapter->flags & IGB_FLAG_MAS_ENABLE) { - igb_check_swap_media(adapter); -@@ -4328,6 +4905,7 @@ - /* also check for alternate media here */ - } else if (!netif_carrier_ok(netdev) && - (adapter->flags & IGB_FLAG_MAS_ENABLE)) { -+ hw->mac.ops.power_up_serdes(hw); - igb_check_swap_media(adapter); - if (adapter->flags & IGB_FLAG_MEDIA_RESET) { - schedule_work(&adapter->reset_task); -@@ -4337,12 +4915,11 @@ - } - } - -- spin_lock(&adapter->stats64_lock); -- igb_update_stats(adapter, &adapter->stats64); -- spin_unlock(&adapter->stats64_lock); -+ igb_update_stats(adapter); - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *tx_ring = adapter->tx_ring[i]; -+ - if (!netif_carrier_ok(netdev)) { - /* We've lost link, so the controller stops DMA, - * but we've got 
queued Tx work that's never going -@@ -4361,19 +4938,18 @@ - set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); - } - -- /* Cause software interrupt to ensure Rx ring is cleaned */ -- if (adapter->flags & IGB_FLAG_HAS_MSIX) { -+ /* Cause software interrupt to ensure rx ring is cleaned */ -+ if (adapter->msix_entries) { - u32 eics = 0; - - for (i = 0; i < adapter->num_q_vectors; i++) - eics |= adapter->q_vector[i]->eims_value; -- wr32(E1000_EICS, eics); -+ E1000_WRITE_REG(hw, E1000_EICS, eics); - } else { -- wr32(E1000_ICS, E1000_ICS_RXDMT0); -+ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0); - } - - igb_spoof_check(adapter); -- igb_ptp_rx_hang(adapter); - - /* Reset the timer */ - if (!test_bit(__IGB_DOWN, &adapter->state)) { -@@ -4386,6 +4962,70 @@ - } - } - -+static void igb_dma_err_task(struct work_struct *work) -+{ -+ struct igb_adapter *adapter = container_of(work, -+ struct igb_adapter, -+ dma_err_task); -+ int vf; -+ struct e1000_hw *hw = &adapter->hw; -+ struct net_device *netdev = adapter->netdev; -+ u32 hgptc; -+ u32 ciaa, ciad; -+ -+ hgptc = E1000_READ_REG(hw, E1000_HGPTC); -+ if (hgptc) /* If incrementing then no need for the check below */ -+ goto dma_timer_reset; -+ /* -+ * Check to see if a bad DMA write target from an errant or -+ * malicious VF has caused a PCIe error. If so then we can -+ * issue a VFLR to the offending VF(s) and then resume without -+ * requesting a full slot reset. -+ */ -+ -+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { -+ ciaa = (vf << 16) | 0x80000000; -+ /* 32 bit read so align, we really want status at offset 6 */ -+ ciaa |= PCI_COMMAND; -+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); -+ ciad = E1000_READ_REG(hw, E1000_CIAD); -+ ciaa &= 0x7FFFFFFF; -+ /* disable debug mode asap after reading data */ -+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); -+ /* Get the upper 16 bits which will be the PCI status reg */ -+ ciad >>= 16; -+ if (ciad & (PCI_STATUS_REC_MASTER_ABORT | -+ PCI_STATUS_REC_TARGET_ABORT | -+ PCI_STATUS_SIG_SYSTEM_ERROR)) { -+ netdev_err(netdev, "VF %d suffered error\n", vf); -+ /* Issue VFLR */ -+ ciaa = (vf << 16) | 0x80000000; -+ ciaa |= 0xA8; -+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); -+ ciad = 0x00008000; /* VFLR */ -+ E1000_WRITE_REG(hw, E1000_CIAD, ciad); -+ ciaa &= 0x7FFFFFFF; -+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa); -+ } -+ } -+dma_timer_reset: -+ /* Reset the timer */ -+ if (!test_bit(__IGB_DOWN, &adapter->state)) -+ mod_timer(&adapter->dma_err_timer, -+ round_jiffies(jiffies + HZ / 10)); -+} -+ -+/** -+ * igb_dma_err_timer - Timer Call-back -+ * @data: pointer to adapter cast into an unsigned long -+ **/ -+static void igb_dma_err_timer(unsigned long data) -+{ -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ /* Do the rest outside of interrupt context */ -+ schedule_work(&adapter->dma_err_task); -+} -+ - enum latency_range { - lowest_latency = 0, - low_latency = 1, -@@ -4394,19 +5034,20 @@ - }; - - /** -- * igb_update_ring_itr - update the dynamic ITR value based on packet size -- * @q_vector: pointer to q_vector -+ * igb_update_ring_itr - update the dynamic ITR value based on packet size - * -- * Stores a new ITR value based on strictly on packet size. This -- * algorithm is less sophisticated than that used in igb_update_itr, -- * due to the difficulty of synchronizing statistics across multiple -- * receive rings. 
The divisors and thresholds used by this function -- * were determined based on theoretical maximum wire speed and testing -- * data, in order to minimize response time while increasing bulk -- * throughput. -- * This functionality is controlled by ethtool's coalescing settings. -- * NOTE: This function is called only when operating in a multiqueue -- * receive environment. -+ * Stores a new ITR value based on strictly on packet size. This -+ * algorithm is less sophisticated than that used in igb_update_itr, -+ * due to the difficulty of synchronizing statistics across multiple -+ * receive rings. The divisors and thresholds used by this function -+ * were determined based on theoretical maximum wire speed and testing -+ * data, in order to minimize response time while increasing bulk -+ * throughput. -+ * This functionality is controlled by the InterruptThrottleRate module -+ * parameter (see igb_param.c) -+ * NOTE: This function is called only when operating in a multiqueue -+ * receive environment. -+ * @q_vector: pointer to q_vector - **/ - static void igb_update_ring_itr(struct igb_q_vector *q_vector) - { -@@ -4418,9 +5059,13 @@ - /* For non-gigabit speeds, just fix the interrupt rate at 4000 - * ints/sec - ITR timer value of 120 ticks. - */ -- if (adapter->link_speed != SPEED_1000) { -+ switch (adapter->link_speed) { -+ case SPEED_10: -+ case SPEED_100: - new_val = IGB_4K_ITR; - goto set_itr_val; -+ default: -+ break; - } - - packets = q_vector->rx.total_packets; -@@ -4467,20 +5112,20 @@ - } - - /** -- * igb_update_itr - update the dynamic ITR value based on statistics -- * @q_vector: pointer to q_vector -- * @ring_container: ring info to update the itr for -- * -- * Stores a new ITR value based on packets and byte -- * counts during the last interrupt. The advantage of per interrupt -- * computation is faster updates and more accurate ITR for the current -- * traffic pattern. Constants in this function were computed -- * based on theoretical maximum wire speed and thresholds were set based -- * on testing data as well as attempting to minimize response time -- * while increasing bulk throughput. -- * This functionality is controlled by ethtool's coalescing settings. -- * NOTE: These calculations are only valid when operating in a single- -- * queue environment. -+ * igb_update_itr - update the dynamic ITR value based on statistics -+ * Stores a new ITR value based on packets and byte -+ * counts during the last interrupt. The advantage of per interrupt -+ * computation is faster updates and more accurate ITR for the current -+ * traffic pattern. Constants in this function were computed -+ * based on theoretical maximum wire speed and thresholds were set based -+ * on testing data as well as attempting to minimize response time -+ * while increasing bulk throughput. -+ * this functionality is controlled by the InterruptThrottleRate module -+ * parameter (see igb_param.c) -+ * NOTE: These calculations are only valid when operating in a single- -+ * queue environment. 
-+ * @q_vector: pointer to q_vector -+ * @ring_container: ring info to update the itr for - **/ - static void igb_update_itr(struct igb_q_vector *q_vector, - struct igb_ring_container *ring_container) -@@ -4504,12 +5149,13 @@ - case low_latency: /* 50 usec aka 20000 ints/s */ - if (bytes > 10000) { - /* this if handles the TSO accounting */ -- if (bytes/packets > 8000) -+ if (bytes/packets > 8000) { - itrval = bulk_latency; -- else if ((packets < 10) || ((bytes/packets) > 1200)) -+ } else if ((packets < 10) || ((bytes/packets) > 1200)) { - itrval = bulk_latency; -- else if ((packets > 35)) -+ } else if ((packets > 35)) { - itrval = lowest_latency; -+ } - } else if (bytes/packets > 2000) { - itrval = bulk_latency; - } else if (packets <= 2 && bytes < 512) { -@@ -4541,10 +5187,14 @@ - u8 current_itr = 0; - - /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ -- if (adapter->link_speed != SPEED_1000) { -+ switch (adapter->link_speed) { -+ case SPEED_10: -+ case SPEED_100: - current_itr = 0; - new_itr = IGB_4K_ITR; - goto set_itr_now; -+ default: -+ break; - } - - igb_update_itr(q_vector, &q_vector->tx); -@@ -4580,9 +5230,9 @@ - * increasing - */ - new_itr = new_itr > q_vector->itr_val ? -- max((new_itr * q_vector->itr_val) / -- (new_itr + (q_vector->itr_val >> 2)), -- new_itr) : new_itr; -+ max((new_itr * q_vector->itr_val) / -+ (new_itr + (q_vector->itr_val >> 2)), -+ new_itr) : new_itr; - /* Don't write the value here; it resets the adapter's - * internal timer, and causes us to delay far longer than - * we should between interrupts. Instead, we write the ITR -@@ -4594,8 +5244,8 @@ - } - } - --static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, -- u32 type_tucmd, u32 mss_l4len_idx) -+void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, -+ u32 type_tucmd, u32 mss_l4len_idx) - { - struct e1000_adv_tx_context_desc *context_desc; - u16 i = tx_ring->next_to_use; -@@ -4622,26 +5272,32 @@ - struct igb_tx_buffer *first, - u8 *hdr_len) - { -+#ifdef NETIF_F_TSO - struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; -- int err; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (!skb_is_gso(skb)) -+#endif /* NETIF_F_TSO */ - return 0; -+#ifdef NETIF_F_TSO - -- err = skb_cow_head(skb, 0); -- if (err < 0) -- return err; -+ if (skb_header_cloned(skb)) { -+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); -+ -+ if (err) -+ return err; -+ } - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); -+ - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, -@@ -4652,6 +5308,7 @@ - first->tx_flags |= IGB_TX_FLAGS_TSO | - IGB_TX_FLAGS_CSUM | - IGB_TX_FLAGS_IPV4; -+#ifdef NETIF_F_TSO6 - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, -@@ -4659,6 +5316,7 @@ - 0, IPPROTO_TCP, 0); - first->tx_flags |= IGB_TX_FLAGS_TSO | - IGB_TX_FLAGS_CSUM; -+#endif - } - - /* compute header lengths */ -@@ -4681,6 +5339,7 @@ - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); - - return 1; -+#endif /* NETIF_F_TSO */ - } - - static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) -@@ -4694,38 +5353,42 @@ - if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) - return; - } else { -- u8 l4_hdr = 0; -+ u8 nexthdr = 0; - - switch (first->protocol) { -- case 
htons(ETH_P_IP): -+ case __constant_htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; -- l4_hdr = ip_hdr(skb)->protocol; -+ nexthdr = ip_hdr(skb)->protocol; - break; -- case htons(ETH_P_IPV6): -+#ifdef NETIF_F_IPV6_CSUM -+ case __constant_htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); -- l4_hdr = ipv6_hdr(skb)->nexthdr; -+ nexthdr = ipv6_hdr(skb)->nexthdr; - break; -+#endif - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, -- "partial checksum but proto=%x!\n", -- first->protocol); -+ "partial checksum but proto=%x!\n", -+ first->protocol); - } - break; - } - -- switch (l4_hdr) { -+ switch (nexthdr) { - case IPPROTO_TCP: - type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << - E1000_ADVTXD_L4LEN_SHIFT; - break; -+#ifdef HAVE_SCTP - case IPPROTO_SCTP: - type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - E1000_ADVTXD_L4LEN_SHIFT; - break; -+#endif - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - E1000_ADVTXD_L4LEN_SHIFT; -@@ -4733,8 +5396,8 @@ - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, -- "partial checksum but l4 proto=%x!\n", -- l4_hdr); -+ "partial checksum but l4 proto=%x!\n", -+ nexthdr); - } - break; - } -@@ -4773,9 +5436,6 @@ - cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, - (E1000_ADVTXD_MAC_TSTAMP)); - -- /* insert frame checksum */ -- cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); -- - return cmd_type; - } - -@@ -4882,11 +5542,11 @@ - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); - - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); -- - /* set the timestamp */ - first->time_stamp = jiffies; - -- /* Force memory writes to complete before letting h/w know there -+ /* -+ * Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. (Only applicable for weak-ordered - * memory model archs, such as IA-64). - * -@@ -4907,7 +5567,7 @@ - writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail -- * at a time, it synchronizes IO on IA64/Altix systems -+ * at a time, it syncronizes IO on IA64/Altix systems - */ - mmiowb(); - -@@ -4932,9 +5592,12 @@ - - static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) - { -- struct net_device *netdev = tx_ring->netdev; -+ struct net_device *netdev = netdev_ring(tx_ring); - -- netif_stop_subqueue(netdev, tx_ring->queue_index); -+ if (netif_is_multiqueue(netdev)) -+ netif_stop_subqueue(netdev, ring_queue_index(tx_ring)); -+ else -+ netif_stop_queue(netdev); - - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); -@@ -4949,11 +5612,12 @@ - return -EBUSY; - - /* A reprieve! 
*/ -- netif_wake_subqueue(netdev, tx_ring->queue_index); -+ if (netif_is_multiqueue(netdev)) -+ netif_wake_subqueue(netdev, ring_queue_index(tx_ring)); -+ else -+ netif_wake_queue(netdev); - -- u64_stats_update_begin(&tx_ring->tx_syncp2); -- tx_ring->tx_stats.restart_queue2++; -- u64_stats_update_end(&tx_ring->tx_syncp2); -+ tx_ring->tx_stats.restart_queue++; - - return 0; - } -@@ -4971,25 +5635,26 @@ - struct igb_tx_buffer *first; - int tso; - u32 tx_flags = 0; -+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD -+ unsigned short f; -+#endif - u16 count = TXD_USE_COUNT(skb_headlen(skb)); - __be16 protocol = vlan_get_protocol(skb); - u8 hdr_len = 0; - -- /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, -+ /* -+ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, - * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, - * + 1 desc for context descriptor, - * otherwise try next time - */ -- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { -- unsigned short f; -- -- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) -- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); -- } else { -- count += skb_shinfo(skb)->nr_frags; -- } -- -+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD -+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) -+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); -+#else -+ count += skb_shinfo(skb)->nr_frags; -+#endif - if (igb_maybe_stop_tx(tx_ring, count + 3)) { - /* this is a hard error */ - return NETDEV_TX_BUSY; -@@ -5001,12 +5666,21 @@ - first->bytecount = skb->len; - first->gso_segs = 1; - -+#ifdef HAVE_PTP_1588_CLOCK -+#ifdef SKB_SHARED_TX_IS_UNION -+ if (unlikely(skb_tx(skb)->hardware)) { -+#else - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { -+#endif - struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - - if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, - &adapter->state)) { -+#ifdef SKB_SHARED_TX_IS_UNION -+ skb_tx(skb)->in_progress = 1; -+#else - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; -+#endif - tx_flags |= IGB_TX_FLAGS_TSTAMP; - - adapter->ptp_tx_skb = skb_get(skb); -@@ -5015,12 +5689,11 @@ - schedule_work(&adapter->ptp_tx_work); - } - } -- -+#endif /* HAVE_PTP_1588_CLOCK */ - skb_tx_timestamp(skb); -- -- if (vlan_tx_tag_present(skb)) { -+ if (skb_vlan_tag_present(skb)) { - tx_flags |= IGB_TX_FLAGS_VLAN; -- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); -+ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); - } - - /* record initial flags and protocol */ -@@ -5035,6 +5708,10 @@ - - igb_tx_map(tx_ring, first, hdr_len); - -+#ifndef HAVE_TRANS_START_IN_QUEUE -+ netdev_ring(tx_ring)->trans_start = jiffies; -+ -+#endif - /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - -@@ -5046,6 +5723,7 @@ - return NETDEV_TX_OK; - } - -+#ifdef HAVE_TX_MQ - static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, - struct sk_buff *skb) - { -@@ -5056,6 +5734,9 @@ - - return adapter->tx_ring[r_idx]; - } -+#else -+#define igb_tx_queue_mapping(_adapter, _skb) ((_adapter)->tx_ring[0]) -+#endif - - static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -@@ -5072,22 +5753,22 @@ - return NETDEV_TX_OK; - } - -- /* The minimum packet size with TCTL.PSP set is 17 so pad the skb -+ /* -+ * The minimum packet size with TCTL.PSP set is 17 so pad the skb - * in order to meet this minimum size requirement. 
- */ -- if (unlikely(skb->len < 17)) { -- if (skb_pad(skb, 17 - skb->len)) -+ if (skb->len < 17) { -+ if (skb_padto(skb, 17)) - return NETDEV_TX_OK; - skb->len = 17; -- skb_set_tail_pointer(skb, 17); - } - - return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); - } - - /** -- * igb_tx_timeout - Respond to a Tx Hang -- * @netdev: network interface device structure -+ * igb_tx_timeout - Respond to a Tx Hang -+ * @netdev: network interface device structure - **/ - static void igb_tx_timeout(struct net_device *netdev) - { -@@ -5101,59 +5782,64 @@ - hw->dev_spec._82575.global_device_reset = true; - - schedule_work(&adapter->reset_task); -- wr32(E1000_EICS, -- (adapter->eims_enable_mask & ~adapter->eims_other)); -+ E1000_WRITE_REG(hw, E1000_EICS, -+ (adapter->eims_enable_mask & ~adapter->eims_other)); - } - - static void igb_reset_task(struct work_struct *work) - { - struct igb_adapter *adapter; -+ - adapter = container_of(work, struct igb_adapter, reset_task); - -- igb_dump(adapter); -- netdev_err(adapter->netdev, "Reset adapter\n"); - igb_reinit_locked(adapter); - } - - /** -- * igb_get_stats64 - Get System Network Statistics -- * @netdev: network interface device structure -- * @stats: rtnl_link_stats64 pointer -+ * igb_get_stats - Get System Network Statistics -+ * @netdev: network interface device structure -+ * -+ * Returns the address of the device statistics structure. -+ * The statistics are updated here and also from the timer callback. - **/ --static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, -- struct rtnl_link_stats64 *stats) -+static struct net_device_stats *igb_get_stats(struct net_device *netdev) - { - struct igb_adapter *adapter = netdev_priv(netdev); - -- spin_lock(&adapter->stats64_lock); -- igb_update_stats(adapter, &adapter->stats64); -- memcpy(stats, &adapter->stats64, sizeof(*stats)); -- spin_unlock(&adapter->stats64_lock); -+ if (!test_bit(__IGB_RESETTING, &adapter->state)) -+ igb_update_stats(adapter); - -- return stats; -+#ifdef HAVE_NETDEV_STATS_IN_NETDEV -+ /* only return the current stats */ -+ return &netdev->stats; -+#else -+ /* only return the current stats */ -+ return &adapter->net_stats; -+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ - } - - /** -- * igb_change_mtu - Change the Maximum Transfer Unit -- * @netdev: network interface device structure -- * @new_mtu: new value for maximum frame size -+ * igb_change_mtu - Change the Maximum Transfer Unit -+ * @netdev: network interface device structure -+ * @new_mtu: new value for maximum frame size - * -- * Returns 0 on success, negative on failure -+ * Returns 0 on success, negative on failure - **/ - static int igb_change_mtu(struct net_device *netdev, int new_mtu) - { - struct igb_adapter *adapter = netdev_priv(netdev); -+ struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { -- dev_err(&pdev->dev, "Invalid MTU setting\n"); -+ dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n"); - return -EINVAL; - } - - #define MAX_STD_JUMBO_FRAME_SIZE 9238 - if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { -- dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); -+ dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n"); - return -EINVAL; - } - -@@ -5170,9 +5856,10 @@ - if (netif_running(netdev)) - igb_down(adapter); - -- dev_info(&pdev->dev, "changing MTU from %d to %d\n", -+ dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n", - 
netdev->mtu, new_mtu); - netdev->mtu = new_mtu; -+ hw->dev_spec._82575.mtu = new_mtu; - - if (netif_running(netdev)) - igb_up(adapter); -@@ -5185,53 +5872,74 @@ - } - - /** -- * igb_update_stats - Update the board statistics counters -- * @adapter: board private structure -+ * igb_update_stats - Update the board statistics counters -+ * @adapter: board private structure - **/ --void igb_update_stats(struct igb_adapter *adapter, -- struct rtnl_link_stats64 *net_stats) -+ -+void igb_update_stats(struct igb_adapter *adapter) - { -+#ifdef HAVE_NETDEV_STATS_IN_NETDEV -+ struct net_device_stats *net_stats = &adapter->netdev->stats; -+#else -+ struct net_device_stats *net_stats = &adapter->net_stats; -+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ - struct e1000_hw *hw = &adapter->hw; -+#ifdef HAVE_PCI_ERS - struct pci_dev *pdev = adapter->pdev; -+#endif - u32 reg, mpc; - u16 phy_tmp; - int i; - u64 bytes, packets; -- unsigned int start; -- u64 _bytes, _packets; -+#ifndef IGB_NO_LRO -+ u32 flushed = 0, coal = 0; -+ struct igb_q_vector *q_vector; -+#endif - - #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF - -- /* Prevent stats update while adapter is being reset, or if the pci -+ /* -+ * Prevent stats update while adapter is being reset, or if the pci - * connection is down. - */ - if (adapter->link_speed == 0) - return; -+#ifdef HAVE_PCI_ERS - if (pci_channel_offline(pdev)) - return; - -+#endif -+#ifndef IGB_NO_LRO -+ for (i = 0; i < adapter->num_q_vectors; i++) { -+ q_vector = adapter->q_vector[i]; -+ if (!q_vector) -+ continue; -+ flushed += q_vector->lrolist.stats.flushed; -+ coal += q_vector->lrolist.stats.coal; -+ } -+ adapter->lro_stats.flushed = flushed; -+ adapter->lro_stats.coal = coal; -+ -+#endif - bytes = 0; - packets = 0; -- -- rcu_read_lock(); - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *ring = adapter->rx_ring[i]; -- u32 rqdpc = rd32(E1000_RQDPC(i)); -- if (hw->mac.type >= e1000_i210) -- wr32(E1000_RQDPC(i), 0); -+ u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF; - -- if (rqdpc) { -- ring->rx_stats.drops += rqdpc; -- net_stats->rx_fifo_errors += rqdpc; -+ if (hw->mac.type >= e1000_i210) -+ E1000_WRITE_REG(hw, E1000_RQDPC(i), 0); -+ ring->rx_stats.drops += rqdpc_tmp; -+ net_stats->rx_fifo_errors += rqdpc_tmp; -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ if (!ring->vmdq_netdev) { -+ bytes += ring->rx_stats.bytes; -+ packets += ring->rx_stats.packets; - } -- -- do { -- start = u64_stats_fetch_begin_irq(&ring->rx_syncp); -- _bytes = ring->rx_stats.bytes; -- _packets = ring->rx_stats.packets; -- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); -- bytes += _bytes; -- packets += _packets; -+#else -+ bytes += ring->rx_stats.bytes; -+ packets += ring->rx_stats.packets; -+#endif - } - - net_stats->rx_bytes = bytes; -@@ -5241,98 +5949,98 @@ - packets = 0; - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *ring = adapter->tx_ring[i]; -- do { -- start = u64_stats_fetch_begin_irq(&ring->tx_syncp); -- _bytes = ring->tx_stats.bytes; -- _packets = ring->tx_stats.packets; -- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); -- bytes += _bytes; -- packets += _packets; -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ if (!ring->vmdq_netdev) { -+ bytes += ring->tx_stats.bytes; -+ packets += ring->tx_stats.packets; -+ } -+#else -+ bytes += ring->tx_stats.bytes; -+ packets += ring->tx_stats.packets; -+#endif - } - net_stats->tx_bytes = bytes; - net_stats->tx_packets = packets; -- rcu_read_unlock(); - - /* read stats registers */ -- adapter->stats.crcerrs += 
rd32(E1000_CRCERRS); -- adapter->stats.gprc += rd32(E1000_GPRC); -- adapter->stats.gorc += rd32(E1000_GORCL); -- rd32(E1000_GORCH); /* clear GORCL */ -- adapter->stats.bprc += rd32(E1000_BPRC); -- adapter->stats.mprc += rd32(E1000_MPRC); -- adapter->stats.roc += rd32(E1000_ROC); -- -- adapter->stats.prc64 += rd32(E1000_PRC64); -- adapter->stats.prc127 += rd32(E1000_PRC127); -- adapter->stats.prc255 += rd32(E1000_PRC255); -- adapter->stats.prc511 += rd32(E1000_PRC511); -- adapter->stats.prc1023 += rd32(E1000_PRC1023); -- adapter->stats.prc1522 += rd32(E1000_PRC1522); -- adapter->stats.symerrs += rd32(E1000_SYMERRS); -- adapter->stats.sec += rd32(E1000_SEC); -+ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); -+ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC); -+ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL); -+ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */ -+ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC); -+ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC); -+ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC); -+ -+ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64); -+ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127); -+ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255); -+ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511); -+ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023); -+ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522); -+ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS); -+ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC); - -- mpc = rd32(E1000_MPC); -+ mpc = E1000_READ_REG(hw, E1000_MPC); - adapter->stats.mpc += mpc; - net_stats->rx_fifo_errors += mpc; -- adapter->stats.scc += rd32(E1000_SCC); -- adapter->stats.ecol += rd32(E1000_ECOL); -- adapter->stats.mcc += rd32(E1000_MCC); -- adapter->stats.latecol += rd32(E1000_LATECOL); -- adapter->stats.dc += rd32(E1000_DC); -- adapter->stats.rlec += rd32(E1000_RLEC); -- adapter->stats.xonrxc += rd32(E1000_XONRXC); -- adapter->stats.xontxc += rd32(E1000_XONTXC); -- adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); -- adapter->stats.xofftxc += rd32(E1000_XOFFTXC); -- adapter->stats.fcruc += rd32(E1000_FCRUC); -- adapter->stats.gptc += rd32(E1000_GPTC); -- adapter->stats.gotc += rd32(E1000_GOTCL); -- rd32(E1000_GOTCH); /* clear GOTCL */ -- adapter->stats.rnbc += rd32(E1000_RNBC); -- adapter->stats.ruc += rd32(E1000_RUC); -- adapter->stats.rfc += rd32(E1000_RFC); -- adapter->stats.rjc += rd32(E1000_RJC); -- adapter->stats.tor += rd32(E1000_TORH); -- adapter->stats.tot += rd32(E1000_TOTH); -- adapter->stats.tpr += rd32(E1000_TPR); -- -- adapter->stats.ptc64 += rd32(E1000_PTC64); -- adapter->stats.ptc127 += rd32(E1000_PTC127); -- adapter->stats.ptc255 += rd32(E1000_PTC255); -- adapter->stats.ptc511 += rd32(E1000_PTC511); -- adapter->stats.ptc1023 += rd32(E1000_PTC1023); -- adapter->stats.ptc1522 += rd32(E1000_PTC1522); -- -- adapter->stats.mptc += rd32(E1000_MPTC); -- adapter->stats.bptc += rd32(E1000_BPTC); -- -- adapter->stats.tpt += rd32(E1000_TPT); -- adapter->stats.colc += rd32(E1000_COLC); -- -- adapter->stats.algnerrc += rd32(E1000_ALGNERRC); -- /* read internal phy specific stats */ -- reg = rd32(E1000_CTRL_EXT); -+ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC); -+ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL); -+ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC); -+ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL); -+ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC); -+ adapter->stats.rlec += 
E1000_READ_REG(hw, E1000_RLEC); -+ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC); -+ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC); -+ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC); -+ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); -+ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC); -+ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC); -+ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL); -+ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */ -+ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC); -+ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC); -+ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC); -+ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC); -+ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH); -+ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH); -+ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR); -+ -+ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64); -+ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127); -+ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255); -+ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511); -+ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); -+ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); -+ -+ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC); -+ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC); -+ -+ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT); -+ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC); -+ -+ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); -+ /* read internal phy sepecific stats */ -+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); - if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { -- adapter->stats.rxerrc += rd32(E1000_RXERRC); -+ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC); - - /* this stat has invalid values on i210/i211 */ - if ((hw->mac.type != e1000_i210) && - (hw->mac.type != e1000_i211)) -- adapter->stats.tncrs += rd32(E1000_TNCRS); -+ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS); - } -+ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC); -+ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); - -- adapter->stats.tsctc += rd32(E1000_TSCTC); -- adapter->stats.tsctfc += rd32(E1000_TSCTFC); -- -- adapter->stats.iac += rd32(E1000_IAC); -- adapter->stats.icrxoc += rd32(E1000_ICRXOC); -- adapter->stats.icrxptc += rd32(E1000_ICRXPTC); -- adapter->stats.icrxatc += rd32(E1000_ICRXATC); -- adapter->stats.ictxptc += rd32(E1000_ICTXPTC); -- adapter->stats.ictxatc += rd32(E1000_ICTXATC); -- adapter->stats.ictxqec += rd32(E1000_ICTXQEC); -- adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); -- adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); -+ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC); -+ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); -+ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); -+ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); -+ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); -+ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); -+ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); -+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); -+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); - - /* Fill out the OS statistics structure */ - net_stats->multicast = adapter->stats.mprc; -@@ -5365,24 +6073,20 @@ - /* Phy Stats */ - if (hw->phy.media_type == e1000_media_type_copper) { - if ((adapter->link_speed == SPEED_1000) && -- 
(!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { -+ (!igb_e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; - adapter->phy_stats.idle_errors += phy_tmp; - } - } - - /* Management Stats */ -- adapter->stats.mgptc += rd32(E1000_MGTPTC); -- adapter->stats.mgprc += rd32(E1000_MGTPRC); -- adapter->stats.mgpdc += rd32(E1000_MGTPDC); -- -- /* OS2BMC Stats */ -- reg = rd32(E1000_MANC); -- if (reg & E1000_MANC_EN_BMC2OS) { -- adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); -- adapter->stats.o2bspc += rd32(E1000_O2BSPC); -- adapter->stats.b2ospc += rd32(E1000_B2OSPC); -- adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); -+ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC); -+ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC); -+ if (hw->mac.type > e1000_82580) { -+ adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC); -+ adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC); -+ adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC); -+ adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC); - } - } - -@@ -5390,7 +6094,7 @@ - { - struct igb_adapter *adapter = data; - struct e1000_hw *hw = &adapter->hw; -- u32 icr = rd32(E1000_ICR); -+ u32 icr = E1000_READ_REG(hw, E1000_ICR); - /* reading ICR causes bit 31 of EICR to be cleared */ - - if (icr & E1000_ICR_DRSTA) -@@ -5417,18 +6121,24 @@ - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - -+#ifdef HAVE_PTP_1588_CLOCK - if (icr & E1000_ICR_TS) { -- u32 tsicr = rd32(E1000_TSICR); -+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); - - if (tsicr & E1000_TSICR_TXTS) { - /* acknowledge the interrupt */ -- wr32(E1000_TSICR, E1000_TSICR_TXTS); -+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); - /* retrieve hardware timestamp */ - schedule_work(&adapter->ptp_tx_work); - } - } -+#endif /* HAVE_PTP_1588_CLOCK */ - -- wr32(E1000_EIMS, adapter->eims_other); -+ /* Check for MDD event */ -+ if (icr & E1000_ICR_MDDET) -+ igb_process_mdd_event(adapter); -+ -+ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other); - - return IRQ_HANDLED; - } -@@ -5465,7 +6175,7 @@ - return IRQ_HANDLED; - } - --#ifdef CONFIG_IGB_DCA -+#ifdef IGB_DCA - static void igb_update_tx_dca(struct igb_adapter *adapter, - struct igb_ring *tx_ring, - int cpu) -@@ -5474,9 +6184,10 @@ - u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); - - if (hw->mac.type != e1000_82575) -- txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; -+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576; - -- /* We can enable relaxed ordering for reads, but not writes when -+ /* -+ * We can enable relaxed ordering for reads, but not writes when - * DCA is enabled. This is due to a known issue in some chipsets - * which will cause the DCA tag to be cleared. - */ -@@ -5484,7 +6195,7 @@ - E1000_DCA_TXCTRL_DATA_RRO_EN | - E1000_DCA_TXCTRL_DESC_DCA_EN; - -- wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); -+ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); - } - - static void igb_update_rx_dca(struct igb_adapter *adapter, -@@ -5495,16 +6206,17 @@ - u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); - - if (hw->mac.type != e1000_82575) -- rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; -+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576; - -- /* We can enable relaxed ordering for reads, but not writes when -+ /* -+ * We can enable relaxed ordering for reads, but not writes when - * DCA is enabled. This is due to a known issue in some chipsets - * which will cause the DCA tag to be cleared. 
- */ - rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | - E1000_DCA_RXCTRL_DESC_DCA_EN; - -- wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); -+ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); - } - - static void igb_update_dca(struct igb_q_vector *q_vector) -@@ -5535,7 +6247,7 @@ - return; - - /* Always use CB2 mode, difference is masked in the CB driver. */ -- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); -+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); - - for (i = 0; i < adapter->num_q_vectors; i++) { - adapter->q_vector[i]->cpu = -1; -@@ -5556,9 +6268,9 @@ - /* if already enabled, don't do it again */ - if (adapter->flags & IGB_FLAG_DCA_ENABLED) - break; -- if (dca_add_requester(dev) == 0) { -+ if (dca_add_requester(dev) == E1000_SUCCESS) { - adapter->flags |= IGB_FLAG_DCA_ENABLED; -- dev_info(&pdev->dev, "DCA enabled\n"); -+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n"); - igb_setup_dca(adapter); - break; - } -@@ -5569,14 +6281,15 @@ - * hanging around in the sysfs model - */ - dca_remove_requester(dev); -- dev_info(&pdev->dev, "DCA disabled\n"); -+ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n"); - adapter->flags &= ~IGB_FLAG_DCA_ENABLED; -- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); -+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, -+ E1000_DCA_CTRL_DCA_DISABLE); - } - break; - } - -- return 0; -+ return E1000_SUCCESS; - } - - static int igb_notify_dca(struct notifier_block *nb, unsigned long event, -@@ -5585,27 +6298,29 @@ - int ret_val; - - ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, -- __igb_notify_dca); -+ __igb_notify_dca); - - return ret_val ? NOTIFY_BAD : NOTIFY_DONE; - } --#endif /* CONFIG_IGB_DCA */ -+#endif /* IGB_DCA */ - --#ifdef CONFIG_PCI_IOV - static int igb_vf_configure(struct igb_adapter *adapter, int vf) - { - unsigned char mac_addr[ETH_ALEN]; - -- eth_zero_addr(mac_addr); -+ random_ether_addr(mac_addr); - igb_set_vf_mac(adapter, vf, mac_addr); - -+#ifdef IFLA_VF_MAX -+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE - /* By default spoof check is enabled for all VFs */ - adapter->vf_data[vf].spoofchk_enabled = true; -+#endif -+#endif - -- return 0; -+ return true; - } - --#endif - static void igb_ping_all_vfs(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; -@@ -5616,26 +6331,71 @@ - ping = E1000_PF_CONTROL_MSG; - if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) - ping |= E1000_VT_MSGTYPE_CTS; -- igb_write_mbx(hw, &ping, 1, i); -+ e1000_write_mbx(hw, &ping, 1, i); - } - } - -+/** -+ * igb_mta_set_ - Set multicast filter table address -+ * @adapter: pointer to the adapter structure -+ * @hash_value: determines the MTA register and bit to set -+ * -+ * The multicast table address is a register array of 32-bit registers. -+ * The hash_value is used to determine what register the bit is in, the -+ * current value is read, the new bit is OR'd in and the new value is -+ * written back into the register. -+ **/ -+void igb_mta_set(struct igb_adapter *adapter, u32 hash_value) -+{ -+ struct e1000_hw *hw = &adapter->hw; -+ u32 hash_bit, hash_reg, mta; -+ -+ /* -+ * The MTA is a register array of 32-bit registers. It is -+ * treated like an array of (32*mta_reg_count) bits. We want to -+ * set bit BitArray[hash_value]. So we figure out what register -+ * the bit is in, read it, OR in the new bit, then write -+ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a -+ * mask to bits 31:5 of the hash value which gives us the -+ * register we're modifying. 
The hash bit within that register -+ * is determined by the lower 5 bits of the hash value. -+ */ -+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); -+ hash_bit = hash_value & 0x1F; -+ -+ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); -+ -+ mta |= (1 << hash_bit); -+ -+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); -+ E1000_WRITE_FLUSH(hw); -+} -+ - static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) - { -+ - struct e1000_hw *hw = &adapter->hw; -- u32 vmolr = rd32(E1000_VMOLR(vf)); -+ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); - struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - - vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | - IGB_VF_FLAG_MULTI_PROMISC); - vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); - -+#ifdef IGB_ENABLE_VF_PROMISC -+ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) { -+ vmolr |= E1000_VMOLR_ROPE; -+ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC; -+ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST; -+ } -+#endif - if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { - vmolr |= E1000_VMOLR_MPME; - vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; - *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; - } else { -- /* if we have hashes and we are clearing a multicast promisc -+ /* -+ * if we have hashes and we are clearing a multicast promisc - * flag we need to write the hashes to the MTA as this step - * was previously skipped - */ -@@ -5646,17 +6406,18 @@ - - vmolr |= E1000_VMOLR_ROMPE; - for (j = 0; j < vf_data->num_vf_mc_hashes; j++) -- igb_mta_set(hw, vf_data->vf_mc_hashes[j]); -+ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); - } - } - -- wr32(E1000_VMOLR(vf), vmolr); -+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); - - /* there are flags left unprocessed, likely not supported */ - if (*msgbuf & E1000_VT_MSGINFO_MASK) - return -EINVAL; - - return 0; -+ - } - - static int igb_set_vf_multicasts(struct igb_adapter *adapter, -@@ -5694,7 +6455,7 @@ - int i, j; - - for (i = 0; i < adapter->vfs_allocated_count; i++) { -- u32 vmolr = rd32(E1000_VMOLR(i)); -+ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); - - vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); - -@@ -5706,9 +6467,9 @@ - } else if (vf_data->num_vf_mc_hashes) { - vmolr |= E1000_VMOLR_ROMPE; - for (j = 0; j < vf_data->num_vf_mc_hashes; j++) -- igb_mta_set(hw, vf_data->vf_mc_hashes[j]); -+ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]); - } -- wr32(E1000_VMOLR(i), vmolr); -+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); - } - } - -@@ -5716,13 +6477,14 @@ - { - struct e1000_hw *hw = &adapter->hw; - u32 pool_mask, reg, vid; -+ u16 vlan_default; - int i; - - pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { -- reg = rd32(E1000_VLVF(i)); -+ reg = E1000_READ_REG(hw, E1000_VLVF(i)); - - /* remove the vf from the pool */ - reg &= ~pool_mask; -@@ -5732,16 +6494,20 @@ - (reg & E1000_VLVF_VLANID_ENABLE)) { - reg = 0; - vid = reg & E1000_VLVF_VLANID_MASK; -- igb_vfta_set(hw, vid, false); -+ igb_vfta_set(adapter, vid, FALSE); - } - -- wr32(E1000_VLVF(i), reg); -+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); - } - - adapter->vf_data[vf].vlans_enabled = 0; -+ -+ vlan_default = adapter->vf_data[vf].default_vf_vlan_id; -+ if (vlan_default) -+ igb_vlvf_set(adapter, vlan_default, true, vf); - } - --static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) -+s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) - { - struct e1000_hw *hw = &adapter->hw; - 
u32 reg, i; -@@ -5751,12 +6517,12 @@ - return -1; - - /* we only need to do this if VMDq is enabled */ -- if (!adapter->vfs_allocated_count) -+ if (!adapter->vmdq_pools) - return -1; - - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { -- reg = rd32(E1000_VLVF(i)); -+ reg = E1000_READ_REG(hw, E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; -@@ -5769,7 +6535,7 @@ - * one without the enable bit set - */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { -- reg = rd32(E1000_VLVF(i)); -+ reg = E1000_READ_REG(hw, E1000_VLVF(i)); - if (!(reg & E1000_VLVF_VLANID_ENABLE)) - break; - } -@@ -5781,26 +6547,26 @@ - /* if !enabled we need to set this up in vfta */ - if (!(reg & E1000_VLVF_VLANID_ENABLE)) { - /* add VID to filter table */ -- igb_vfta_set(hw, vid, true); -+ igb_vfta_set(adapter, vid, TRUE); - reg |= E1000_VLVF_VLANID_ENABLE; - } - reg &= ~E1000_VLVF_VLANID_MASK; - reg |= vid; -- wr32(E1000_VLVF(i), reg); -+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) -- return 0; -+ return E1000_SUCCESS; - - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - -- reg = rd32(E1000_VMOLR(vf)); -+ reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size += 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; -- wr32(E1000_VMOLR(vf), reg); -+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); - } - - adapter->vf_data[vf].vlans_enabled++; -@@ -5812,38 +6578,40 @@ - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK)) { - reg = 0; -- igb_vfta_set(hw, vid, false); -+ igb_vfta_set(adapter, vid, FALSE); - } -- wr32(E1000_VLVF(i), reg); -+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) -- return 0; -+ return E1000_SUCCESS; - - adapter->vf_data[vf].vlans_enabled--; - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - -- reg = rd32(E1000_VMOLR(vf)); -+ reg = E1000_READ_REG(hw, E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size -= 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; -- wr32(E1000_VMOLR(vf), reg); -+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg); - } - } - } -- return 0; -+ return E1000_SUCCESS; - } - -+#ifdef IFLA_VF_MAX - static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) - { - struct e1000_hw *hw = &adapter->hw; - - if (vid) -- wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); -+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), -+ (vid | E1000_VMVIR_VLANA_DEFAULT)); - else -- wr32(E1000_VMVIR(vf), 0); -+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); - } - - static int igb_ndo_set_vf_vlan(struct net_device *netdev, -@@ -5852,7 +6620,9 @@ - int err = 0; - struct igb_adapter *adapter = netdev_priv(netdev); - -- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) -+ /* VLAN IDs accepted range 0-4094 */ -+ if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1) -+ || (qos > 7)) - return -EINVAL; - if (vlan || qos) { - err = igb_vlvf_set(adapter, vlan, !!vlan, vf); -@@ -5862,6 +6632,7 @@ - igb_set_vmolr(adapter, vf, !vlan); - adapter->vf_data[vf].pf_vlan = vlan; - adapter->vf_data[vf].pf_qos = qos; -+ igb_set_vf_vlan_strip(adapter, vf, true); - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IGB_DOWN, &adapter->state)) { -@@ -5871,10 +6642,14 @@ - "Bring the PF device up 
before attempting to use the VF device.\n"); - } - } else { -+ if (adapter->vf_data[vf].pf_vlan) -+ dev_info(&adapter->pdev->dev, -+ "Clearing VLAN on VF %d\n", vf); - igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, -- false, vf); -+ false, vf); - igb_set_vmvir(adapter, vlan, vf); - igb_set_vmolr(adapter, vf, true); -+ igb_set_vf_vlan_strip(adapter, vf, false); - adapter->vf_data[vf].pf_vlan = 0; - adapter->vf_data[vf].pf_qos = 0; - } -@@ -5882,6 +6657,36 @@ - return err; - } - -+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE -+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, -+ bool setting) -+{ -+ struct igb_adapter *adapter = netdev_priv(netdev); -+ struct e1000_hw *hw = &adapter->hw; -+ u32 dtxswc, reg_offset; -+ -+ if (!adapter->vfs_allocated_count) -+ return -EOPNOTSUPP; -+ -+ if (vf >= adapter->vfs_allocated_count) -+ return -EINVAL; -+ -+ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; -+ dtxswc = E1000_READ_REG(hw, reg_offset); -+ if (setting) -+ dtxswc |= ((1 << vf) | -+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); -+ else -+ dtxswc &= ~((1 << vf) | -+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); -+ E1000_WRITE_REG(hw, reg_offset, dtxswc); -+ -+ adapter->vf_data[vf].spoofchk_enabled = setting; -+ return E1000_SUCCESS; -+} -+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */ -+#endif /* IFLA_VF_MAX */ -+ - static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) - { - struct e1000_hw *hw = &adapter->hw; -@@ -5890,7 +6695,7 @@ - - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { -- reg = rd32(E1000_VLVF(i)); -+ reg = E1000_READ_REG(hw, E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; -@@ -5909,6 +6714,11 @@ - int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); - int err = 0; - -+ if (vid) -+ igb_set_vf_vlan_strip(adapter, vf, true); -+ else -+ igb_set_vf_vlan_strip(adapter, vf, false); -+ - /* If in promiscuous mode we need to make sure the PF also has - * the VLAN filter set. - */ -@@ -5928,6 +6738,7 @@ - */ - if (!add && (adapter->netdev->flags & IFF_PROMISC)) { - u32 vlvf, bits; -+ - int regndx = igb_find_vlvf_entry(adapter, vid); - - if (regndx < 0) -@@ -5935,7 +6746,7 @@ - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ -- vlvf = bits = rd32(E1000_VLVF(regndx)); -+ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx)); - bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count); - /* If the filter was removed then ensure PF pool bit -@@ -5943,7 +6754,9 @@ - * because the PF is in promiscuous mode. 
- */ - if ((vlvf & VLAN_VID_MASK) == vid && -+#ifndef HAVE_VLAN_RX_REGISTER - !test_bit(vid, adapter->active_vlans) && -+#endif - !bits) - igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); -@@ -5955,7 +6768,9 @@ - - static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) - { -- /* clear flags - except flag that indicates PF has set the MAC */ -+ struct e1000_hw *hw = &adapter->hw; -+ -+ /* clear flags except flag that the PF has set the MAC */ - adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; - adapter->vf_data[vf].last_nack = jiffies; - -@@ -5964,27 +6779,40 @@ - - /* reset vlans for device */ - igb_clear_vf_vfta(adapter, vf); -+#ifdef IFLA_VF_MAX - if (adapter->vf_data[vf].pf_vlan) - igb_ndo_set_vf_vlan(adapter->netdev, vf, - adapter->vf_data[vf].pf_vlan, - adapter->vf_data[vf].pf_qos); - else - igb_clear_vf_vfta(adapter, vf); -+#endif - - /* reset multicast table array for vf */ - adapter->vf_data[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - igb_set_rx_mode(adapter->netdev); -+ -+ /* -+ * Reset the VFs TDWBAL and TDWBAH registers which are not -+ * cleared by a VFLR -+ */ -+ E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0); -+ E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0); -+ if (hw->mac.type == e1000_82576) { -+ E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0); -+ E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0); -+ } - } - - static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) - { - unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - -- /* clear mac address as we were hotplug removed/added */ -+ /* generate a new mac address as we were hotplug removed/added */ - if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) -- eth_zero_addr(vf_mac); -+ random_ether_addr(vf_mac); - - /* process remaining reset events */ - igb_vf_reset(adapter, vf); -@@ -6005,25 +6833,26 @@ - igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); - - /* enable transmit and receive for vf */ -- reg = rd32(E1000_VFTE); -- wr32(E1000_VFTE, reg | (1 << vf)); -- reg = rd32(E1000_VFRE); -- wr32(E1000_VFRE, reg | (1 << vf)); -+ reg = E1000_READ_REG(hw, E1000_VFTE); -+ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf)); -+ reg = E1000_READ_REG(hw, E1000_VFRE); -+ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf)); - - adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; - - /* reply to reset with ack and vf mac address */ - msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; -- memcpy(addr, vf_mac, ETH_ALEN); -- igb_write_mbx(hw, msgbuf, 3, vf); -+ memcpy(addr, vf_mac, 6); -+ e1000_write_mbx(hw, msgbuf, 3, vf); - } - - static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) - { -- /* The VF MAC Address is stored in a packed array of bytes -+ /* -+ * The VF MAC Address is stored in a packed array of bytes - * starting at the second 32 bit word of the msg array - */ -- unsigned char *addr = (char *)&msg[1]; -+ unsigned char *addr = (unsigned char *)&msg[1]; - int err = -1; - - if (is_valid_ether_addr(addr)) -@@ -6041,7 +6870,7 @@ - /* if device isn't clear to send it shouldn't be reading either */ - if (!(vf_data->flags & IGB_VF_FLAG_CTS) && - time_after(jiffies, vf_data->last_nack + (2 * HZ))) { -- igb_write_mbx(hw, &msg, 1, vf); -+ e1000_write_mbx(hw, &msg, 1, vf); - vf_data->last_nack = jiffies; - } - } -@@ -6054,45 +6883,47 @@ - struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - s32 retval; - -- retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); -+ retval = 
e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); - - if (retval) { -- /* if receive failed revoke VF CTS stats and restart init */ -- dev_err(&pdev->dev, "Error receiving message from VF\n"); -- vf_data->flags &= ~IGB_VF_FLAG_CTS; -- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) -- return; -- goto out; -+ dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n"); -+ return; - } - - /* this is a message we already processed, do nothing */ - if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) - return; - -- /* until the vf completes a reset it should not be -+ /* -+ * until the vf completes a reset it should not be - * allowed to start any configuration. - */ -+ - if (msgbuf[0] == E1000_VF_RESET) { - igb_vf_reset_msg(adapter, vf); - return; - } - - if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { -- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) -- return; -- retval = -1; -- goto out; -+ msgbuf[0] = E1000_VT_MSGTYPE_NACK; -+ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) { -+ e1000_write_mbx(hw, msgbuf, 1, vf); -+ vf_data->last_nack = jiffies; -+ } -+ return; - } - - switch ((msgbuf[0] & 0xFFFF)) { - case E1000_VF_SET_MAC_ADDR: - retval = -EINVAL; -+#ifndef IGB_DISABLE_VF_MAC_SET - if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) - retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); - else -- dev_warn(&pdev->dev, -- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", -- vf); -+ DPRINTK(DRV, INFO, -+ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", -+ vf); -+#endif - break; - case E1000_VF_SET_PROMISC: - retval = igb_set_vf_promisc(adapter, msgbuf, vf); -@@ -6105,28 +6936,31 @@ - break; - case E1000_VF_SET_VLAN: - retval = -1; -+#ifdef IFLA_VF_MAX - if (vf_data->pf_vlan) -- dev_warn(&pdev->dev, -- "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", -- vf); -+ DPRINTK(DRV, INFO, -+ "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", -+ vf); - else -+#endif - retval = igb_set_vf_vlan(adapter, msgbuf, vf); - break; - default: -- dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); -- retval = -1; -+ dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", -+ msgbuf[0]); -+ retval = -E1000_ERR_MBX; - break; - } - -- msgbuf[0] |= E1000_VT_MSGTYPE_CTS; --out: - /* notify the VF of the results of what it sent us */ - if (retval) - msgbuf[0] |= E1000_VT_MSGTYPE_NACK; - else - msgbuf[0] |= E1000_VT_MSGTYPE_ACK; - -- igb_write_mbx(hw, msgbuf, 1, vf); -+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS; -+ -+ e1000_write_mbx(hw, msgbuf, 1, vf); - } - - static void igb_msg_task(struct igb_adapter *adapter) -@@ -6136,15 +6970,15 @@ - - for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { - /* process any reset requests */ -- if (!igb_check_for_rst(hw, vf)) -+ if (!e1000_check_for_rst(hw, vf)) - igb_vf_reset_event(adapter, vf); - - /* process any messages pending */ -- if (!igb_check_for_msg(hw, vf)) -+ if (!e1000_check_for_msg(hw, vf)) - igb_rcv_msg_from_vf(adapter, vf); - - /* process any acks */ -- if (!igb_check_for_ack(hw, vf)) -+ if (!e1000_check_for_ack(hw, vf)) - igb_rcv_ack_from_vf(adapter, vf); - } - } -@@ -6169,17 +7003,17 @@ - return; - - /* we only need to do this if VMDq is enabled */ -- if (!adapter->vfs_allocated_count) -+ if (!adapter->vmdq_pools) - return; - - for (i = 0; i < hw->mac.uta_reg_count; i++) -- array_wr32(E1000_UTA, i, 
~0); -+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0); - } - - /** -- * igb_intr_msi - Interrupt Handler -- * @irq: interrupt number -- * @data: pointer to a network interface device structure -+ * igb_intr_msi - Interrupt Handler -+ * @irq: interrupt number -+ * @data: pointer to a network interface device structure - **/ - static irqreturn_t igb_intr_msi(int irq, void *data) - { -@@ -6187,7 +7021,7 @@ - struct igb_q_vector *q_vector = adapter->q_vector[0]; - struct e1000_hw *hw = &adapter->hw; - /* read ICR disables interrupts using IAM */ -- u32 icr = rd32(E1000_ICR); -+ u32 icr = E1000_READ_REG(hw, E1000_ICR); - - igb_write_itr(q_vector); - -@@ -6205,16 +7039,18 @@ - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - -+#ifdef HAVE_PTP_1588_CLOCK - if (icr & E1000_ICR_TS) { -- u32 tsicr = rd32(E1000_TSICR); -+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); - - if (tsicr & E1000_TSICR_TXTS) { - /* acknowledge the interrupt */ -- wr32(E1000_TSICR, E1000_TSICR_TXTS); -+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); - /* retrieve hardware timestamp */ - schedule_work(&adapter->ptp_tx_work); - } - } -+#endif /* HAVE_PTP_1588_CLOCK */ - - napi_schedule(&q_vector->napi); - -@@ -6222,9 +7058,9 @@ - } - - /** -- * igb_intr - Legacy Interrupt Handler -- * @irq: interrupt number -- * @data: pointer to a network interface device structure -+ * igb_intr - Legacy Interrupt Handler -+ * @irq: interrupt number -+ * @data: pointer to a network interface device structure - **/ - static irqreturn_t igb_intr(int irq, void *data) - { -@@ -6234,7 +7070,7 @@ - /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No - * need for the IMC write - */ -- u32 icr = rd32(E1000_ICR); -+ u32 icr = E1000_READ_REG(hw, E1000_ICR); - - /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is - * not set, then the adapter didn't send an interrupt -@@ -6259,23 +7095,25 @@ - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - -+#ifdef HAVE_PTP_1588_CLOCK - if (icr & E1000_ICR_TS) { -- u32 tsicr = rd32(E1000_TSICR); -+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR); - - if (tsicr & E1000_TSICR_TXTS) { - /* acknowledge the interrupt */ -- wr32(E1000_TSICR, E1000_TSICR_TXTS); -+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS); - /* retrieve hardware timestamp */ - schedule_work(&adapter->ptp_tx_work); - } - } -+#endif /* HAVE_PTP_1588_CLOCK */ - - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; - } - --static void igb_ring_irq_enable(struct igb_q_vector *q_vector) -+void igb_ring_irq_enable(struct igb_q_vector *q_vector) - { - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; -@@ -6289,26 +7127,25 @@ - } - - if (!test_bit(__IGB_DOWN, &adapter->state)) { -- if (adapter->flags & IGB_FLAG_HAS_MSIX) -- wr32(E1000_EIMS, q_vector->eims_value); -+ if (adapter->msix_entries) -+ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value); - else - igb_irq_enable(adapter); - } - } - - /** -- * igb_poll - NAPI Rx polling callback -- * @napi: napi polling structure -- * @budget: count of how many packets we should handle -+ * igb_poll - NAPI Rx polling callback -+ * @napi: napi polling structure -+ * @budget: count of how many packets we should handle - **/ - static int igb_poll(struct napi_struct *napi, int budget) - { - struct igb_q_vector *q_vector = container_of(napi, -- struct igb_q_vector, -- napi); -+ struct igb_q_vector, napi); - bool clean_complete = true; - --#ifdef CONFIG_IGB_DCA -+#ifdef IGB_DCA - if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) - 
igb_update_dca(q_vector); - #endif -@@ -6318,6 +7155,12 @@ - if (q_vector->rx.ring) - clean_complete &= igb_clean_rx_irq(q_vector, budget); - -+#ifndef HAVE_NETDEV_NAPI_LIST -+ /* if netdev is disabled we need to stop polling */ -+ if (!netif_running(q_vector->adapter->netdev)) -+ clean_complete = true; -+ -+#endif - /* If all work not completed, return budget and keep polling */ - if (!clean_complete) - return budget; -@@ -6330,10 +7173,9 @@ - } - - /** -- * igb_clean_tx_irq - Reclaim resources after transmit completes -- * @q_vector: pointer to q_vector containing needed info -- * -- * returns true if ring is completely cleaned -+ * igb_clean_tx_irq - Reclaim resources after transmit completes -+ * @q_vector: pointer to q_vector containing needed info -+ * returns TRUE if ring is completely cleaned - **/ - static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) - { -@@ -6426,16 +7268,20 @@ - - netdev_tx_completed_queue(txring_txq(tx_ring), - total_packets, total_bytes); -+ - i += tx_ring->count; - tx_ring->next_to_clean = i; -- u64_stats_update_begin(&tx_ring->tx_syncp); - tx_ring->tx_stats.bytes += total_bytes; - tx_ring->tx_stats.packets += total_packets; -- u64_stats_update_end(&tx_ring->tx_syncp); - q_vector->tx.total_bytes += total_bytes; - q_vector->tx.total_packets += total_packets; - -+#ifdef DEBUG -+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) && -+ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) { -+#else - if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { -+#endif - struct e1000_hw *hw = &adapter->hw; - - /* Detect a transmit hang in hardware, this serializes the -@@ -6444,10 +7290,23 @@ - clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); - if (tx_buffer->next_to_watch && - time_after(jiffies, tx_buffer->time_stamp + -- (adapter->tx_timeout_factor * HZ)) && -- !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { -+ (adapter->tx_timeout_factor * HZ)) -+ && !(E1000_READ_REG(hw, E1000_STATUS) & -+ E1000_STATUS_TXOFF)) { - - /* detected Tx unit hang */ -+#ifdef DEBUG -+ adapter->tx_hang_detected = TRUE; -+ if (adapter->disable_hw_reset) { -+ DPRINTK(DRV, WARNING, -+ "Deactivating netdev watchdog timer\n"); -+ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer)) -+ dev_put(netdev_ring(tx_ring)); -+#ifndef HAVE_NET_DEVICE_OPS -+ netdev_ring(tx_ring)->tx_timeout = NULL; -+#endif -+ } -+#endif /* DEBUG */ - dev_err(tx_ring->dev, - "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" -@@ -6461,7 +7320,7 @@ - " jiffies <%lx>\n" - " desc.status <%x>\n", - tx_ring->queue_index, -- rd32(E1000_TDH(tx_ring->reg_idx)), -+ E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)), - readl(tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, -@@ -6469,8 +7328,11 @@ - tx_buffer->next_to_watch, - jiffies, - tx_buffer->next_to_watch->wb.status); -- netif_stop_subqueue(tx_ring->netdev, -- tx_ring->queue_index); -+ if (netif_is_multiqueue(netdev_ring(tx_ring))) -+ netif_stop_subqueue(netdev_ring(tx_ring), -+ ring_queue_index(tx_ring)); -+ else -+ netif_stop_queue(netdev_ring(tx_ring)); - - /* we are about to reset, no point in enabling stuff */ - return true; -@@ -6479,33 +7341,63 @@ - - #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(total_packets && -- netif_carrier_ok(tx_ring->netdev) && -- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { -+ netif_carrier_ok(netdev_ring(tx_ring)) && -+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
- */ - smp_mb(); -- if (__netif_subqueue_stopped(tx_ring->netdev, -- tx_ring->queue_index) && -- !(test_bit(__IGB_DOWN, &adapter->state))) { -- netif_wake_subqueue(tx_ring->netdev, -- tx_ring->queue_index); -- -- u64_stats_update_begin(&tx_ring->tx_syncp); -- tx_ring->tx_stats.restart_queue++; -- u64_stats_update_end(&tx_ring->tx_syncp); -+ if (netif_is_multiqueue(netdev_ring(tx_ring))) { -+ if (__netif_subqueue_stopped(netdev_ring(tx_ring), -+ ring_queue_index(tx_ring)) && -+ !(test_bit(__IGB_DOWN, &adapter->state))) { -+ netif_wake_subqueue(netdev_ring(tx_ring), -+ ring_queue_index(tx_ring)); -+ tx_ring->tx_stats.restart_queue++; -+ } -+ } else { -+ if (netif_queue_stopped(netdev_ring(tx_ring)) && -+ !(test_bit(__IGB_DOWN, &adapter->state))) { -+ netif_wake_queue(netdev_ring(tx_ring)); -+ tx_ring->tx_stats.restart_queue++; -+ } - } - } - - return !!budget; - } - -+#ifdef HAVE_VLAN_RX_REGISTER -+/** -+ * igb_receive_skb - helper function to handle rx indications -+ * @q_vector: structure containing interrupt and ring information -+ * @skb: packet to send up -+ **/ -+static void igb_receive_skb(struct igb_q_vector *q_vector, -+ struct sk_buff *skb) -+{ -+ struct vlan_group **vlgrp = netdev_priv(skb->dev); -+ -+ if (IGB_CB(skb)->vid) { -+ if (*vlgrp) { -+ vlan_gro_receive(&q_vector->napi, *vlgrp, -+ IGB_CB(skb)->vid, skb); -+ } else { -+ dev_kfree_skb_any(skb); -+ } -+ } else { -+ napi_gro_receive(&q_vector->napi, skb); -+ } -+} -+ -+#endif /* HAVE_VLAN_RX_REGISTER */ -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - /** -- * igb_reuse_rx_page - page flip buffer and store it back on the ring -- * @rx_ring: rx descriptor ring to store buffers on -- * @old_buff: donor buffer to have page reused -+ * igb_reuse_rx_page - page flip buffer and store it back on the ring -+ * @rx_ring: rx descriptor ring to store buffers on -+ * @old_buff: donor buffer to have page reused - * -- * Synchronizes page for reuse by the adapter -+ * Synchronizes page for reuse by the adapter - **/ - static void igb_reuse_rx_page(struct igb_ring *rx_ring, - struct igb_rx_buffer *old_buff) -@@ -6545,39 +7437,34 @@ - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= IGB_RX_BUFSZ; - -- /* since we are the only owner of the page and we need to -- * increment it, just set the value to 2 in order to avoid -- * an unnecessary locked operation -- */ -- atomic_set(&page->_count, 2); - #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) - return false; -+#endif - - /* bump ref count on page before it is given to the stack */ - get_page(page); --#endif - - return true; - } - - /** -- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff -- * @rx_ring: rx descriptor ring to transact packets on -- * @rx_buffer: buffer containing page to add -- * @rx_desc: descriptor containing length of buffer written by hardware -- * @skb: sk_buff to place the data into -- * -- * This function will add the data contained in rx_buffer->page to the skb. -- * This is done either through a direct copy if the data in the buffer is -- * less than the skb header size, otherwise it will just attach the page as -- * a frag to the skb. 
-+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff -+ * @rx_ring: rx descriptor ring to transact packets on -+ * @rx_buffer: buffer containing page to add -+ * @rx_desc: descriptor containing length of buffer written by hardware -+ * @skb: sk_buff to place the data into -+ * -+ * This function will add the data contained in rx_buffer->page to the skb. -+ * This is done either through a direct copy if the data in the buffer is -+ * less than the skb header size, otherwise it will just attach the page as -+ * a frag to the skb. - * -- * The function will then update the page offset if necessary and return -- * true if the buffer can be reused by the adapter. -+ * The function will then update the page offset if necessary and return -+ * true if the buffer can be reused by the adapter. - **/ - static bool igb_add_rx_frag(struct igb_ring *rx_ring, - struct igb_rx_buffer *rx_buffer, -@@ -6585,22 +7472,27 @@ - struct sk_buff *skb) - { - struct page *page = rx_buffer->page; -+ unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - #if (PAGE_SIZE < 8192) - unsigned int truesize = IGB_RX_BUFSZ; - #else -- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); -+ unsigned int truesize = SKB_DATA_ALIGN(size); - #endif -+ unsigned int pull_len; - -- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { -- unsigned char *va = page_address(page) + rx_buffer->page_offset; -+ if (unlikely(skb_is_nonlinear(skb))) -+ goto add_tail_frag; - -- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { -- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); -- va += IGB_TS_HDR_LEN; -- size -= IGB_TS_HDR_LEN; -- } -+#ifdef HAVE_PTP_1588_CLOCK -+ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { -+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); -+ va += IGB_TS_HDR_LEN; -+ size -= IGB_TS_HDR_LEN; -+ } -+#endif /* HAVE_PTP_1588_CLOCK */ - -+ if (likely(size <= IGB_RX_HDR_LEN)) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* we can reuse buffer as-is, just make sure it is local */ -@@ -6612,8 +7504,21 @@ - return false; - } - -+ /* we need the header to contain the greater of either ETH_HLEN or -+ * 60 bytes if the skb->len is less than 60 for skb_pad. 
-+ */ -+ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); -+ -+ /* align pull length to size of long to optimize memcpy performance */ -+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); -+ -+ /* update all of the pointers */ -+ va += pull_len; -+ size -= pull_len; -+ -+add_tail_frag: - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, -- rx_buffer->page_offset, size, truesize); -+ (unsigned long)va & ~PAGE_MASK, size, truesize); - - return igb_can_reuse_rx_page(rx_buffer, page, truesize); - } -@@ -6648,7 +7553,8 @@ - return NULL; - } - -- /* we will be copying header into skb->data in -+ /* -+ * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ -@@ -6672,72 +7578,606 @@ - PAGE_SIZE, DMA_FROM_DEVICE); - } - -- /* clear contents of rx_buffer */ -- rx_buffer->page = NULL; -+ /* clear contents of rx_buffer */ -+ rx_buffer->page = NULL; -+ -+ return skb; -+} -+ -+#endif -+static inline void igb_rx_checksum(struct igb_ring *ring, -+ union e1000_adv_rx_desc *rx_desc, -+ struct sk_buff *skb) -+{ -+ skb_checksum_none_assert(skb); -+ -+ /* Ignore Checksum bit is set */ -+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) -+ return; -+ -+ /* Rx checksum disabled via ethtool */ -+ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) -+ return; -+ -+ /* TCP/UDP checksum error bit is set */ -+ if (igb_test_staterr(rx_desc, -+ E1000_RXDEXT_STATERR_TCPE | -+ E1000_RXDEXT_STATERR_IPE)) { -+ /* -+ * work around errata with sctp packets where the TCPE aka -+ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) -+ * packets, (aka let the stack check the crc32c) -+ */ -+ if (!((skb->len == 60) && -+ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) -+ ring->rx_stats.csum_err++; -+ -+ /* let the stack verify checksum errors */ -+ return; -+ } -+ /* It must be a TCP or UDP packet with a valid checksum */ -+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | -+ E1000_RXD_STAT_UDPCS)) -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+} -+ -+#ifdef NETIF_F_RXHASH -+static inline void igb_rx_hash(struct igb_ring *ring, -+ union e1000_adv_rx_desc *rx_desc, -+ struct sk_buff *skb) -+{ -+ if (netdev_ring(ring)->features & NETIF_F_RXHASH) -+ skb_set_hash(skb, -+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), -+ PKT_HASH_TYPE_L3); -+} -+ -+#endif -+#ifndef IGB_NO_LRO -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+/** -+ * igb_merge_active_tail - merge active tail into lro skb -+ * @tail: pointer to active tail in frag_list -+ * -+ * This function merges the length and data of an active tail into the -+ * skb containing the frag_list. It resets the tail's pointer to the head, -+ * but it leaves the heads pointer to tail intact. -+ **/ -+static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail) -+{ -+ struct sk_buff *head = IGB_CB(tail)->head; -+ -+ if (!head) -+ return tail; -+ -+ head->len += tail->len; -+ head->data_len += tail->len; -+ head->truesize += tail->len; -+ -+ IGB_CB(tail)->head = NULL; -+ -+ return head; -+} -+ -+/** -+ * igb_add_active_tail - adds an active tail into the skb frag_list -+ * @head: pointer to the start of the skb -+ * @tail: pointer to active tail to add to frag_list -+ * -+ * This function adds an active tail to the end of the frag list. This tail -+ * will still be receiving data so we cannot yet ad it's stats to the main -+ * skb. That is done via igb_merge_active_tail. 
-+ **/ -+static inline void igb_add_active_tail(struct sk_buff *head, -+ struct sk_buff *tail) -+{ -+ struct sk_buff *old_tail = IGB_CB(head)->tail; -+ -+ if (old_tail) { -+ igb_merge_active_tail(old_tail); -+ old_tail->next = tail; -+ } else { -+ skb_shinfo(head)->frag_list = tail; -+ } -+ -+ IGB_CB(tail)->head = head; -+ IGB_CB(head)->tail = tail; -+ -+ IGB_CB(head)->append_cnt++; -+} -+ -+/** -+ * igb_close_active_frag_list - cleanup pointers on a frag_list skb -+ * @head: pointer to head of an active frag list -+ * -+ * This function will clear the frag_tail_tracker pointer on an active -+ * frag_list and returns true if the pointer was actually set -+ **/ -+static inline bool igb_close_active_frag_list(struct sk_buff *head) -+{ -+ struct sk_buff *tail = IGB_CB(head)->tail; -+ -+ if (!tail) -+ return false; -+ -+ igb_merge_active_tail(tail); -+ -+ IGB_CB(head)->tail = NULL; -+ -+ return true; -+} -+ -+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ -+/** -+ * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled -+ * @adapter: board private structure -+ * @rx_desc: pointer to the rx descriptor -+ * @skb: pointer to the skb to be merged -+ * -+ **/ -+static inline bool igb_can_lro(struct igb_ring *rx_ring, -+ union e1000_adv_rx_desc *rx_desc, -+ struct sk_buff *skb) -+{ -+ struct iphdr *iph = (struct iphdr *)skb->data; -+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; -+ -+ /* verify hardware indicates this is IPv4/TCP */ -+ if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) || -+ !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4)))) -+ return false; -+ -+ /* .. and LRO is enabled */ -+ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO)) -+ return false; -+ -+ /* .. and we are not in promiscuous mode */ -+ if (netdev_ring(rx_ring)->flags & IFF_PROMISC) -+ return false; -+ -+ /* .. and the header is large enough for us to read IP/TCP fields */ -+ if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr))) -+ return false; -+ -+ /* .. and there are no VLANs on packet */ -+ if (skb->protocol != htons(ETH_P_IP)) -+ return false; -+ -+ /* .. and we are version 4 with no options */ -+ if (*(u8 *)iph != 0x45) -+ return false; -+ -+ /* .. and the packet is not fragmented */ -+ if (iph->frag_off & htons(IP_MF | IP_OFFSET)) -+ return false; -+ -+ /* .. and that next header is TCP */ -+ if (iph->protocol != IPPROTO_TCP) -+ return false; -+ -+ return true; -+} -+ -+static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb) -+{ -+ return (struct igb_lrohdr *)skb->data; -+} -+ -+/** -+ * igb_lro_flush - Indicate packets to upper layer. -+ * -+ * Update IP and TCP header part of head skb if more than one -+ * skb's chained and indicate packets to upper layer. 
-+ **/ -+static void igb_lro_flush(struct igb_q_vector *q_vector, -+ struct sk_buff *skb) -+{ -+ struct igb_lro_list *lrolist = &q_vector->lrolist; -+ -+ __skb_unlink(skb, &lrolist->active); -+ -+ if (IGB_CB(skb)->append_cnt) { -+ struct igb_lrohdr *lroh = igb_lro_hdr(skb); -+ -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ /* close any active lro contexts */ -+ igb_close_active_frag_list(skb); -+ -+#endif -+ /* incorporate ip header and re-calculate checksum */ -+ lroh->iph.tot_len = ntohs(skb->len); -+ lroh->iph.check = 0; -+ -+ /* header length is 5 since we know no options exist */ -+ lroh->iph.check = ip_fast_csum((u8 *)lroh, 5); -+ -+ /* clear TCP checksum to indicate we are an LRO frame */ -+ lroh->th.check = 0; -+ -+ /* incorporate latest timestamp into the tcp header */ -+ if (IGB_CB(skb)->tsecr) { -+ lroh->ts[2] = IGB_CB(skb)->tsecr; -+ lroh->ts[1] = htonl(IGB_CB(skb)->tsval); -+ } -+#ifdef NETIF_F_GSO -+ -+#ifdef NAPI_GRO_CB -+ NAPI_GRO_CB(skb)->data_offset = 0; -+#endif -+ skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss; -+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; -+#endif -+ } -+ -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_receive_skb(q_vector, skb); -+#else -+ napi_gro_receive(&q_vector->napi, skb); -+#endif -+ lrolist->stats.flushed++; -+} -+ -+static void igb_lro_flush_all(struct igb_q_vector *q_vector) -+{ -+ struct igb_lro_list *lrolist = &q_vector->lrolist; -+ struct sk_buff *skb, *tmp; -+ -+ skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp) -+ igb_lro_flush(q_vector, skb); -+} -+ -+/* -+ * igb_lro_header_ok - Main LRO function. -+ **/ -+static void igb_lro_header_ok(struct sk_buff *skb) -+{ -+ struct igb_lrohdr *lroh = igb_lro_hdr(skb); -+ u16 opt_bytes, data_len; -+ -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ IGB_CB(skb)->tail = NULL; -+#endif -+ IGB_CB(skb)->tsecr = 0; -+ IGB_CB(skb)->append_cnt = 0; -+ IGB_CB(skb)->mss = 0; -+ -+ /* ensure that the checksum is valid */ -+ if (skb->ip_summed != CHECKSUM_UNNECESSARY) -+ return; -+ -+ /* If we see CE codepoint in IP header, packet is not mergeable */ -+ if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph))) -+ return; -+ -+ /* ensure no bits set besides ack or psh */ -+ if (lroh->th.fin || lroh->th.syn || lroh->th.rst || -+ lroh->th.urg || lroh->th.ece || lroh->th.cwr || -+ !lroh->th.ack) -+ return; -+ -+ /* store the total packet length */ -+ data_len = ntohs(lroh->iph.tot_len); -+ -+ /* remove any padding from the end of the skb */ -+ __pskb_trim(skb, data_len); -+ -+ /* remove header length from data length */ -+ data_len -= sizeof(struct igb_lrohdr); -+ -+ /* -+ * check for timestamps. 
Since the only option we handle are timestamps, -+ * we only have to handle the simple case of aligned timestamps -+ */ -+ opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr); -+ if (opt_bytes != 0) { -+ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) || -+ !pskb_may_pull(skb, sizeof(struct igb_lrohdr) + -+ TCPOLEN_TSTAMP_ALIGNED) || -+ (lroh->ts[0] != htonl((TCPOPT_NOP << 24) | -+ (TCPOPT_NOP << 16) | -+ (TCPOPT_TIMESTAMP << 8) | -+ TCPOLEN_TIMESTAMP)) || -+ (lroh->ts[2] == 0)) { -+ return; -+ } -+ -+ IGB_CB(skb)->tsval = ntohl(lroh->ts[1]); -+ IGB_CB(skb)->tsecr = lroh->ts[2]; -+ -+ data_len -= TCPOLEN_TSTAMP_ALIGNED; -+ } -+ -+ /* record data_len as mss for the packet */ -+ IGB_CB(skb)->mss = data_len; -+ IGB_CB(skb)->next_seq = ntohl(lroh->th.seq); -+} -+ -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -+static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb) -+{ -+ struct skb_shared_info *sh_info; -+ struct skb_shared_info *new_skb_info; -+ unsigned int data_len; -+ -+ sh_info = skb_shinfo(lro_skb); -+ new_skb_info = skb_shinfo(new_skb); -+ -+ /* copy frags into the last skb */ -+ memcpy(sh_info->frags + sh_info->nr_frags, -+ new_skb_info->frags, -+ new_skb_info->nr_frags * sizeof(skb_frag_t)); -+ -+ /* copy size data over */ -+ sh_info->nr_frags += new_skb_info->nr_frags; -+ data_len = IGB_CB(new_skb)->mss; -+ lro_skb->len += data_len; -+ lro_skb->data_len += data_len; -+ lro_skb->truesize += data_len; -+ -+ /* wipe record of data from new_skb */ -+ new_skb_info->nr_frags = 0; -+ new_skb->len = new_skb->data_len = 0; -+ dev_kfree_skb_any(new_skb); -+} -+ -+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ -+/** -+ * igb_lro_receive - if able, queue skb into lro chain -+ * @q_vector: structure containing interrupt and ring information -+ * @new_skb: pointer to current skb being checked -+ * -+ * Checks whether the skb given is eligible for LRO and if that's -+ * fine chains it to the existing lro_skb based on flowid. If an LRO for -+ * the flow doesn't exist create one. 
-+ **/ -+static void igb_lro_receive(struct igb_q_vector *q_vector, -+ struct sk_buff *new_skb) -+{ -+ struct sk_buff *lro_skb; -+ struct igb_lro_list *lrolist = &q_vector->lrolist; -+ struct igb_lrohdr *lroh = igb_lro_hdr(new_skb); -+ __be32 saddr = lroh->iph.saddr; -+ __be32 daddr = lroh->iph.daddr; -+ __be32 tcp_ports = *(__be32 *)&lroh->th; -+ u16 data_len; -+#ifdef HAVE_VLAN_RX_REGISTER -+ u16 vid = IGB_CB(new_skb)->vid; -+#else -+ u16 vid = new_skb->vlan_tci; -+#endif -+ -+ igb_lro_header_ok(new_skb); -+ -+ /* -+ * we have a packet that might be eligible for LRO, -+ * so see if it matches anything we might expect -+ */ -+ skb_queue_walk(&lrolist->active, lro_skb) { -+ if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports || -+ igb_lro_hdr(lro_skb)->iph.saddr != saddr || -+ igb_lro_hdr(lro_skb)->iph.daddr != daddr) -+ continue; -+ -+#ifdef HAVE_VLAN_RX_REGISTER -+ if (IGB_CB(lro_skb)->vid != vid) -+#else -+ if (lro_skb->vlan_tci != vid) -+#endif -+ continue; -+ -+ /* out of order packet */ -+ if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) { -+ igb_lro_flush(q_vector, lro_skb); -+ IGB_CB(new_skb)->mss = 0; -+ break; -+ } -+ -+ /* TCP timestamp options have changed */ -+ if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) { -+ igb_lro_flush(q_vector, lro_skb); -+ break; -+ } -+ -+ /* make sure timestamp values are increasing */ -+ if (IGB_CB(lro_skb)->tsecr && -+ IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) { -+ igb_lro_flush(q_vector, lro_skb); -+ IGB_CB(new_skb)->mss = 0; -+ break; -+ } -+ -+ data_len = IGB_CB(new_skb)->mss; -+ -+ /* Check for all of the above below -+ * malformed header -+ * no tcp data -+ * resultant packet would be too large -+ * new skb is larger than our current mss -+ * data would remain in header -+ * we would consume more frags then the sk_buff contains -+ * ack sequence numbers changed -+ * window size has changed -+ */ -+ if (data_len == 0 || -+ data_len > IGB_CB(lro_skb)->mss || -+ data_len > IGB_CB(lro_skb)->free || -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ data_len != new_skb->data_len || -+ skb_shinfo(new_skb)->nr_frags >= -+ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) || -+#endif -+ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq || -+ igb_lro_hdr(lro_skb)->th.window != lroh->th.window) { -+ igb_lro_flush(q_vector, lro_skb); -+ break; -+ } -+ -+ /* Remove IP and TCP header*/ -+ skb_pull(new_skb, new_skb->len - data_len); -+ -+ /* update timestamp and timestamp echo response */ -+ IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval; -+ IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr; -+ -+ /* update sequence and free space */ -+ IGB_CB(lro_skb)->next_seq += data_len; -+ IGB_CB(lro_skb)->free -= data_len; -+ -+ /* update append_cnt */ -+ IGB_CB(lro_skb)->append_cnt++; -+ -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ /* if header is empty pull pages into current skb */ -+ igb_merge_frags(lro_skb, new_skb); -+#else -+ /* chain this new skb in frag_list */ -+ igb_add_active_tail(lro_skb, new_skb); -+#endif -+ -+ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh || -+ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) { -+ igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh; -+ igb_lro_flush(q_vector, lro_skb); -+ } -+ -+ lrolist->stats.coal++; -+ return; -+ } -+ -+ if (IGB_CB(new_skb)->mss && !lroh->th.psh) { -+ /* if we are at capacity flush the tail */ -+ if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) { -+ lro_skb = skb_peek_tail(&lrolist->active); -+ if (lro_skb) -+ igb_lro_flush(q_vector, lro_skb); -+ } -+ -+ /* update sequence 
and free space */ -+ IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss; -+ IGB_CB(new_skb)->free = 65521 - new_skb->len; - -- return skb; -+ /* .. and insert at the front of the active list */ -+ __skb_queue_head(&lrolist->active, new_skb); -+ -+ lrolist->stats.coal++; -+ return; -+ } -+ -+ /* packet not handled by any of the above, pass it to the stack */ -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_receive_skb(q_vector, new_skb); -+#else -+ napi_gro_receive(&q_vector->napi, new_skb); -+#endif - } - --static inline void igb_rx_checksum(struct igb_ring *ring, -+#endif /* IGB_NO_LRO */ -+/** -+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor -+ * @rx_ring: rx descriptor ring packet is being transacted on -+ * @rx_desc: pointer to the EOP Rx descriptor -+ * @skb: pointer to current skb being populated -+ * -+ * This function checks the ring, descriptor, and packet information in -+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and -+ * other fields within the skb. -+ **/ -+static void igb_process_skb_fields(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) - { -- skb_checksum_none_assert(skb); -+ struct net_device *dev = rx_ring->netdev; -+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; -+ bool notype; - -- /* Ignore Checksum bit is set */ -- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) -- return; -+#ifdef NETIF_F_RXHASH -+ igb_rx_hash(rx_ring, rx_desc, skb); - -- /* Rx checksum disabled via ethtool */ -- if (!(ring->netdev->features & NETIF_F_RXCSUM)) -- return; -+#endif -+ igb_rx_checksum(rx_ring, rx_desc, skb); - -- /* TCP/UDP checksum error bit is set */ -- if (igb_test_staterr(rx_desc, -- E1000_RXDEXT_STATERR_TCPE | -- E1000_RXDEXT_STATERR_IPE)) { -- /* work around errata with sctp packets where the TCPE aka -- * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) -- * packets, (aka let the stack check the crc32c) -- */ -- if (!((skb->len == 60) && -- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { -- u64_stats_update_begin(&ring->rx_syncp); -- ring->rx_stats.csum_err++; -- u64_stats_update_end(&ring->rx_syncp); -- } -- /* let the stack verify checksum errors */ -- return; -+ /* update packet type stats */ -+ switch (pkt_info & E1000_RXDADV_PKTTYPE_ILMASK) { -+ case E1000_RXDADV_PKTTYPE_IPV4: -+ rx_ring->pkt_stats.ipv4_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_IPV4_EX: -+ rx_ring->pkt_stats.ipv4e_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_IPV6: -+ rx_ring->pkt_stats.ipv6_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_IPV6_EX: -+ rx_ring->pkt_stats.ipv6e_packets++; -+ break; -+ default: -+ notype = true; -+ break; - } -- /* It must be a TCP or UDP packet with a valid checksum */ -- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | -- E1000_RXD_STAT_UDPCS)) -- skb->ip_summed = CHECKSUM_UNNECESSARY; - -- dev_dbg(ring->dev, "cksum success: bits %08X\n", -- le32_to_cpu(rx_desc->wb.upper.status_error)); --} -+ switch (pkt_info & E1000_RXDADV_PKTTYPE_TLMASK) { -+ case E1000_RXDADV_PKTTYPE_TCP: -+ rx_ring->pkt_stats.tcp_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_UDP: -+ rx_ring->pkt_stats.udp_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_SCTP: -+ rx_ring->pkt_stats.sctp_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_NFS: -+ rx_ring->pkt_stats.nfs_packets++; -+ break; -+ case E1000_RXDADV_PKTTYPE_NONE: -+ if (notype) -+ rx_ring->pkt_stats.other_packets++; -+ break; -+ default: -+ break; -+ } - --static inline void igb_rx_hash(struct igb_ring *ring, -- 
union e1000_adv_rx_desc *rx_desc, -- struct sk_buff *skb) --{ -- if (ring->netdev->features & NETIF_F_RXHASH) -- skb_set_hash(skb, -- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), -- PKT_HASH_TYPE_L3); -+#ifdef HAVE_PTP_1588_CLOCK -+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && -+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) -+ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); -+ -+#endif /* HAVE_PTP_1588_CLOCK */ -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && -+#else -+ if ((dev->features & NETIF_F_HW_VLAN_RX) && -+#endif -+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { -+ u16 vid = 0; -+ -+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && -+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) -+ vid = be16_to_cpu(rx_desc->wb.upper.vlan); -+ else -+ vid = le16_to_cpu(rx_desc->wb.upper.vlan); -+#ifdef HAVE_VLAN_RX_REGISTER -+ IGB_CB(skb)->vid = vid; -+ } else { -+ IGB_CB(skb)->vid = 0; -+#else -+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); -+#endif -+ } -+ -+ skb_record_rx_queue(skb, rx_ring->queue_index); -+ -+ skb->protocol = eth_type_trans(skb, dev); - } - - /** -- * igb_is_non_eop - process handling of non-EOP buffers -- * @rx_ring: Rx ring being processed -- * @rx_desc: Rx descriptor for current buffer -- * @skb: current socket buffer containing buffer in progress -- * -- * This function updates next to clean. If the buffer is an EOP buffer -- * this function exits returning false, otherwise it will place the -- * sk_buff in the next buffer to be chained and return true indicating -- * that this is in fact a non-EOP buffer. -+ * igb_is_non_eop - process handling of non-EOP buffers -+ * @rx_ring: Rx ring being processed -+ * @rx_desc: Rx descriptor for current buffer -+ * -+ * This function updates next to clean. If the buffer is an EOP buffer -+ * this function exits returning false, otherwise it will place the -+ * sk_buff in the next buffer to be chained and return true indicating -+ * that this is in fact a non-EOP buffer. - **/ - static bool igb_is_non_eop(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc) -@@ -6756,200 +8196,134 @@ - return true; - } - --/** -- * igb_get_headlen - determine size of header for LRO/GRO -- * @data: pointer to the start of the headers -- * @max_len: total length of section to find headers in -- * -- * This function is meant to determine the length of headers that will -- * be recognized by hardware for LRO, and GRO offloads. The main -- * motivation of doing this is to only perform one pull for IPv4 TCP -- * packets so that we can do basic things like calculating the gso_size -- * based on the average data per packet. 
-- **/ --static unsigned int igb_get_headlen(unsigned char *data, -- unsigned int max_len) --{ -- union { -- unsigned char *network; -- /* l2 headers */ -- struct ethhdr *eth; -- struct vlan_hdr *vlan; -- /* l3 headers */ -- struct iphdr *ipv4; -- struct ipv6hdr *ipv6; -- } hdr; -- __be16 protocol; -- u8 nexthdr = 0; /* default to not TCP */ -- u8 hlen; -- -- /* this should never happen, but better safe than sorry */ -- if (max_len < ETH_HLEN) -- return max_len; -- -- /* initialize network frame pointer */ -- hdr.network = data; -- -- /* set first protocol and move network header forward */ -- protocol = hdr.eth->h_proto; -- hdr.network += ETH_HLEN; -- -- /* handle any vlan tag if present */ -- if (protocol == htons(ETH_P_8021Q)) { -- if ((hdr.network - data) > (max_len - VLAN_HLEN)) -- return max_len; -- -- protocol = hdr.vlan->h_vlan_encapsulated_proto; -- hdr.network += VLAN_HLEN; -- } -- -- /* handle L3 protocols */ -- if (protocol == htons(ETH_P_IP)) { -- if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) -- return max_len; -- -- /* access ihl as a u8 to avoid unaligned access on ia64 */ -- hlen = (hdr.network[0] & 0x0F) << 2; -- -- /* verify hlen meets minimum size requirements */ -- if (hlen < sizeof(struct iphdr)) -- return hdr.network - data; -- -- /* record next protocol if header is present */ -- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) -- nexthdr = hdr.ipv4->protocol; -- } else if (protocol == htons(ETH_P_IPV6)) { -- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) -- return max_len; -- -- /* record next protocol */ -- nexthdr = hdr.ipv6->nexthdr; -- hlen = sizeof(struct ipv6hdr); -- } else { -- return hdr.network - data; -- } -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+/* igb_clean_rx_irq -- * legacy */ -+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) -+{ -+ struct igb_ring *rx_ring = q_vector->rx.ring; -+ unsigned int total_bytes = 0, total_packets = 0; -+ u16 cleaned_count = igb_desc_unused(rx_ring); - -- /* relocate pointer to start of L4 header */ -- hdr.network += hlen; -+ do { -+ struct igb_rx_buffer *rx_buffer; -+ union e1000_adv_rx_desc *rx_desc; -+ struct sk_buff *skb; -+ u16 ntc; - -- /* finally sort out TCP */ -- if (nexthdr == IPPROTO_TCP) { -- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) -- return max_len; -+ /* return some buffers to hardware, one at a time is too slow */ -+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { -+ igb_alloc_rx_buffers(rx_ring, cleaned_count); -+ cleaned_count = 0; -+ } - -- /* access doff as a u8 to avoid unaligned access on ia64 */ -- hlen = (hdr.network[12] & 0xF0) >> 2; -+ ntc = rx_ring->next_to_clean; -+ rx_desc = IGB_RX_DESC(rx_ring, ntc); -+ rx_buffer = &rx_ring->rx_buffer_info[ntc]; - -- /* verify hlen meets minimum size requirements */ -- if (hlen < sizeof(struct tcphdr)) -- return hdr.network - data; -+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) -+ break; - -- hdr.network += hlen; -- } else if (nexthdr == IPPROTO_UDP) { -- if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) -- return max_len; -+ /* -+ * This memory barrier is needed to keep us from reading -+ * any other fields out of the rx_desc until we know the -+ * RXD_STAT_DD bit is set -+ */ -+ rmb(); - -- hdr.network += sizeof(struct udphdr); -- } -+ skb = rx_buffer->skb; - -- /* If everything has gone correctly hdr.network should be the -- * data section of the packet and will be the end of the header. -- * If not then it probably represents the end of the last recognized -- * header. 
-- */ -- if ((hdr.network - data) < max_len) -- return hdr.network - data; -- else -- return max_len; --} -+ prefetch(skb->data); - --/** -- * igb_pull_tail - igb specific version of skb_pull_tail -- * @rx_ring: rx descriptor ring packet is being transacted on -- * @rx_desc: pointer to the EOP Rx descriptor -- * @skb: pointer to current skb being adjusted -- * -- * This function is an igb specific version of __pskb_pull_tail. The -- * main difference between this version and the original function is that -- * this function can make several assumptions about the state of things -- * that allow for significant optimizations versus the standard function. -- * As a result we can do things like drop a frag and maintain an accurate -- * truesize for the skb. -- */ --static void igb_pull_tail(struct igb_ring *rx_ring, -- union e1000_adv_rx_desc *rx_desc, -- struct sk_buff *skb) --{ -- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; -- unsigned char *va; -- unsigned int pull_len; -+ /* pull the header of the skb in */ -+ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length)); - -- /* it is valid to use page_address instead of kmap since we are -- * working with pages allocated out of the lomem pool per -- * alloc_page(GFP_ATOMIC) -- */ -- va = skb_frag_address(frag); -+ /* clear skb reference in buffer info structure */ -+ rx_buffer->skb = NULL; - -- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { -- /* retrieve timestamp from buffer */ -- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); -+ cleaned_count++; - -- /* update pointers to remove timestamp header */ -- skb_frag_size_sub(frag, IGB_TS_HDR_LEN); -- frag->page_offset += IGB_TS_HDR_LEN; -- skb->data_len -= IGB_TS_HDR_LEN; -- skb->len -= IGB_TS_HDR_LEN; -+ BUG_ON(igb_is_non_eop(rx_ring, rx_desc)); - -- /* move va to start of packet data */ -- va += IGB_TS_HDR_LEN; -- } -+ dma_unmap_single(rx_ring->dev, rx_buffer->dma, -+ rx_ring->rx_buffer_len, -+ DMA_FROM_DEVICE); -+ rx_buffer->dma = 0; - -- /* we need the header to contain the greater of either ETH_HLEN or -- * 60 bytes if the skb->len is less than 60 for skb_pad. 
-- */ -- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); -+ if (igb_test_staterr(rx_desc, -+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { -+ dev_kfree_skb_any(skb); -+ continue; -+ } - -- /* align pull length to size of long to optimize memcpy performance */ -- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); -+ total_bytes += skb->len; - -- /* update all of the pointers */ -- skb_frag_size_sub(frag, pull_len); -- frag->page_offset += pull_len; -- skb->data_len -= pull_len; -- skb->tail += pull_len; -+ /* populate checksum, timestamp, VLAN, and protocol */ -+ igb_process_skb_fields(rx_ring, rx_desc, skb); -+ -+#ifndef IGB_NO_LRO -+ if (igb_can_lro(rx_ring, rx_desc, skb)) -+ igb_lro_receive(q_vector, skb); -+ else -+#endif -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_receive_skb(q_vector, skb); -+#else -+ napi_gro_receive(&q_vector->napi, skb); -+#endif -+ -+#ifndef NETIF_F_GRO -+ netdev_ring(rx_ring)->last_rx = jiffies; -+ -+#endif -+ /* update budget accounting */ -+ total_packets++; -+ } while (likely(total_packets < budget)); -+ -+ rx_ring->rx_stats.packets += total_packets; -+ rx_ring->rx_stats.bytes += total_bytes; -+ q_vector->rx.total_packets += total_packets; -+ q_vector->rx.total_bytes += total_bytes; -+ -+ if (cleaned_count) -+ igb_alloc_rx_buffers(rx_ring, cleaned_count); -+ -+#ifndef IGB_NO_LRO -+ igb_lro_flush_all(q_vector); -+ -+#endif /* IGB_NO_LRO */ -+ return (total_packets < budget); - } -+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - - /** -- * igb_cleanup_headers - Correct corrupted or empty headers -- * @rx_ring: rx descriptor ring packet is being transacted on -- * @rx_desc: pointer to the EOP Rx descriptor -- * @skb: pointer to current skb being fixed -+ * igb_cleanup_headers - Correct corrupted or empty headers -+ * @rx_ring: rx descriptor ring packet is being transacted on -+ * @rx_desc: pointer to the EOP Rx descriptor -+ * @skb: pointer to current skb being fixed - * -- * Address the case where we are pulling data in on pages only -- * and as such no data is present in the skb header. -+ * Address the case where we are pulling data in on pages only -+ * and as such no data is present in the skb header. - * -- * In addition if skb is not at least 60 bytes we need to pad it so that -- * it is large enough to qualify as a valid Ethernet frame. -+ * In addition if skb is not at least 60 bytes we need to pad it so that -+ * it is large enough to qualify as a valid Ethernet frame. - * -- * Returns true if an error was encountered and skb was freed. -+ * Returns true if an error was encountered and skb was freed. 
- **/ - static bool igb_cleanup_headers(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) - { -+ - if (unlikely((igb_test_staterr(rx_desc, - E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { - struct net_device *netdev = rx_ring->netdev; -+ - if (!(netdev->features & NETIF_F_RXALL)) { - dev_kfree_skb_any(skb); - return true; - } - } - -- /* place header in linear portion of buffer */ -- if (skb_is_nonlinear(skb)) -- igb_pull_tail(rx_ring, rx_desc, skb); -- - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; -@@ -6962,56 +8336,15 @@ - return false; - } - --/** -- * igb_process_skb_fields - Populate skb header fields from Rx descriptor -- * @rx_ring: rx descriptor ring packet is being transacted on -- * @rx_desc: pointer to the EOP Rx descriptor -- * @skb: pointer to current skb being populated -- * -- * This function checks the ring, descriptor, and packet information in -- * order to populate the hash, checksum, VLAN, timestamp, protocol, and -- * other fields within the skb. -- **/ --static void igb_process_skb_fields(struct igb_ring *rx_ring, -- union e1000_adv_rx_desc *rx_desc, -- struct sk_buff *skb) --{ -- struct net_device *dev = rx_ring->netdev; -- -- igb_rx_hash(rx_ring, rx_desc, skb); -- -- igb_rx_checksum(rx_ring, rx_desc, skb); -- -- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && -- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) -- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); -- -- if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && -- igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { -- u16 vid; -- -- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && -- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) -- vid = be16_to_cpu(rx_desc->wb.upper.vlan); -- else -- vid = le16_to_cpu(rx_desc->wb.upper.vlan); -- -- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); -- } -- -- skb_record_rx_queue(skb, rx_ring->queue_index); -- -- skb->protocol = eth_type_trans(skb, rx_ring->netdev); --} -- --static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) -+/* igb_clean_rx_irq -- * packet split */ -+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) - { - struct igb_ring *rx_ring = q_vector->rx.ring; - struct sk_buff *skb = rx_ring->skb; - unsigned int total_bytes = 0, total_packets = 0; - u16 cleaned_count = igb_desc_unused(rx_ring); - -- while (likely(total_packets < budget)) { -+ do { - union e1000_adv_rx_desc *rx_desc; - - /* return some buffers to hardware, one at a time is too slow */ -@@ -7025,7 +8358,8 @@ - if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) - break; - -- /* This memory barrier is needed to keep us from reading -+ /* -+ * This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * RXD_STAT_DD bit is set - */ -@@ -7056,31 +8390,89 @@ - /* populate checksum, timestamp, VLAN, and protocol */ - igb_process_skb_fields(rx_ring, rx_desc, skb); - -- napi_gro_receive(&q_vector->napi, skb); -+#ifndef IGB_NO_LRO -+ if (igb_can_lro(rx_ring, rx_desc, skb)) -+ igb_lro_receive(q_vector, skb); -+ else -+#endif -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_receive_skb(q_vector, skb); -+#else -+ napi_gro_receive(&q_vector->napi, skb); -+#endif -+#ifndef NETIF_F_GRO -+ -+ netdev_ring(rx_ring)->last_rx = jiffies; -+#endif - - /* reset skb pointer */ - skb = NULL; - - /* update budget accounting */ - total_packets++; -- } -+ } while (likely(total_packets < budget)); - - /* place incomplete 
frames back on ring for completion */ - rx_ring->skb = skb; - -- u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.packets += total_packets; - rx_ring->rx_stats.bytes += total_bytes; -- u64_stats_update_end(&rx_ring->rx_syncp); - q_vector->rx.total_packets += total_packets; - q_vector->rx.total_bytes += total_bytes; - - if (cleaned_count) - igb_alloc_rx_buffers(rx_ring, cleaned_count); - -- return total_packets < budget; -+#ifndef IGB_NO_LRO -+ igb_lro_flush_all(q_vector); -+ -+#endif /* IGB_NO_LRO */ -+ return (total_packets < budget); -+} -+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ -+ -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, -+ struct igb_rx_buffer *bi) -+{ -+ struct sk_buff *skb = bi->skb; -+ dma_addr_t dma = bi->dma; -+ -+ if (dma) -+ return true; -+ -+ if (likely(!skb)) { -+ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), -+ rx_ring->rx_buffer_len); -+ bi->skb = skb; -+ if (!skb) { -+ rx_ring->rx_stats.alloc_failed++; -+ return false; -+ } -+ -+ /* initialize skb for ring */ -+ skb_record_rx_queue(skb, ring_queue_index(rx_ring)); -+ } -+ -+ dma = dma_map_single(rx_ring->dev, skb->data, -+ rx_ring->rx_buffer_len, DMA_FROM_DEVICE); -+ -+ /* if mapping failed free memory back to system since -+ * there isn't much point in holding memory we can't use -+ */ -+ if (dma_mapping_error(rx_ring->dev, dma)) { -+ dev_kfree_skb_any(skb); -+ bi->skb = NULL; -+ -+ rx_ring->rx_stats.alloc_failed++; -+ return false; -+ } -+ -+ bi->dma = dma; -+ return true; - } - -+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, - struct igb_rx_buffer *bi) - { -@@ -7092,7 +8484,7 @@ - return true; - - /* alloc new page for storage */ -- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); -+ page = alloc_page(GFP_ATOMIC | __GFP_COLD); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_failed++; - return false; -@@ -7101,7 +8493,8 @@ - /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - -- /* if mapping failed free memory back to system since -+ /* -+ * if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { -@@ -7118,9 +8511,10 @@ - return true; - } - -+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - /** -- * igb_alloc_rx_buffers - Replace used receive buffers; packet split -- * @adapter: address of board private structure -+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split -+ * @adapter: address of board private structure - **/ - void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) - { -@@ -7137,13 +8531,22 @@ - i -= rx_ring->count; - - do { -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ if (!igb_alloc_mapped_skb(rx_ring, bi)) -+#else - if (!igb_alloc_mapped_page(rx_ring, bi)) -+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */ - break; - -- /* Refresh the desc even if buffer_addrs didn't change -+ /* -+ * Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ -+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT -+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); -+#else - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); -+#endif - - rx_desc++; - bi++; -@@ -7166,10 +8569,13 @@ - /* record the next descriptor to use */ - rx_ring->next_to_use = i; - -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = i; - -- /* Force memory writes to complete before letting h/w -+#endif -+ /* -+ * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). -@@ -7179,6 +8585,7 @@ - } - } - -+#ifdef SIOCGMIIPHY - /** - * igb_mii_ioctl - - * @netdev: -@@ -7198,17 +8605,20 @@ - data->phy_id = adapter->hw.phy.addr; - break; - case SIOCGMIIREG: -- if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, -- &data->val_out)) -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, -+ &data->val_out)) - return -EIO; - break; - case SIOCSMIIREG: - default: - return -EOPNOTSUPP; - } -- return 0; -+ return E1000_SUCCESS; - } - -+#endif - /** - * igb_ioctl - - * @netdev: -@@ -7218,156 +8628,295 @@ - static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) - { - switch (cmd) { -+#ifdef SIOCGMIIPHY - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return igb_mii_ioctl(netdev, ifr, cmd); -+#endif -+#ifdef HAVE_PTP_1588_CLOCK -+#ifdef SIOCGHWTSTAMP - case SIOCGHWTSTAMP: - return igb_ptp_get_ts_config(netdev, ifr); -+#endif - case SIOCSHWTSTAMP: - return igb_ptp_set_ts_config(netdev, ifr); -+#endif /* HAVE_PTP_1588_CLOCK */ -+#ifdef ETHTOOL_OPS_COMPAT -+ case SIOCETHTOOL: -+ return ethtool_ioctl(ifr); -+#endif - default: - return -EOPNOTSUPP; - } - } - --void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) -+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) - { - struct igb_adapter *adapter = hw->back; - - pci_read_config_word(adapter->pdev, reg, value); - } - --void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) -+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) - { - struct igb_adapter *adapter = hw->back; - - pci_write_config_word(adapter->pdev, reg, *value); - } - --s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) -+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) - { - struct igb_adapter *adapter = hw->back; -+ u16 cap_offset; - -- if (pcie_capability_read_word(adapter->pdev, reg, value)) -+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); -+ if (!cap_offset) - return -E1000_ERR_CONFIG; - -- return 0; -+ pci_read_config_word(adapter->pdev, cap_offset + reg, value); -+ -+ return E1000_SUCCESS; - } - --s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) -+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) - { - struct igb_adapter *adapter = hw->back; -+ u16 cap_offset; - -- if (pcie_capability_write_word(adapter->pdev, reg, *value)) -+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); -+ if (!cap_offset) - return -E1000_ERR_CONFIG; - -- return 0; -+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value); -+ -+ return E1000_SUCCESS; - } - --static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) -+#ifdef HAVE_VLAN_RX_REGISTER -+static void igb_vlan_mode(struct net_device *netdev, struct vlan_group 
*vlgrp) -+#else -+void igb_vlan_mode(struct net_device *netdev, u32 features) -+#endif /* HAVE_VLAN_RX_REGISTER */ - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl, rctl; -- bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); -+ bool enable; -+ int i; -+#ifdef HAVE_VLAN_RX_REGISTER -+ enable = !!vlgrp; -+ igb_irq_disable(adapter); -+ -+ adapter->vlgrp = vlgrp; -+ -+ if (!test_bit(__IGB_DOWN, &adapter->state)) -+ igb_irq_enable(adapter); -+#else -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); -+#else -+ enable = !!(features & NETIF_F_HW_VLAN_RX); -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+#endif /* HAVE_VLAN_RX_REGISTER */ - - if (enable) { - /* enable VLAN tag insert/strip */ -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - ctrl |= E1000_CTRL_VME; -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - - /* Disable CFI check */ -- rctl = rd32(E1000_RCTL); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); - rctl &= ~E1000_RCTL_CFIEN; -- wr32(E1000_RCTL, rctl); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); - } else { - /* disable VLAN tag insert/strip */ -- ctrl = rd32(E1000_CTRL); -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - ctrl &= ~E1000_CTRL_VME; -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); -+ } -+ -+#ifndef CONFIG_IGB_VMDQ_NETDEV -+ for (i = 0; i < adapter->vmdq_pools; i++) { -+ igb_set_vf_vlan_strip(adapter, -+ adapter->vfs_allocated_count + i, -+ enable); -+ } -+ -+#else -+ igb_set_vf_vlan_strip(adapter, -+ adapter->vfs_allocated_count, -+ enable); -+ -+ for (i = 1; i < adapter->vmdq_pools; i++) { -+#ifdef HAVE_VLAN_RX_REGISTER -+ struct igb_vmdq_adapter *vadapter; -+ -+ vadapter = netdev_priv(adapter->vmdq_netdev[i-1]); -+ -+ enable = !!vadapter->vlgrp; -+#else -+ struct net_device *vnetdev; -+ -+ vnetdev = adapter->vmdq_netdev[i-1]; -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX); -+#else -+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX); -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+#endif /* HAVE_VLAN_RX_REGISTER */ -+ igb_set_vf_vlan_strip(adapter, -+ adapter->vfs_allocated_count + i, -+ enable); - } - -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ - igb_rlpml_set(adapter); - } - -+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID -+#ifdef NETIF_F_HW_VLAN_CTAG_RX - static int igb_vlan_rx_add_vid(struct net_device *netdev, -- __be16 proto, u16 vid) -+ __always_unused __be16 proto, u16 vid) -+#else -+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+#else -+static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ - { - struct igb_adapter *adapter = netdev_priv(netdev); -- struct e1000_hw *hw = &adapter->hw; - int pf_id = adapter->vfs_allocated_count; - - /* attempt to add filter to vlvf array */ -- igb_vlvf_set(adapter, vid, true, pf_id); -+ igb_vlvf_set(adapter, vid, TRUE, pf_id); - - /* add the filter since PF can receive vlans w/o entry in vlvf */ -- igb_vfta_set(hw, vid, true); -+ igb_vfta_set(adapter, vid, TRUE); -+#ifndef HAVE_NETDEV_VLAN_FEATURES - -- set_bit(vid, adapter->active_vlans); -+ /* Copy feature flags from netdev to the vlan netdev for this vid. -+ * This allows things like TSO to bubble down to our vlan device. -+ * There is no need to update netdev for vlan 0 (DCB), since it -+ * wouldn't has v_netdev. 
-+ */ -+ if (adapter->vlgrp) { -+ struct vlan_group *vlgrp = adapter->vlgrp; -+ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); - -+ if (v_netdev) { -+ v_netdev->features |= netdev->features; -+ vlan_group_set_device(vlgrp, vid, v_netdev); -+ } -+ } -+#endif /* HAVE_NETDEV_VLAN_FEATURES */ -+#ifndef HAVE_VLAN_RX_REGISTER -+ -+ set_bit(vid, adapter->active_vlans); -+#endif /* HAVE_VLAN_RX_REGISTER */ -+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID - return 0; -+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ - } - -+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID -+#ifdef NETIF_F_HW_VLAN_CTAG_RX - static int igb_vlan_rx_kill_vid(struct net_device *netdev, -- __be16 proto, u16 vid) -+ __always_unused __be16 proto, u16 vid) -+#else -+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+#else -+static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ - { - struct igb_adapter *adapter = netdev_priv(netdev); -- struct e1000_hw *hw = &adapter->hw; - int pf_id = adapter->vfs_allocated_count; - s32 err; - -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_irq_disable(adapter); -+ -+ vlan_group_set_device(adapter->vlgrp, vid, NULL); -+ -+ if (!test_bit(__IGB_DOWN, &adapter->state)) -+ igb_irq_enable(adapter); -+ -+#endif /* HAVE_VLAN_RX_REGISTER */ - /* remove vlan from VLVF table array */ -- err = igb_vlvf_set(adapter, vid, false, pf_id); -+ err = igb_vlvf_set(adapter, vid, FALSE, pf_id); - - /* if vid was not present in VLVF just remove it from table */ - if (err) -- igb_vfta_set(hw, vid, false); -+ igb_vfta_set(adapter, vid, FALSE); -+#ifndef HAVE_VLAN_RX_REGISTER - - clear_bit(vid, adapter->active_vlans); -- -+#endif /* HAVE_VLAN_RX_REGISTER */ -+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID - return 0; -+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ - } - - static void igb_restore_vlan(struct igb_adapter *adapter) - { -+#ifdef HAVE_VLAN_RX_REGISTER -+ igb_vlan_mode(adapter->netdev, adapter->vlgrp); -+ -+ if (adapter->vlgrp) { -+ u16 vid; -+ -+ for (vid = 0; vid < VLAN_N_VID; vid++) { -+ if (!vlan_group_get_device(adapter->vlgrp, vid)) -+ continue; -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+ igb_vlan_rx_add_vid(adapter->netdev, -+ htons(ETH_P_8021Q), vid); -+#else -+ igb_vlan_rx_add_vid(adapter->netdev, vid); -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+ } -+ } -+#else - u16 vid; - - igb_vlan_mode(adapter->netdev, adapter->netdev->features); - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) -- igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); -+#ifdef NETIF_F_HW_VLAN_CTAG_RX -+ igb_vlan_rx_add_vid(adapter->netdev, -+ htons(ETH_P_8021Q), vid); -+#else -+ igb_vlan_rx_add_vid(adapter->netdev, vid); -+#endif /* NETIF_F_HW_VLAN_CTAG_RX */ -+#endif /* HAVE_VLAN_RX_REGISTER */ - } - --int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) -+int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) - { - struct pci_dev *pdev = adapter->pdev; - struct e1000_mac_info *mac = &adapter->hw.mac; - - mac->autoneg = 0; - -- /* Make sure dplx is at most 1 bit and lsb of speed is not set -- * for the switch() below to work -- */ -- if ((spd & 1) || (dplx & ~1)) -- goto err_inval; -- -- /* Fiber NIC's only allow 1000 gbps Full duplex -- * and 100Mbps Full duplex for 100baseFx sfp -+ /* SerDes device's does not support 10Mbps Full/duplex -+ * and 100Mbps Half duplex - */ - if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { -- switch (spd + dplx) { -+ switch (spddplx) { - case SPEED_10 + 
DUPLEX_HALF: - case SPEED_10 + DUPLEX_FULL: - case SPEED_100 + DUPLEX_HALF: -- goto err_inval; -+ dev_err(pci_dev_to_dev(pdev), -+ "Unsupported Speed/Duplex configuration\n"); -+ return -EINVAL; - default: - break; - } - } - -- switch (spd + dplx) { -+ switch (spddplx) { - case SPEED_10 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_10_HALF; - break; -@@ -7386,17 +8935,52 @@ - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - default: -- goto err_inval; -+ dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n"); -+ return -EINVAL; - } - - /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ - adapter->hw.phy.mdix = AUTO_ALL_MODES; - - return 0; -+} - --err_inval: -- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); -- return -EINVAL; -+/* This function should only be called if RTNL lock is held */ -+int igb_setup_queues(struct igb_adapter *adapter) -+{ -+ struct net_device *dev = adapter->netdev; -+ int err; -+ -+ if (adapter->rss_queues == adapter->num_rx_queues) { -+ if (adapter->tss_queues) { -+ if (adapter->tss_queues == adapter->num_tx_queues) -+ return 0; -+ } else if (adapter->vfs_allocated_count || -+ adapter->rss_queues == adapter->num_tx_queues) { -+ return 0; -+ } -+ } -+ -+ /* -+ * Hardware has to reinitialize queues and interrupts to -+ * match the new configuration. Unfortunately, the hardware -+ * is not flexible enough to do this dynamically. -+ */ -+ if (netif_running(dev)) -+ igb_close(dev); -+ -+ igb_clear_interrupt_scheme(adapter); -+ -+ err = igb_init_interrupt_scheme(adapter, true); -+ if (err) { -+ dev_close(dev); -+ return err; -+ } -+ -+ if (netif_running(dev)) -+ err = igb_open(dev); -+ -+ return err; - } - - static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, -@@ -7413,6 +8997,10 @@ - - netif_device_detach(netdev); - -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ if (status & E1000_STATUS_LU) -+ wufc &= ~E1000_WUFC_LNKC; -+ - if (netif_running(netdev)) - __igb_close(netdev, true); - -@@ -7424,37 +9012,31 @@ - return retval; - #endif - -- status = rd32(E1000_STATUS); -- if (status & E1000_STATUS_LU) -- wufc &= ~E1000_WUFC_LNKC; -- - if (wufc) { - igb_setup_rctl(adapter); - igb_set_rx_mode(netdev); - - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & E1000_WUFC_MC) { -- rctl = rd32(E1000_RCTL); -+ rctl = E1000_READ_REG(hw, E1000_RCTL); - rctl |= E1000_RCTL_MPE; -- wr32(E1000_RCTL, rctl); -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); - } - -- ctrl = rd32(E1000_CTRL); -- /* advertise wake from D3Cold */ -- #define E1000_CTRL_ADVD3WUC 0x00100000 -+ ctrl = E1000_READ_REG(hw, E1000_CTRL); - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 - ctrl |= E1000_CTRL_ADVD3WUC; -- wr32(E1000_CTRL, ctrl); -+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl); - - /* Allow time for pending master requests to run */ -- igb_disable_pcie_master(hw); -+ e1000_disable_pcie_master(hw); - -- wr32(E1000_WUC, E1000_WUC_PME_EN); -- wr32(E1000_WUFC, wufc); -+ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN); -+ E1000_WRITE_REG(hw, E1000_WUFC, wufc); - } else { -- wr32(E1000_WUC, 0); -- wr32(E1000_WUFC, 0); -+ E1000_WRITE_REG(hw, E1000_WUC, 0); -+ E1000_WRITE_REG(hw, E1000_WUFC, 0); - } - - *enable_wake = wufc || adapter->en_mng_pt; -@@ -7474,12 +9056,17 @@ - } - - #ifdef CONFIG_PM --#ifdef CONFIG_PM_SLEEP -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS - static int igb_suspend(struct device *dev) -+#else -+static int igb_suspend(struct pci_dev *pdev, pm_message_t state) -+#endif 
/* HAVE_SYSTEM_SLEEP_PM_OPS */ - { -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS -+ struct pci_dev *pdev = to_pci_dev(dev); -+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ - int retval; - bool wake; -- struct pci_dev *pdev = to_pci_dev(dev); - - retval = __igb_shutdown(pdev, &wake, 0); - if (retval) -@@ -7494,11 +9081,16 @@ - - return 0; - } --#endif /* CONFIG_PM_SLEEP */ - -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS - static int igb_resume(struct device *dev) -+#else -+static int igb_resume(struct pci_dev *pdev) -+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ - { -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS - struct pci_dev *pdev = to_pci_dev(dev); -+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; -@@ -7510,7 +9102,7 @@ - - err = pci_enable_device_mem(pdev); - if (err) { -- dev_err(&pdev->dev, -+ dev_err(pci_dev_to_dev(pdev), - "igb: Cannot enable PCI device from suspend\n"); - return err; - } -@@ -7520,18 +9112,18 @@ - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (igb_init_interrupt_scheme(adapter, true)) { -- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); -+ dev_err(pci_dev_to_dev(pdev), -+ "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - - igb_reset(adapter); - -- /* let the f/w know that the h/w is now under the control of the -- * driver. -+ /* let the f/w know that the h/w is now under the control of the driver. - */ - igb_get_hw_control(adapter); - -- wr32(E1000_WUS, ~0); -+ E1000_WRITE_REG(hw, E1000_WUS, ~0); - - if (netdev->flags & IFF_UP) { - rtnl_lock(); -@@ -7542,10 +9134,12 @@ - } - - netif_device_attach(netdev); -+ - return 0; - } - - #ifdef CONFIG_PM_RUNTIME -+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS - static int igb_runtime_idle(struct device *dev) - { - struct pci_dev *pdev = to_pci_dev(dev); -@@ -7582,91 +9176,51 @@ - { - return igb_resume(dev); - } -+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */ - #endif /* CONFIG_PM_RUNTIME */ --#endif -+#endif /* CONFIG_PM */ - --static void igb_shutdown(struct pci_dev *pdev) -+#ifdef USE_REBOOT_NOTIFIER -+/* only want to do this for 2.4 kernels? 
*/ -+static int igb_notify_reboot(struct notifier_block *nb, unsigned long event, -+ void *p) - { -+ struct pci_dev *pdev = NULL; - bool wake; - -- __igb_shutdown(pdev, &wake, 0); -- -- if (system_state == SYSTEM_POWER_OFF) { -- pci_wake_from_d3(pdev, wake); -- pci_set_power_state(pdev, PCI_D3hot); -+ switch (event) { -+ case SYS_DOWN: -+ case SYS_HALT: -+ case SYS_POWER_OFF: -+ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { -+ if (pci_dev_driver(pdev) == &igb_driver) { -+ __igb_shutdown(pdev, &wake, 0); -+ if (event == SYS_POWER_OFF) { -+ pci_wake_from_d3(pdev, wake); -+ pci_set_power_state(pdev, PCI_D3hot); -+ } -+ } -+ } - } -+ return NOTIFY_DONE; - } -- --#ifdef CONFIG_PCI_IOV --static int igb_sriov_reinit(struct pci_dev *dev) -+#else -+static void igb_shutdown(struct pci_dev *pdev) - { -- struct net_device *netdev = pci_get_drvdata(dev); -- struct igb_adapter *adapter = netdev_priv(netdev); -- struct pci_dev *pdev = adapter->pdev; -+ bool wake = false; - -- rtnl_lock(); -- -- if (netif_running(netdev)) -- igb_close(netdev); -- else -- igb_reset(adapter); -- -- igb_clear_interrupt_scheme(adapter); -- -- igb_init_queue_configuration(adapter); -+ __igb_shutdown(pdev, &wake, 0); - -- if (igb_init_interrupt_scheme(adapter, true)) { -- dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); -- return -ENOMEM; -+ if (system_state == SYSTEM_POWER_OFF) { -+ pci_wake_from_d3(pdev, wake); -+ pci_set_power_state(pdev, PCI_D3hot); - } -- -- if (netif_running(netdev)) -- igb_open(netdev); -- -- rtnl_unlock(); -- -- return 0; --} -- --static int igb_pci_disable_sriov(struct pci_dev *dev) --{ -- int err = igb_disable_sriov(dev); -- -- if (!err) -- err = igb_sriov_reinit(dev); -- -- return err; --} -- --static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) --{ -- int err = igb_enable_sriov(dev, num_vfs); -- -- if (err) -- goto out; -- -- err = igb_sriov_reinit(dev); -- if (!err) -- return num_vfs; -- --out: -- return err; --} -- --#endif --static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) --{ --#ifdef CONFIG_PCI_IOV -- if (num_vfs == 0) -- return igb_pci_disable_sriov(dev); -- else -- return igb_pci_enable_sriov(dev, num_vfs); --#endif -- return 0; - } -+#endif /* USE_REBOOT_NOTIFIER */ - - #ifdef CONFIG_NET_POLL_CONTROLLER --/* Polling 'interrupt' - used by things like netconsole to send skbs -+/* -+ * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -@@ -7679,8 +9233,8 @@ - - for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; -- if (adapter->flags & IGB_FLAG_HAS_MSIX) -- wr32(E1000_EIMC, q_vector->eims_value); -+ if (adapter->msix_entries) -+ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value); - else - igb_irq_disable(adapter); - napi_schedule(&q_vector->napi); -@@ -7688,20 +9242,98 @@ - } - #endif /* CONFIG_NET_POLL_CONTROLLER */ - -+#ifdef HAVE_PCI_ERS -+#define E1000_DEV_ID_82576_VF 0x10CA - /** -- * igb_io_error_detected - called when PCI error is detected -- * @pdev: Pointer to PCI device -- * @state: The current pci connection state -+ * igb_io_error_detected - called when PCI error is detected -+ * @pdev: Pointer to PCI device -+ * @state: The current pci connection state - * -- * This function is called after a PCI bus error affecting -- * this device has been detected. -- **/ -+ * This function is called after a PCI bus error affecting -+ * this device has been detected. 
-+ */ - static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) - { - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - -+#ifdef CONFIG_PCI_IOV -+ struct pci_dev *bdev, *vfdev; -+ u32 dw0, dw1, dw2, dw3; -+ int vf, pos; -+ u16 req_id, pf_func; -+ -+ if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA)) -+ goto skip_bad_vf_detection; -+ -+ bdev = pdev->bus->self; -+ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) -+ bdev = bdev->bus->self; -+ -+ if (!bdev) -+ goto skip_bad_vf_detection; -+ -+ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); -+ if (!pos) -+ goto skip_bad_vf_detection; -+ -+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); -+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); -+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); -+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); -+ -+ req_id = dw1 >> 16; -+ /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */ -+ if (!(req_id & 0x0080)) -+ goto skip_bad_vf_detection; -+ -+ pf_func = req_id & 0x01; -+ if ((pf_func & 1) == (pdev->devfn & 1)) { -+ -+ vf = (req_id & 0x7F) >> 1; -+ dev_err(pci_dev_to_dev(pdev), -+ "VF %d has caused a PCIe error\n", vf); -+ dev_err(pci_dev_to_dev(pdev), -+ "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2:\n%8.8x\tdw3: %8.8x\n", -+ dw0, dw1, dw2, dw3); -+ -+ /* Find the pci device of the offending VF */ -+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, -+ E1000_DEV_ID_82576_VF, NULL); -+ while (vfdev) { -+ if (vfdev->devfn == (req_id & 0xFF)) -+ break; -+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, -+ E1000_DEV_ID_82576_VF, vfdev); -+ } -+ /* -+ * There's a slim chance the VF could have been hot plugged, -+ * so if it is no longer present we don't need to issue the -+ * VFLR. Just clean up the AER in that case. -+ */ -+ if (vfdev) { -+ dev_err(pci_dev_to_dev(pdev), -+ "Issuing VFLR to VF %d\n", vf); -+ pci_write_config_dword(vfdev, 0xA8, 0x00008000); -+ } -+ -+ pci_cleanup_aer_uncorrect_error_status(pdev); -+ } -+ -+ /* -+ * Even though the error may have occurred on the other port -+ * we still need to increment the vf error reference count for -+ * both ports because the I/O resume function will be called -+ * for both of them. -+ */ -+ adapter->vferr_refcount++; -+ -+ return PCI_ERS_RESULT_RECOVERED; -+ -+skip_bad_vf_detection: -+#endif /* CONFIG_PCI_IOV */ -+ - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) -@@ -7716,22 +9348,21 @@ - } - - /** -- * igb_io_slot_reset - called after the pci bus has been reset. -- * @pdev: Pointer to PCI device -+ * igb_io_slot_reset - called after the pci bus has been reset. -+ * @pdev: Pointer to PCI device - * -- * Restart the card from scratch, as if from a cold-boot. Implementation -- * resembles the first-half of the igb_resume routine. -- **/ -+ * Restart the card from scratch, as if from a cold-boot. Implementation -+ * resembles the first-half of the igb_resume routine. 
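/*
 * Context for the igb_io_* callbacks in this hunk: during AER recovery the
 * PCI core invokes them in the order error_detected -> slot_reset -> resume.
 * A minimal sketch of how such handlers are typically registered (field
 * names from <linux/pci.h>; the real igb registration lives outside this
 * hunk and may differ):
 */
static const struct pci_error_handlers igb_err_handler_sketch = {
	.error_detected = igb_io_error_detected,
	.slot_reset     = igb_io_slot_reset,
	.resume         = igb_io_resume,
};

static struct pci_driver igb_driver_sketch = {
	.name        = "igb",
	/* .id_table, .probe, .remove, ... */
	.err_handler = &igb_err_handler_sketch,
};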
-+ */ - static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) - { - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - pci_ers_result_t result; -- int err; - - if (pci_enable_device_mem(pdev)) { -- dev_err(&pdev->dev, -+ dev_err(pci_dev_to_dev(pdev), - "Cannot re-enable PCI device after reset.\n"); - result = PCI_ERS_RESULT_DISCONNECT; - } else { -@@ -7742,77 +9373,91 @@ - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - -- igb_reset(adapter); -- wr32(E1000_WUS, ~0); -+ schedule_work(&adapter->reset_task); -+ E1000_WRITE_REG(hw, E1000_WUS, ~0); - result = PCI_ERS_RESULT_RECOVERED; - } - -- err = pci_cleanup_aer_uncorrect_error_status(pdev); -- if (err) { -- dev_err(&pdev->dev, -- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", -- err); -- /* non-fatal, continue */ -- } -+ pci_cleanup_aer_uncorrect_error_status(pdev); - - return result; - } - - /** -- * igb_io_resume - called when traffic can start flowing again. -- * @pdev: Pointer to PCI device -+ * igb_io_resume - called when traffic can start flowing again. -+ * @pdev: Pointer to PCI device - * -- * This callback is called when the error recovery driver tells us that -- * its OK to resume normal operation. Implementation resembles the -- * second-half of the igb_resume routine. -+ * This callback is called when the error recovery driver tells us that -+ * its OK to resume normal operation. Implementation resembles the -+ * second-half of the igb_resume routine. - */ - static void igb_io_resume(struct pci_dev *pdev) - { - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - -+ if (adapter->vferr_refcount) { -+ dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n"); -+ adapter->vferr_refcount--; -+ return; -+ } -+ - if (netif_running(netdev)) { - if (igb_up(adapter)) { -- dev_err(&pdev->dev, "igb_up failed after reset\n"); -+ dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n"); - return; - } - } - - netif_device_attach(netdev); - -- /* let the f/w know that the h/w is now under the control of the -- * driver. -+ /* let the f/w know that the h/w is now under the control of the driver. - */ - igb_get_hw_control(adapter); - } - --static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, -- u8 qsel) -+#endif /* HAVE_PCI_ERS */ -+ -+int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) - { -- u32 rar_low, rar_high; - struct e1000_hw *hw = &adapter->hw; -+ int i; - -- /* HW expects these in little endian so we reverse the byte order -- * from network order (big endian) to little endian -- */ -- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | -- ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); -- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); -- -- /* Indicate to hardware the Address is Valid. 
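/*
 * Worked example of the RAL/RAH packing above, using the made-up station
 * address 00:1b:21:aa:bb:cc (network order addr[0]..addr[5]):
 *
 *   rar_low  = 0x00 | 0x1b << 8 | 0x21 << 16 | 0xaa << 24 = 0xaa211b00
 *   rar_high = 0xbb | 0xcc << 8                           = 0x0000ccbb
 *   rar_high |= E1000_RAH_AV;    marks the receive-address entry valid
 */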
*/ -- rar_high |= E1000_RAH_AV; -- -- if (hw->mac.type == e1000_82575) -- rar_high |= E1000_RAH_POOL_1 * qsel; -- else -- rar_high |= E1000_RAH_POOL_1 << qsel; -+ if (is_zero_ether_addr(addr)) -+ return 0; - -- wr32(E1000_RAL(index), rar_low); -- wrfl(); -- wr32(E1000_RAH(index), rar_high); -- wrfl(); -+ for (i = 0; i < hw->mac.rar_entry_count; i++) { -+ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) -+ continue; -+ adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED | -+ IGB_MAC_STATE_IN_USE); -+ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); -+ adapter->mac_table[i].queue = queue; -+ igb_sync_mac_table(adapter); -+ return 0; -+ } -+ return -ENOMEM; - } -+int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue) -+{ -+ /* search table for addr, if found, set to 0 and sync */ -+ int i; -+ struct e1000_hw *hw = &adapter->hw; - -+ if (is_zero_ether_addr(addr)) -+ return 0; -+ for (i = 0; i < hw->mac.rar_entry_count; i++) { -+ if (!ether_addr_equal(addr, adapter->mac_table[i].addr) && -+ adapter->mac_table[i].queue == queue) { -+ adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED; -+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN); -+ adapter->mac_table[i].queue = 0; -+ igb_sync_mac_table(adapter); -+ return 0; -+ } -+ } -+ return -ENOMEM; -+} - static int igb_set_vf_mac(struct igb_adapter *adapter, - int vf, unsigned char *mac_addr) - { -@@ -7829,15 +9474,17 @@ - return 0; - } - -+#ifdef IFLA_VF_MAX - static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) - { - struct igb_adapter *adapter = netdev_priv(netdev); -+ - if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) - return -EINVAL; - adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, -- "Reload the VF driver to make this change effective."); -+ "Reload the VF driver to make this change effective.\n"); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF MAC address has been set, but the PF device is not up.\n"); -@@ -7854,13 +9501,15 @@ - return 100; - case SPEED_1000: - return 1000; -+ case SPEED_2500: -+ return 2500; - default: - return 0; - } - } - - static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, -- int link_speed) -+ int link_speed) - { - int rf_dec, rf_int; - u32 bcnrc_val; -@@ -7869,23 +9518,23 @@ - /* Calculate the rate factor values to set */ - rf_int = link_speed / tx_rate; - rf_dec = (link_speed - (rf_int * tx_rate)); -- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / -- tx_rate; -+ rf_dec = (rf_dec * (1<vf_rate_link_speed == 0) || -- (adapter->hw.mac.type != e1000_82576)) -+ (adapter->hw.mac.type != e1000_82576)) - return; - - actual_link_speed = igb_link_mbps(adapter->link_speed); -@@ -7903,7 +9552,7 @@ - reset_rate = true; - adapter->vf_rate_link_speed = 0; - dev_info(&adapter->pdev->dev, -- "Link speed has been changed. VF Transmit rate is disabled\n"); -+ "Link speed has been changed. 
VF Transmit rate is disabled\n"); - } - - for (i = 0; i < adapter->vfs_allocated_count; i++) { -@@ -7911,13 +9560,16 @@ - adapter->vf_data[i].tx_rate = 0; - - igb_set_vf_rate_limit(&adapter->hw, i, -- adapter->vf_data[i].tx_rate, -- actual_link_speed); -+ adapter->vf_data[i].tx_rate, actual_link_speed); - } - } - --static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, -- int min_tx_rate, int max_tx_rate) -+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, -+ int max_tx_rate) -+#else -+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) -+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; -@@ -7926,105 +9578,137 @@ - if (hw->mac.type != e1000_82576) - return -EOPNOTSUPP; - -- if (min_tx_rate) -- return -EINVAL; -- - actual_link_speed = igb_link_mbps(adapter->link_speed); - if ((vf >= adapter->vfs_allocated_count) || -- (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || -- (max_tx_rate < 0) || -- (max_tx_rate > actual_link_speed)) -+ (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) || -+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -+ (max_tx_rate < 0) || (max_tx_rate > actual_link_speed)) -+#else -+ (tx_rate < 0) || (tx_rate > actual_link_speed)) -+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - return -EINVAL; - - adapter->vf_rate_link_speed = actual_link_speed; -+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; - igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); -+#else -+ adapter->vf_data[vf].tx_rate = (u16)tx_rate; -+ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); -+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - - return 0; - } - --static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, -- bool setting) --{ -- struct igb_adapter *adapter = netdev_priv(netdev); -- struct e1000_hw *hw = &adapter->hw; -- u32 reg_val, reg_offset; -- -- if (!adapter->vfs_allocated_count) -- return -EOPNOTSUPP; -- -- if (vf >= adapter->vfs_allocated_count) -- return -EINVAL; -- -- reg_offset = (hw->mac.type == e1000_82576) ? 
E1000_DTXSWC : E1000_TXSWC; -- reg_val = rd32(reg_offset); -- if (setting) -- reg_val |= ((1 << vf) | -- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); -- else -- reg_val &= ~((1 << vf) | -- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); -- wr32(reg_offset, reg_val); -- -- adapter->vf_data[vf].spoofchk_enabled = setting; -- return 0; --} -- - static int igb_ndo_get_vf_config(struct net_device *netdev, - int vf, struct ifla_vf_info *ivi) - { - struct igb_adapter *adapter = netdev_priv(netdev); -+ - if (vf >= adapter->vfs_allocated_count) - return -EINVAL; - ivi->vf = vf; - memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); -+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE - ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; - ivi->min_tx_rate = 0; -+#else -+ ivi->tx_rate = adapter->vf_data[vf].tx_rate; -+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ - ivi->vlan = adapter->vf_data[vf].pf_vlan; - ivi->qos = adapter->vf_data[vf].pf_qos; -+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE - ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; -+#endif - return 0; - } -- -+#endif - static void igb_vmm_control(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; -+ int count; - u32 reg; - - switch (hw->mac.type) { - case e1000_82575: -- case e1000_i210: -- case e1000_i211: -- case e1000_i354: - default: - /* replication is not supported for 82575 */ - return; - case e1000_82576: - /* notify HW that the MAC is adding vlan tags */ -- reg = rd32(E1000_DTXCTL); -- reg |= E1000_DTXCTL_VLAN_ADDED; -- wr32(E1000_DTXCTL, reg); -+ reg = E1000_READ_REG(hw, E1000_DTXCTL); -+ reg |= (E1000_DTXCTL_VLAN_ADDED | -+ E1000_DTXCTL_SPOOF_INT); -+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg); - /* Fall through */ - case e1000_82580: - /* enable replication vlan tag stripping */ -- reg = rd32(E1000_RPLOLR); -+ reg = E1000_READ_REG(hw, E1000_RPLOLR); - reg |= E1000_RPLOLR_STRVLAN; -- wr32(E1000_RPLOLR, reg); -+ E1000_WRITE_REG(hw, E1000_RPLOLR, reg); - /* Fall through */ - case e1000_i350: -+ case e1000_i354: - /* none of the above registers are supported by i350 */ - break; - } - -- if (adapter->vfs_allocated_count) { -- igb_vmdq_set_loopback_pf(hw, true); -- igb_vmdq_set_replication_pf(hw, true); -- igb_vmdq_set_anti_spoofing_pf(hw, true, -- adapter->vfs_allocated_count); -- } else { -- igb_vmdq_set_loopback_pf(hw, false); -- igb_vmdq_set_replication_pf(hw, false); -- } -+ /* Enable Malicious Driver Detection */ -+ if ((adapter->vfs_allocated_count) && -+ (adapter->mdd)) { -+ if (hw->mac.type == e1000_i350) -+ igb_enable_mdd(adapter); -+ } -+ -+ /* enable replication and loopback support */ -+ count = adapter->vfs_allocated_count || adapter->vmdq_pools; -+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count) -+ e1000_vmdq_set_loopback_pf(hw, 1); -+ e1000_vmdq_set_anti_spoofing_pf(hw, -+ adapter->vfs_allocated_count || adapter->vmdq_pools, -+ adapter->vfs_allocated_count); -+ e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count || -+ adapter->vmdq_pools); -+} -+ -+static void igb_init_fw(struct igb_adapter *adapter) -+{ -+ struct e1000_fw_drv_info fw_cmd; -+ struct e1000_hw *hw = &adapter->hw; -+ int i; -+ u16 mask; -+ -+ if (hw->mac.type == e1000_i210) -+ mask = E1000_SWFW_EEP_SM; -+ else -+ mask = E1000_SWFW_PHY0_SM; -+ /* i211 parts do not support this feature */ -+ if (hw->mac.type == e1000_i211) -+ hw->mac.arc_subsystem_valid = false; -+ -+ if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) { -+ for (i = 0; i <= FW_MAX_RETRIES; i++) { -+ E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI); -+ 
fw_cmd.hdr.cmd = FW_CMD_DRV_INFO; -+ fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN; -+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED; -+ fw_cmd.port_num = hw->bus.func; -+ fw_cmd.drv_version = FW_FAMILY_DRV_VER; -+ fw_cmd.hdr.checksum = 0; -+ fw_cmd.hdr.checksum = -+ e1000_calculate_checksum((u8 *)&fw_cmd, -+ (FW_HDR_LEN + -+ fw_cmd.hdr.buf_len)); -+ e1000_host_interface_command(hw, (u8 *)&fw_cmd, -+ sizeof(fw_cmd)); -+ if (fw_cmd.hdr.cmd_or_resp.ret_status -+ == FW_STATUS_SUCCESS) -+ break; -+ } -+ } else -+ dev_warn(pci_dev_to_dev(adapter->pdev), -+ "Unable to get semaphore, firmware init failed.\n"); -+ hw->mac.ops.release_swfw_sync(hw, mask); - } - - static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) -@@ -8032,34 +9716,40 @@ - struct e1000_hw *hw = &adapter->hw; - u32 dmac_thr; - u16 hwm; -+ u32 status; -+ -+ if (hw->mac.type == e1000_i211) -+ return; - - if (hw->mac.type > e1000_82580) { -- if (adapter->flags & IGB_FLAG_DMAC) { -+ if (adapter->dmac != IGB_DMAC_DISABLE) { - u32 reg; - -- /* force threshold to 0. */ -- wr32(E1000_DMCTXTH, 0); -+ /* force threshold to 0. */ -+ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); - -- /* DMA Coalescing high water mark needs to be greater -+ /* -+ * DMA Coalescing high water mark needs to be greater - * than the Rx threshold. Set hwm to PBA - max frame - * size in 16B units, capping it at PBA - 6KB. - */ - hwm = 64 * pba - adapter->max_frame_size / 16; - if (hwm < 64 * (pba - 6)) - hwm = 64 * (pba - 6); -- reg = rd32(E1000_FCRTC); -+ reg = E1000_READ_REG(hw, E1000_FCRTC); - reg &= ~E1000_FCRTC_RTH_COAL_MASK; - reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) - & E1000_FCRTC_RTH_COAL_MASK); -- wr32(E1000_FCRTC, reg); -+ E1000_WRITE_REG(hw, E1000_FCRTC, reg); - -- /* Set the DMA Coalescing Rx threshold to PBA - 2 * max -+ /* -+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max - * frame size, capping it at PBA - 10KB. 
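/*
 * Worked example of the two watermark formulas above, with illustrative
 * values only (pba = 34 KB of Rx packet buffer, max_frame_size = 1522):
 *
 *   hwm      = 64 * 34 - 1522 / 16  = 2176 - 95 = 2081   (16-byte units)
 *              lower bound 64 * (34 - 6) = 1792, so 2081 is kept
 *   dmac_thr = 34 - 1522 / 512      = 34 - 2    = 32     (KB, ~PBA - 2 frames)
 *              lower bound 34 - 10 = 24, so 32 is kept
 */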
- */ - dmac_thr = pba - adapter->max_frame_size / 512; - if (dmac_thr < pba - 10) - dmac_thr = pba - 10; -- reg = rd32(E1000_DMACR); -+ reg = E1000_READ_REG(hw, E1000_DMACR); - reg &= ~E1000_DMACR_DMACTHR_MASK; - reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) - & E1000_DMACR_DMACTHR_MASK); -@@ -8067,47 +9757,84 @@ - /* transition to L0x or L1 if available..*/ - reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); - -- /* watchdog timer= +-1000 usec in 32usec intervals */ -- reg |= (1000 >> 5); -+ /* Check if status is 2.5Gb backplane connection -+ * before configuration of watchdog timer, which is -+ * in msec values in 12.8usec intervals -+ * watchdog timer= msec values in 32usec intervals -+ * for non 2.5Gb connection -+ */ -+ if (hw->mac.type == e1000_i354) { -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ if ((status & E1000_STATUS_2P5_SKU) && -+ (!(status & E1000_STATUS_2P5_SKU_OVER))) -+ reg |= ((adapter->dmac * 5) >> 6); -+ else -+ reg |= ((adapter->dmac) >> 5); -+ } else { -+ reg |= ((adapter->dmac) >> 5); -+ } - -- /* Disable BMC-to-OS Watchdog Enable */ -+ /* -+ * Disable BMC-to-OS Watchdog enable -+ * on devices that support OS-to-BMC -+ */ - if (hw->mac.type != e1000_i354) - reg &= ~E1000_DMACR_DC_BMC2OSW_EN; -+ E1000_WRITE_REG(hw, E1000_DMACR, reg); - -- wr32(E1000_DMACR, reg); -+ /* no lower threshold to disable coalescing -+ * (smart fifb)-UTRESH=0 -+ */ -+ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); - -- /* no lower threshold to disable -- * coalescing(smart fifb)-UTRESH=0 -+ /* This sets the time to wait before requesting -+ * transition to low power state to number of usecs -+ * needed to receive 1 512 byte frame at gigabit -+ * line rate. On i350 device, time to make transition -+ * to Lx state is delayed by 4 usec with flush disable -+ * bit set to avoid losing mailbox interrupts - */ -- wr32(E1000_DMCRTRH, 0); -+ reg = E1000_READ_REG(hw, E1000_DMCTLX); -+ if (hw->mac.type == e1000_i350) -+ reg |= IGB_DMCTLX_DCFLUSH_DIS; - -- reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); -+ /* in 2.5Gb connection, TTLX unit is 0.4 usec -+ * which is 0x4*2 = 0xA. 
But delay is still 4 usec -+ */ -+ if (hw->mac.type == e1000_i354) { -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ if ((status & E1000_STATUS_2P5_SKU) && -+ (!(status & E1000_STATUS_2P5_SKU_OVER))) -+ reg |= 0xA; -+ else -+ reg |= 0x4; -+ } else { -+ reg |= 0x4; -+ } - -- wr32(E1000_DMCTLX, reg); -+ E1000_WRITE_REG(hw, E1000_DMCTLX, reg); - -- /* free space in tx packet buffer to wake from -- * DMA coal -- */ -- wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - -- (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); -+ /* free space in tx pkt buffer to wake from DMA coal */ -+ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - -+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) -+ >> 6); - -- /* make low power state decision controlled -- * by DMA coal -- */ -- reg = rd32(E1000_PCIEMISC); -+ /* low power state decision controlled by DMA coal */ -+ reg = E1000_READ_REG(hw, E1000_PCIEMISC); - reg &= ~E1000_PCIEMISC_LX_DECISION; -- wr32(E1000_PCIEMISC, reg); -+ E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); - } /* endif adapter->dmac is not disabled */ - } else if (hw->mac.type == e1000_82580) { -- u32 reg = rd32(E1000_PCIEMISC); -+ u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); - -- wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); -- wr32(E1000_DMACR, 0); -+ E1000_WRITE_REG(hw, E1000_PCIEMISC, -+ reg & ~E1000_PCIEMISC_LX_DECISION); -+ E1000_WRITE_REG(hw, E1000_DMACR, 0); - } - } - --/** -- * igb_read_i2c_byte - Reads 8 bit word over I2C -+#ifdef HAVE_I2C_SUPPORT -+/* igb_read_i2c_byte - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @dev_addr: device address -@@ -8115,9 +9842,9 @@ - * - * Performs byte read operation over I2C interface at - * a specified device address. -- **/ -+ */ - s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, -- u8 dev_addr, u8 *data) -+ u8 dev_addr, u8 *data) - { - struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = adapter->i2c_client; -@@ -8129,7 +9856,8 @@ - - swfw_mask = E1000_SWFW_PHY0_SM; - -- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) -+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) -+ != E1000_SUCCESS) - return E1000_ERR_SWFW_SYNC; - - status = i2c_smbus_read_byte_data(this_client, byte_offset); -@@ -8139,12 +9867,11 @@ - return E1000_ERR_I2C; - else { - *data = status; -- return 0; -+ return E1000_SUCCESS; - } - } - --/** -- * igb_write_i2c_byte - Writes 8 bit word over I2C -+/* igb_write_i2c_byte - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @dev_addr: device address -@@ -8152,9 +9879,9 @@ - * - * Performs byte write operation over I2C interface at - * a specified device address. 
-- **/ -+ */ - s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, -- u8 dev_addr, u8 data) -+ u8 dev_addr, u8 data) - { - struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = adapter->i2c_client; -@@ -8164,7 +9891,7 @@ - if (!this_client) - return E1000_ERR_I2C; - -- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) -+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) - return E1000_ERR_SWFW_SYNC; - status = i2c_smbus_write_byte_data(this_client, byte_offset, data); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); -@@ -8172,9 +9899,9 @@ - if (status) - return E1000_ERR_I2C; - else -- return 0; -- -+ return E1000_SUCCESS; - } -+#endif /* HAVE_I2C_SUPPORT */ - - int igb_reinit_queues(struct igb_adapter *adapter) - { -@@ -8197,4 +9924,5 @@ - - return err; - } -+ - /* igb_main.c */ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_param.c b/drivers/net/ethernet/intel/igb/igb_param.c ---- a/drivers/net/ethernet/intel/igb/igb_param.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_param.c 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,872 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+ -+#include -+ -+#include "igb.h" -+ -+/* This is the only thing that needs to be changed to adjust the -+ * maximum number of ports that the driver can manage. -+ */ -+ -+#define IGB_MAX_NIC 32 -+ -+#define OPTION_UNSET -1 -+#define OPTION_DISABLED 0 -+#define OPTION_ENABLED 1 -+#define MAX_NUM_LIST_OPTS 15 -+ -+/* All parameters are treated the same, as an integer array of values. -+ * This macro just reduces the need to repeat the same declaration code -+ * over and over (plus this helps to avoid typo bugs). -+ */ -+ -+#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET } -+#ifndef module_param_array -+/* Module Parameters are always initialized to -1, so that the driver -+ * can tell the difference between no user specified value or the -+ * user asking for the default value. -+ * The true default values are loaded in when igb_check_options is called. -+ * -+ * This is a GCC extension to ANSI C. -+ * See the item "Labeled Elements in Initializers" in the section -+ * "Extensions to the C Language Family" of the GCC documentation. 
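/*
 * The IGB_PARAM_INIT initializer above relies on the GCC ranged designated
 * initializer extension referenced in this comment.  A minimal illustration
 * (demonstration values only):
 *
 *   int x[5] = { [0 ... 4] = -1 };    is equivalent to  { -1, -1, -1, -1, -1 }
 *
 * so every per-port parameter starts out as OPTION_UNSET (-1) until the user
 * supplies a value on the module command line.
 */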
-+ */ -+ -+#define IGB_PARAM(X, desc) \ -+ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ -+ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \ -+ MODULE_PARM_DESC(X, desc); -+#else -+#define IGB_PARAM(X, desc) \ -+ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \ -+ static unsigned int num_##X; \ -+ module_param_array_named(X, X, int, &num_##X, 0); \ -+ MODULE_PARM_DESC(X, desc); -+#endif -+ -+/* Interrupt Throttle Rate (interrupts/sec) -+ * -+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) -+ */ -+IGB_PARAM(InterruptThrottleRate, -+ "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive"); -+#define DEFAULT_ITR 3 -+#define MAX_ITR 100000 -+/* #define MIN_ITR 120 */ -+#define MIN_ITR 0 -+/* IntMode (Interrupt Mode) -+ * -+ * Valid Range: 0 - 2 -+ * -+ * Default Value: 2 (MSI-X) -+ */ -+IGB_PARAM(IntMode, -+ "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2"); -+#define MAX_INTMODE IGB_INT_MODE_MSIX -+#define MIN_INTMODE IGB_INT_MODE_LEGACY -+ -+IGB_PARAM(Node, "set the starting node to allocate memory on, default -1"); -+ -+/* LLIPort (Low Latency Interrupt TCP Port) -+ * -+ * Valid Range: 0 - 65535 -+ * -+ * Default Value: 0 (disabled) -+ */ -+IGB_PARAM(LLIPort, -+ "Low Latency Interrupt TCP Port (0-65535), default 0=off"); -+ -+#define DEFAULT_LLIPORT 0 -+#define MAX_LLIPORT 0xFFFF -+#define MIN_LLIPORT 0 -+ -+/* LLIPush (Low Latency Interrupt on TCP Push flag) -+ * -+ * Valid Range: 0, 1 -+ * -+ * Default Value: 0 (disabled) -+ */ -+IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off"); -+ -+#define DEFAULT_LLIPUSH 0 -+#define MAX_LLIPUSH 1 -+#define MIN_LLIPUSH 0 -+ -+/* LLISize (Low Latency Interrupt on Packet Size) -+ * -+ * Valid Range: 0 - 1500 -+ * -+ * Default Value: 0 (disabled) -+ */ -+IGB_PARAM(LLISize, -+ "Low Latency Interrupt on Packet Size (0-1500), default 0=off"); -+ -+#define DEFAULT_LLISIZE 0 -+#define MAX_LLISIZE 1500 -+#define MIN_LLISIZE 0 -+ -+/* RSS (Enable RSS multiqueue receive) -+ * -+ * Valid Range: 0 - 8 -+ * -+ * Default Value: 1 -+ */ -+IGB_PARAM(RSS, -+ "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus"); -+ -+#define DEFAULT_RSS 1 -+#define MAX_RSS 8 -+#define MIN_RSS 0 -+ -+/* VMDQ (Enable VMDq multiqueue receive) -+ * -+ * Valid Range: 0 - 8 -+ * -+ * Default Value: 0 -+ */ -+IGB_PARAM(VMDQ, -+ "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0"); -+ -+#define DEFAULT_VMDQ 0 -+#define MAX_VMDQ MAX_RSS -+#define MIN_VMDQ 0 -+ -+/* max_vfs (Enable SR-IOV VF devices) -+ * -+ * Valid Range: 0 - 7 -+ * -+ * Default Value: 0 -+ */ -+IGB_PARAM(max_vfs, -+ "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0"); -+ -+#define DEFAULT_SRIOV 0 -+#define MAX_SRIOV 7 -+#define MIN_SRIOV 0 -+ -+/* MDD (Enable Malicious Driver Detection) -+ * -+ * Only available when SR-IOV is enabled - max_vfs is greater than 0 -+ * -+ * Valid Range: 0, 1 -+ * -+ * Default Value: 1 -+ */ -+IGB_PARAM(MDD, -+ "Malicious Driver Detection (0/1), default 1 = enabled. Only available when max_vfs is greater than 0"); -+ -+#ifdef DEBUG -+ -+/* Disable Hardware Reset on Tx Hang -+ * -+ * Valid Range: 0, 1 -+ * -+ * Default Value: 0 (disabled, i.e. 
h/w will reset) -+ */ -+IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang"); -+ -+/* Dump Transmit and Receive buffers -+ * -+ * Valid Range: 0, 1 -+ * -+ * Default Value: 0 -+ */ -+IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request"); -+ -+#endif /* DEBUG */ -+ -+/* QueuePairs (Enable TX/RX queue pairs for interrupt handling) -+ * -+ * Valid Range: 0 - 1 -+ * -+ * Default Value: 1 -+ */ -+IGB_PARAM(QueuePairs, -+ "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on"); -+ -+#define DEFAULT_QUEUE_PAIRS 1 -+#define MAX_QUEUE_PAIRS 1 -+#define MIN_QUEUE_PAIRS 0 -+ -+/* Enable/disable EEE (a.k.a. IEEE802.3az) -+ * -+ * Valid Range: 0, 1 -+ * -+ * Default Value: 1 -+ */ -+IGB_PARAM(EEE, -+ "Enable/disable on parts that support the feature"); -+ -+/* Enable/disable DMA Coalescing -+ * -+ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, -+ * 9000, 10000(msec), 250(usec), 500(usec) -+ * -+ * Default Value: 0 -+ */ -+IGB_PARAM(DMAC, -+ "Disable or set latency for DMA Coalescing ((0=off, 1000-10000(msec), 250, 500 (usec))"); -+ -+#ifndef IGB_NO_LRO -+/* Enable/disable Large Receive Offload -+ * -+ * Valid Values: 0(off), 1(on) -+ * -+ * Default Value: 0 -+ */ -+IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off"); -+ -+#endif -+struct igb_opt_list { -+ int i; -+ char *str; -+}; -+struct igb_option { -+ enum { enable_option, range_option, list_option } type; -+ const char *name; -+ const char *err; -+ int def; -+ union { -+ struct { /* range_option info */ -+ int min; -+ int max; -+ } r; -+ struct { /* list_option info */ -+ int nr; -+ struct igb_opt_list *p; -+ } l; -+ } arg; -+}; -+ -+static int igb_validate_option(unsigned int *value, -+ struct igb_option *opt, -+ struct igb_adapter *adapter) -+{ -+ if (*value == OPTION_UNSET) { -+ *value = opt->def; -+ return 0; -+ } -+ -+ switch (opt->type) { -+ case enable_option: -+ switch (*value) { -+ case OPTION_ENABLED: -+ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); -+ return 0; -+ case OPTION_DISABLED: -+ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); -+ return 0; -+ } -+ break; -+ case range_option: -+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { -+ DPRINTK(PROBE, INFO, -+ "%s set to %d\n", opt->name, *value); -+ return 0; -+ } -+ break; -+ case list_option: { -+ int i; -+ struct igb_opt_list *ent; -+ -+ for (i = 0; i < opt->arg.l.nr; i++) { -+ ent = &opt->arg.l.p[i]; -+ if (*value == ent->i) { -+ if (ent->str[0] != '\0') -+ DPRINTK(PROBE, INFO, "%s\n", ent->str); -+ return 0; -+ } -+ } -+ } -+ break; -+ default: -+ BUG(); -+ } -+ -+ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n", -+ opt->name, *value, opt->err); -+ *value = opt->def; -+ return -1; -+} -+ -+/** -+ * igb_check_options - Range Checking for Command Line Parameters -+ * @adapter: board private structure -+ * -+ * This routine checks all command line parameters for valid user -+ * input. If an invalid value is given, or if no user specified -+ * value exists, a default value is used. The final value is stored -+ * in a variable in the adapter structure. 
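/*
 * Illustrative use of the parameters this routine validates, assuming the
 * driver was built with module_param_array support (the command line below
 * is an example only): force MSI interrupts and four RSS queues on the
 * first two ports with
 *
 *   modprobe igb IntMode=1,1 RSS=4,4
 */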
-+ **/ -+ -+void igb_check_options(struct igb_adapter *adapter) -+{ -+ int bd = adapter->bd_number; -+ struct e1000_hw *hw = &adapter->hw; -+ -+ if (bd >= IGB_MAX_NIC) { -+ DPRINTK(PROBE, NOTICE, -+ "Warning: no configuration for board #%d\n", bd); -+ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); -+#ifndef module_param_array -+ bd = IGB_MAX_NIC; -+#endif -+ } -+ -+ { /* Interrupt Throttling Rate */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "Interrupt Throttling Rate (ints/sec)", -+ .err = "using default of "__MODULE_STRING(DEFAULT_ITR), -+ .def = DEFAULT_ITR, -+ .arg = { .r = { .min = MIN_ITR, -+ .max = MAX_ITR } } -+ }; -+ -+#ifdef module_param_array -+ if (num_InterruptThrottleRate > bd) { -+#endif -+ unsigned int itr = InterruptThrottleRate[bd]; -+ -+ switch (itr) { -+ case 0: -+ DPRINTK(PROBE, INFO, "%s turned off\n", -+ opt.name); -+ if (hw->mac.type >= e1000_i350) -+ adapter->dmac = IGB_DMAC_DISABLE; -+ adapter->rx_itr_setting = itr; -+ break; -+ case 1: -+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", -+ opt.name); -+ adapter->rx_itr_setting = itr; -+ break; -+ case 3: -+ DPRINTK(PROBE, INFO, -+ "%s set to dynamic conservative mode\n", -+ opt.name); -+ adapter->rx_itr_setting = itr; -+ break; -+ default: -+ igb_validate_option(&itr, &opt, adapter); -+ /* Save the setting, because the dynamic bits -+ * change itr. In case of invalid user value, -+ * default to conservative mode, else need to -+ * clear the lower two bits because they are -+ * used as control */ -+ if (itr == 3) { -+ adapter->rx_itr_setting = itr; -+ } else { -+ adapter->rx_itr_setting = 1000000000 -+ / (itr * 256); -+ adapter->rx_itr_setting &= ~3; -+ } -+ break; -+ } -+#ifdef module_param_array -+ } else { -+ adapter->rx_itr_setting = opt.def; -+ } -+#endif -+ adapter->tx_itr_setting = adapter->rx_itr_setting; -+ } -+ { /* Interrupt Mode */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "Interrupt Mode", -+ .err = "defaulting to 2 (MSI-X)", -+ .def = IGB_INT_MODE_MSIX, -+ .arg = { .r = { .min = MIN_INTMODE, -+ .max = MAX_INTMODE } } -+ }; -+ -+#ifdef module_param_array -+ if (num_IntMode > bd) { -+#endif -+ unsigned int int_mode = IntMode[bd]; -+ igb_validate_option(&int_mode, &opt, adapter); -+ adapter->int_mode = int_mode; -+#ifdef module_param_array -+ } else { -+ adapter->int_mode = opt.def; -+ } -+#endif -+ } -+ { /* Low Latency Interrupt TCP Port */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "Low Latency Interrupt TCP Port", -+ .err = "using default of " -+ __MODULE_STRING(DEFAULT_LLIPORT), -+ .def = DEFAULT_LLIPORT, -+ .arg = { .r = { .min = MIN_LLIPORT, -+ .max = MAX_LLIPORT } } -+ }; -+ -+#ifdef module_param_array -+ if (num_LLIPort > bd) { -+#endif -+ adapter->lli_port = LLIPort[bd]; -+ if (adapter->lli_port) { -+ igb_validate_option(&adapter->lli_port, &opt, -+ adapter); -+ } else { -+ DPRINTK(PROBE, INFO, "%s turned off\n", -+ opt.name); -+ } -+#ifdef module_param_array -+ } else { -+ adapter->lli_port = opt.def; -+ } -+#endif -+ } -+ { /* Low Latency Interrupt on Packet Size */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "Low Latency Interrupt on Packet Size", -+ .err = "using default of " -+ __MODULE_STRING(DEFAULT_LLISIZE), -+ .def = DEFAULT_LLISIZE, -+ .arg = { .r = { .min = MIN_LLISIZE, -+ .max = MAX_LLISIZE } } -+ }; -+ -+#ifdef module_param_array -+ if (num_LLISize > bd) { -+#endif -+ adapter->lli_size = LLISize[bd]; -+ if (adapter->lli_size) { -+ igb_validate_option(&adapter->lli_size, &opt, -+ 
adapter); -+ } else { -+ DPRINTK(PROBE, INFO, "%s turned off\n", -+ opt.name); -+ } -+#ifdef module_param_array -+ } else { -+ adapter->lli_size = opt.def; -+ } -+#endif -+ } -+ { /* Low Latency Interrupt on TCP Push flag */ -+ struct igb_option opt = { -+ .type = enable_option, -+ .name = "Low Latency Interrupt on TCP Push flag", -+ .err = "defaulting to Disabled", -+ .def = OPTION_DISABLED -+ }; -+ -+#ifdef module_param_array -+ if (num_LLIPush > bd) { -+#endif -+ unsigned int lli_push = LLIPush[bd]; -+ igb_validate_option(&lli_push, &opt, adapter); -+ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0; -+#ifdef module_param_array -+ } else { -+ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0; -+ } -+#endif -+ } -+ { /* SRIOV - Enable SR-IOV VF devices */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "max_vfs - SR-IOV VF devices", -+ .err = "using default of " -+ __MODULE_STRING(DEFAULT_SRIOV), -+ .def = DEFAULT_SRIOV, -+ .arg = { .r = { .min = MIN_SRIOV, -+ .max = MAX_SRIOV } } -+ }; -+ -+#ifdef module_param_array -+ if (num_max_vfs > bd) { -+#endif -+ adapter->vfs_allocated_count = max_vfs[bd]; -+ igb_validate_option(&adapter->vfs_allocated_count, -+ &opt, adapter); -+ -+#ifdef module_param_array -+ } else { -+ adapter->vfs_allocated_count = opt.def; -+ } -+#endif -+ if (adapter->vfs_allocated_count) { -+ switch (hw->mac.type) { -+ case e1000_82575: -+ case e1000_82580: -+ case e1000_i210: -+ case e1000_i211: -+ case e1000_i354: -+ adapter->vfs_allocated_count = 0; -+ DPRINTK(PROBE, INFO, -+ "SR-IOV option max_vfs not supported.\n"); -+ /* Fall through */ -+ default: -+ break; -+ } -+ } -+ } -+ { /* VMDQ - Enable VMDq multiqueue receive */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "VMDQ - VMDq multiqueue queue count", -+ .err = "using default of "__MODULE_STRING(DEFAULT_VMDQ), -+ .def = DEFAULT_VMDQ, -+ .arg = { .r = { .min = MIN_VMDQ, -+ .max = (MAX_VMDQ -+ - adapter->vfs_allocated_count)} } -+ }; -+ if ((hw->mac.type != e1000_i210) || -+ (hw->mac.type != e1000_i211)) { -+#ifdef module_param_array -+ if (num_VMDQ > bd) { -+#endif -+ adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]); -+ if (adapter->vfs_allocated_count && -+ !adapter->vmdq_pools) { -+ DPRINTK(PROBE, INFO, -+ "Enabling SR-IOV requires VMDq be set to at least 1\n"); -+ adapter->vmdq_pools = 1; -+ } -+ igb_validate_option(&adapter->vmdq_pools, &opt, -+ adapter); -+ -+#ifdef module_param_array -+ } else { -+ if (!adapter->vfs_allocated_count) -+ adapter->vmdq_pools = (opt.def == 1 ? 
0 -+ : opt.def); -+ else -+ adapter->vmdq_pools = 1; -+ } -+#endif -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) { -+ DPRINTK(PROBE, INFO, -+ "VMDq not supported on this part.\n"); -+ adapter->vmdq_pools = 0; -+ } -+#endif -+ -+ } else { -+ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n"); -+ adapter->vmdq_pools = opt.def; -+ } -+ } -+ { /* RSS - Enable RSS multiqueue receives */ -+ struct igb_option opt = { -+ .type = range_option, -+ .name = "RSS - RSS multiqueue receive count", -+ .err = "using default of "__MODULE_STRING(DEFAULT_RSS), -+ .def = DEFAULT_RSS, -+ .arg = { .r = { .min = MIN_RSS, -+ .max = MAX_RSS } } -+ }; -+ -+ switch (hw->mac.type) { -+ case e1000_82575: -+#ifndef CONFIG_IGB_VMDQ_NETDEV -+ if (!!adapter->vmdq_pools) { -+ if (adapter->vmdq_pools <= 2) { -+ if (adapter->vmdq_pools == 2) -+ opt.arg.r.max = 3; -+ } else { -+ opt.arg.r.max = 1; -+ } -+ } else { -+ opt.arg.r.max = 4; -+ } -+#else -+ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4; -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+ break; -+ case e1000_i210: -+ opt.arg.r.max = 4; -+ break; -+ case e1000_i211: -+ opt.arg.r.max = 2; -+ break; -+ case e1000_82576: -+#ifndef CONFIG_IGB_VMDQ_NETDEV -+ if (!!adapter->vmdq_pools) -+ opt.arg.r.max = 2; -+ break; -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+ case e1000_82580: -+ case e1000_i350: -+ case e1000_i354: -+ default: -+ if (!!adapter->vmdq_pools) -+ opt.arg.r.max = 1; -+ break; -+ } -+ -+ if (adapter->int_mode != IGB_INT_MODE_MSIX) { -+ DPRINTK(PROBE, INFO, -+ "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n", -+ opt.err); -+ opt.arg.r.max = 1; -+ } -+ -+#ifdef module_param_array -+ if (num_RSS > bd) { -+#endif -+ adapter->rss_queues = RSS[bd]; -+ switch (adapter->rss_queues) { -+ case 1: -+ break; -+ default: -+ igb_validate_option(&adapter->rss_queues, &opt, -+ adapter); -+ if (adapter->rss_queues) -+ break; -+ case 0: -+ adapter->rss_queues = min_t(u32, opt.arg.r.max, -+ num_online_cpus()); -+ break; -+ } -+#ifdef module_param_array -+ } else { -+ adapter->rss_queues = opt.def; -+ } -+#endif -+ } -+ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */ -+ struct igb_option opt = { -+ .type = enable_option, -+ .name = -+ "QueuePairs - Tx/Rx queue pairs for interrupt handling", -+ .err = "defaulting to Enabled", -+ .def = OPTION_ENABLED -+ }; -+#ifdef module_param_array -+ if (num_QueuePairs > bd) { -+#endif -+ unsigned int qp = QueuePairs[bd]; -+ /* -+ * We must enable queue pairs if the number of queues -+ * exceeds the number of available interrupts. We are -+ * limited to 10, or 3 per unallocated vf. On I210 and -+ * I211 devices, we are limited to 5 interrupts. -+ * However, since I211 only supports 2 queues, we do not -+ * need to check and override the user option. -+ */ -+ if (qp == OPTION_DISABLED) { -+ if (adapter->rss_queues > 4) -+ qp = OPTION_ENABLED; -+ -+ if (adapter->vmdq_pools > 4) -+ qp = OPTION_ENABLED; -+ -+ if (adapter->rss_queues > 1 && -+ (adapter->vmdq_pools > 3 || -+ adapter->vfs_allocated_count > 6)) -+ qp = OPTION_ENABLED; -+ -+ if (hw->mac.type == e1000_i210 && -+ adapter->rss_queues > 2) -+ qp = OPTION_ENABLED; -+ -+ if (qp == OPTION_ENABLED) -+ DPRINTK(PROBE, INFO, -+ "Number of queues exceeds available interrupts, %s\n", -+ opt.err); -+ } -+ igb_validate_option(&qp, &opt, adapter); -+ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0; -+#ifdef module_param_array -+ } else { -+ adapter->flags |= opt.def ? 
IGB_FLAG_QUEUE_PAIRS : 0; -+ } -+#endif -+ } -+ { /* EEE - Enable EEE for capable adapters */ -+ -+ if (hw->mac.type >= e1000_i350) { -+ struct igb_option opt = { -+ .type = enable_option, -+ .name = "EEE Support", -+ .err = "defaulting to Enabled", -+ .def = OPTION_ENABLED -+ }; -+#ifdef module_param_array -+ if (num_EEE > bd) { -+#endif -+ unsigned int eee = EEE[bd]; -+ igb_validate_option(&eee, &opt, adapter); -+ adapter->flags |= eee ? IGB_FLAG_EEE : 0; -+ if (eee) -+ hw->dev_spec._82575.eee_disable = false; -+ else -+ hw->dev_spec._82575.eee_disable = true; -+ -+#ifdef module_param_array -+ } else { -+ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0; -+ if (adapter->flags & IGB_FLAG_EEE) -+ hw->dev_spec._82575.eee_disable = false; -+ else -+ hw->dev_spec._82575.eee_disable = true; -+ } -+#endif -+ } -+ } -+ { /* DMAC - Enable DMA Coalescing for capable adapters */ -+ -+ if (hw->mac.type >= e1000_i350) { -+ struct igb_opt_list list[] = { -+ { IGB_DMAC_DISABLE, "DMAC Disable"}, -+ { IGB_DMAC_MIN, "DMAC 250 usec"}, -+ { IGB_DMAC_500, "DMAC 500 usec"}, -+ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"}, -+ { IGB_DMAC_2000, "DMAC 2000 usec"}, -+ { IGB_DMAC_3000, "DMAC 3000 usec"}, -+ { IGB_DMAC_4000, "DMAC 4000 usec"}, -+ { IGB_DMAC_5000, "DMAC 5000 usec"}, -+ { IGB_DMAC_6000, "DMAC 6000 usec"}, -+ { IGB_DMAC_7000, "DMAC 7000 usec"}, -+ { IGB_DMAC_8000, "DMAC 8000 usec"}, -+ { IGB_DMAC_9000, "DMAC 9000 usec"}, -+ { IGB_DMAC_MAX, "DMAC 10000 usec"} -+ }; -+ struct igb_option opt = { -+ .type = list_option, -+ .name = "DMA Coalescing", -+ .err = "using default of " -+ __MODULE_STRING(IGB_DMAC_DISABLE), -+ .def = IGB_DMAC_DISABLE, -+ .arg = { .l = { .nr = 13, -+ .p = list -+ } -+ } -+ }; -+#ifdef module_param_array -+ if (num_DMAC > bd) { -+#endif -+ unsigned int dmac = DMAC[bd]; -+ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE) -+ dmac = IGB_DMAC_DISABLE; -+ igb_validate_option(&dmac, &opt, adapter); -+ switch (dmac) { -+ case IGB_DMAC_DISABLE: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_MIN: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_500: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_EN_DEFAULT: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_2000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_3000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_4000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_5000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_6000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_7000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_8000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_9000: -+ adapter->dmac = dmac; -+ break; -+ case IGB_DMAC_MAX: -+ adapter->dmac = dmac; -+ break; -+ default: -+ adapter->dmac = opt.def; -+ DPRINTK(PROBE, INFO, -+ "Invalid DMAC setting, resetting DMAC to %d\n", -+ opt.def); -+ } -+#ifdef module_param_array -+ } else -+ adapter->dmac = opt.def; -+#endif -+ } -+ } -+#ifndef IGB_NO_LRO -+ { /* LRO - Enable Large Receive Offload */ -+ struct igb_option opt = { -+ .type = enable_option, -+ .name = "LRO - Large Receive Offload", -+ .err = "defaulting to Disabled", -+ .def = OPTION_DISABLED -+ }; -+ struct net_device *netdev = adapter->netdev; -+#ifdef module_param_array -+ if (num_LRO > bd) { -+#endif -+ unsigned int lro = LRO[bd]; -+ igb_validate_option(&lro, &opt, adapter); -+ netdev->features |= lro ? 
NETIF_F_LRO : 0; -+#ifdef module_param_array -+ } else if (opt.def == OPTION_ENABLED) { -+ netdev->features |= NETIF_F_LRO; -+ } -+#endif -+ } -+#endif /* IGB_NO_LRO */ -+ { /* MDD - Enable Malicious Driver Detection. Only available when -+ SR-IOV is enabled. */ -+ struct igb_option opt = { -+ .type = enable_option, -+ .name = "Malicious Driver Detection", -+ .err = "defaulting to 1", -+ .def = OPTION_ENABLED, -+ .arg = { .r = { .min = OPTION_DISABLED, -+ .max = OPTION_ENABLED } } -+ }; -+ -+#ifdef module_param_array -+ if (num_MDD > bd) { -+#endif -+ adapter->mdd = MDD[bd]; -+ igb_validate_option((uint *)&adapter->mdd, &opt, -+ adapter); -+#ifdef module_param_array -+ } else { -+ adapter->mdd = opt.def; -+ } -+#endif -+ } -+} -+ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_procfs.c b/drivers/net/ethernet/intel/igb/igb_procfs.c ---- a/drivers/net/ethernet/intel/igb/igb_procfs.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_procfs.c 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,356 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "igb.h" -+#include "e1000_82575.h" -+#include "e1000_hw.h" -+ -+#ifdef IGB_PROCFS -+#ifndef IGB_HWMON -+ -+#include -+#include -+#include -+#include -+#include -+ -+static struct proc_dir_entry *igb_top_dir; -+ -+bool igb_thermal_present(struct igb_adapter *adapter) -+{ -+ s32 status; -+ struct e1000_hw *hw; -+ -+ if (adapter == NULL) -+ return false; -+ hw = &adapter->hw; -+ -+ /* -+ * Only set I2C bit-bang mode if an external thermal sensor is -+ * supported on this device. 
-+ */ -+ if (adapter->ets) { -+ status = e1000_set_i2c_bb(hw); -+ if (status != E1000_SUCCESS) -+ return false; -+ } -+ -+ status = hw->mac.ops.init_thermal_sensor_thresh(hw); -+ if (status != E1000_SUCCESS) -+ return false; -+ -+ return true; -+} -+ -+static int igb_macburn(char *page, char **start, off_t off, int count, -+ int *eof, void *data) -+{ -+ struct e1000_hw *hw; -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ if (adapter == NULL) -+ return snprintf(page, count, "error: no adapter\n"); -+ -+ hw = &adapter->hw; -+ if (hw == NULL) -+ return snprintf(page, count, "error: no hw data\n"); -+ -+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", -+ (unsigned int)hw->mac.perm_addr[0], -+ (unsigned int)hw->mac.perm_addr[1], -+ (unsigned int)hw->mac.perm_addr[2], -+ (unsigned int)hw->mac.perm_addr[3], -+ (unsigned int)hw->mac.perm_addr[4], -+ (unsigned int)hw->mac.perm_addr[5]); -+} -+ -+static int igb_macadmn(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ struct e1000_hw *hw; -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ if (adapter == NULL) -+ return snprintf(page, count, "error: no adapter\n"); -+ -+ hw = &adapter->hw; -+ if (hw == NULL) -+ return snprintf(page, count, "error: no hw data\n"); -+ -+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", -+ (unsigned int)hw->mac.addr[0], -+ (unsigned int)hw->mac.addr[1], -+ (unsigned int)hw->mac.addr[2], -+ (unsigned int)hw->mac.addr[3], -+ (unsigned int)hw->mac.addr[4], -+ (unsigned int)hw->mac.addr[5]); -+} -+ -+static int igb_numeports(char *page, char **start, off_t off, int count, -+ int *eof, void *data) -+{ -+ struct e1000_hw *hw; -+ int ports; -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ if (adapter == NULL) -+ return snprintf(page, count, "error: no adapter\n"); -+ -+ hw = &adapter->hw; -+ if (hw == NULL) -+ return snprintf(page, count, "error: no hw data\n"); -+ -+ ports = 4; -+ -+ return snprintf(page, count, "%d\n", ports); -+} -+ -+static int igb_porttype(char *page, char **start, off_t off, int count, -+ int *eof, void *data) -+{ -+ struct igb_adapter *adapter = (struct igb_adapter *)data; -+ if (adapter == NULL) -+ return snprintf(page, count, "error: no adapter\n"); -+ -+ return snprintf(page, count, "%d\n", -+ test_bit(__IGB_DOWN, &adapter->state)); -+} -+ -+static int igb_therm_location(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ struct igb_therm_proc_data *therm_data = -+ (struct igb_therm_proc_data *)data; -+ -+ if (therm_data == NULL) -+ return snprintf(page, count, "error: no therm_data\n"); -+ -+ return snprintf(page, count, "%d\n", therm_data->sensor_data->location); -+} -+ -+static int igb_therm_maxopthresh(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ struct igb_therm_proc_data *therm_data = -+ (struct igb_therm_proc_data *)data; -+ -+ if (therm_data == NULL) -+ return snprintf(page, count, "error: no therm_data\n"); -+ -+ return snprintf(page, count, "%d\n", -+ therm_data->sensor_data->max_op_thresh); -+} -+ -+static int igb_therm_cautionthresh(char *page, char **start, off_t off, -+ int count, int *eof, void *data) -+{ -+ struct igb_therm_proc_data *therm_data = -+ (struct igb_therm_proc_data *)data; -+ -+ if (therm_data == NULL) -+ return snprintf(page, count, "error: no therm_data\n"); -+ -+ return snprintf(page, count, "%d\n", -+ therm_data->sensor_data->caution_thresh); -+} -+ -+static int igb_therm_temp(char *page, char **start, off_t 
off, -+ int count, int *eof, void *data) -+{ -+ s32 status; -+ struct igb_therm_proc_data *therm_data = -+ (struct igb_therm_proc_data *)data; -+ -+ if (therm_data == NULL) -+ return snprintf(page, count, "error: no therm_data\n"); -+ -+ status = e1000_get_thermal_sensor_data(therm_data->hw); -+ if (status != E1000_SUCCESS) -+ snprintf(page, count, "error: status %d returned\n", status); -+ -+ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); -+} -+ -+struct igb_proc_type { -+ char name[32]; -+ int (*read)(char*, char**, off_t, int, int*, void*); -+}; -+ -+struct igb_proc_type igb_proc_entries[] = { -+ {"numeports", &igb_numeports}, -+ {"porttype", &igb_porttype}, -+ {"macburn", &igb_macburn}, -+ {"macadmn", &igb_macadmn}, -+ {"", NULL} -+}; -+ -+struct igb_proc_type igb_internal_entries[] = { -+ {"location", &igb_therm_location}, -+ {"temp", &igb_therm_temp}, -+ {"cautionthresh", &igb_therm_cautionthresh}, -+ {"maxopthresh", &igb_therm_maxopthresh}, -+ {"", NULL} -+}; -+ -+void igb_del_proc_entries(struct igb_adapter *adapter) -+{ -+ int index, i; -+ char buf[16]; /* much larger than the sensor number will ever be */ -+ -+ if (igb_top_dir == NULL) -+ return; -+ -+ for (i = 0; i < E1000_MAX_SENSORS; i++) { -+ if (adapter->therm_dir[i] == NULL) -+ continue; -+ -+ for (index = 0; ; index++) { -+ if (igb_internal_entries[index].read == NULL) -+ break; -+ -+ remove_proc_entry(igb_internal_entries[index].name, -+ adapter->therm_dir[i]); -+ } -+ snprintf(buf, sizeof(buf), "sensor_%d", i); -+ remove_proc_entry(buf, adapter->info_dir); -+ } -+ -+ if (adapter->info_dir != NULL) { -+ for (index = 0; ; index++) { -+ if (igb_proc_entries[index].read == NULL) -+ break; -+ remove_proc_entry(igb_proc_entries[index].name, -+ adapter->info_dir); -+ } -+ remove_proc_entry("info", adapter->eth_dir); -+ } -+ -+ if (adapter->eth_dir != NULL) -+ remove_proc_entry(pci_name(adapter->pdev), igb_top_dir); -+} -+ -+/* called from igb_main.c */ -+void igb_procfs_exit(struct igb_adapter *adapter) -+{ -+ igb_del_proc_entries(adapter); -+} -+ -+int igb_procfs_topdir_init(void) -+{ -+ igb_top_dir = proc_mkdir("driver/igb", NULL); -+ if (igb_top_dir == NULL) -+ return (-ENOMEM); -+ -+ return 0; -+} -+ -+void igb_procfs_topdir_exit(void) -+{ -+ remove_proc_entry("driver/igb", NULL); -+} -+ -+/* called from igb_main.c */ -+int igb_procfs_init(struct igb_adapter *adapter) -+{ -+ int rc = 0; -+ int i; -+ int index; -+ char buf[16]; /* much larger than the sensor number will ever be */ -+ -+ adapter->eth_dir = NULL; -+ adapter->info_dir = NULL; -+ for (i = 0; i < E1000_MAX_SENSORS; i++) -+ adapter->therm_dir[i] = NULL; -+ -+ if (igb_top_dir == NULL) { -+ rc = -ENOMEM; -+ goto fail; -+ } -+ -+ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir); -+ if (adapter->eth_dir == NULL) { -+ rc = -ENOMEM; -+ goto fail; -+ } -+ -+ adapter->info_dir = proc_mkdir("info", adapter->eth_dir); -+ if (adapter->info_dir == NULL) { -+ rc = -ENOMEM; -+ goto fail; -+ } -+ for (index = 0; ; index++) { -+ if (igb_proc_entries[index].read == NULL) -+ break; -+ if (!(create_proc_read_entry(igb_proc_entries[index].name, -+ 0444, -+ adapter->info_dir, -+ igb_proc_entries[index].read, -+ adapter))) { -+ -+ rc = -ENOMEM; -+ goto fail; -+ } -+ } -+ if (igb_thermal_present(adapter) == false) -+ goto exit; -+ -+ for (i = 0; i < E1000_MAX_SENSORS; i++) { -+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) -+ continue; -+ -+ snprintf(buf, sizeof(buf), "sensor_%d", i); -+ adapter->therm_dir[i] = proc_mkdir(buf, 
adapter->info_dir); -+ if (adapter->therm_dir[i] == NULL) { -+ rc = -ENOMEM; -+ goto fail; -+ } -+ for (index = 0; ; index++) { -+ if (igb_internal_entries[index].read == NULL) -+ break; -+ /* -+ * therm_data struct contains pointer the read func -+ * will be needing -+ */ -+ adapter->therm_data[i].hw = &adapter->hw; -+ adapter->therm_data[i].sensor_data = -+ &adapter->hw.mac.thermal_sensor_data.sensor[i]; -+ -+ if (!(create_proc_read_entry( -+ igb_internal_entries[index].name, -+ 0444, -+ adapter->therm_dir[i], -+ igb_internal_entries[index].read, -+ &adapter->therm_data[i]))) { -+ rc = -ENOMEM; -+ goto fail; -+ } -+ } -+ } -+ goto exit; -+ -+fail: -+ igb_del_proc_entries(adapter); -+exit: -+ return rc; -+} -+ -+#endif /* !IGB_HWMON */ -+#endif /* IGB_PROCFS */ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c ---- a/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-13 09:20:24.790171605 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-14 14:32:08.579567168 +0000 -@@ -1,31 +1,46 @@ --/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 -- * -- * Copyright (C) 2011 Richard Cochran -- * -- * This program is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License as published by -- * the Free Software Foundation; either version 2 of the License, or -- * (at your option) any later version. -- * -- * This program is distributed in the hope that it will be useful, -- * but WITHOUT ANY WARRANTY; without even the implied warranty of -- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- * GNU General Public License for more details. -- * -- * You should have received a copy of the GNU General Public License along with -- * this program; if not, see . -- */ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+/****************************************************************************** -+ Copyright(c) 2011 Richard Cochran for some of the -+ 82576 and 82580 code -+******************************************************************************/ -+ -+#include "igb.h" -+ -+#ifdef HAVE_PTP_1588_CLOCK - #include - #include - #include - #include -- --#include "igb.h" -+#include - - #define INCVALUE_MASK 0x7fffffff - #define ISGN 0x80000000 - --/* The 82580 timesync updates the system timer every 8ns by 8ns, -+/* -+ * The 82580 timesync updates the system timer every 8ns by 8ns, - * and this update value cannot be reprogrammed. 
- * - * Neither the 82576 nor the 82580 offer registers wide enough to hold -@@ -74,9 +89,10 @@ - #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) - #define IGB_NBITS_82580 40 - --static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); -+/* -+ * SYSTIM read access for the 82576 -+ */ - --/* SYSTIM read access for the 82576 */ - static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) - { - struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); -@@ -84,8 +100,8 @@ - u64 val; - u32 lo, hi; - -- lo = rd32(E1000_SYSTIML); -- hi = rd32(E1000_SYSTIMH); -+ lo = E1000_READ_REG(hw, E1000_SYSTIML); -+ hi = E1000_READ_REG(hw, E1000_SYSTIMH); - - val = ((u64) hi) << 32; - val |= lo; -@@ -93,21 +109,24 @@ - return val; - } - --/* SYSTIM read access for the 82580 */ -+/* -+ * SYSTIM read access for the 82580 -+ */ -+ - static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) - { - struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); - struct e1000_hw *hw = &igb->hw; -- u32 lo, hi; - u64 val; -+ u32 lo, hi; - - /* The timestamp latches on lowest register read. For the 82580 - * the lowest register is SYSTIMR instead of SYSTIML. However we only - * need to provide nanosecond resolution, so we just ignore it. - */ -- rd32(E1000_SYSTIMR); -- lo = rd32(E1000_SYSTIML); -- hi = rd32(E1000_SYSTIMH); -+ E1000_READ_REG(hw, E1000_SYSTIMR); -+ lo = E1000_READ_REG(hw, E1000_SYSTIML); -+ hi = E1000_READ_REG(hw, E1000_SYSTIMH); - - val = ((u64) hi) << 32; - val |= lo; -@@ -115,7 +134,10 @@ - return val; - } - --/* SYSTIM read access for I210/I211 */ -+/* -+ * SYSTIM read access for I210/I211 -+ */ -+ - static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) - { - struct e1000_hw *hw = &adapter->hw; -@@ -125,9 +147,9 @@ - * lowest register is SYSTIMR. Since we only need to provide nanosecond - * resolution, we can ignore it. - */ -- rd32(E1000_SYSTIMR); -- nsec = rd32(E1000_SYSTIML); -- sec = rd32(E1000_SYSTIMH); -+ E1000_READ_REG(hw, E1000_SYSTIMR); -+ nsec = E1000_READ_REG(hw, E1000_SYSTIML); -+ sec = E1000_READ_REG(hw, E1000_SYSTIMH); - - ts->tv_sec = sec; - ts->tv_nsec = nsec; -@@ -138,11 +160,12 @@ - { - struct e1000_hw *hw = &adapter->hw; - -- /* Writing the SYSTIMR register is not necessary as it only provides -+ /* -+ * Writing the SYSTIMR register is not necessary as it only provides - * sub-nanosecond resolution. - */ -- wr32(E1000_SYSTIML, ts->tv_nsec); -- wr32(E1000_SYSTIMH, ts->tv_sec); -+ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec); -+ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec); - } - - /** -@@ -172,8 +195,8 @@ - switch (adapter->hw.mac.type) { - case e1000_82576: - case e1000_82580: -- case e1000_i354: - case e1000_i350: -+ case e1000_i354: - spin_lock_irqsave(&adapter->tmreg_lock, flags); - - ns = timecounter_cyc2time(&adapter->tc, systim); -@@ -195,7 +218,10 @@ - } - } - --/* PTP clock operations */ -+/* -+ * PTP clock operations -+ */ -+ - static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) - { - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, -@@ -220,7 +246,8 @@ - else - incvalue += rate; - -- wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); -+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 -+ | (incvalue & INCVALUE_82576_MASK)); - - return 0; - } -@@ -242,11 +269,24 @@ - rate <<= 26; - rate = div_u64(rate, 1953125); - -+ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x -+ * as quickly. 
Account for this by dividing the adjustment by 2.5. -+ */ -+ if (hw->mac.type == e1000_i354) { -+ u32 status = E1000_READ_REG(hw, E1000_STATUS); -+ -+ if ((status & E1000_STATUS_2P5_SKU) && -+ !(status & E1000_STATUS_2P5_SKU_OVER)) { -+ rate <<= 1; -+ rate = div_u64(rate, 5); -+ } -+ } -+ - inca = rate & INCVALUE_MASK; - if (neg_adj) - inca |= ISGN; - -- wr32(E1000_TIMINCA, inca); -+ E1000_WRITE_REG(hw, E1000_TIMINCA, inca); - - return 0; - } -@@ -287,14 +327,13 @@ - return 0; - } - --static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, -- struct timespec *ts) -+static int igb_ptp_gettime64_82576(struct ptp_clock_info *ptp, -+ struct timespec64 *ts64) - { - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, - ptp_caps); - unsigned long flags; - u64 ns; -- u32 remainder; - - spin_lock_irqsave(&igb->tmreg_lock, flags); - -@@ -302,28 +341,99 @@ - - spin_unlock_irqrestore(&igb->tmreg_lock, flags); - -- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); -- ts->tv_nsec = remainder; -+ *ts64 = ns_to_timespec64(ns); - - return 0; - } - --static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, -- struct timespec *ts) -+static int igb_ptp_gettime64_i210(struct ptp_clock_info *ptp, -+ struct timespec64 *ts64) -+{ -+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter, -+ ptp_caps); -+ struct timespec ts; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&igb->tmreg_lock, flags); -+ -+ igb_ptp_read_i210(igb, &ts); -+ *ts64 = timespec_to_timespec64(ts); -+ -+ spin_unlock_irqrestore(&igb->tmreg_lock, flags); -+ -+ return 0; -+} -+ -+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 -+static int igb_ptp_settime64_82576(struct ptp_clock_info *ptp, -+ const struct timespec64 *ts64) -+{ -+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter, -+ ptp_caps); -+ unsigned long flags; -+ u64 ns; -+ -+ ns = timespec64_to_ns(ts64); -+ -+ spin_lock_irqsave(&igb->tmreg_lock, flags); -+ -+ timecounter_init(&igb->tc, &igb->cc, ns); -+ -+ spin_unlock_irqrestore(&igb->tmreg_lock, flags); -+ -+ return 0; -+} -+ -+#endif -+static int igb_ptp_settime64_i210(struct ptp_clock_info *ptp, -+ const struct timespec64 *ts64) - { - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, - ptp_caps); -+ struct timespec ts; - unsigned long flags; - -+ ts = timespec64_to_timespec(*ts64); - spin_lock_irqsave(&igb->tmreg_lock, flags); - -- igb_ptp_read_i210(igb, ts); -+ igb_ptp_write_i210(igb, &ts); - - spin_unlock_irqrestore(&igb->tmreg_lock, flags); - - return 0; - } - -+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 -+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, -+ struct timespec *ts) -+{ -+ struct timespec64 ts64; -+ int err; -+ -+ err = igb_ptp_gettime64_82576(ptp, &ts64); -+ if (err) -+ return err; -+ -+ *ts = timespec64_to_timespec(ts64); -+ -+ return 0; -+} -+ -+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, -+ struct timespec *ts) -+{ -+ struct timespec64 ts64; -+ int err; -+ -+ err = igb_ptp_gettime64_i210(ptp, &ts64); -+ if (err) -+ return err; -+ -+ *ts = timespec64_to_timespec(ts64); -+ -+ return 0; -+} -+ - static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, - const struct timespec *ts) - { -@@ -360,8 +470,9 @@ - return 0; - } - --static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, -- struct ptp_clock_request *rq, int on) -+#endif -+static int igb_ptp_enable(struct ptp_clock_info *ptp, -+ struct ptp_clock_request *rq, int on) - { - return -EOPNOTSUPP; - } -@@ -372,8 +483,8 @@ - * - * This work function polls the TSYNCTXCTL valid bit to 
determine when a - * timestamp has been taken for the current stored skb. -- **/ --static void igb_ptp_tx_work(struct work_struct *work) -+ */ -+void igb_ptp_tx_work(struct work_struct *work) - { - struct igb_adapter *adapter = container_of(work, struct igb_adapter, - ptp_tx_work); -@@ -393,7 +504,7 @@ - return; - } - -- tsynctxctl = rd32(E1000_TSYNCTXCTL); -+ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); - if (tsynctxctl & E1000_TSYNCTXCTL_VALID) - igb_ptp_tx_hwtstamp(adapter); - else -@@ -401,15 +512,16 @@ - schedule_work(&adapter->ptp_tx_work); - } - --static void igb_ptp_overflow_check(struct work_struct *work) -+static void igb_ptp_overflow_check_82576(struct work_struct *work) - { - struct igb_adapter *igb = - container_of(work, struct igb_adapter, ptp_overflow_work.work); -- struct timespec ts; -+ struct timespec64 ts64; - -- igb->ptp_caps.gettime(&igb->ptp_caps, &ts); -+ igb_ptp_gettime64_82576(&igb->ptp_caps, &ts64); - -- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); -+ pr_debug("igb overflow check at %lld.%09lu\n", -+ (long long)ts64.tv_sec, ts64.tv_nsec); - - schedule_delayed_work(&igb->ptp_overflow_work, - IGB_SYSTIM_OVERFLOW_PERIOD); -@@ -423,11 +535,11 @@ - * dropped an Rx packet that was timestamped when the ring is full. The - * particular error is rare but leaves the device in a state unable to timestamp - * any future packets. -- **/ -+ */ - void igb_ptp_rx_hang(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; -- u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); -+ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); - unsigned long rx_event; - - if (hw->mac.type != e1000_82576) -@@ -448,7 +560,7 @@ - - /* Only need to read the high RXSTMP register to clear the lock */ - if (time_is_before_jiffies(rx_event + 5 * HZ)) { -- rd32(E1000_RXSTMPH); -+ E1000_READ_REG(hw, E1000_RXSTMPH); - adapter->last_rx_ptp_check = jiffies; - adapter->rx_hwtstamp_cleared++; - dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); -@@ -462,15 +574,15 @@ - * If we were asked to do hardware stamping and such a time stamp is - * available, then it must have been for this skb here because we only - * allow only one such packet into the queue. -- **/ --static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) -+ */ -+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) - { - struct e1000_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps shhwtstamps; - u64 regval; - -- regval = rd32(E1000_TXSTMPL); -- regval |= (u64)rd32(E1000_TXSTMPH) << 32; -+ regval = E1000_READ_REG(hw, E1000_TXSTMPL); -+ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32; - - igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); - skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); -@@ -488,14 +600,15 @@ - * This function is meant to retrieve a timestamp from the first buffer of an - * incoming frame. The value is stored in little endian format starting on - * byte 8. -- **/ -+ */ - void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, - unsigned char *va, - struct sk_buff *skb) - { - __le64 *regval = (__le64 *)va; - -- /* The timestamp is recorded in little endian format. -+ /* -+ * The timestamp is recorded in little endian format. - * DWORD: 0 1 2 3 - * Field: Reserved Reserved SYSTIML SYSTIMH - */ -@@ -510,7 +623,7 @@ - * - * This function is meant to retrieve a timestamp from the internal registers - * of the adapter and store it in the skb. 
-- **/ -+ */ - void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, - struct sk_buff *skb) - { -@@ -518,7 +631,8 @@ - struct e1000_hw *hw = &adapter->hw; - u64 regval; - -- /* If this bit is set, then the RX registers contain the time stamp. No -+ /* -+ * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register -@@ -528,11 +642,11 @@ - * If nothing went wrong, then it should have a shared tx_flags that we - * can turn into a skb_shared_hwtstamps. - */ -- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) -+ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; - -- regval = rd32(E1000_RXSTMPL); -- regval |= (u64)rd32(E1000_RXSTMPH) << 32; -+ regval = E1000_READ_REG(hw, E1000_RXSTMPL); -+ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32; - - igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); - -@@ -576,6 +690,7 @@ - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". -+ * - */ - static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, - struct hwtstamp_config *config) -@@ -631,7 +746,8 @@ - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_ALL: -- /* 82576 cannot timestamp all packets, which it needs to do to -+ /* -+ * 82576 cannot timestamp all packets, which it needs to do to - * support both V1 Sync and Delay_Req messages - */ - if (hw->mac.type != e1000_82576) { -@@ -651,9 +767,10 @@ - return 0; - } - -- /* Per-packet timestamping only works if all packets are -+ /* -+ * Per-packet timestamping only works if all packets are - * timestamped, so enable timestamping in all packets as -- * long as one Rx filter was configured. -+ * long as one rx filter was configured. 
- */ - if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { - tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; -@@ -664,63 +781,63 @@ - - if ((hw->mac.type == e1000_i210) || - (hw->mac.type == e1000_i211)) { -- regval = rd32(E1000_RXPBS); -+ regval = E1000_READ_REG(hw, E1000_RXPBS); - regval |= E1000_RXPBS_CFG_TS_EN; -- wr32(E1000_RXPBS, regval); -+ E1000_WRITE_REG(hw, E1000_RXPBS, regval); - } - } - - /* enable/disable TX */ -- regval = rd32(E1000_TSYNCTXCTL); -+ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL); - regval &= ~E1000_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; -- wr32(E1000_TSYNCTXCTL, regval); -+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval); - - /* enable/disable RX */ -- regval = rd32(E1000_TSYNCRXCTL); -+ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL); - regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; -- wr32(E1000_TSYNCRXCTL, regval); -+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval); - - /* define which PTP packets are time stamped */ -- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); -+ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg); - - /* define ethertype filter for timestamped packets */ - if (is_l2) -- wr32(E1000_ETQF(3), -+ E1000_WRITE_REG(hw, E1000_ETQF(3), - (E1000_ETQF_FILTER_ENABLE | /* enable filter */ - E1000_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else -- wr32(E1000_ETQF(3), 0); -+ E1000_WRITE_REG(hw, E1000_ETQF(3), 0); - - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IPPROTO_UDP /* UDP */ -- | E1000_FTQF_VF_BP /* VF not compared */ -- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ -- | E1000_FTQF_MASK); /* mask all inputs */ -+ | E1000_FTQF_VF_BP /* VF not compared */ -+ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamp */ -+ | E1000_FTQF_MASK); /* mask all inputs */ - ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - -- wr32(E1000_IMIR(3), htons(PTP_EV_PORT)); -- wr32(E1000_IMIREXT(3), -+ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT)); -+ E1000_WRITE_REG(hw, E1000_IMIREXT(3), - (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); - if (hw->mac.type == e1000_82576) { - /* enable source port check */ -- wr32(E1000_SPQF(3), htons(PTP_EV_PORT)); -+ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT)); - ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; - } -- wr32(E1000_FTQF(3), ftqf); -+ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf); - } else { -- wr32(E1000_FTQF(3), E1000_FTQF_MASK); -+ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK); - } -- wrfl(); -+ E1000_WRITE_FLUSH(hw); - - /* clear TX/RX time stamp registers, just to be sure */ -- regval = rd32(E1000_TXSTMPL); -- regval = rd32(E1000_TXSTMPH); -- regval = rd32(E1000_RXSTMPL); -- regval = rd32(E1000_RXSTMPH); -+ regval = E1000_READ_REG(hw, E1000_TXSTMPL); -+ regval = E1000_READ_REG(hw, E1000_TXSTMPH); -+ regval = E1000_READ_REG(hw, E1000_RXSTMPL); -+ regval = E1000_READ_REG(hw, E1000_RXSTMPH); - - return 0; - } -@@ -766,19 +883,25 @@ - adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; - adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; -+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 -+ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576; -+ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576; -+#else - adapter->ptp_caps.gettime = igb_ptp_gettime_82576; - adapter->ptp_caps.settime = igb_ptp_settime_82576; -- adapter->ptp_caps.enable = igb_ptp_feature_enable; -+#endif -+ adapter->ptp_caps.enable = igb_ptp_enable; - adapter->cc.read = 
igb_ptp_read_82576; - adapter->cc.mask = CLOCKSOURCE_MASK(64); - adapter->cc.mult = 1; - adapter->cc.shift = IGB_82576_TSYNC_SHIFT; - /* Dial the nominal frequency. */ -- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); -+ E1000_WRITE_REG(hw, E1000_TIMINCA, -+ INCPERIOD_82576 | INCVALUE_82576); - break; - case e1000_82580: -- case e1000_i354: - case e1000_i350: -+ case e1000_i354: - snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); - adapter->ptp_caps.owner = THIS_MODULE; - adapter->ptp_caps.max_adj = 62499999; -@@ -786,15 +909,20 @@ - adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; - adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; -+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 -+ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576; -+ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576; -+#else - adapter->ptp_caps.gettime = igb_ptp_gettime_82576; - adapter->ptp_caps.settime = igb_ptp_settime_82576; -- adapter->ptp_caps.enable = igb_ptp_feature_enable; -+#endif -+ adapter->ptp_caps.enable = igb_ptp_enable; - adapter->cc.read = igb_ptp_read_82580; - adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); - adapter->cc.mult = 1; - adapter->cc.shift = 0; - /* Enable the timer functions by clearing bit 31. */ -- wr32(E1000_TSAUXC, 0x0); -+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); - break; - case e1000_i210: - case e1000_i211: -@@ -805,33 +933,38 @@ - adapter->ptp_caps.pps = 0; - adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; - adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; -+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 -+ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_i210; -+ adapter->ptp_caps.settime64 = igb_ptp_settime64_i210; -+#else - adapter->ptp_caps.gettime = igb_ptp_gettime_i210; - adapter->ptp_caps.settime = igb_ptp_settime_i210; -- adapter->ptp_caps.enable = igb_ptp_feature_enable; -+#endif -+ adapter->ptp_caps.enable = igb_ptp_enable; - /* Enable the timer functions by clearing bit 31. */ -- wr32(E1000_TSAUXC, 0x0); -+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); - break; - default: - adapter->ptp_clock = NULL; - return; - } - -- wrfl(); -+ E1000_WRITE_FLUSH(hw); - - spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - - /* Initialize the clock and overflow work for devices that need it. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { -- struct timespec ts = ktime_to_timespec(ktime_get_real()); -+ struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); - -- igb_ptp_settime_i210(&adapter->ptp_caps, &ts); -+ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts); - } else { - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); - - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, -- igb_ptp_overflow_check); -+ igb_ptp_overflow_check_82576); - - schedule_delayed_work(&adapter->ptp_overflow_work, - IGB_SYSTIM_OVERFLOW_PERIOD); -@@ -839,8 +972,8 @@ - - /* Initialize the time sync interrupts for devices that support it. 
*/ - if (hw->mac.type >= e1000_82580) { -- wr32(E1000_TSIM, TSYNC_INTERRUPTS); -- wr32(E1000_IMS, E1000_IMS_TS); -+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); -+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); - } - - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; -@@ -869,8 +1002,8 @@ - switch (adapter->hw.mac.type) { - case e1000_82576: - case e1000_82580: -- case e1000_i354: - case e1000_i350: -+ case e1000_i354: - cancel_delayed_work_sync(&adapter->ptp_overflow_work); - break; - case e1000_i210: -@@ -915,17 +1048,18 @@ - switch (adapter->hw.mac.type) { - case e1000_82576: - /* Dial the nominal frequency. */ -- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); -+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | -+ INCVALUE_82576); - break; - case e1000_82580: -- case e1000_i354: - case e1000_i350: -+ case e1000_i354: - case e1000_i210: - case e1000_i211: - /* Enable the timer functions and interrupts. */ -- wr32(E1000_TSAUXC, 0x0); -- wr32(E1000_TSIM, TSYNC_INTERRUPTS); -- wr32(E1000_IMS, E1000_IMS_TS); -+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0); -+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS); -+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS); - break; - default: - /* No work to do. */ -@@ -934,11 +1068,12 @@ - - /* Re-initialize the timer. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { -- struct timespec ts = ktime_to_timespec(ktime_get_real()); -+ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real()); - -- igb_ptp_settime_i210(&adapter->ptp_caps, &ts); -+ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts64); - } else { - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); - } - } -+#endif /* HAVE_PTP_1588_CLOCK */ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_regtest.h b/drivers/net/ethernet/intel/igb/igb_regtest.h ---- a/drivers/net/ethernet/intel/igb/igb_regtest.h 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_regtest.h 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,256 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+/* ethtool register test data */ -+struct igb_reg_test { -+ u16 reg; -+ u16 reg_offset; -+ u16 array_len; -+ u16 test_type; -+ u32 mask; -+ u32 write; -+}; -+ -+/* In the hardware, registers are laid out either singly, in arrays -+ * spaced 0x100 bytes apart, or in contiguous tables. We assume -+ * most tests take place on arrays or single registers (handled -+ * as a single-element array) and special-case the tables. -+ * Table tests are always pattern tests. 
-+ * -+ * We also make provision for some required setup steps by specifying -+ * registers to be written without any read-back testing. -+ */ -+ -+#define PATTERN_TEST 1 -+#define SET_READ_TEST 2 -+#define WRITE_NO_TEST 3 -+#define TABLE32_TEST 4 -+#define TABLE64_TEST_LO 5 -+#define TABLE64_TEST_HI 6 -+ -+/* i210 reg test */ -+static struct igb_reg_test reg_test_i210[] = { -+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -+ /* RDH is read-only for i210, only test RDT. */ -+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, -+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -+ { E1000_RA, 0, 16, TABLE64_TEST_LO, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RA, 0, 16, TABLE64_TEST_HI, -+ 0x900FFFFF, 0xFFFFFFFF }, -+ { E1000_MTA, 0, 128, TABLE32_TEST, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { 0, 0, 0, 0 } -+}; -+ -+/* i350 reg test */ -+static struct igb_reg_test reg_test_i350[] = { -+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ /* VET is readonly on i350 */ -+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -+ /* RDH is read-only for i350, only test RDT. 
*/ -+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, -+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -+ { E1000_RA, 0, 16, TABLE64_TEST_LO, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RA, 0, 16, TABLE64_TEST_HI, -+ 0xC3FFFFFF, 0xFFFFFFFF }, -+ { E1000_RA2, 0, 16, TABLE64_TEST_LO, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RA2, 0, 16, TABLE64_TEST_HI, -+ 0xC3FFFFFF, 0xFFFFFFFF }, -+ { E1000_MTA, 0, 128, TABLE32_TEST, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { 0, 0, 0, 0 } -+}; -+ -+/* 82580 reg test */ -+static struct igb_reg_test reg_test_82580[] = { -+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -+ /* RDH is read-only for 82580, only test RDT. 
*/ -+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, -+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, -+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, -+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, -+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, -+ { E1000_RA, 0, 16, TABLE64_TEST_LO, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RA, 0, 16, TABLE64_TEST_HI, -+ 0x83FFFFFF, 0xFFFFFFFF }, -+ { E1000_RA2, 0, 8, TABLE64_TEST_LO, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RA2, 0, 8, TABLE64_TEST_HI, -+ 0x83FFFFFF, 0xFFFFFFFF }, -+ { E1000_MTA, 0, 128, TABLE32_TEST, -+ 0xFFFFFFFF, 0xFFFFFFFF }, -+ { 0, 0, 0, 0 } -+}; -+ -+/* 82576 reg test */ -+static struct igb_reg_test reg_test_82576[] = { -+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, -+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -+ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, -+ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, -+ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, -+ /* Enable all queues before testing. */ -+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, -+ E1000_RXDCTL_QUEUE_ENABLE }, -+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, -+ E1000_RXDCTL_QUEUE_ENABLE }, -+ /* RDH is read-only for 82576, only test RDT. 
*/
-+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-+ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
-+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
-+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
-+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
-+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
-+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
-+ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
-+ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
-+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
-+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
-+ 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
-+ 0x83FFFFFF, 0xFFFFFFFF },
-+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
-+ 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
-+ 0x83FFFFFF, 0xFFFFFFFF },
-+ { E1000_MTA, 0, 128, TABLE32_TEST,
-+ 0xFFFFFFFF, 0xFFFFFFFF },
-+ { 0, 0, 0, 0 }
-+};
-+
-+/* 82575 register test */
-+static struct igb_reg_test reg_test_82575[] = {
-+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
-+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
-+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
-+ 0xFFFFFFFF },
-+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
-+ 0xFFFFFFFF },
-+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
-+ /* Enable all four RX queues before testing. */
-+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
-+ E1000_RXDCTL_QUEUE_ENABLE },
-+ /* RDH is read-only for 82575, only test RDT. */
-+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
-+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
-+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
-+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
-+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
-+ 0xFFFFFFFF },
-+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
-+ 0xFFFFFFFF },
-+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80,
-+ 0x000FFFFF },
-+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
-+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
-+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-+ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
-+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
-+ 0xFFFFFFFF, 0xFFFFFFFF },
-+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
-+ 0x800FFFFF, 0xFFFFFFFF },
-+ { E1000_MTA, 0, 128, TABLE32_TEST,
-+ 0xFFFFFFFF, 0xFFFFFFFF },
-+ { 0, 0, 0, 0 }
-+};
-+
-+
-diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.c b/drivers/net/ethernet/intel/igb/igb_vmdq.c
---- a/drivers/net/ethernet/intel/igb/igb_vmdq.c 1970-01-01 00:00:00.000000000 +0000
-+++ b/drivers/net/ethernet/intel/igb/igb_vmdq.c 2016-11-14 14:32:08.579567168 +0000
-@@ -0,0 +1,433 @@
-+/*******************************************************************************
-+
-+ Intel(R) Gigabit Ethernet Linux driver
-+ Copyright(c) 2007-2015 Intel Corporation.
-+
-+ This program is free software; you can redistribute it and/or modify it
-+ under the terms and conditions of the GNU General Public License,
-+ version 2, as published by the Free Software Foundation.
-+
-+ This program is distributed in the hope it will be useful, but WITHOUT
-+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ more details.
-+
-+ The full GNU General Public License is included in this distribution in
-+ the file called "COPYING".
-+
-+ Contact Information:
-+ Linux NICS
-+ e1000-devel Mailing List
-+ Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+ -+#include -+ -+#include "igb.h" -+#include "igb_vmdq.h" -+#include -+ -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+int igb_vmdq_open(struct net_device *dev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ struct net_device *main_netdev = adapter->netdev; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ if (test_bit(__IGB_DOWN, &adapter->state)) { -+ DPRINTK(DRV, WARNING, -+ "Open %s before opening this device.\n", -+ main_netdev->name); -+ return -EAGAIN; -+ } -+ netif_carrier_off(dev); -+ vadapter->tx_ring->vmdq_netdev = dev; -+ vadapter->rx_ring->vmdq_netdev = dev; -+ if (is_valid_ether_addr(dev->dev_addr)) { -+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); -+ igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); -+ } -+ netif_carrier_on(dev); -+ return 0; -+} -+ -+int igb_vmdq_close(struct net_device *dev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ netif_carrier_off(dev); -+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); -+ -+ vadapter->tx_ring->vmdq_netdev = NULL; -+ vadapter->rx_ring->vmdq_netdev = NULL; -+ return 0; -+} -+ -+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ -+ return igb_xmit_frame_ring(skb, vadapter->tx_ring); -+} -+ -+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ struct e1000_hw *hw = &adapter->hw; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ vadapter->net_stats.rx_packets += -+ E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue)); -+ E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0); -+ vadapter->net_stats.tx_packets += -+ E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue)); -+ E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0); -+ vadapter->net_stats.rx_bytes += -+ E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue)); -+ E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0); -+ vadapter->net_stats.tx_bytes += -+ E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue)); -+ E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0); -+ vadapter->net_stats.multicast += -+ E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue)); -+ E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0); -+ /* only return the current stats */ -+ return &vadapter->net_stats; -+} -+ -+/** -+ * igb_write_vm_addr_list - write unicast addresses to RAR table -+ * @netdev: network interface device structure -+ * -+ * Writes unicast address list to the RAR table. 
-+ * Returns: -ENOMEM on failure/insufficient address space -+ * 0 on no addresses written -+ * X on writing X addresses to the RAR table -+ **/ -+static int igb_write_vm_addr_list(struct net_device *netdev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ int count = 0; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ /* return ENOMEM indicating insufficient memory for addresses */ -+ if (netdev_uc_count(netdev) > igb_available_rars(adapter)) -+ return -ENOMEM; -+ -+ if (!netdev_uc_empty(netdev)) { -+#ifdef NETDEV_HW_ADDR_T_UNICAST -+ struct netdev_hw_addr *ha; -+#else -+ struct dev_mc_list *ha; -+#endif -+ netdev_for_each_uc_addr(ha, netdev) { -+#ifdef NETDEV_HW_ADDR_T_UNICAST -+ igb_del_mac_filter(adapter, ha->addr, hw_queue); -+ igb_add_mac_filter(adapter, ha->addr, hw_queue); -+#else -+ igb_del_mac_filter(adapter, ha->da_addr, hw_queue); -+ igb_add_mac_filter(adapter, ha->da_addr, hw_queue); -+#endif -+ count++; -+ } -+ } -+ return count; -+} -+ -+ -+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */ -+void igb_vmdq_set_rx_mode(struct net_device *dev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ struct e1000_hw *hw = &adapter->hw; -+ u32 vmolr, rctl; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ /* Check for Promiscuous and All Multicast modes */ -+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue)); -+ -+ /* clear the affected bits */ -+ vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME | -+ E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE); -+ -+ if (dev->flags & IFF_PROMISC) { -+ vmolr |= E1000_VMOLR_UPE; -+ rctl = E1000_READ_REG(hw, E1000_RCTL); -+ rctl |= E1000_RCTL_UPE; -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); -+ } else { -+ rctl = E1000_READ_REG(hw, E1000_RCTL); -+ rctl &= ~E1000_RCTL_UPE; -+ E1000_WRITE_REG(hw, E1000_RCTL, rctl); -+ if (dev->flags & IFF_ALLMULTI) { -+ vmolr |= E1000_VMOLR_MPME; -+ } else { -+ /* -+ * Write addresses to the MTA, if the attempt fails -+ * then we should just turn on promiscous mode so -+ * that we can at least receive multicast traffic -+ */ -+ if (igb_write_mc_addr_list(adapter->netdev) != 0) -+ vmolr |= E1000_VMOLR_ROMPE; -+ } -+#ifdef HAVE_SET_RX_MODE -+ /* -+ * Write addresses to available RAR registers, if there is not -+ * sufficient space to store all the addresses then enable -+ * unicast promiscous mode -+ */ -+ if (igb_write_vm_addr_list(dev) < 0) -+ vmolr |= E1000_VMOLR_UPE; -+#endif -+ } -+ E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr); -+ -+ return; -+} -+ -+int igb_vmdq_set_mac(struct net_device *dev, void *p) -+{ -+ struct sockaddr *addr = p; -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue); -+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); -+ return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue); -+} -+ -+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ -+ if (adapter->netdev->mtu < new_mtu) { -+ DPRINTK(PROBE, INFO, -+ "Set MTU on %s to >= %d before changing MTU on %s\n", -+ adapter->netdev->name, new_mtu, dev->name); -+ return -EINVAL; -+ } -+ 
dev->mtu = new_mtu; -+ return 0; -+} -+ -+void igb_vmdq_tx_timeout(struct net_device *dev) -+{ -+ return; -+} -+ -+void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ struct e1000_hw *hw = &adapter->hw; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ vadapter->vlgrp = grp; -+ -+ igb_enable_vlan_tags(adapter); -+ E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0); -+ -+ return; -+} -+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+#ifndef HAVE_NETDEV_VLAN_FEATURES -+ struct net_device *v_netdev; -+#endif -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ /* attempt to add filter to vlvf array */ -+ igb_vlvf_set(adapter, vid, TRUE, hw_queue); -+ -+#ifndef HAVE_NETDEV_VLAN_FEATURES -+ -+ /* Copy feature flags from netdev to the vlan netdev for this vid. -+ * This allows things like TSO to bubble down to our vlan device. -+ */ -+ v_netdev = vlan_group_get_device(vadapter->vlgrp, vid); -+ v_netdev->features |= adapter->netdev->features; -+ vlan_group_set_device(vadapter->vlgrp, vid, v_netdev); -+#endif -+ -+ return; -+} -+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ int hw_queue = vadapter->rx_ring->queue_index + -+ adapter->vfs_allocated_count; -+ -+ vlan_group_set_device(vadapter->vlgrp, vid, NULL); -+ /* remove vlan from VLVF table array */ -+ igb_vlvf_set(adapter, vid, FALSE, hw_queue); -+ -+ -+ return; -+} -+ -+static int igb_vmdq_get_settings(struct net_device *netdev, -+ struct ethtool_cmd *ecmd) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ struct e1000_hw *hw = &adapter->hw; -+ u32 status; -+ -+ if (hw->phy.media_type == e1000_media_type_copper) { -+ -+ ecmd->supported = (SUPPORTED_10baseT_Half | -+ SUPPORTED_10baseT_Full | -+ SUPPORTED_100baseT_Half | -+ SUPPORTED_100baseT_Full | -+ SUPPORTED_1000baseT_Full| -+ SUPPORTED_Autoneg | -+ SUPPORTED_TP); -+ ecmd->advertising = ADVERTISED_TP; -+ -+ if (hw->mac.autoneg == 1) { -+ ecmd->advertising |= ADVERTISED_Autoneg; -+ /* the e1000 autoneg seems to match ethtool nicely */ -+ ecmd->advertising |= hw->phy.autoneg_advertised; -+ } -+ -+ ecmd->port = PORT_TP; -+ ecmd->phy_address = hw->phy.addr; -+ } else { -+ ecmd->supported = (SUPPORTED_1000baseT_Full | -+ SUPPORTED_FIBRE | -+ SUPPORTED_Autoneg); -+ -+ ecmd->advertising = (ADVERTISED_1000baseT_Full | -+ ADVERTISED_FIBRE | -+ ADVERTISED_Autoneg); -+ -+ ecmd->port = PORT_FIBRE; -+ } -+ -+ ecmd->transceiver = XCVR_INTERNAL; -+ -+ status = E1000_READ_REG(hw, E1000_STATUS); -+ -+ if (status & E1000_STATUS_LU) { -+ -+ if ((status & E1000_STATUS_SPEED_1000) || -+ hw->phy.media_type != e1000_media_type_copper) -+ ethtool_cmd_speed_set(ecmd, SPEED_1000); -+ else if (status & E1000_STATUS_SPEED_100) -+ ethtool_cmd_speed_set(ecmd, SPEED_100); -+ else -+ ethtool_cmd_speed_set(ecmd, SPEED_10); -+ -+ if ((status & E1000_STATUS_FD) || -+ hw->phy.media_type != e1000_media_type_copper) -+ ecmd->duplex = DUPLEX_FULL; -+ else -+ ecmd->duplex = DUPLEX_HALF; -+ } else { -+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); -+ 
ecmd->duplex = -1; -+ } -+ -+ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; -+ return 0; -+} -+ -+ -+static u32 igb_vmdq_get_msglevel(struct net_device *netdev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ return adapter->msg_enable; -+} -+ -+static void igb_vmdq_get_drvinfo(struct net_device *netdev, -+ struct ethtool_drvinfo *drvinfo) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ struct net_device *main_netdev = adapter->netdev; -+ -+ strncpy(drvinfo->driver, igb_driver_name, 32); -+ strncpy(drvinfo->version, igb_driver_version, 32); -+ -+ strncpy(drvinfo->fw_version, "N/A", 4); -+ snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name, -+ vadapter->rx_ring->queue_index); -+ drvinfo->n_stats = 0; -+ drvinfo->testinfo_len = 0; -+ drvinfo->regdump_len = 0; -+} -+ -+static void igb_vmdq_get_ringparam(struct net_device *netdev, -+ struct ethtool_ringparam *ring) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); -+ -+ struct igb_ring *tx_ring = vadapter->tx_ring; -+ struct igb_ring *rx_ring = vadapter->rx_ring; -+ -+ ring->rx_max_pending = IGB_MAX_RXD; -+ ring->tx_max_pending = IGB_MAX_TXD; -+ ring->rx_mini_max_pending = 0; -+ ring->rx_jumbo_max_pending = 0; -+ ring->rx_pending = rx_ring->count; -+ ring->tx_pending = tx_ring->count; -+ ring->rx_mini_pending = 0; -+ ring->rx_jumbo_pending = 0; -+} -+static u32 igb_vmdq_get_rx_csum(struct net_device *netdev) -+{ -+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev); -+ struct igb_adapter *adapter = vadapter->real_adapter; -+ -+ return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags); -+} -+ -+ -+static struct ethtool_ops igb_vmdq_ethtool_ops = { -+ .get_settings = igb_vmdq_get_settings, -+ .get_drvinfo = igb_vmdq_get_drvinfo, -+ .get_link = ethtool_op_get_link, -+ .get_ringparam = igb_vmdq_get_ringparam, -+ .get_rx_csum = igb_vmdq_get_rx_csum, -+ .get_tx_csum = ethtool_op_get_tx_csum, -+ .get_sg = ethtool_op_get_sg, -+ .set_sg = ethtool_op_set_sg, -+ .get_msglevel = igb_vmdq_get_msglevel, -+#ifdef NETIF_F_TSO -+ .get_tso = ethtool_op_get_tso, -+#endif -+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR -+ .get_perm_addr = ethtool_op_get_perm_addr, -+#endif -+}; -+ -+void igb_vmdq_set_ethtool_ops(struct net_device *netdev) -+{ -+ SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops); -+} -+ -+ -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+ -diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.h b/drivers/net/ethernet/intel/igb/igb_vmdq.h ---- a/drivers/net/ethernet/intel/igb/igb_vmdq.h 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_vmdq.h 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,43 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". 
-+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#ifndef _IGB_VMDQ_H_ -+#define _IGB_VMDQ_H_ -+ -+#ifdef CONFIG_IGB_VMDQ_NETDEV -+int igb_vmdq_open(struct net_device *dev); -+int igb_vmdq_close(struct net_device *dev); -+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev); -+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev); -+void igb_vmdq_set_rx_mode(struct net_device *dev); -+int igb_vmdq_set_mac(struct net_device *dev, void *addr); -+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu); -+void igb_vmdq_tx_timeout(struct net_device *dev); -+void igb_vmdq_vlan_rx_register(struct net_device *dev, -+ struct vlan_group *grp); -+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); -+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); -+void igb_vmdq_set_ethtool_ops(struct net_device *netdev); -+#endif /* CONFIG_IGB_VMDQ_NETDEV */ -+#endif /* _IGB_VMDQ_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.c b/drivers/net/ethernet/intel/igb/kcompat.c ---- a/drivers/net/ethernet/intel/igb/kcompat.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/kcompat.c 2016-11-14 14:32:08.579567168 +0000 -@@ -0,0 +1,2082 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#include "igb.h" -+#include "kcompat.h" -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) -+/* From lib/vsprintf.c */ -+#include -+ -+static int skip_atoi(const char **s) -+{ -+ int i=0; -+ -+ while (isdigit(**s)) -+ i = i*10 + *((*s)++) - '0'; -+ return i; -+} -+ -+#define _kc_ZEROPAD 1 /* pad with zero */ -+#define _kc_SIGN 2 /* unsigned/signed long */ -+#define _kc_PLUS 4 /* show plus */ -+#define _kc_SPACE 8 /* space if plus */ -+#define _kc_LEFT 16 /* left justified */ -+#define _kc_SPECIAL 32 /* 0x */ -+#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ -+ -+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) -+{ -+ char c,sign,tmp[66]; -+ const char *digits; -+ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; -+ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; -+ int i; -+ -+ digits = (type & _kc_LARGE) ? 
large_digits : small_digits; -+ if (type & _kc_LEFT) -+ type &= ~_kc_ZEROPAD; -+ if (base < 2 || base > 36) -+ return 0; -+ c = (type & _kc_ZEROPAD) ? '0' : ' '; -+ sign = 0; -+ if (type & _kc_SIGN) { -+ if (num < 0) { -+ sign = '-'; -+ num = -num; -+ size--; -+ } else if (type & _kc_PLUS) { -+ sign = '+'; -+ size--; -+ } else if (type & _kc_SPACE) { -+ sign = ' '; -+ size--; -+ } -+ } -+ if (type & _kc_SPECIAL) { -+ if (base == 16) -+ size -= 2; -+ else if (base == 8) -+ size--; -+ } -+ i = 0; -+ if (num == 0) -+ tmp[i++]='0'; -+ else while (num != 0) -+ tmp[i++] = digits[do_div(num,base)]; -+ if (i > precision) -+ precision = i; -+ size -= precision; -+ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { -+ while(size-->0) { -+ if (buf <= end) -+ *buf = ' '; -+ ++buf; -+ } -+ } -+ if (sign) { -+ if (buf <= end) -+ *buf = sign; -+ ++buf; -+ } -+ if (type & _kc_SPECIAL) { -+ if (base==8) { -+ if (buf <= end) -+ *buf = '0'; -+ ++buf; -+ } else if (base==16) { -+ if (buf <= end) -+ *buf = '0'; -+ ++buf; -+ if (buf <= end) -+ *buf = digits[33]; -+ ++buf; -+ } -+ } -+ if (!(type & _kc_LEFT)) { -+ while (size-- > 0) { -+ if (buf <= end) -+ *buf = c; -+ ++buf; -+ } -+ } -+ while (i < precision--) { -+ if (buf <= end) -+ *buf = '0'; -+ ++buf; -+ } -+ while (i-- > 0) { -+ if (buf <= end) -+ *buf = tmp[i]; -+ ++buf; -+ } -+ while (size-- > 0) { -+ if (buf <= end) -+ *buf = ' '; -+ ++buf; -+ } -+ return buf; -+} -+ -+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) -+{ -+ int len; -+ unsigned long long num; -+ int i, base; -+ char *str, *end, c; -+ const char *s; -+ -+ int flags; /* flags to number() */ -+ -+ int field_width; /* width of output field */ -+ int precision; /* min. # of digits for integers; max -+ number of chars for from string */ -+ int qualifier; /* 'h', 'l', or 'L' for integer fields */ -+ /* 'z' support added 23/7/1999 S.H. 
*/ -+ /* 'z' changed to 'Z' --davidm 1/25/99 */ -+ -+ str = buf; -+ end = buf + size - 1; -+ -+ if (end < buf - 1) { -+ end = ((void *) -1); -+ size = end - buf + 1; -+ } -+ -+ for (; *fmt ; ++fmt) { -+ if (*fmt != '%') { -+ if (str <= end) -+ *str = *fmt; -+ ++str; -+ continue; -+ } -+ -+ /* process flags */ -+ flags = 0; -+ repeat: -+ ++fmt; /* this also skips first '%' */ -+ switch (*fmt) { -+ case '-': flags |= _kc_LEFT; goto repeat; -+ case '+': flags |= _kc_PLUS; goto repeat; -+ case ' ': flags |= _kc_SPACE; goto repeat; -+ case '#': flags |= _kc_SPECIAL; goto repeat; -+ case '0': flags |= _kc_ZEROPAD; goto repeat; -+ } -+ -+ /* get field width */ -+ field_width = -1; -+ if (isdigit(*fmt)) -+ field_width = skip_atoi(&fmt); -+ else if (*fmt == '*') { -+ ++fmt; -+ /* it's the next argument */ -+ field_width = va_arg(args, int); -+ if (field_width < 0) { -+ field_width = -field_width; -+ flags |= _kc_LEFT; -+ } -+ } -+ -+ /* get the precision */ -+ precision = -1; -+ if (*fmt == '.') { -+ ++fmt; -+ if (isdigit(*fmt)) -+ precision = skip_atoi(&fmt); -+ else if (*fmt == '*') { -+ ++fmt; -+ /* it's the next argument */ -+ precision = va_arg(args, int); -+ } -+ if (precision < 0) -+ precision = 0; -+ } -+ -+ /* get the conversion qualifier */ -+ qualifier = -1; -+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { -+ qualifier = *fmt; -+ ++fmt; -+ } -+ -+ /* default base */ -+ base = 10; -+ -+ switch (*fmt) { -+ case 'c': -+ if (!(flags & _kc_LEFT)) { -+ while (--field_width > 0) { -+ if (str <= end) -+ *str = ' '; -+ ++str; -+ } -+ } -+ c = (unsigned char) va_arg(args, int); -+ if (str <= end) -+ *str = c; -+ ++str; -+ while (--field_width > 0) { -+ if (str <= end) -+ *str = ' '; -+ ++str; -+ } -+ continue; -+ -+ case 's': -+ s = va_arg(args, char *); -+ if (!s) -+ s = ""; -+ -+ len = strnlen(s, precision); -+ -+ if (!(flags & _kc_LEFT)) { -+ while (len < field_width--) { -+ if (str <= end) -+ *str = ' '; -+ ++str; -+ } -+ } -+ for (i = 0; i < len; ++i) { -+ if (str <= end) -+ *str = *s; -+ ++str; ++s; -+ } -+ while (len < field_width--) { -+ if (str <= end) -+ *str = ' '; -+ ++str; -+ } -+ continue; -+ -+ case 'p': -+ if (field_width == -1) { -+ field_width = 2*sizeof(void *); -+ flags |= _kc_ZEROPAD; -+ } -+ str = number(str, end, -+ (unsigned long) va_arg(args, void *), -+ 16, field_width, precision, flags); -+ continue; -+ -+ -+ case 'n': -+ /* FIXME: -+ * What does C99 say about the overflow case here? 
*/ -+ if (qualifier == 'l') { -+ long * ip = va_arg(args, long *); -+ *ip = (str - buf); -+ } else if (qualifier == 'Z') { -+ size_t * ip = va_arg(args, size_t *); -+ *ip = (str - buf); -+ } else { -+ int * ip = va_arg(args, int *); -+ *ip = (str - buf); -+ } -+ continue; -+ -+ case '%': -+ if (str <= end) -+ *str = '%'; -+ ++str; -+ continue; -+ -+ /* integer number formats - set up the flags and "break" */ -+ case 'o': -+ base = 8; -+ break; -+ -+ case 'X': -+ flags |= _kc_LARGE; -+ case 'x': -+ base = 16; -+ break; -+ -+ case 'd': -+ case 'i': -+ flags |= _kc_SIGN; -+ case 'u': -+ break; -+ -+ default: -+ if (str <= end) -+ *str = '%'; -+ ++str; -+ if (*fmt) { -+ if (str <= end) -+ *str = *fmt; -+ ++str; -+ } else { -+ --fmt; -+ } -+ continue; -+ } -+ if (qualifier == 'L') -+ num = va_arg(args, long long); -+ else if (qualifier == 'l') { -+ num = va_arg(args, unsigned long); -+ if (flags & _kc_SIGN) -+ num = (signed long) num; -+ } else if (qualifier == 'Z') { -+ num = va_arg(args, size_t); -+ } else if (qualifier == 'h') { -+ num = (unsigned short) va_arg(args, int); -+ if (flags & _kc_SIGN) -+ num = (signed short) num; -+ } else { -+ num = va_arg(args, unsigned int); -+ if (flags & _kc_SIGN) -+ num = (signed int) num; -+ } -+ str = number(str, end, num, base, -+ field_width, precision, flags); -+ } -+ if (str <= end) -+ *str = '\0'; -+ else if (size > 0) -+ /* don't write out a null byte if the buf size is zero */ -+ *end = '\0'; -+ /* the trailing null byte doesn't count towards the total -+ * ++str; -+ */ -+ return str-buf; -+} -+ -+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) -+{ -+ va_list args; -+ int i; -+ -+ va_start(args, fmt); -+ i = _kc_vsnprintf(buf,size,fmt,args); -+ va_end(args); -+ return i; -+} -+#endif /* < 2.4.8 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) -+ -+/**************************************/ -+/* PCI DMA MAPPING */ -+ -+#if defined(CONFIG_HIGHMEM) -+ -+#ifndef PCI_DRAM_OFFSET -+#define PCI_DRAM_OFFSET 0 -+#endif -+ -+u64 -+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, -+ size_t size, int direction) -+{ -+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + -+ PCI_DRAM_OFFSET); -+} -+ -+#else /* CONFIG_HIGHMEM */ -+ -+u64 -+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, -+ size_t size, int direction) -+{ -+ return pci_map_single(dev, (void *)page_address(page) + offset, size, -+ direction); -+} -+ -+#endif /* CONFIG_HIGHMEM */ -+ -+void -+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, -+ int direction) -+{ -+ return pci_unmap_single(dev, dma_addr, size, direction); -+} -+ -+#endif /* 2.4.13 => 2.4.3 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) -+ -+/**************************************/ -+/* PCI DRIVER API */ -+ -+int -+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) -+{ -+ if (!pci_dma_supported(dev, mask)) -+ return -EIO; -+ dev->dma_mask = mask; -+ return 0; -+} -+ -+int -+_kc_pci_request_regions(struct pci_dev *dev, char *res_name) -+{ -+ int i; -+ -+ for (i = 0; i < 6; i++) { -+ if (pci_resource_len(dev, i) == 0) -+ continue; -+ -+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) { -+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { -+ pci_release_regions(dev); -+ return -EBUSY; -+ } -+ } else if 
(pci_resource_flags(dev, i) & IORESOURCE_MEM) { -+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { -+ pci_release_regions(dev); -+ return -EBUSY; -+ } -+ } -+ } -+ return 0; -+} -+ -+void -+_kc_pci_release_regions(struct pci_dev *dev) -+{ -+ int i; -+ -+ for (i = 0; i < 6; i++) { -+ if (pci_resource_len(dev, i) == 0) -+ continue; -+ -+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) -+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); -+ -+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) -+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); -+ } -+} -+ -+/**************************************/ -+/* NETWORK DRIVER API */ -+ -+struct net_device * -+_kc_alloc_etherdev(int sizeof_priv) -+{ -+ struct net_device *dev; -+ int alloc_size; -+ -+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; -+ dev = kzalloc(alloc_size, GFP_KERNEL); -+ if (!dev) -+ return NULL; -+ -+ if (sizeof_priv) -+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); -+ dev->name[0] = '\0'; -+ ether_setup(dev); -+ -+ return dev; -+} -+ -+int -+_kc_is_valid_ether_addr(u8 *addr) -+{ -+ const char zaddr[6] = { 0, }; -+ -+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6); -+} -+ -+#endif /* 2.4.3 => 2.4.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) -+ -+int -+_kc_pci_set_power_state(struct pci_dev *dev, int state) -+{ -+ return 0; -+} -+ -+int -+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) -+{ -+ return 0; -+} -+ -+#endif /* 2.4.6 => 2.4.3 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, -+ int off, int size) -+{ -+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -+ frag->page = page; -+ frag->page_offset = off; -+ frag->size = size; -+ skb_shinfo(skb)->nr_frags = i + 1; -+} -+ -+/* -+ * Original Copyright: -+ * find_next_bit.c: fallback find next bit implementation -+ * -+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. -+ * Written by David Howells (dhowells@redhat.com) -+ */ -+ -+/** -+ * find_next_bit - find the next set bit in a memory region -+ * @addr: The address to base the search on -+ * @offset: The bitnumber to start searching at -+ * @size: The maximum size to search -+ */ -+unsigned long find_next_bit(const unsigned long *addr, unsigned long size, -+ unsigned long offset) -+{ -+ const unsigned long *p = addr + BITOP_WORD(offset); -+ unsigned long result = offset & ~(BITS_PER_LONG-1); -+ unsigned long tmp; -+ -+ if (offset >= size) -+ return size; -+ size -= result; -+ offset %= BITS_PER_LONG; -+ if (offset) { -+ tmp = *(p++); -+ tmp &= (~0UL << offset); -+ if (size < BITS_PER_LONG) -+ goto found_first; -+ if (tmp) -+ goto found_middle; -+ size -= BITS_PER_LONG; -+ result += BITS_PER_LONG; -+ } -+ while (size & ~(BITS_PER_LONG-1)) { -+ if ((tmp = *(p++))) -+ goto found_middle; -+ result += BITS_PER_LONG; -+ size -= BITS_PER_LONG; -+ } -+ if (!size) -+ return result; -+ tmp = *p; -+ -+found_first: -+ tmp &= (~0UL >> (BITS_PER_LONG - size)); -+ if (tmp == 0UL) /* Are any bits set? */ -+ return result + size; /* Nope. */ -+found_middle: -+ return result + ffs(tmp); -+} -+ -+size_t _kc_strlcpy(char *dest, const char *src, size_t size) -+{ -+ size_t ret = strlen(src); -+ -+ if (size) { -+ size_t len = (ret >= size) ? 
size - 1 : ret; -+ memcpy(dest, src, len); -+ dest[len] = '\0'; -+ } -+ return ret; -+} -+ -+#ifndef do_div -+#if BITS_PER_LONG == 32 -+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) -+{ -+ uint64_t rem = *n; -+ uint64_t b = base; -+ uint64_t res, d = 1; -+ uint32_t high = rem >> 32; -+ -+ /* Reduce the thing a bit first */ -+ res = 0; -+ if (high >= base) { -+ high /= base; -+ res = (uint64_t) high << 32; -+ rem -= (uint64_t) (high*base) << 32; -+ } -+ -+ while ((int64_t)b > 0 && b < rem) { -+ b = b+b; -+ d = d+d; -+ } -+ -+ do { -+ if (rem >= b) { -+ rem -= b; -+ res += d; -+ } -+ b >>= 1; -+ d >>= 1; -+ } while (d); -+ -+ *n = res; -+ return rem; -+} -+#endif /* BITS_PER_LONG == 32 */ -+#endif /* do_div */ -+#endif /* 2.6.0 => 2.4.6 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) -+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) -+{ -+ va_list args; -+ int i; -+ -+ va_start(args, fmt); -+ i = vsnprintf(buf, size, fmt, args); -+ va_end(args); -+ return (i >= size) ? (size - 1) : i; -+} -+#endif /* < 2.6.4 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) -+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; -+#endif /* < 2.6.10 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) -+char *_kc_kstrdup(const char *s, unsigned int gfp) -+{ -+ size_t len; -+ char *buf; -+ -+ if (!s) -+ return NULL; -+ -+ len = strlen(s) + 1; -+ buf = kmalloc(len, gfp); -+ if (buf) -+ memcpy(buf, s, len); -+ return buf; -+} -+#endif /* < 2.6.13 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) -+void *_kc_kzalloc(size_t size, int flags) -+{ -+ void *ret = kmalloc(size, flags); -+ if (ret) -+ memset(ret, 0, size); -+ return ret; -+} -+#endif /* <= 2.6.13 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) -+int _kc_skb_pad(struct sk_buff *skb, int pad) -+{ -+ int ntail; -+ -+ /* If the skbuff is non linear tailroom is always zero.. 
*/ -+ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { -+ memset(skb->data+skb->len, 0, pad); -+ return 0; -+ } -+ -+ ntail = skb->data_len + pad - (skb->end - skb->tail); -+ if (likely(skb_cloned(skb) || ntail > 0)) { -+ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) -+ goto free_skb; -+ } -+ -+#ifdef MAX_SKB_FRAGS -+ if (skb_is_nonlinear(skb) && -+ !__pskb_pull_tail(skb, skb->data_len)) -+ goto free_skb; -+ -+#endif -+ memset(skb->data + skb->len, 0, pad); -+ return 0; -+ -+free_skb: -+ kfree_skb(skb); -+ return -ENOMEM; -+} -+ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) -+int _kc_pci_save_state(struct pci_dev *pdev) -+{ -+ struct net_device *netdev = pci_get_drvdata(pdev); -+ struct adapter_struct *adapter = netdev_priv(netdev); -+ int size = PCI_CONFIG_SPACE_LEN, i; -+ u16 pcie_cap_offset, pcie_link_status; -+ -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) -+ /* no ->dev for 2.4 kernels */ -+ WARN_ON(pdev->dev.driver_data == NULL); -+#endif -+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); -+ if (pcie_cap_offset) { -+ if (!pci_read_config_word(pdev, -+ pcie_cap_offset + PCIE_LINK_STATUS, -+ &pcie_link_status)) -+ size = PCIE_CONFIG_SPACE_LEN; -+ } -+ pci_config_space_ich8lan(); -+#ifdef HAVE_PCI_ERS -+ if (adapter->config_space == NULL) -+#else -+ WARN_ON(adapter->config_space != NULL); -+#endif -+ adapter->config_space = kmalloc(size, GFP_KERNEL); -+ if (!adapter->config_space) { -+ printk(KERN_ERR "Out of memory in pci_save_state\n"); -+ return -ENOMEM; -+ } -+ for (i = 0; i < (size / 4); i++) -+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); -+ return 0; -+} -+ -+void _kc_pci_restore_state(struct pci_dev *pdev) -+{ -+ struct net_device *netdev = pci_get_drvdata(pdev); -+ struct adapter_struct *adapter = netdev_priv(netdev); -+ int size = PCI_CONFIG_SPACE_LEN, i; -+ u16 pcie_cap_offset; -+ u16 pcie_link_status; -+ -+ if (adapter->config_space != NULL) { -+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); -+ if (pcie_cap_offset && -+ !pci_read_config_word(pdev, -+ pcie_cap_offset + PCIE_LINK_STATUS, -+ &pcie_link_status)) -+ size = PCIE_CONFIG_SPACE_LEN; -+ -+ pci_config_space_ich8lan(); -+ for (i = 0; i < (size / 4); i++) -+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); -+#ifndef HAVE_PCI_ERS -+ kfree(adapter->config_space); -+ adapter->config_space = NULL; -+#endif -+ } -+} -+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ -+ -+#ifdef HAVE_PCI_ERS -+void _kc_free_netdev(struct net_device *netdev) -+{ -+ struct adapter_struct *adapter = netdev_priv(netdev); -+ -+ kfree(adapter->config_space); -+#ifdef CONFIG_SYSFS -+ if (netdev->reg_state == NETREG_UNINITIALIZED) { -+ kfree((char *)netdev - netdev->padded); -+ } else { -+ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); -+ netdev->reg_state = NETREG_RELEASED; -+ class_device_put(&netdev->class_dev); -+ } -+#else -+ kfree((char *)netdev - netdev->padded); -+#endif -+} -+#endif -+ -+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) -+{ -+ void *p; -+ -+ p = kzalloc(len, gfp); -+ if (p) -+ memcpy(p, src, len); -+ return p; -+} -+#endif /* <= 2.6.19 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) -+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) -+{ -+ return ((struct adapter_struct *)netdev_priv(netdev))->pdev; -+} -+#endif /* < 2.6.21 */ -+ 
-+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) -+/* hexdump code taken from lib/hexdump.c */ -+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, -+ int groupsize, unsigned char *linebuf, -+ size_t linebuflen, bool ascii) -+{ -+ const u8 *ptr = buf; -+ u8 ch; -+ int j, lx = 0; -+ int ascii_column; -+ -+ if (rowsize != 16 && rowsize != 32) -+ rowsize = 16; -+ -+ if (!len) -+ goto nil; -+ if (len > rowsize) /* limit to one line at a time */ -+ len = rowsize; -+ if ((len % groupsize) != 0) /* no mixed size output */ -+ groupsize = 1; -+ -+ switch (groupsize) { -+ case 8: { -+ const u64 *ptr8 = buf; -+ int ngroups = len / groupsize; -+ -+ for (j = 0; j < ngroups; j++) -+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, -+ "%s%16.16llx", j ? " " : "", -+ (unsigned long long)*(ptr8 + j)); -+ ascii_column = 17 * ngroups + 2; -+ break; -+ } -+ -+ case 4: { -+ const u32 *ptr4 = buf; -+ int ngroups = len / groupsize; -+ -+ for (j = 0; j < ngroups; j++) -+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, -+ "%s%8.8x", j ? " " : "", *(ptr4 + j)); -+ ascii_column = 9 * ngroups + 2; -+ break; -+ } -+ -+ case 2: { -+ const u16 *ptr2 = buf; -+ int ngroups = len / groupsize; -+ -+ for (j = 0; j < ngroups; j++) -+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, -+ "%s%4.4x", j ? " " : "", *(ptr2 + j)); -+ ascii_column = 5 * ngroups + 2; -+ break; -+ } -+ -+ default: -+ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { -+ ch = ptr[j]; -+ linebuf[lx++] = hex_asc(ch >> 4); -+ linebuf[lx++] = hex_asc(ch & 0x0f); -+ linebuf[lx++] = ' '; -+ } -+ if (j) -+ lx--; -+ -+ ascii_column = 3 * rowsize + 2; -+ break; -+ } -+ if (!ascii) -+ goto nil; -+ -+ while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) -+ linebuf[lx++] = ' '; -+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) -+ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] -+ : '.'; -+nil: -+ linebuf[lx++] = '\0'; -+} -+ -+void _kc_print_hex_dump(const char *level, -+ const char *prefix_str, int prefix_type, -+ int rowsize, int groupsize, -+ const void *buf, size_t len, bool ascii) -+{ -+ const u8 *ptr = buf; -+ int i, linelen, remaining = len; -+ unsigned char linebuf[200]; -+ -+ if (rowsize != 16 && rowsize != 32) -+ rowsize = 16; -+ -+ for (i = 0; i < len; i += rowsize) { -+ linelen = min(remaining, rowsize); -+ remaining -= rowsize; -+ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, -+ linebuf, sizeof(linebuf), ascii); -+ -+ switch (prefix_type) { -+ case DUMP_PREFIX_ADDRESS: -+ printk("%s%s%*p: %s\n", level, prefix_str, -+ (int)(2 * sizeof(void *)), ptr + i, linebuf); -+ break; -+ case DUMP_PREFIX_OFFSET: -+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); -+ break; -+ default: -+ printk("%s%s%s\n", level, prefix_str, linebuf); -+ break; -+ } -+ } -+} -+ -+#ifdef HAVE_I2C_SUPPORT -+struct i2c_client * -+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) -+{ -+ struct i2c_client *client; -+ int status; -+ -+ client = kzalloc(sizeof *client, GFP_KERNEL); -+ if (!client) -+ return NULL; -+ -+ client->adapter = adap; -+ -+ client->dev.platform_data = info->platform_data; -+ -+ client->flags = info->flags; -+ client->addr = info->addr; -+ -+ strlcpy(client->name, info->type, sizeof(client->name)); -+ -+ /* Check for address business */ -+ status = i2c_check_addr(adap, client->addr); -+ if (status) -+ goto out_err; -+ -+ client->dev.parent = &client->adapter->dev; -+ client->dev.bus = &i2c_bus_type; -+ -+ status = i2c_attach_client(client); -+ if (status) -+ goto out_err; -+ -+ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n", -+ client->name, dev_name(&client->dev)); -+ -+ return client; -+ -+out_err: -+ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x " -+ "(%d)\n", client->name, client->addr, status); -+ kfree(client); -+ return NULL; -+} -+#endif /* HAVE_I2C_SUPPORT */ -+#endif /* < 2.6.22 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -+#ifdef NAPI -+struct net_device *napi_to_poll_dev(const struct napi_struct *napi) -+{ -+ struct adapter_q_vector *q_vector = container_of(napi, -+ struct adapter_q_vector, -+ napi); -+ return &q_vector->poll_dev; -+} -+ -+int __kc_adapter_clean(struct net_device *netdev, int *budget) -+{ -+ int work_done; -+ int work_to_do = min(*budget, netdev->quota); -+ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ -+ struct napi_struct *napi = netdev->priv; -+ work_done = napi->poll(napi, work_to_do); -+ *budget -= work_done; -+ netdev->quota -= work_done; -+ return (work_done >= work_to_do) ? 
1 : 0; -+} -+#endif /* NAPI */ -+#endif /* <= 2.6.24 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) -+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) -+{ -+ struct pci_dev *parent = pdev->bus->self; -+ u16 link_state; -+ int pos; -+ -+ if (!parent) -+ return; -+ -+ pos = pci_find_capability(parent, PCI_CAP_ID_EXP); -+ if (pos) { -+ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); -+ link_state &= ~state; -+ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); -+ } -+} -+#endif /* < 2.6.26 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) -+#ifdef HAVE_TX_MQ -+void _kc_netif_tx_stop_all_queues(struct net_device *netdev) -+{ -+ struct adapter_struct *adapter = netdev_priv(netdev); -+ int i; -+ -+ netif_stop_queue(netdev); -+ if (netif_is_multiqueue(netdev)) -+ for (i = 0; i < adapter->num_tx_queues; i++) -+ netif_stop_subqueue(netdev, i); -+} -+void _kc_netif_tx_wake_all_queues(struct net_device *netdev) -+{ -+ struct adapter_struct *adapter = netdev_priv(netdev); -+ int i; -+ -+ netif_wake_queue(netdev); -+ if (netif_is_multiqueue(netdev)) -+ for (i = 0; i < adapter->num_tx_queues; i++) -+ netif_wake_subqueue(netdev, i); -+} -+void _kc_netif_tx_start_all_queues(struct net_device *netdev) -+{ -+ struct adapter_struct *adapter = netdev_priv(netdev); -+ int i; -+ -+ netif_start_queue(netdev); -+ if (netif_is_multiqueue(netdev)) -+ for (i = 0; i < adapter->num_tx_queues; i++) -+ netif_start_subqueue(netdev, i); -+} -+#endif /* HAVE_TX_MQ */ -+ -+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) -+{ -+ va_list args; -+ -+ printk(KERN_WARNING "------------[ cut here ]------------\n"); -+ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); -+ va_start(args, fmt); -+ vprintk(fmt, args); -+ va_end(args); -+ -+ dump_stack(); -+} -+#endif /* __VMKLNX__ */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) -+ -+int -+_kc_pci_prepare_to_sleep(struct pci_dev *dev) -+{ -+ pci_power_t target_state; -+ int error; -+ -+ target_state = pci_choose_state(dev, PMSG_SUSPEND); -+ -+ pci_enable_wake(dev, target_state, true); -+ -+ error = pci_set_power_state(dev, target_state); -+ -+ if (error) -+ pci_enable_wake(dev, target_state, false); -+ -+ return error; -+} -+ -+int -+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) -+{ -+ int err; -+ -+ err = pci_enable_wake(dev, PCI_D3cold, enable); -+ if (err) -+ goto out; -+ -+ err = pci_enable_wake(dev, PCI_D3hot, enable); -+ -+out: -+ return err; -+} -+#endif /* < 2.6.28 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) -+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) -+{ -+ u16 old_cmd, cmd; -+ -+ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); -+ if (enable) -+ cmd = old_cmd | PCI_COMMAND_MASTER; -+ else -+ cmd = old_cmd & ~PCI_COMMAND_MASTER; -+ if (cmd != old_cmd) { -+ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", -+ enable ? 
"enabling" : "disabling"); -+ pci_write_config_word(pdev, PCI_COMMAND, cmd); -+ } -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) -+ pdev->is_busmaster = enable; -+#endif -+} -+ -+void _kc_pci_clear_master(struct pci_dev *dev) -+{ -+ __kc_pci_set_master(dev, false); -+} -+#endif /* < 2.6.29 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) -+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) -+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) -+{ -+ int num_vf = 0; -+#ifdef CONFIG_PCI_IOV -+ struct pci_dev *vfdev; -+ -+ /* loop through all ethernet devices starting at PF dev */ -+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); -+ while (vfdev) { -+ if (vfdev->is_virtfn && vfdev->physfn == dev) -+ num_vf++; -+ -+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); -+ } -+ -+#endif -+ return num_vf; -+} -+#endif /* RHEL_RELEASE_CODE */ -+#endif /* < 2.6.34 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) -+#ifdef HAVE_TX_MQ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) -+#ifndef CONFIG_NETDEVICES_MULTIQUEUE -+int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) -+{ -+ unsigned int real_num = dev->real_num_tx_queues; -+ struct Qdisc *qdisc; -+ int i; -+ -+ if (txq < 1 || txq > dev->num_tx_queues) -+ return -EINVAL; -+ -+ else if (txq > real_num) -+ dev->real_num_tx_queues = txq; -+ else if (txq < real_num) { -+ dev->real_num_tx_queues = txq; -+ for (i = txq; i < dev->num_tx_queues; i++) { -+ qdisc = netdev_get_tx_queue(dev, i)->qdisc; -+ if (qdisc) { -+ spin_lock_bh(qdisc_lock(qdisc)); -+ qdisc_reset(qdisc); -+ spin_unlock_bh(qdisc_lock(qdisc)); -+ } -+ } -+ } -+ -+ return 0; -+} -+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ -+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ -+#endif /* HAVE_TX_MQ */ -+ -+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, -+ const void __user *from, size_t count) -+{ -+ loff_t pos = *ppos; -+ size_t res; -+ -+ if (pos < 0) -+ return -EINVAL; -+ if (pos >= available || !count) -+ return 0; -+ if (count > available - pos) -+ count = available - pos; -+ res = copy_from_user(to + pos, from, count); -+ if (res == count) -+ return -EFAULT; -+ count -= res; -+ *ppos = pos + count; -+ return count; -+} -+ -+#endif /* < 2.6.35 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) -+static const u32 _kc_flags_dup_features = -+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); -+ -+u32 _kc_ethtool_op_get_flags(struct net_device *dev) -+{ -+ return dev->features & _kc_flags_dup_features; -+} -+ -+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) -+{ -+ if (data & ~supported) -+ return -EINVAL; -+ -+ dev->features = ((dev->features & ~_kc_flags_dup_features) | -+ (data & _kc_flags_dup_features)); -+ return 0; -+} -+#endif /* < 2.6.36 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) -+ -+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ -+#endif /* < 2.6.39 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) -+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, -+ int off, int size, unsigned int truesize) -+{ -+ 
skb_fill_page_desc(skb, i, page, off, size); -+ skb->len += size; -+ skb->data_len += size; -+ skb->truesize += truesize; -+} -+ -+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -+int _kc_simple_open(struct inode *inode, struct file *file) -+{ -+ if (inode->i_private) -+ file->private_data = inode->i_private; -+ -+ return 0; -+} -+#endif /* SLE_VERSION < 11,3,0 */ -+ -+#endif /* < 3.4.0 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) -+static inline int __kc_pcie_cap_version(struct pci_dev *dev) -+{ -+ int pos; -+ u16 reg16; -+ -+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); -+ if (!pos) -+ return 0; -+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); -+ return reg16 & PCI_EXP_FLAGS_VERS; -+} -+ -+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) -+{ -+ return true; -+} -+ -+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) -+{ -+ int type = pci_pcie_type(dev); -+ -+ return __kc_pcie_cap_version(dev) > 1 || -+ type == PCI_EXP_TYPE_ROOT_PORT || -+ type == PCI_EXP_TYPE_ENDPOINT || -+ type == PCI_EXP_TYPE_LEG_END; -+} -+ -+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) -+{ -+ int type = pci_pcie_type(dev); -+ int pos; -+ u16 pcie_flags_reg; -+ -+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); -+ if (!pos) -+ return false; -+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); -+ -+ return __kc_pcie_cap_version(dev) > 1 || -+ type == PCI_EXP_TYPE_ROOT_PORT || -+ (type == PCI_EXP_TYPE_DOWNSTREAM && -+ pcie_flags_reg & PCI_EXP_FLAGS_SLOT); -+} -+ -+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) -+{ -+ int type = pci_pcie_type(dev); -+ -+ return __kc_pcie_cap_version(dev) > 1 || -+ type == PCI_EXP_TYPE_ROOT_PORT || -+ type == PCI_EXP_TYPE_RC_EC; -+} -+ -+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) -+{ -+ if (!pci_is_pcie(dev)) -+ return false; -+ -+ switch (pos) { -+ case PCI_EXP_FLAGS_TYPE: -+ return true; -+ case PCI_EXP_DEVCAP: -+ case PCI_EXP_DEVCTL: -+ case PCI_EXP_DEVSTA: -+ return __kc_pcie_cap_has_devctl(dev); -+ case PCI_EXP_LNKCAP: -+ case PCI_EXP_LNKCTL: -+ case PCI_EXP_LNKSTA: -+ return __kc_pcie_cap_has_lnkctl(dev); -+ case PCI_EXP_SLTCAP: -+ case PCI_EXP_SLTCTL: -+ case PCI_EXP_SLTSTA: -+ return __kc_pcie_cap_has_sltctl(dev); -+ case PCI_EXP_RTCTL: -+ case PCI_EXP_RTCAP: -+ case PCI_EXP_RTSTA: -+ return __kc_pcie_cap_has_rtctl(dev); -+ case PCI_EXP_DEVCAP2: -+ case PCI_EXP_DEVCTL2: -+ case PCI_EXP_LNKCAP2: -+ case PCI_EXP_LNKCTL2: -+ case PCI_EXP_LNKSTA2: -+ return __kc_pcie_cap_version(dev) > 1; -+ default: -+ return false; -+ } -+} -+ -+/* -+ * Note that these accessor functions are only for the "PCI Express -+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the -+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) -+ */ -+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) -+{ -+ int ret; -+ -+ *val = 0; -+ if (pos & 1) -+ return -EINVAL; -+ -+ if (__kc_pcie_capability_reg_implemented(dev, pos)) { -+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); -+ /* -+ * Reset *val to 0 if pci_read_config_word() fails, it may -+ * have been written as 0xFFFF if hardware error happens -+ * during pci_read_config_word(). 
-+ */ -+ if (ret) -+ *val = 0; -+ return ret; -+ } -+ -+ /* -+ * For Functions that do not implement the Slot Capabilities, -+ * Slot Status, and Slot Control registers, these spaces must -+ * be hardwired to 0b, with the exception of the Presence Detect -+ * State bit in the Slot Status register of Downstream Ports, -+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) -+ */ -+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && -+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { -+ *val = PCI_EXP_SLTSTA_PDS; -+ } -+ -+ return 0; -+} -+ -+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) -+{ -+ if (pos & 1) -+ return -EINVAL; -+ -+ if (!__kc_pcie_capability_reg_implemented(dev, pos)) -+ return 0; -+ -+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); -+} -+ -+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, -+ u16 clear, u16 set) -+{ -+ int ret; -+ u16 val; -+ -+ ret = __kc_pcie_capability_read_word(dev, pos, &val); -+ if (!ret) { -+ val &= ~clear; -+ val |= set; -+ ret = __kc_pcie_capability_write_word(dev, pos, val); -+ } -+ -+ return ret; -+} -+ -+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, -+ u16 clear) -+{ -+ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); -+} -+#endif /* < 3.7.0 */ -+ -+/****************************************************************************** -+ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, -+ * inferred copyright from kernel -+ */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) -+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, -+ int target, unsigned short *fragoff, int *flags) -+{ -+ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); -+ u8 nexthdr = ipv6_hdr(skb)->nexthdr; -+ unsigned int len; -+ bool found; -+ -+#define __KC_IP6_FH_F_FRAG BIT(0) -+#define __KC_IP6_FH_F_AUTH BIT(1) -+#define __KC_IP6_FH_F_SKIP_RH BIT(2) -+ -+ if (fragoff) -+ *fragoff = 0; -+ -+ if (*offset) { -+ struct ipv6hdr _ip6, *ip6; -+ -+ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); -+ if (!ip6 || (ip6->version != 6)) { -+ printk(KERN_ERR "IPv6 header not found\n"); -+ return -EBADMSG; -+ } -+ start = *offset + sizeof(struct ipv6hdr); -+ nexthdr = ip6->nexthdr; -+ } -+ len = skb->len - start; -+ -+ do { -+ struct ipv6_opt_hdr _hdr, *hp; -+ unsigned int hdrlen; -+ found = (nexthdr == target); -+ -+ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { -+ if (target < 0 || found) -+ break; -+ return -ENOENT; -+ } -+ -+ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); -+ if (!hp) -+ return -EBADMSG; -+ -+ if (nexthdr == NEXTHDR_ROUTING) { -+ struct ipv6_rt_hdr _rh, *rh; -+ -+ rh = skb_header_pointer(skb, start, sizeof(_rh), -+ &_rh); -+ if (!rh) -+ return -EBADMSG; -+ -+ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && -+ rh->segments_left == 0) -+ found = false; -+ } -+ -+ if (nexthdr == NEXTHDR_FRAGMENT) { -+ unsigned short _frag_off; -+ __be16 *fp; -+ -+ if (flags) /* Indicate that this is a fragment */ -+ *flags |= __KC_IP6_FH_F_FRAG; -+ fp = skb_header_pointer(skb, -+ start+offsetof(struct frag_hdr, -+ frag_off), -+ sizeof(_frag_off), -+ &_frag_off); -+ if (!fp) -+ return -EBADMSG; -+ -+ _frag_off = ntohs(*fp) & ~0x7; -+ if (_frag_off) { -+ if (target < 0 && -+ ((!ipv6_ext_hdr(hp->nexthdr)) || -+ hp->nexthdr == NEXTHDR_NONE)) { -+ if (fragoff) -+ *fragoff = _frag_off; -+ return hp->nexthdr; -+ } -+ return -ENOENT; -+ } -+ hdrlen = 8; -+ } else if (nexthdr == 
NEXTHDR_AUTH) { -+ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) -+ break; -+ hdrlen = (hp->hdrlen + 2) << 2; -+ } else -+ hdrlen = ipv6_optlen(hp); -+ -+ if (!found) { -+ nexthdr = hp->nexthdr; -+ len -= hdrlen; -+ start += hdrlen; -+ } -+ } while (!found); -+ -+ *offset = start; -+ return nexthdr; -+} -+#endif /* < 3.8.0 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) -+#endif /* 3.9.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+#ifdef HAVE_FDB_OPS -+#ifdef USE_CONST_DEV_UC_CHAR -+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *dev, const unsigned char *addr, -+ u16 flags) -+#else -+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, -+ unsigned char *addr, u16 flags) -+#endif -+{ -+ int err = -EINVAL; -+ -+ /* If aging addresses are supported device will need to -+ * implement its own handler for this. -+ */ -+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { -+ pr_info("%s: FDB only supports static addresses\n", dev->name); -+ return err; -+ } -+ -+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) -+ err = dev_uc_add_excl(dev, addr); -+ else if (is_multicast_ether_addr(addr)) -+ err = dev_mc_add_excl(dev, addr); -+ -+ /* Only return duplicate errors if NLM_F_EXCL is set */ -+ if (err == -EEXIST && !(flags & NLM_F_EXCL)) -+ err = 0; -+ -+ return err; -+} -+ -+#ifdef USE_CONST_DEV_UC_CHAR -+#ifdef HAVE_FDB_DEL_NLATTR -+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *dev, const unsigned char *addr) -+#else -+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, -+ const unsigned char *addr) -+#endif -+#else -+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, -+ unsigned char *addr) -+#endif -+{ -+ int err = -EINVAL; -+ -+ /* If aging addresses are supported device will need to -+ * implement its own handler for this. 
-+ */ -+ if (!(ndm->ndm_state & NUD_PERMANENT)) { -+ pr_info("%s: FDB only supports static addresses\n", dev->name); -+ return err; -+ } -+ -+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) -+ err = dev_uc_del(dev, addr); -+ else if (is_multicast_ether_addr(addr)) -+ err = dev_mc_del(dev, addr); -+ -+ return err; -+} -+ -+#endif /* HAVE_FDB_OPS */ -+#ifdef CONFIG_PCI_IOV -+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) -+{ -+ unsigned int vfs_assigned = 0; -+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED -+ int pos; -+ struct pci_dev *vfdev; -+ unsigned short dev_id; -+ -+ /* only search if we are a PF */ -+ if (!dev->is_physfn) -+ return 0; -+ -+ /* find SR-IOV capability */ -+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); -+ if (!pos) -+ return 0; -+ -+ /* -+ * determine the device ID for the VFs, the vendor ID will be the -+ * same as the PF so there is no need to check for that one -+ */ -+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); -+ -+ /* loop through all the VFs to see if we own any that are assigned */ -+ vfdev = pci_get_device(dev->vendor, dev_id, NULL); -+ while (vfdev) { -+ /* -+ * It is considered assigned if it is a virtual function with -+ * our dev as the physical function and the assigned bit is set -+ */ -+ if (vfdev->is_virtfn && (vfdev->physfn == dev) && -+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) -+ vfs_assigned++; -+ -+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev); -+ } -+ -+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ -+ return vfs_assigned; -+} -+ -+#endif /* CONFIG_PCI_IOV */ -+#endif /* 3.10.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) -+const unsigned char pcie_link_speed[] = { -+ PCI_SPEED_UNKNOWN, /* 0 */ -+ PCIE_SPEED_2_5GT, /* 1 */ -+ PCIE_SPEED_5_0GT, /* 2 */ -+ PCIE_SPEED_8_0GT, /* 3 */ -+ PCI_SPEED_UNKNOWN, /* 4 */ -+ PCI_SPEED_UNKNOWN, /* 5 */ -+ PCI_SPEED_UNKNOWN, /* 6 */ -+ PCI_SPEED_UNKNOWN, /* 7 */ -+ PCI_SPEED_UNKNOWN, /* 8 */ -+ PCI_SPEED_UNKNOWN, /* 9 */ -+ PCI_SPEED_UNKNOWN, /* A */ -+ PCI_SPEED_UNKNOWN, /* B */ -+ PCI_SPEED_UNKNOWN, /* C */ -+ PCI_SPEED_UNKNOWN, /* D */ -+ PCI_SPEED_UNKNOWN, /* E */ -+ PCI_SPEED_UNKNOWN /* F */ -+}; -+ -+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, -+ enum pcie_link_width *width) -+{ -+ int ret; -+ -+ *speed = PCI_SPEED_UNKNOWN; -+ *width = PCIE_LNK_WIDTH_UNKNOWN; -+ -+ while (dev) { -+ u16 lnksta; -+ enum pci_bus_speed next_speed; -+ enum pcie_link_width next_width; -+ -+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); -+ if (ret) -+ return ret; -+ -+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; -+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> -+ PCI_EXP_LNKSTA_NLW_SHIFT; -+ -+ if (next_speed < *speed) -+ *speed = next_speed; -+ -+ if (next_width < *width) -+ *width = next_width; -+ -+ dev = dev->bus->self; -+ } -+ -+ return 0; -+} -+ -+#endif -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) -+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) -+{ -+ int err = dma_set_mask(dev, mask); -+ -+ if (!err) -+ /* coherent mask for the same size will always succeed if -+ * dma_set_mask does. However we store the error anyways, due -+ * to some kernels which use gcc's warn_unused_result on their -+ * definition of dma_set_coherent_mask. 
-+ */ -+ err = dma_set_coherent_mask(dev, mask); -+ return err; -+} -+ -+void __kc_netdev_rss_key_fill(void *buffer, size_t len) -+{ -+ /* Set of random keys generated using kernel random number generator */ -+ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, -+ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, -+ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, -+ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, -+ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, -+ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, -+ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; -+ -+ BUG_ON(len > NETDEV_RSS_KEY_LEN); -+ memcpy(buffer, seed, len); -+} -+#endif /* 3.13.0 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) -+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, -+ int minvec, int maxvec) -+{ -+ int nvec = maxvec; -+ int rc; -+ -+ if (maxvec < minvec) -+ return -ERANGE; -+ -+ do { -+ rc = pci_enable_msix(dev, entries, nvec); -+ if (rc < 0) { -+ return rc; -+ } else if (rc > 0) { -+ if (rc < minvec) -+ return -ENOSPC; -+ nvec = rc; -+ } -+ } while (rc); -+ -+ return nvec; -+} -+#endif /* 3.14.0 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) -+#ifdef HAVE_SET_RX_MODE -+#ifdef NETDEV_HW_ADDR_T_UNICAST -+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, -+ struct net_device *dev, -+ int (*sync)(struct net_device *, const unsigned char *), -+ int (*unsync)(struct net_device *, const unsigned char *)) -+{ -+ struct netdev_hw_addr *ha, *tmp; -+ int err; -+ -+ /* first go through and flush out any stale entries */ -+ list_for_each_entry_safe(ha, tmp, &list->list, list) { -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+ if (!ha->synced || ha->refcount != 1) -+#else -+ if (!ha->sync_cnt || ha->refcount != 1) -+#endif -+ continue; -+ -+ if (unsync && unsync(dev, ha->addr)) -+ continue; -+ -+ list_del_rcu(&ha->list); -+ kfree_rcu(ha, rcu_head); -+ list->count--; -+ } -+ -+ /* go through and sync new entries to the list */ -+ list_for_each_entry_safe(ha, tmp, &list->list, list) { -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+ if (ha->synced) -+#else -+ if (ha->sync_cnt) -+#endif -+ continue; -+ -+ err = sync(dev, ha->addr); -+ if (err) -+ return err; -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+ ha->synced = true; -+#else -+ ha->sync_cnt++; -+#endif -+ ha->refcount++; -+ } -+ -+ return 0; -+} -+ -+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, -+ struct net_device *dev, -+ int (*unsync)(struct net_device *, const unsigned char *)) -+{ -+ struct netdev_hw_addr *ha, *tmp; -+ -+ list_for_each_entry_safe(ha, tmp, &list->list, list) { -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+ if (!ha->synced) -+#else -+ if (!ha->sync_cnt) -+#endif -+ continue; -+ -+ if (unsync && unsync(dev, ha->addr)) -+ continue; -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+ ha->synced = false; -+#else -+ ha->sync_cnt--; -+#endif -+ if (--ha->refcount) -+ continue; -+ -+ list_del_rcu(&ha->list); -+ kfree_rcu(ha, rcu_head); -+ list->count--; -+ } -+} -+ -+#endif /* NETDEV_HW_ADDR_T_UNICAST */ -+#ifndef NETDEV_HW_ADDR_T_MULTICAST -+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, -+ struct net_device *dev, -+ int (*sync)(struct net_device *, const unsigned char *), -+ int (*unsync)(struct net_device *, const unsigned char *)) -+{ -+ struct dev_addr_list *da, **next = list; -+ int err; -+ -+ /* first go through and flush out any stale entries */ -+ while ((da = *next) != NULL) { 
-+ if (da->da_synced && da->da_users == 1) { -+ if (!unsync || !unsync(dev, da->da_addr)) { -+ *next = da->next; -+ kfree(da); -+ (*count)--; -+ continue; -+ } -+ } -+ next = &da->next; -+ } -+ -+ /* go through and sync new entries to the list */ -+ for (da = *list; da != NULL; da = da->next) { -+ if (da->da_synced) -+ continue; -+ -+ err = sync(dev, da->da_addr); -+ if (err) -+ return err; -+ -+ da->da_synced++; -+ da->da_users++; -+ } -+ -+ return 0; -+} -+ -+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, -+ struct net_device *dev, -+ int (*unsync)(struct net_device *, const unsigned char *)) -+{ -+ struct dev_addr_list *da; -+ -+ while ((da = *list) != NULL) { -+ if (da->da_synced) { -+ if (!unsync || !unsync(dev, da->da_addr)) { -+ da->da_synced--; -+ if (--da->da_users == 0) { -+ *list = da->next; -+ kfree(da); -+ (*count)--; -+ continue; -+ } -+ } -+ } -+ list = &da->next; -+ } -+} -+#endif /* NETDEV_HW_ADDR_T_MULTICAST */ -+#endif /* HAVE_SET_RX_MODE */ -+#endif /* 3.16.0 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) -+#ifndef NO_PTP_SUPPORT -+static void __kc_sock_efree(struct sk_buff *skb) -+{ -+ sock_put(skb->sk); -+} -+ -+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb) -+{ -+ struct sock *sk = skb->sk; -+ struct sk_buff *clone; -+ -+ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) -+ return NULL; -+ -+ clone = skb_clone(skb, GFP_ATOMIC); -+ if (!clone) { -+ sock_put(sk); -+ return NULL; -+ } -+ -+ clone->sk = sk; -+ clone->destructor = __kc_sock_efree; -+ -+ return clone; -+} -+ -+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, -+ struct skb_shared_hwtstamps *hwtstamps) -+{ -+ struct sock_exterr_skb *serr; -+ struct sock *sk = skb->sk; -+ int err; -+ -+ sock_hold(sk); -+ -+ *skb_hwtstamps(skb) = *hwtstamps; -+ -+ serr = SKB_EXT_ERR(skb); -+ memset(serr, 0, sizeof(*serr)); -+ serr->ee.ee_errno = ENOMSG; -+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; -+ -+ err = sock_queue_err_skb(sk, skb); -+ if (err) -+ kfree_skb(skb); -+ -+ sock_put(sk); -+} -+#endif -+ -+/* include headers needed for get_headlen function */ -+#ifdef HAVE_SCTP -+#include -+#endif -+ -+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len) -+{ -+ union { -+ unsigned char *network; -+ /* l2 headers */ -+ struct ethhdr *eth; -+ struct vlan_hdr *vlan; -+ /* l3 headers */ -+ struct iphdr *ipv4; -+ struct ipv6hdr *ipv6; -+ } hdr; -+ __be16 proto; -+ u8 nexthdr = 0; /* default to not TCP */ -+ u8 hlen; -+ -+ /* this should never happen, but better safe than sorry */ -+ if (max_len < ETH_HLEN) -+ return max_len; -+ -+ /* initialize network frame pointer */ -+ hdr.network = data; -+ -+ /* set first protocol and move network header forward */ -+ proto = hdr.eth->h_proto; -+ hdr.network += ETH_HLEN; -+ -+again: -+ switch (proto) { -+ /* handle any vlan tag if present */ -+ case __constant_htons(ETH_P_8021AD): -+ case __constant_htons(ETH_P_8021Q): -+ if ((hdr.network - data) > (max_len - VLAN_HLEN)) -+ return max_len; -+ -+ proto = hdr.vlan->h_vlan_encapsulated_proto; -+ hdr.network += VLAN_HLEN; -+ goto again; -+ /* handle L3 protocols */ -+ case __constant_htons(ETH_P_IP): -+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) -+ return max_len; -+ -+ /* access ihl as a u8 to avoid unaligned access on ia64 */ -+ hlen = (hdr.network[0] & 0x0F) << 2; -+ -+ /* verify hlen meets minimum size requirements */ -+ if (hlen < sizeof(struct iphdr)) -+ return 
hdr.network - data; -+ -+ /* record next protocol if header is present */ -+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) -+ nexthdr = hdr.ipv4->protocol; -+ -+ hdr.network += hlen; -+ break; -+#ifdef NETIF_F_TSO6 -+ case __constant_htons(ETH_P_IPV6): -+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) -+ return max_len; -+ -+ /* record next protocol */ -+ nexthdr = hdr.ipv6->nexthdr; -+ hdr.network += sizeof(struct ipv6hdr); -+ break; -+#endif /* NETIF_F_TSO6 */ -+ default: -+ return hdr.network - data; -+ } -+ -+ /* finally sort out L4 */ -+ switch (nexthdr) { -+ case IPPROTO_TCP: -+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) -+ return max_len; -+ -+ /* access doff as a u8 to avoid unaligned access on ia64 */ -+ hdr.network += max_t(u8, sizeof(struct tcphdr), -+ (hdr.network[12] & 0xF0) >> 2); -+ -+ break; -+ case IPPROTO_UDP: -+ case IPPROTO_UDPLITE: -+ hdr.network += sizeof(struct udphdr); -+ break; -+#ifdef HAVE_SCTP -+ case IPPROTO_SCTP: -+ hdr.network += sizeof(struct sctphdr); -+ break; -+#endif -+ } -+ -+ /* -+ * If everything has gone correctly hdr.network should be the -+ * data section of the packet and will be the end of the header. -+ * If not then it probably represents the end of the last recognized -+ * header. -+ */ -+ return min_t(unsigned int, hdr.network - data, max_len); -+} -+ -+#endif /* < 3.18.0 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) -+#ifdef HAVE_NET_GET_RANDOM_ONCE -+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; -+ -+void __kc_netdev_rss_key_fill(void *buffer, size_t len) -+{ -+ BUG_ON(len > sizeof(__kc_netdev_rss_key)); -+ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); -+ memcpy(buffer, __kc_netdev_rss_key, len); -+} -+#endif -+#endif -diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.h b/drivers/net/ethernet/intel/igb/kcompat.h ---- a/drivers/net/ethernet/intel/igb/kcompat.h 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/kcompat.h 2016-11-14 14:32:08.583567168 +0000 -@@ -0,0 +1,5071 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+#ifndef _KCOMPAT_H_ -+#define _KCOMPAT_H_ -+ -+#ifndef LINUX_VERSION_CODE -+#include -+#else -+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) -+#endif -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ -+#ifndef UTS_RELEASE -+/* utsrelease.h changed locations in 2.6.33 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) -+#include -+#else -+#include -+#endif -+#endif -+ -+/* NAPI enable/disable flags here */ -+#define NAPI -+ -+#define adapter_struct igb_adapter -+#define adapter_q_vector igb_q_vector -+#define NAPI -+ -+/* and finally set defines so that the code sees the changes */ -+#ifdef NAPI -+#else -+#endif /* NAPI */ -+ -+/* Dynamic LTR and deeper C-State support disable/enable */ -+ -+/* packet split disable/enable */ -+#ifdef DISABLE_PACKET_SPLIT -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -+#define CONFIG_IGB_DISABLE_PACKET_SPLIT -+#endif -+#endif /* DISABLE_PACKET_SPLIT */ -+ -+/* MSI compatibility code for all kernels and drivers */ -+#ifdef DISABLE_PCI_MSI -+#undef CONFIG_PCI_MSI -+#endif -+#ifndef CONFIG_PCI_MSI -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) -+struct msix_entry { -+ u16 vector; /* kernel uses to write allocated vector */ -+ u16 entry; /* driver uses to specify entry, OS writes */ -+}; -+#endif -+#undef pci_enable_msi -+#define pci_enable_msi(a) -ENOTSUPP -+#undef pci_disable_msi -+#define pci_disable_msi(a) do {} while (0) -+#undef pci_enable_msix -+#define pci_enable_msix(a, b, c) -ENOTSUPP -+#undef pci_disable_msix -+#define pci_disable_msix(a) do {} while (0) -+#define msi_remove_pci_irq_vectors(a) do {} while (0) -+#endif /* CONFIG_PCI_MSI */ -+#ifdef DISABLE_PM -+#undef CONFIG_PM -+#endif -+ -+#ifdef DISABLE_NET_POLL_CONTROLLER -+#undef CONFIG_NET_POLL_CONTROLLER -+#endif -+ -+#ifndef PMSG_SUSPEND -+#define PMSG_SUSPEND 3 -+#endif -+ -+/* generic boolean compatibility */ -+#undef TRUE -+#undef FALSE -+#define TRUE true -+#define FALSE false -+#ifdef GCC_VERSION -+#if ( GCC_VERSION < 3000 ) -+#define _Bool char -+#endif -+#else -+#define _Bool char -+#endif -+ -+#undef __always_unused -+#define __always_unused __attribute__((__unused__)) -+ -+#undef __maybe_unused -+#define __maybe_unused __attribute__((__unused__)) -+ -+/* kernels less than 2.4.14 don't have this */ -+#ifndef ETH_P_8021Q -+#define ETH_P_8021Q 0x8100 -+#endif -+ -+#ifndef module_param -+#define module_param(v,t,p) MODULE_PARM(v, "i"); -+#endif -+ -+#ifndef DMA_64BIT_MASK -+#define DMA_64BIT_MASK 0xffffffffffffffffULL -+#endif -+ -+#ifndef DMA_32BIT_MASK -+#define DMA_32BIT_MASK 0x00000000ffffffffULL -+#endif -+ -+#ifndef PCI_CAP_ID_EXP -+#define PCI_CAP_ID_EXP 0x10 -+#endif -+ -+#ifndef uninitialized_var -+#define uninitialized_var(x) x = x -+#endif -+ -+#ifndef PCIE_LINK_STATE_L0S -+#define PCIE_LINK_STATE_L0S 1 -+#endif -+#ifndef PCIE_LINK_STATE_L1 -+#define PCIE_LINK_STATE_L1 2 -+#endif -+ -+#ifndef mmiowb -+#ifdef CONFIG_IA64 -+#define mmiowb() asm volatile ("mf.a" ::: "memory") -+#else -+#define mmiowb() -+#endif -+#endif -+ -+#ifndef SET_NETDEV_DEV -+#define SET_NETDEV_DEV(net, pdev) -+#endif -+ -+#if !defined(HAVE_FREE_NETDEV) && ( 
LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) -+#define free_netdev(x) kfree(x) -+#endif -+ -+#ifdef HAVE_POLL_CONTROLLER -+#define CONFIG_NET_POLL_CONTROLLER -+#endif -+ -+#ifndef SKB_DATAREF_SHIFT -+/* if we do not have the infrastructure to detect if skb_header is cloned -+ just return false in all cases */ -+#define skb_header_cloned(x) 0 -+#endif -+ -+#ifndef NETIF_F_GSO -+#define gso_size tso_size -+#define gso_segs tso_segs -+#endif -+ -+#ifndef NETIF_F_GRO -+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ -+ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) -+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) -+#endif -+ -+#ifndef NETIF_F_SCTP_CSUM -+#define NETIF_F_SCTP_CSUM 0 -+#endif -+ -+#ifndef NETIF_F_LRO -+#define NETIF_F_LRO (1 << 15) -+#endif -+ -+#ifndef NETIF_F_NTUPLE -+#define NETIF_F_NTUPLE (1 << 27) -+#endif -+ -+#ifndef NETIF_F_ALL_FCOE -+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ -+ NETIF_F_FSO) -+#endif -+ -+#ifndef IPPROTO_SCTP -+#define IPPROTO_SCTP 132 -+#endif -+ -+#ifndef IPPROTO_UDPLITE -+#define IPPROTO_UDPLITE 136 -+#endif -+ -+#ifndef CHECKSUM_PARTIAL -+#define CHECKSUM_PARTIAL CHECKSUM_HW -+#define CHECKSUM_COMPLETE CHECKSUM_HW -+#endif -+ -+#ifndef __read_mostly -+#define __read_mostly -+#endif -+ -+#ifndef MII_RESV1 -+#define MII_RESV1 0x17 /* Reserved... */ -+#endif -+ -+#ifndef unlikely -+#define unlikely(_x) _x -+#define likely(_x) _x -+#endif -+ -+#ifndef WARN_ON -+#define WARN_ON(x) -+#endif -+ -+#ifndef PCI_DEVICE -+#define PCI_DEVICE(vend,dev) \ -+ .vendor = (vend), .device = (dev), \ -+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID -+#endif -+ -+#ifndef node_online -+#define node_online(node) ((node) == 0) -+#endif -+ -+#ifndef num_online_cpus -+#define num_online_cpus() smp_num_cpus -+#endif -+ -+#ifndef cpu_online -+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) -+#endif -+ -+#ifndef _LINUX_RANDOM_H -+#include -+#endif -+ -+#ifndef DECLARE_BITMAP -+#ifndef BITS_TO_LONGS -+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) -+#endif -+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] -+#endif -+ -+#ifndef VLAN_HLEN -+#define VLAN_HLEN 4 -+#endif -+ -+#ifndef VLAN_ETH_HLEN -+#define VLAN_ETH_HLEN 18 -+#endif -+ -+#ifndef VLAN_ETH_FRAME_LEN -+#define VLAN_ETH_FRAME_LEN 1518 -+#endif -+ -+#ifndef DCA_GET_TAG_TWO_ARGS -+#define dca3_get_tag(a,b) dca_get_tag(b) -+#endif -+ -+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -+#if defined(__i386__) || defined(__x86_64__) -+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -+#endif -+#endif -+ -+/* taken from 2.6.24 definition in linux/kernel.h */ -+#ifndef IS_ALIGNED -+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) -+#endif -+ -+#ifdef IS_ENABLED -+#undef IS_ENABLED -+#undef __ARG_PLACEHOLDER_1 -+#undef config_enabled -+#undef _config_enabled -+#undef __config_enabled -+#undef ___config_enabled -+#endif -+ -+#define __ARG_PLACEHOLDER_1 0, -+#define config_enabled(cfg) _config_enabled(cfg) -+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) -+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) -+#define ___config_enabled(__ignored, val, ...) 
val -+ -+#define IS_ENABLED(option) \ -+ (config_enabled(option) || config_enabled(option##_MODULE)) -+ -+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) -+struct _kc_vlan_ethhdr { -+ unsigned char h_dest[ETH_ALEN]; -+ unsigned char h_source[ETH_ALEN]; -+ __be16 h_vlan_proto; -+ __be16 h_vlan_TCI; -+ __be16 h_vlan_encapsulated_proto; -+}; -+#define vlan_ethhdr _kc_vlan_ethhdr -+struct _kc_vlan_hdr { -+ __be16 h_vlan_TCI; -+ __be16 h_vlan_encapsulated_proto; -+}; -+#define vlan_hdr _kc_vlan_hdr -+#define vlan_tx_tag_present(_skb) 0 -+#define vlan_tx_tag_get(_skb) 0 -+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ -+ -+#ifndef VLAN_PRIO_SHIFT -+#define VLAN_PRIO_SHIFT 13 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB -+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB -+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB -+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_NLW_X1 -+#define PCI_EXP_LNKSTA_NLW_X1 0x0010 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_NLW_X2 -+#define PCI_EXP_LNKSTA_NLW_X2 0x0020 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_NLW_X4 -+#define PCI_EXP_LNKSTA_NLW_X4 0x0040 -+#endif -+ -+#ifndef PCI_EXP_LNKSTA_NLW_X8 -+#define PCI_EXP_LNKSTA_NLW_X8 0x0080 -+#endif -+ -+#ifndef __GFP_COLD -+#define __GFP_COLD 0 -+#endif -+ -+#ifndef __GFP_COMP -+#define __GFP_COMP 0 -+#endif -+ -+#ifndef IP_OFFSET -+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ -+#endif -+ -+/*****************************************************************************/ -+/* Installations with ethtool version without eeprom, adapter id, or statistics -+ * support */ -+ -+#ifndef ETH_GSTRING_LEN -+#define ETH_GSTRING_LEN 32 -+#endif -+ -+#ifndef ETHTOOL_GSTATS -+#define ETHTOOL_GSTATS 0x1d -+#undef ethtool_drvinfo -+#define ethtool_drvinfo k_ethtool_drvinfo -+struct k_ethtool_drvinfo { -+ u32 cmd; -+ char driver[32]; -+ char version[32]; -+ char fw_version[32]; -+ char bus_info[32]; -+ char reserved1[32]; -+ char reserved2[16]; -+ u32 n_stats; -+ u32 testinfo_len; -+ u32 eedump_len; -+ u32 regdump_len; -+}; -+ -+struct ethtool_stats { -+ u32 cmd; -+ u32 n_stats; -+ u64 data[0]; -+}; -+#endif /* ETHTOOL_GSTATS */ -+ -+#ifndef ETHTOOL_PHYS_ID -+#define ETHTOOL_PHYS_ID 0x1c -+#endif /* ETHTOOL_PHYS_ID */ -+ -+#ifndef ETHTOOL_GSTRINGS -+#define ETHTOOL_GSTRINGS 0x1b -+enum ethtool_stringset { -+ ETH_SS_TEST = 0, -+ ETH_SS_STATS, -+}; -+struct ethtool_gstrings { -+ u32 cmd; /* ETHTOOL_GSTRINGS */ -+ u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ -+ u32 len; /* number of strings in the string set */ -+ u8 data[0]; -+}; -+#endif /* ETHTOOL_GSTRINGS */ -+ -+#ifndef ETHTOOL_TEST -+#define ETHTOOL_TEST 0x1a -+enum ethtool_test_flags { -+ ETH_TEST_FL_OFFLINE = (1 << 0), -+ ETH_TEST_FL_FAILED = (1 << 1), -+}; -+struct ethtool_test { -+ u32 cmd; -+ u32 flags; -+ u32 reserved; -+ u32 len; -+ u64 data[0]; -+}; -+#endif /* ETHTOOL_TEST */ -+ -+#ifndef ETHTOOL_GEEPROM -+#define ETHTOOL_GEEPROM 0xb -+#undef ETHTOOL_GREGS -+struct ethtool_eeprom { -+ u32 cmd; -+ u32 magic; -+ u32 offset; -+ u32 len; -+ u8 data[0]; -+}; -+ -+struct ethtool_value { -+ u32 cmd; -+ u32 data; -+}; -+#endif /* ETHTOOL_GEEPROM */ -+ -+#ifndef ETHTOOL_GLINK -+#define ETHTOOL_GLINK 0xa -+#endif /* ETHTOOL_GLINK */ -+ -+#ifndef ETHTOOL_GWOL -+#define ETHTOOL_GWOL 0x5 -+#define ETHTOOL_SWOL 0x6 -+#define SOPASS_MAX 6 -+struct ethtool_wolinfo { -+ u32 cmd; -+ u32 supported; -+ u32 wolopts; -+ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ -+}; -+#endif /* ETHTOOL_GWOL */ -+ -+#ifndef ETHTOOL_GREGS -+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ -+#define ethtool_regs _kc_ethtool_regs -+/* for passing big chunks of data */ -+struct _kc_ethtool_regs { -+ u32 cmd; -+ u32 version; /* driver-specific, indicates different chips/revs */ -+ u32 len; /* bytes */ -+ u8 data[0]; -+}; -+#endif /* ETHTOOL_GREGS */ -+ -+#ifndef ETHTOOL_GMSGLVL -+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ -+#endif -+#ifndef ETHTOOL_SMSGLVL -+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ -+#endif -+#ifndef ETHTOOL_NWAY_RST -+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ -+#endif -+#ifndef ETHTOOL_GLINK -+#define ETHTOOL_GLINK 0x0000000a /* Get link status */ -+#endif -+#ifndef ETHTOOL_GEEPROM -+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ -+#endif -+#ifndef ETHTOOL_SEEPROM -+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ -+#endif -+#ifndef ETHTOOL_GCOALESCE -+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ -+/* for configuring coalescing parameters of chip */ -+#define ethtool_coalesce _kc_ethtool_coalesce -+struct _kc_ethtool_coalesce { -+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */ -+ -+ /* How many usecs to delay an RX interrupt after -+ * a packet arrives. If 0, only rx_max_coalesced_frames -+ * is used. -+ */ -+ u32 rx_coalesce_usecs; -+ -+ /* How many packets to delay an RX interrupt after -+ * a packet arrives. If 0, only rx_coalesce_usecs is -+ * used. It is illegal to set both usecs and max frames -+ * to zero as this would cause RX interrupts to never be -+ * generated. -+ */ -+ u32 rx_max_coalesced_frames; -+ -+ /* Same as above two parameters, except that these values -+ * apply while an IRQ is being serviced by the host. Not -+ * all cards support this feature and the values are ignored -+ * in that case. -+ */ -+ u32 rx_coalesce_usecs_irq; -+ u32 rx_max_coalesced_frames_irq; -+ -+ /* How many usecs to delay a TX interrupt after -+ * a packet is sent. If 0, only tx_max_coalesced_frames -+ * is used. -+ */ -+ u32 tx_coalesce_usecs; -+ -+ /* How many packets to delay a TX interrupt after -+ * a packet is sent. If 0, only tx_coalesce_usecs is -+ * used. It is illegal to set both usecs and max frames -+ * to zero as this would cause TX interrupts to never be -+ * generated. -+ */ -+ u32 tx_max_coalesced_frames; -+ -+ /* Same as above two parameters, except that these values -+ * apply while an IRQ is being serviced by the host. 
Not -+ * all cards support this feature and the values are ignored -+ * in that case. -+ */ -+ u32 tx_coalesce_usecs_irq; -+ u32 tx_max_coalesced_frames_irq; -+ -+ /* How many usecs to delay in-memory statistics -+ * block updates. Some drivers do not have an in-memory -+ * statistic block, and in such cases this value is ignored. -+ * This value must not be zero. -+ */ -+ u32 stats_block_coalesce_usecs; -+ -+ /* Adaptive RX/TX coalescing is an algorithm implemented by -+ * some drivers to improve latency under low packet rates and -+ * improve throughput under high packet rates. Some drivers -+ * only implement one of RX or TX adaptive coalescing. Anything -+ * not implemented by the driver causes these values to be -+ * silently ignored. -+ */ -+ u32 use_adaptive_rx_coalesce; -+ u32 use_adaptive_tx_coalesce; -+ -+ /* When the packet rate (measured in packets per second) -+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are -+ * used. -+ */ -+ u32 pkt_rate_low; -+ u32 rx_coalesce_usecs_low; -+ u32 rx_max_coalesced_frames_low; -+ u32 tx_coalesce_usecs_low; -+ u32 tx_max_coalesced_frames_low; -+ -+ /* When the packet rate is below pkt_rate_high but above -+ * pkt_rate_low (both measured in packets per second) the -+ * normal {rx,tx}_* coalescing parameters are used. -+ */ -+ -+ /* When the packet rate is (measured in packets per second) -+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are -+ * used. -+ */ -+ u32 pkt_rate_high; -+ u32 rx_coalesce_usecs_high; -+ u32 rx_max_coalesced_frames_high; -+ u32 tx_coalesce_usecs_high; -+ u32 tx_max_coalesced_frames_high; -+ -+ /* How often to do adaptive coalescing packet rate sampling, -+ * measured in seconds. Must not be zero. -+ */ -+ u32 rate_sample_interval; -+}; -+#endif /* ETHTOOL_GCOALESCE */ -+ -+#ifndef ETHTOOL_SCOALESCE -+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ -+#endif -+#ifndef ETHTOOL_GRINGPARAM -+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ -+/* for configuring RX/TX ring parameters */ -+#define ethtool_ringparam _kc_ethtool_ringparam -+struct _kc_ethtool_ringparam { -+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ -+ -+ /* Read only attributes. These indicate the maximum number -+ * of pending RX/TX ring entries the driver will allow the -+ * user to set. -+ */ -+ u32 rx_max_pending; -+ u32 rx_mini_max_pending; -+ u32 rx_jumbo_max_pending; -+ u32 tx_max_pending; -+ -+ /* Values changeable by the user. The valid values are -+ * in the range 1 to the "*_max_pending" counterpart above. -+ */ -+ u32 rx_pending; -+ u32 rx_mini_pending; -+ u32 rx_jumbo_pending; -+ u32 tx_pending; -+}; -+#endif /* ETHTOOL_GRINGPARAM */ -+ -+#ifndef ETHTOOL_SRINGPARAM -+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ -+#endif -+#ifndef ETHTOOL_GPAUSEPARAM -+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ -+/* for configuring link flow control parameters */ -+#define ethtool_pauseparam _kc_ethtool_pauseparam -+struct _kc_ethtool_pauseparam { -+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ -+ -+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg -+ * being true) the user may set 'autoneg' here non-zero to have the -+ * pause parameters be auto-negotiated too. In such a case, the -+ * {rx,tx}_pause values below determine what capabilities are -+ * advertised. -+ * -+ * If 'autoneg' is zero or the link is not being auto-negotiated, -+ * then {rx,tx}_pause force the driver to use/not-use pause -+ * flow control. 
-+ */ -+ u32 autoneg; -+ u32 rx_pause; -+ u32 tx_pause; -+}; -+#endif /* ETHTOOL_GPAUSEPARAM */ -+ -+#ifndef ETHTOOL_SPAUSEPARAM -+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ -+#endif -+#ifndef ETHTOOL_GRXCSUM -+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ -+#endif -+#ifndef ETHTOOL_SRXCSUM -+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ -+#endif -+#ifndef ETHTOOL_GTXCSUM -+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ -+#endif -+#ifndef ETHTOOL_STXCSUM -+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ -+#endif -+#ifndef ETHTOOL_GSG -+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable -+ * (ethtool_value) */ -+#endif -+#ifndef ETHTOOL_SSG -+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable -+ * (ethtool_value). */ -+#endif -+#ifndef ETHTOOL_TEST -+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ -+#endif -+#ifndef ETHTOOL_GSTRINGS -+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ -+#endif -+#ifndef ETHTOOL_PHYS_ID -+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ -+#endif -+#ifndef ETHTOOL_GSTATS -+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ -+#endif -+#ifndef ETHTOOL_GTSO -+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ -+#endif -+#ifndef ETHTOOL_STSO -+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ -+#endif -+ -+#ifndef ETHTOOL_BUSINFO_LEN -+#define ETHTOOL_BUSINFO_LEN 32 -+#endif -+ -+#ifndef SPEED_2500 -+#define SPEED_2500 2500 -+#endif -+#ifndef SPEED_5000 -+#define SPEED_5000 5000 -+#endif -+ -+#ifndef RHEL_RELEASE_VERSION -+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) -+#endif -+#ifndef AX_RELEASE_VERSION -+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) -+#endif -+ -+#ifndef AX_RELEASE_CODE -+#define AX_RELEASE_CODE 0 -+#endif -+ -+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) -+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) -+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) -+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) -+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) -+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) -+#endif -+ -+#ifndef RHEL_RELEASE_CODE -+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ -+#define RHEL_RELEASE_CODE 0 -+#endif -+ -+/* RHEL 7 didn't backport the parameter change in -+ * create_singlethread_workqueue. -+ * If/when RH corrects this we will want to tighten up the version check. -+ */ -+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) -+#undef create_singlethread_workqueue -+#define create_singlethread_workqueue(name) \ -+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) -+#endif -+ -+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find -+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new -+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of -+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in -+ * the linux-source package, but in the linux-headers package. It begins to -+ * appear in later releases of 14.04 and 14.10. 
-+ * -+ * Ex: -+ * -+ * $uname -r -+ * 3.13.0-45-generic -+ * ABI is 45 -+ * -+ * -+ * $uname -r -+ * 3.16.0-23-generic -+ * ABI is 23 -+ */ -+#ifndef UTS_UBUNTU_RELEASE_ABI -+#define UTS_UBUNTU_RELEASE_ABI 0 -+#define UBUNTU_VERSION_CODE 0 -+#else -+/* Ubuntu does not provide actual release version macro, so we use the kernel -+ * version plus the ABI to generate a unique version code specific to Ubuntu. -+ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to -+ * ignore differences in sublevel which are not important since we have the -+ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for -+ * ordering checks. -+ */ -+#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ -+ UTS_UBUNTU_RELEASE_ABI) -+ -+#if UTS_UBUNTU_RELEASE_ABI > 255 -+#error UTS_UBUNTU_RELEASE_ABI is too large... -+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ -+ -+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) -+/* Our version code scheme does not make sense for non 3.x or newer kernels, -+ * and we have no support in kcompat for this scenario. Thus, treat this as a -+ * non-Ubuntu kernel. Possibly might be better to error here. -+ */ -+#define UTS_UBUNTU_RELEASE_ABI 0 -+#define UBUNTU_VERSION_CODE 0 -+#endif -+ -+#endif -+ -+/* Note that the 3rd digit is always zero, and will be ignored. This is -+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux -+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value. -+ */ -+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) -+ -+/* SuSE version macro is the same as Linux kernel version */ -+#ifndef SLE_VERSION -+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) -+#endif -+#ifdef CONFIG_SUSE_KERNEL -+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) -+/* SLES11 GA is 2.6.27 based */ -+#define SLE_VERSION_CODE SLE_VERSION(11,0,0) -+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) -+/* SLES11 SP1 is 2.6.32 based */ -+#define SLE_VERSION_CODE SLE_VERSION(11,1,0) -+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) -+/* SLES11 SP2 is 3.0.13 based */ -+#define SLE_VERSION_CODE SLE_VERSION(11,2,0) -+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) -+/* SLES11 SP3 is 3.0.76 based */ -+#define SLE_VERSION_CODE SLE_VERSION(11,3,0) -+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101))) -+/* SLES11 SP4 is 3.0.101 based */ -+#define SLE_VERSION_CODE SLE_VERSION(11,4,0) -+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28))) -+/* SLES12 GA is 3.12.28 based */ -+#define SLE_VERSION_CODE SLE_VERSION(12,0,0) -+/* new SLES kernels must be added here with >= based on kernel -+ * the idea is to order from newest to oldest and just catch all -+ * of them using the >= -+ */ -+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47))) -+/* SLES12 SP1 is 3.12.47-based */ -+#define SLE_VERSION_CODE SLE_VERSION(12,1,0) -+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ -+#endif /* CONFIG_SUSE_KERNEL */ -+#ifndef SLE_VERSION_CODE -+#define SLE_VERSION_CODE 0 -+#endif /* SLE_VERSION_CODE */ -+ -+#ifdef __KLOCWORK__ -+#ifdef ARRAY_SIZE -+#undef ARRAY_SIZE -+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -+#endif -+#endif /* __KLOCWORK__ */ -+ -+/*****************************************************************************/ -+/* 2.4.3 => 2.4.0 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) -+ -+/**************************************/ -+/* PCI DRIVER API */ -+ -+#ifndef pci_set_dma_mask -+#define pci_set_dma_mask _kc_pci_set_dma_mask 
-+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); -+#endif -+ -+#ifndef pci_request_regions -+#define pci_request_regions _kc_pci_request_regions -+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); -+#endif -+ -+#ifndef pci_release_regions -+#define pci_release_regions _kc_pci_release_regions -+extern void _kc_pci_release_regions(struct pci_dev *pdev); -+#endif -+ -+/**************************************/ -+/* NETWORK DRIVER API */ -+ -+#ifndef alloc_etherdev -+#define alloc_etherdev _kc_alloc_etherdev -+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); -+#endif -+ -+#ifndef is_valid_ether_addr -+#define is_valid_ether_addr _kc_is_valid_ether_addr -+extern int _kc_is_valid_ether_addr(u8 *addr); -+#endif -+ -+/**************************************/ -+/* MISCELLANEOUS */ -+ -+#ifndef INIT_TQUEUE -+#define INIT_TQUEUE(_tq, _routine, _data) \ -+ do { \ -+ INIT_LIST_HEAD(&(_tq)->list); \ -+ (_tq)->sync = 0; \ -+ (_tq)->routine = _routine; \ -+ (_tq)->data = _data; \ -+ } while (0) -+#endif -+ -+#endif /* 2.4.3 => 2.4.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) -+/* Generic MII registers. */ -+#define MII_BMCR 0x00 /* Basic mode control register */ -+#define MII_BMSR 0x01 /* Basic mode status register */ -+#define MII_PHYSID1 0x02 /* PHYS ID 1 */ -+#define MII_PHYSID2 0x03 /* PHYS ID 2 */ -+#define MII_ADVERTISE 0x04 /* Advertisement control reg */ -+#define MII_LPA 0x05 /* Link partner ability reg */ -+#define MII_EXPANSION 0x06 /* Expansion register */ -+/* Basic mode control register. */ -+#define BMCR_FULLDPLX 0x0100 /* Full duplex */ -+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ -+/* Basic mode status register. */ -+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ -+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ -+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ -+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ -+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ -+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ -+/* Advertisement control register. */ -+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ -+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ -+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ -+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ -+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ -+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ -+ ADVERTISE_100HALF | ADVERTISE_100FULL) -+/* Expansion register for auto-negotiation. 
*/ -+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ -+#endif -+ -+/*****************************************************************************/ -+/* 2.4.6 => 2.4.3 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) -+ -+#ifndef pci_set_power_state -+#define pci_set_power_state _kc_pci_set_power_state -+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); -+#endif -+ -+#ifndef pci_enable_wake -+#define pci_enable_wake _kc_pci_enable_wake -+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); -+#endif -+ -+#ifndef pci_disable_device -+#define pci_disable_device _kc_pci_disable_device -+extern void _kc_pci_disable_device(struct pci_dev *pdev); -+#endif -+ -+/* PCI PM entry point syntax changed, so don't support suspend/resume */ -+#undef CONFIG_PM -+ -+#endif /* 2.4.6 => 2.4.3 */ -+ -+#ifndef HAVE_PCI_SET_MWI -+#define pci_set_mwi(X) pci_write_config_word(X, \ -+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ -+ PCI_COMMAND_INVALIDATE); -+#define pci_clear_mwi(X) pci_write_config_word(X, \ -+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ -+ ~PCI_COMMAND_INVALIDATE); -+#endif -+ -+/*****************************************************************************/ -+/* 2.4.10 => 2.4.9 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) -+ -+/**************************************/ -+/* MODULE API */ -+ -+#ifndef MODULE_LICENSE -+ #define MODULE_LICENSE(X) -+#endif -+ -+/**************************************/ -+/* OTHER */ -+ -+#undef min -+#define min(x,y) ({ \ -+ const typeof(x) _x = (x); \ -+ const typeof(y) _y = (y); \ -+ (void) (&_x == &_y); \ -+ _x < _y ? _x : _y; }) -+ -+#undef max -+#define max(x,y) ({ \ -+ const typeof(x) _x = (x); \ -+ const typeof(y) _y = (y); \ -+ (void) (&_x == &_y); \ -+ _x > _y ? _x : _y; }) -+ -+#define min_t(type,x,y) ({ \ -+ type _x = (x); \ -+ type _y = (y); \ -+ _x < _y ? _x : _y; }) -+ -+#define max_t(type,x,y) ({ \ -+ type _x = (x); \ -+ type _y = (y); \ -+ _x > _y ? _x : _y; }) -+ -+#ifndef list_for_each_safe -+#define list_for_each_safe(pos, n, head) \ -+ for (pos = (head)->next, n = pos->next; pos != (head); \ -+ pos = n, n = pos->next) -+#endif -+ -+#ifndef ____cacheline_aligned_in_smp -+#ifdef CONFIG_SMP -+#define ____cacheline_aligned_in_smp ____cacheline_aligned -+#else -+#define ____cacheline_aligned_in_smp -+#endif /* CONFIG_SMP */ -+#endif -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) -+extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); -+#define snprintf(buf, size, fmt, args...) 
_kc_snprintf(buf, size, fmt, ##args) -+extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); -+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) -+#else /* 2.4.8 => 2.4.9 */ -+extern int snprintf(char * buf, size_t size, const char *fmt, ...); -+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); -+#endif -+#endif /* 2.4.10 -> 2.4.6 */ -+ -+ -+/*****************************************************************************/ -+/* 2.4.12 => 2.4.10 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) -+#ifndef HAVE_NETIF_MSG -+#define HAVE_NETIF_MSG 1 -+enum { -+ NETIF_MSG_DRV = 0x0001, -+ NETIF_MSG_PROBE = 0x0002, -+ NETIF_MSG_LINK = 0x0004, -+ NETIF_MSG_TIMER = 0x0008, -+ NETIF_MSG_IFDOWN = 0x0010, -+ NETIF_MSG_IFUP = 0x0020, -+ NETIF_MSG_RX_ERR = 0x0040, -+ NETIF_MSG_TX_ERR = 0x0080, -+ NETIF_MSG_TX_QUEUED = 0x0100, -+ NETIF_MSG_INTR = 0x0200, -+ NETIF_MSG_TX_DONE = 0x0400, -+ NETIF_MSG_RX_STATUS = 0x0800, -+ NETIF_MSG_PKTDATA = 0x1000, -+ NETIF_MSG_HW = 0x2000, -+ NETIF_MSG_WOL = 0x4000, -+}; -+ -+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) -+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) -+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) -+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) -+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) -+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) -+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) -+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) -+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) -+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) -+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) -+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) -+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) -+#endif /* !HAVE_NETIF_MSG */ -+#endif /* 2.4.12 => 2.4.10 */ -+ -+/*****************************************************************************/ -+/* 2.4.13 => 2.4.12 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) -+ -+/**************************************/ -+/* PCI DMA MAPPING */ -+ -+#ifndef virt_to_page -+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) -+#endif -+ -+#ifndef pci_map_page -+#define pci_map_page _kc_pci_map_page -+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); -+#endif -+ -+#ifndef pci_unmap_page -+#define pci_unmap_page _kc_pci_unmap_page -+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); -+#endif -+ -+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ -+ -+#undef DMA_32BIT_MASK -+#define DMA_32BIT_MASK 0xffffffff -+#undef DMA_64BIT_MASK -+#define DMA_64BIT_MASK 0xffffffff -+ -+/**************************************/ -+/* OTHER */ -+ -+#ifndef cpu_relax -+#define cpu_relax() rep_nop() -+#endif -+ -+struct vlan_ethhdr { -+ unsigned char h_dest[ETH_ALEN]; -+ unsigned char h_source[ETH_ALEN]; -+ unsigned short h_vlan_proto; -+ unsigned short h_vlan_TCI; -+ unsigned short h_vlan_encapsulated_proto; -+}; -+#endif /* 2.4.13 => 2.4.12 */ -+ -+/*****************************************************************************/ -+/* 2.4.17 => 2.4.12 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) -+ -+#ifndef __devexit_p -+ #define __devexit_p(x) &(x) -+#endif -+ -+#endif /* 
2.4.17 => 2.4.13 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) -+#define NETIF_MSG_HW 0x2000 -+#define NETIF_MSG_WOL 0x4000 -+ -+#ifndef netif_msg_hw -+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) -+#endif -+#ifndef netif_msg_wol -+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) -+#endif -+#endif /* 2.4.18 */ -+ -+/*****************************************************************************/ -+ -+/*****************************************************************************/ -+/* 2.4.20 => 2.4.19 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) -+ -+/* we won't support NAPI on less than 2.4.20 */ -+#ifdef NAPI -+#undef NAPI -+#endif -+ -+#endif /* 2.4.20 => 2.4.19 */ -+ -+/*****************************************************************************/ -+/* 2.4.22 => 2.4.17 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) -+#define pci_name(x) ((x)->slot_name) -+ -+#ifndef SUPPORTED_10000baseT_Full -+#define SUPPORTED_10000baseT_Full (1 << 12) -+#endif -+#ifndef ADVERTISED_10000baseT_Full -+#define ADVERTISED_10000baseT_Full (1 << 12) -+#endif -+#endif -+ -+/*****************************************************************************/ -+/* 2.4.22 => 2.4.17 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) -+#ifndef IGB_NO_LRO -+#define IGB_NO_LRO -+#endif -+#endif -+ -+/*****************************************************************************/ -+/*****************************************************************************/ -+/* 2.4.23 => 2.4.22 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) -+/*****************************************************************************/ -+#ifdef NAPI -+#ifndef netif_poll_disable -+#define netif_poll_disable(x) _kc_netif_poll_disable(x) -+static inline void _kc_netif_poll_disable(struct net_device *netdev) -+{ -+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { -+ /* No hurry */ -+ current->state = TASK_INTERRUPTIBLE; -+ schedule_timeout(1); -+ } -+} -+#endif -+#ifndef netif_poll_enable -+#define netif_poll_enable(x) _kc_netif_poll_enable(x) -+static inline void _kc_netif_poll_enable(struct net_device *netdev) -+{ -+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); -+} -+#endif -+#endif /* NAPI */ -+#ifndef netif_tx_disable -+#define netif_tx_disable(x) _kc_netif_tx_disable(x) -+static inline void _kc_netif_tx_disable(struct net_device *dev) -+{ -+ spin_lock_bh(&dev->xmit_lock); -+ netif_stop_queue(dev); -+ spin_unlock_bh(&dev->xmit_lock); -+} -+#endif -+#else /* 2.4.23 => 2.4.22 */ -+#define HAVE_SCTP -+#endif /* 2.4.23 => 2.4.22 */ -+ -+/*****************************************************************************/ -+/* 2.6.4 => 2.6.0 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ -+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ -+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) -+#define ETHTOOL_OPS_COMPAT -+#endif /* 2.6.4 => 2.6.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) -+#define __user -+#endif /* < 2.4.27 */ -+ -+/*****************************************************************************/ -+/* 2.5.71 => 2.4.x */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) -+#define sk_protocol protocol -+#define pci_get_device pci_find_device -+#endif /* 2.5.70 => 2.4.x */ -+ -+/*****************************************************************************/ -+/* 
< 2.4.27 or 2.6.0 <= 2.6.5 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ -+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ -+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) -+ -+#ifndef netif_msg_init -+#define netif_msg_init _kc_netif_msg_init -+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) -+{ -+ /* use default */ -+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) -+ return default_msg_enable_bits; -+ if (debug_value == 0) /* no output */ -+ return 0; -+ /* set low N bits */ -+ return (1 << debug_value) -1; -+} -+#endif -+ -+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ -+/*****************************************************************************/ -+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ -+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ -+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) -+#define netdev_priv(x) x->priv -+#endif -+ -+/*****************************************************************************/ -+/* <= 2.5.0 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) -+#include -+#undef pci_register_driver -+#define pci_register_driver pci_module_init -+ -+/* -+ * Most of the dma compat code is copied/modifed from the 2.4.37 -+ * /include/linux/libata-compat.h header file -+ */ -+/* These definitions mirror those in pci.h, so they can be used -+ * interchangeably with their PCI_ counterparts */ -+enum dma_data_direction { -+ DMA_BIDIRECTIONAL = 0, -+ DMA_TO_DEVICE = 1, -+ DMA_FROM_DEVICE = 2, -+ DMA_NONE = 3, -+}; -+ -+struct device { -+ struct pci_dev pdev; -+}; -+ -+static inline struct pci_dev *to_pci_dev (struct device *dev) -+{ -+ return (struct pci_dev *) dev; -+} -+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) -+{ -+ return (struct device *) pdev; -+} -+ -+#define pdev_printk(lvl, pdev, fmt, args...) \ -+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) -+#define dev_err(dev, fmt, args...) \ -+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) -+#define dev_info(dev, fmt, args...) \ -+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) -+#define dev_warn(dev, fmt, args...) \ -+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) -+#define dev_notice(dev, fmt, args...) \ -+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) -+#define dev_dbg(dev, fmt, args...) \ -+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) -+ -+/* NOTE: dangerous! 
we ignore the 'gfp' argument */ -+#define dma_alloc_coherent(dev,sz,dma,gfp) \ -+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) -+#define dma_free_coherent(dev,sz,addr,dma_addr) \ -+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) -+ -+#define dma_map_page(dev,a,b,c,d) \ -+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) -+#define dma_unmap_page(dev,a,b,c) \ -+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) -+ -+#define dma_map_single(dev,a,b,c) \ -+ pci_map_single(to_pci_dev(dev),(a),(b),(c)) -+#define dma_unmap_single(dev,a,b,c) \ -+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) -+ -+#define dma_map_sg(dev, sg, nents, dir) \ -+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) -+#define dma_unmap_sg(dev, sg, nents, dir) \ -+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) -+ -+#define dma_sync_single(dev,a,b,c) \ -+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) -+ -+/* for range just sync everything, that's all the pci API can do */ -+#define dma_sync_single_range(dev,addr,off,sz,dir) \ -+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) -+ -+#define dma_set_mask(dev,mask) \ -+ pci_set_dma_mask(to_pci_dev(dev),(mask)) -+ -+/* hlist_* code - double linked lists */ -+struct hlist_head { -+ struct hlist_node *first; -+}; -+ -+struct hlist_node { -+ struct hlist_node *next, **pprev; -+}; -+ -+static inline void __hlist_del(struct hlist_node *n) -+{ -+ struct hlist_node *next = n->next; -+ struct hlist_node **pprev = n->pprev; -+ *pprev = next; -+ if (next) -+ next->pprev = pprev; -+} -+ -+static inline void hlist_del(struct hlist_node *n) -+{ -+ __hlist_del(n); -+ n->next = NULL; -+ n->pprev = NULL; -+} -+ -+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) -+{ -+ struct hlist_node *first = h->first; -+ n->next = first; -+ if (first) -+ first->pprev = &n->next; -+ h->first = n; -+ n->pprev = &h->first; -+} -+ -+static inline int hlist_empty(const struct hlist_head *h) -+{ -+ return !h->first; -+} -+#define HLIST_HEAD_INIT { .first = NULL } -+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } -+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) -+static inline void INIT_HLIST_NODE(struct hlist_node *h) -+{ -+ h->next = NULL; -+ h->pprev = NULL; -+} -+ -+#ifndef might_sleep -+#define might_sleep() -+#endif -+#else -+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) -+{ -+ return &pdev->dev; -+} -+#endif /* <= 2.5.0 */ -+ -+/*****************************************************************************/ -+/* 2.5.28 => 2.4.23 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) -+ -+#include -+#define work_struct tq_struct -+#undef INIT_WORK -+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) -+#undef container_of -+#define container_of list_entry -+#define schedule_work schedule_task -+#define flush_scheduled_work flush_scheduled_tasks -+#define cancel_work_sync(x) flush_scheduled_work() -+ -+#endif /* 2.5.28 => 2.4.17 */ -+ -+/*****************************************************************************/ -+/* 2.6.0 => 2.5.28 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -+#ifndef read_barrier_depends -+#define read_barrier_depends() rmb() -+#endif -+ -+#ifndef rcu_head -+struct __kc_callback_head { -+ struct __kc_callback_head *next; -+ void (*func)(struct callback_head *head); -+}; -+#define rcu_head __kc_callback_head -+#endif -+ -+#undef get_cpu -+#define get_cpu() smp_processor_id() -+#undef put_cpu -+#define put_cpu() do { } while(0) -+#define MODULE_INFO(version, _version) 
-+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT -+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 -+#endif -+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT -+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 -+#endif -+ -+#define dma_set_coherent_mask(dev,mask) 1 -+ -+#undef dev_put -+#define dev_put(dev) __dev_put(dev) -+ -+#ifndef skb_fill_page_desc -+#define skb_fill_page_desc _kc_skb_fill_page_desc -+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); -+#endif -+ -+#undef ALIGN -+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) -+ -+#ifndef page_count -+#define page_count(p) atomic_read(&(p)->count) -+#endif -+ -+#ifdef MAX_NUMNODES -+#undef MAX_NUMNODES -+#endif -+#define MAX_NUMNODES 1 -+ -+/* find_first_bit and find_next bit are not defined for most -+ * 2.4 kernels (except for the redhat 2.4.21 kernels -+ */ -+#include -+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -+#undef find_next_bit -+#define find_next_bit _kc_find_next_bit -+extern unsigned long _kc_find_next_bit(const unsigned long *addr, -+ unsigned long size, -+ unsigned long offset); -+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) -+ -+#ifndef netdev_name -+static inline const char *_kc_netdev_name(const struct net_device *dev) -+{ -+ if (strchr(dev->name, '%')) -+ return "(unregistered net_device)"; -+ return dev->name; -+} -+#define netdev_name(netdev) _kc_netdev_name(netdev) -+#endif /* netdev_name */ -+ -+#ifndef strlcpy -+#define strlcpy _kc_strlcpy -+extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); -+#endif /* strlcpy */ -+ -+#ifndef do_div -+#if BITS_PER_LONG == 64 -+# define do_div(n,base) ({ \ -+ uint32_t __base = (base); \ -+ uint32_t __rem; \ -+ __rem = ((uint64_t)(n)) % __base; \ -+ (n) = ((uint64_t)(n)) / __base; \ -+ __rem; \ -+ }) -+#elif BITS_PER_LONG == 32 -+extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); -+# define do_div(n,base) ({ \ -+ uint32_t __base = (base); \ -+ uint32_t __rem; \ -+ if (likely(((n) >> 32) == 0)) { \ -+ __rem = (uint32_t)(n) % __base; \ -+ (n) = (uint32_t)(n) / __base; \ -+ } else \ -+ __rem = _kc__div64_32(&(n), __base); \ -+ __rem; \ -+ }) -+#else /* BITS_PER_LONG == ?? 
*/ -+# error do_div() does not yet support the C64 -+#endif /* BITS_PER_LONG */ -+#endif /* do_div */ -+ -+#ifndef NSEC_PER_SEC -+#define NSEC_PER_SEC 1000000000L -+#endif -+ -+#undef HAVE_I2C_SUPPORT -+#else /* 2.6.0 */ -+#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \ -+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9))) -+#define HAVE_I2C_SUPPORT -+#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */ -+ -+#endif /* 2.6.0 => 2.5.28 */ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) -+#define dma_pool pci_pool -+#define dma_pool_destroy pci_pool_destroy -+#define dma_pool_alloc pci_pool_alloc -+#define dma_pool_free pci_pool_free -+ -+#define dma_pool_create(name,dev,size,align,allocation) \ -+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) -+#endif /* < 2.6.3 */ -+ -+/*****************************************************************************/ -+/* 2.6.4 => 2.6.0 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) -+#define MODULE_VERSION(_version) MODULE_INFO(version, _version) -+#endif /* 2.6.4 => 2.6.0 */ -+ -+/*****************************************************************************/ -+/* 2.6.5 => 2.6.0 */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) -+#define dma_sync_single_for_cpu dma_sync_single -+#define dma_sync_single_for_device dma_sync_single -+#define dma_sync_single_range_for_cpu dma_sync_single_range -+#define dma_sync_single_range_for_device dma_sync_single_range -+#ifndef pci_dma_mapping_error -+#define pci_dma_mapping_error _kc_pci_dma_mapping_error -+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) -+{ -+ return dma_addr == 0; -+} -+#endif -+#endif /* 2.6.5 => 2.6.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) -+extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); -+#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) -+#endif /* < 2.6.4 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) -+/* taken from 2.6 include/linux/bitmap.h */ -+#undef bitmap_zero -+#define bitmap_zero _kc_bitmap_zero -+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) -+{ -+ if (nbits <= BITS_PER_LONG) -+ *dst = 0UL; -+ else { -+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); -+ memset(dst, 0, len); -+ } -+} -+#define page_to_nid(x) 0 -+ -+#endif /* < 2.6.6 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) -+#undef if_mii -+#define if_mii _kc_if_mii -+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) -+{ -+ return (struct mii_ioctl_data *) &rq->ifr_ifru; -+} -+ -+#ifndef __force -+#define __force -+#endif -+#endif /* < 2.6.7 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) -+#ifndef PCI_EXP_DEVCTL -+#define PCI_EXP_DEVCTL 8 -+#endif -+#ifndef PCI_EXP_DEVCTL_CERE -+#define PCI_EXP_DEVCTL_CERE 0x0001 -+#endif -+#define PCI_EXP_FLAGS 2 /* Capabilities register */ -+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ -+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ -+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ -+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ -+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ -+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ -+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ -+#define PCI_EXP_DEVCAP 4 /* Device capabilities */ -+#define PCI_EXP_DEVSTA 10 /* Device Status */ -+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ -+ schedule_timeout((x * HZ)/1000 + 2); \ -+ } while (0) -+ -+#endif /* < 2.6.8 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) -+#include -+#define __iomem -+ -+#ifndef kcalloc -+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) -+extern void *_kc_kzalloc(size_t size, int flags); -+#endif -+#define MSEC_PER_SEC 1000L -+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) -+{ -+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) -+ return (MSEC_PER_SEC / HZ) * j; -+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) -+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); -+#else -+ return (j * MSEC_PER_SEC) / HZ; -+#endif -+} -+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) -+{ -+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) -+ return MAX_JIFFY_OFFSET; -+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) -+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); -+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) -+ return m * (HZ / MSEC_PER_SEC); -+#else -+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; -+#endif -+} -+ -+#define msleep_interruptible _kc_msleep_interruptible -+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) -+{ -+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; -+ -+ while (timeout && !signal_pending(current)) { -+ __set_current_state(TASK_INTERRUPTIBLE); -+ timeout = schedule_timeout(timeout); -+ } -+ return _kc_jiffies_to_msecs(timeout); -+} -+ -+/* Basic mode control register. 
*/ -+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ -+ -+#ifndef __le16 -+#define __le16 u16 -+#endif -+#ifndef __le32 -+#define __le32 u32 -+#endif -+#ifndef __le64 -+#define __le64 u64 -+#endif -+#ifndef __be16 -+#define __be16 u16 -+#endif -+#ifndef __be32 -+#define __be32 u32 -+#endif -+#ifndef __be64 -+#define __be64 u64 -+#endif -+ -+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) -+{ -+ return (struct vlan_ethhdr *)skb->mac.raw; -+} -+ -+/* Wake-On-Lan options. */ -+#define WAKE_PHY (1 << 0) -+#define WAKE_UCAST (1 << 1) -+#define WAKE_MCAST (1 << 2) -+#define WAKE_BCAST (1 << 3) -+#define WAKE_ARP (1 << 4) -+#define WAKE_MAGIC (1 << 5) -+#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ -+ -+#define skb_header_pointer _kc_skb_header_pointer -+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, -+ int offset, int len, void *buffer) -+{ -+ int hlen = skb_headlen(skb); -+ -+ if (hlen - offset >= len) -+ return skb->data + offset; -+ -+#ifdef MAX_SKB_FRAGS -+ if (skb_copy_bits(skb, offset, buffer, len) < 0) -+ return NULL; -+ -+ return buffer; -+#else -+ return NULL; -+#endif -+ -+#ifndef NETDEV_TX_OK -+#define NETDEV_TX_OK 0 -+#endif -+#ifndef NETDEV_TX_BUSY -+#define NETDEV_TX_BUSY 1 -+#endif -+#ifndef NETDEV_TX_LOCKED -+#define NETDEV_TX_LOCKED -1 -+#endif -+} -+ -+#ifndef __bitwise -+#define __bitwise -+#endif -+#endif /* < 2.6.9 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) -+#ifdef module_param_array_named -+#undef module_param_array_named -+#define module_param_array_named(name, array, type, nump, perm) \ -+ static struct kparam_array __param_arr_##name \ -+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ -+ sizeof(array[0]), array }; \ -+ module_param_call(name, param_array_set, param_array_get, \ -+ &__param_arr_##name, perm) -+#endif /* module_param_array_named */ -+/* -+ * num_online is broken for all < 2.6.10 kernels. This is needed to support -+ * Node module parameter of ixgbe. 
-+ */ -+#undef num_online_nodes -+#define num_online_nodes(n) 1 -+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); -+#undef node_online_map -+#define node_online_map _kcompat_node_online_map -+#define pci_get_class pci_find_class -+#endif /* < 2.6.10 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) -+#define PCI_D0 0 -+#define PCI_D1 1 -+#define PCI_D2 2 -+#define PCI_D3hot 3 -+#define PCI_D3cold 4 -+typedef int pci_power_t; -+#define pci_choose_state(pdev,state) state -+#define PMSG_SUSPEND 3 -+#define PCI_EXP_LNKCTL 16 -+ -+#undef NETIF_F_LLTX -+ -+#ifndef ARCH_HAS_PREFETCH -+#define prefetch(X) -+#endif -+ -+#ifndef NET_IP_ALIGN -+#define NET_IP_ALIGN 2 -+#endif -+ -+#define KC_USEC_PER_SEC 1000000L -+#define usecs_to_jiffies _kc_usecs_to_jiffies -+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) -+{ -+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) -+ return (KC_USEC_PER_SEC / HZ) * j; -+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) -+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); -+#else -+ return (j * KC_USEC_PER_SEC) / HZ; -+#endif -+} -+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) -+{ -+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) -+ return MAX_JIFFY_OFFSET; -+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) -+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); -+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) -+ return m * (HZ / KC_USEC_PER_SEC); -+#else -+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; -+#endif -+} -+ -+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ -+#define PCI_EXP_LNKSTA 18 /* Link Status */ -+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ -+#define PCI_EXP_SLTCTL 24 /* Slot Control */ -+#define PCI_EXP_SLTSTA 26 /* Slot Status */ -+#define PCI_EXP_RTCTL 28 /* Root Control */ -+#define PCI_EXP_RTCAP 30 /* Root Capabilities */ -+#define PCI_EXP_RTSTA 32 /* Root Status */ -+#endif /* < 2.6.11 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) -+#include -+#define USE_REBOOT_NOTIFIER -+ -+/* Generic MII registers. */ -+#define MII_CTRL1000 0x09 /* 1000BASE-T control */ -+#define MII_STAT1000 0x0a /* 1000BASE-T status */ -+/* Advertisement control register. */ -+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ -+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ -+/* Link partner ability register. 
*/ -+#define LPA_PAUSE_CAP 0x0400 /* Can pause */ -+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ -+/* 1000BASE-T Control register */ -+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ -+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ -+/* 1000BASE-T Status register */ -+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ -+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ -+ -+#ifndef is_zero_ether_addr -+#define is_zero_ether_addr _kc_is_zero_ether_addr -+static inline int _kc_is_zero_ether_addr(const u8 *addr) -+{ -+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); -+} -+#endif /* is_zero_ether_addr */ -+#ifndef is_multicast_ether_addr -+#define is_multicast_ether_addr _kc_is_multicast_ether_addr -+static inline int _kc_is_multicast_ether_addr(const u8 *addr) -+{ -+ return addr[0] & 0x01; -+} -+#endif /* is_multicast_ether_addr */ -+#endif /* < 2.6.12 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) -+#ifndef kstrdup -+#define kstrdup _kc_kstrdup -+extern char *_kc_kstrdup(const char *s, unsigned int gfp); -+#endif -+#endif /* < 2.6.13 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) -+#define pm_message_t u32 -+#ifndef kzalloc -+#define kzalloc _kc_kzalloc -+extern void *_kc_kzalloc(size_t size, int flags); -+#endif -+ -+/* Generic MII registers. */ -+#define MII_ESTATUS 0x0f /* Extended Status */ -+/* Basic mode status register. */ -+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ -+/* Extended status register. */ -+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ -+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ -+ -+#define SUPPORTED_Pause (1 << 13) -+#define SUPPORTED_Asym_Pause (1 << 14) -+#define ADVERTISED_Pause (1 << 13) -+#define ADVERTISED_Asym_Pause (1 << 14) -+ -+#if (!(RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) -+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) -+#define gfp_t unsigned -+#else -+typedef unsigned gfp_t; -+#endif -+#endif /* !RHEL4.3->RHEL5.0 */ -+ -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) -+#ifdef CONFIG_X86_64 -+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ -+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) -+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ -+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) -+#endif -+#endif -+#endif /* < 2.6.14 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) -+#ifndef kfree_rcu -+/* this is placed here due to a lack of rcu_barrier in previous kernels */ -+#define kfree_rcu(_ptr, _offset) kfree(_ptr) -+#endif /* kfree_rcu */ -+#ifndef vmalloc_node -+#define vmalloc_node(a,b) vmalloc(a) -+#endif /* vmalloc_node*/ -+ -+#define setup_timer(_timer, _function, _data) \ -+do { \ -+ (_timer)->function = _function; \ -+ (_timer)->data = _data; \ -+ init_timer(_timer); \ -+} while (0) -+#ifndef device_can_wakeup -+#define device_can_wakeup(dev) (1) -+#endif -+#ifndef device_set_wakeup_enable -+#define device_set_wakeup_enable(dev, val) do{}while(0) -+#endif -+#ifndef device_init_wakeup -+#define 
device_init_wakeup(dev,val) do {} while (0) -+#endif -+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) -+{ -+ const u16 *a = (const u16 *) addr1; -+ const u16 *b = (const u16 *) addr2; -+ -+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; -+} -+#undef compare_ether_addr -+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) -+#endif /* < 2.6.15 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) -+#undef DEFINE_MUTEX -+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) -+#define mutex_lock(x) down_interruptible(x) -+#define mutex_unlock(x) up(x) -+ -+#ifndef ____cacheline_internodealigned_in_smp -+#ifdef CONFIG_SMP -+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp -+#else -+#define ____cacheline_internodealigned_in_smp -+#endif /* CONFIG_SMP */ -+#endif /* ____cacheline_internodealigned_in_smp */ -+#undef HAVE_PCI_ERS -+#else /* 2.6.16 and above */ -+#undef HAVE_PCI_ERS -+#define HAVE_PCI_ERS -+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) -+#ifdef device_can_wakeup -+#undef device_can_wakeup -+#endif /* device_can_wakeup */ -+#define device_can_wakeup(dev) 1 -+#endif /* SLE_VERSION(10,4,0) */ -+#endif /* < 2.6.16 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) -+#ifndef dev_notice -+#define dev_notice(dev, fmt, args...) \ -+ dev_printk(KERN_NOTICE, dev, fmt, ## args) -+#endif -+ -+#ifndef first_online_node -+#define first_online_node 0 -+#endif -+#ifndef NET_SKB_PAD -+#define NET_SKB_PAD 16 -+#endif -+#endif /* < 2.6.17 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) -+ -+#ifndef IRQ_HANDLED -+#define irqreturn_t void -+#define IRQ_HANDLED -+#define IRQ_NONE -+#endif -+ -+#ifndef IRQF_PROBE_SHARED -+#ifdef SA_PROBEIRQ -+#define IRQF_PROBE_SHARED SA_PROBEIRQ -+#else -+#define IRQF_PROBE_SHARED 0 -+#endif -+#endif -+ -+#ifndef IRQF_SHARED -+#define IRQF_SHARED SA_SHIRQ -+#endif -+ -+#ifndef ARRAY_SIZE -+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -+#endif -+ -+#ifndef FIELD_SIZEOF -+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) -+#endif -+ -+#ifndef skb_is_gso -+#ifdef NETIF_F_TSO -+#define skb_is_gso _kc_skb_is_gso -+static inline int _kc_skb_is_gso(const struct sk_buff *skb) -+{ -+ return skb_shinfo(skb)->gso_size; -+} -+#else -+#define skb_is_gso(a) 0 -+#endif -+#endif -+ -+#ifndef resource_size_t -+#define resource_size_t unsigned long -+#endif -+ -+#ifdef skb_pad -+#undef skb_pad -+#endif -+#define skb_pad(x,y) _kc_skb_pad(x, y) -+int _kc_skb_pad(struct sk_buff *skb, int pad); -+#ifdef skb_padto -+#undef skb_padto -+#endif -+#define skb_padto(x,y) _kc_skb_padto(x, y) -+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) -+{ -+ unsigned int size = skb->len; -+ if(likely(size >= len)) -+ return 0; -+ return _kc_skb_pad(skb, len - size); -+} -+ -+#ifndef DECLARE_PCI_UNMAP_ADDR -+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ -+ dma_addr_t ADDR_NAME -+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ -+ u32 LEN_NAME -+#define pci_unmap_addr(PTR, ADDR_NAME) \ -+ ((PTR)->ADDR_NAME) -+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ -+ (((PTR)->ADDR_NAME) = (VAL)) -+#define pci_unmap_len(PTR, LEN_NAME) \ -+ ((PTR)->LEN_NAME) -+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ -+ (((PTR)->LEN_NAME) 
= (VAL)) -+#endif /* DECLARE_PCI_UNMAP_ADDR */ -+#endif /* < 2.6.18 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) -+enum pcie_link_width { -+ PCIE_LNK_WIDTH_RESRV = 0x00, -+ PCIE_LNK_X1 = 0x01, -+ PCIE_LNK_X2 = 0x02, -+ PCIE_LNK_X4 = 0x04, -+ PCIE_LNK_X8 = 0x08, -+ PCIE_LNK_X12 = 0x0C, -+ PCIE_LNK_X16 = 0x10, -+ PCIE_LNK_X32 = 0x20, -+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF, -+}; -+ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) -+#define i_private u.generic_ip -+#endif /* >= RHEL 5.0 */ -+ -+#ifndef DIV_ROUND_UP -+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) -+#endif -+#ifndef __ALIGN_MASK -+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) -+#endif -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) -+#if (!((RHEL_RELEASE_CODE && \ -+ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ -+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ -+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) -+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); -+#endif -+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) -+#undef CONFIG_INET_LRO -+#undef CONFIG_INET_LRO_MODULE -+#endif -+typedef irqreturn_t (*new_handler_t)(int, void*); -+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) -+#else /* 2.4.x */ -+typedef void (*irq_handler_t)(int, void*, struct pt_regs *); -+typedef void (*new_handler_t)(int, void*); -+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) -+#endif /* >= 2.5.x */ -+{ -+ irq_handler_t new_handler = (irq_handler_t) handler; -+ return request_irq(irq, new_handler, flags, devname, dev_id); -+} -+ -+#undef request_irq -+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) -+ -+#define irq_handler_t new_handler_t -+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) -+#define PCIE_CONFIG_SPACE_LEN 256 -+#define PCI_CONFIG_SPACE_LEN 64 -+#define PCIE_LINK_STATUS 0x12 -+#define pci_config_space_ich8lan() do {} while(0) -+#undef pci_save_state -+extern int _kc_pci_save_state(struct pci_dev *); -+#define pci_save_state(pdev) _kc_pci_save_state(pdev) -+#undef pci_restore_state -+extern void _kc_pci_restore_state(struct pci_dev *); -+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) -+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ -+ -+#ifdef HAVE_PCI_ERS -+#undef free_netdev -+extern void _kc_free_netdev(struct net_device *); -+#define free_netdev(netdev) _kc_free_netdev(netdev) -+#endif -+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) -+{ -+ return 0; -+} -+#define pci_disable_pcie_error_reporting(dev) do {} while (0) -+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) -+ -+extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); -+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) -+#ifndef bool -+#define bool _Bool -+#define true 1 -+#define false 0 -+#endif -+#else /* 2.6.19 */ -+#include -+#include -+#include -+#endif /* < 2.6.19 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) -+#if ( LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,5,28) ) -+#undef INIT_WORK -+#define INIT_WORK(_work, _func) \ -+do { \ -+ INIT_LIST_HEAD(&(_work)->entry); \ -+ (_work)->pending = 0; \ -+ (_work)->func = (void (*)(void *))_func; \ -+ (_work)->data = _work; \ -+ init_timer(&(_work)->timer); \ -+} while (0) -+#endif -+ -+#ifndef PCI_VDEVICE -+#define PCI_VDEVICE(ven, dev) \ -+ PCI_VENDOR_ID_##ven, (dev), \ -+ PCI_ANY_ID, PCI_ANY_ID, 0, 0 -+#endif -+ -+#ifndef PCI_VENDOR_ID_INTEL -+#define PCI_VENDOR_ID_INTEL 0x8086 -+#endif -+ -+#ifndef round_jiffies -+#define round_jiffies(x) x -+#endif -+ -+#define csum_offset csum -+ -+#define HAVE_EARLY_VMALLOC_NODE -+#define dev_to_node(dev) -1 -+#undef set_dev_node -+/* remove compiler warning with b=b, for unused variable */ -+#define set_dev_node(a, b) do { (b) = (b); } while(0) -+ -+#if (!(RHEL_RELEASE_CODE && \ -+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ -+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) -+typedef __u16 __bitwise __sum16; -+typedef __u32 __bitwise __wsum; -+#endif -+ -+#if (!(RHEL_RELEASE_CODE && \ -+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ -+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) -+static inline __wsum csum_unfold(__sum16 n) -+{ -+ return (__force __wsum)n; -+} -+#endif -+ -+#else /* < 2.6.20 */ -+#define HAVE_DEVICE_NUMA_NODE -+#endif /* < 2.6.20 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) -+#define to_net_dev(class) container_of(class, struct net_device, class_dev) -+#define NETDEV_CLASS_DEV -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) -+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) -+#define vlan_group_set_device(vg, id, dev) \ -+ do { \ -+ if (vg) vg->vlan_devices[id] = dev; \ -+ } while (0) -+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ -+#define pci_channel_offline(pdev) (pdev->error_state && \ -+ pdev->error_state != pci_channel_io_normal) -+#define pci_request_selected_regions(pdev, bars, name) \ -+ pci_request_regions(pdev, name) -+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); -+ -+#ifndef __aligned -+#define __aligned(x) __attribute__((aligned(x))) -+#endif -+ -+extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); -+#define netdev_to_dev(netdev) \ -+ pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) -+#else -+static inline struct device *netdev_to_dev(struct net_device *netdev) -+{ -+ return &netdev->dev; -+} -+ -+#endif /* < 2.6.21 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) -+#define tcp_hdr(skb) (skb->h.th) -+#define tcp_hdrlen(skb) (skb->h.th->doff << 2) -+#define skb_transport_offset(skb) (skb->h.raw - skb->data) -+#define skb_transport_header(skb) (skb->h.raw) -+#define ipv6_hdr(skb) (skb->nh.ipv6h) -+#define ip_hdr(skb) (skb->nh.iph) -+#define skb_network_offset(skb) (skb->nh.raw - skb->data) -+#define skb_network_header(skb) (skb->nh.raw) -+#define skb_tail_pointer(skb) skb->tail -+#define skb_reset_tail_pointer(skb) \ -+ do { \ -+ skb->tail = skb->data; \ -+ } while (0) -+#define skb_set_tail_pointer(skb, offset) \ -+ do { \ -+ skb->tail = 
skb->data + offset; \ -+ } while (0) -+#define skb_copy_to_linear_data(skb, from, len) \ -+ memcpy(skb->data, from, len) -+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ -+ memcpy(skb->data + offset, from, len) -+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) -+#define pci_register_driver pci_module_init -+#define skb_mac_header(skb) skb->mac.raw -+ -+#ifdef NETIF_F_MULTI_QUEUE -+#ifndef alloc_etherdev_mq -+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) -+#endif -+#endif /* NETIF_F_MULTI_QUEUE */ -+ -+#ifndef ETH_FCS_LEN -+#define ETH_FCS_LEN 4 -+#endif -+#define cancel_work_sync(x) flush_scheduled_work() -+#ifndef udp_hdr -+#define udp_hdr _udp_hdr -+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) -+{ -+ return (struct udphdr *)skb_transport_header(skb); -+} -+#endif -+ -+#ifdef cpu_to_be16 -+#undef cpu_to_be16 -+#endif -+#define cpu_to_be16(x) __constant_htons(x) -+ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) -+enum { -+ DUMP_PREFIX_NONE, -+ DUMP_PREFIX_ADDRESS, -+ DUMP_PREFIX_OFFSET -+}; -+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ -+#ifndef hex_asc -+#define hex_asc(x) "0123456789abcdef"[x] -+#endif -+#include -+extern void _kc_print_hex_dump(const char *level, const char *prefix_str, -+ int prefix_type, int rowsize, int groupsize, -+ const void *buf, size_t len, bool ascii); -+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ -+ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) -+#ifndef ADVERTISED_2500baseX_Full -+#define ADVERTISED_2500baseX_Full (1 << 15) -+#endif -+#ifndef SUPPORTED_2500baseX_Full -+#define SUPPORTED_2500baseX_Full (1 << 15) -+#endif -+ -+#ifdef HAVE_I2C_SUPPORT -+#include -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) -+struct i2c_board_info { -+ char driver_name[KOBJ_NAME_LEN]; -+ char type[I2C_NAME_SIZE]; -+ unsigned short flags; -+ unsigned short addr; -+ void *platform_data; -+}; -+#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\ -+ .addr = (dev_addr) -+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ -+#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info) -+extern struct i2c_client * -+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); -+#endif /* HAVE_I2C_SUPPORT */ -+ -+#ifndef ETH_P_PAUSE -+#define ETH_P_PAUSE 0x8808 -+#endif -+ -+#else /* 2.6.22 */ -+#define ETH_TYPE_TRANS_SETS_DEV -+#define HAVE_NETDEV_STATS_IN_NETDEV -+#endif /* < 2.6.22 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) -+#undef SET_MODULE_OWNER -+#define SET_MODULE_OWNER(dev) do { } while (0) -+#endif /* > 2.6.22 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) -+#define netif_subqueue_stopped(_a, _b) 0 -+#ifndef PTR_ALIGN -+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) -+#endif -+ -+#ifndef CONFIG_PM_SLEEP -+#define CONFIG_PM_SLEEP CONFIG_PM -+#endif -+ -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) -+#define HAVE_ETHTOOL_GET_PERM_ADDR -+#endif /* 2.6.14 through 2.6.22 */ -+ -+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) -+{ -+ int delta = 0; -+ -+ if (headroom > (skb->data - skb->head)) -+ delta = headroom - (skb->data - skb->head); -+ -+ if (delta || skb_header_cloned(skb)) -+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 
0, -+ GFP_ATOMIC); -+ return 0; -+} -+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) -+#endif /* < 2.6.23 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -+#ifndef ETH_FLAG_LRO -+#define ETH_FLAG_LRO NETIF_F_LRO -+#endif -+ -+#ifndef ACCESS_ONCE -+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) -+#endif -+ -+/* if GRO is supported then the napi struct must already exist */ -+#ifndef NETIF_F_GRO -+/* NAPI API changes in 2.6.24 break everything */ -+struct napi_struct { -+ /* used to look up the real NAPI polling routine */ -+ int (*poll)(struct napi_struct *, int); -+ struct net_device *dev; -+ int weight; -+}; -+#endif -+ -+#ifdef NAPI -+extern int __kc_adapter_clean(struct net_device *, int *); -+/* The following definitions are multi-queue aware, and thus we have a driver -+ * define list which determines which drivers support multiple queues, and -+ * thus need these stronger defines. If a driver does not support multi-queue -+ * functionality, you don't need to add it to this list. -+ */ -+extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); -+ -+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, -+ int (*poll)(struct napi_struct *, int), int weight) -+{ -+ struct net_device *poll_dev = napi_to_poll_dev(napi); -+ poll_dev->poll = __kc_adapter_clean; -+ poll_dev->priv = napi; -+ poll_dev->weight = weight; -+ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); -+ set_bit(__LINK_STATE_START, &poll_dev->state); -+ dev_hold(poll_dev); -+ napi->poll = poll; -+ napi->weight = weight; -+ napi->dev = dev; -+} -+#define netif_napi_add __kc_mq_netif_napi_add -+ -+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) -+{ -+ struct net_device *poll_dev = napi_to_poll_dev(napi); -+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); -+ dev_put(poll_dev); -+ memset(poll_dev, 0, sizeof(struct net_device)); -+} -+ -+#define netif_napi_del __kc_mq_netif_napi_del -+ -+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) -+{ -+ return netif_running(napi->dev) && -+ netif_rx_schedule_prep(napi_to_poll_dev(napi)); -+} -+#define napi_schedule_prep __kc_mq_napi_schedule_prep -+ -+static inline void __kc_mq_napi_schedule(struct napi_struct *napi) -+{ -+ if (napi_schedule_prep(napi)) -+ __netif_rx_schedule(napi_to_poll_dev(napi)); -+} -+#define napi_schedule __kc_mq_napi_schedule -+ -+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) -+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) -+#ifdef CONFIG_SMP -+static inline void napi_synchronize(const struct napi_struct *n) -+{ -+ struct net_device *dev = napi_to_poll_dev(n); -+ -+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { -+ /* No hurry. */ -+ msleep(1); -+ } -+} -+#else -+#define napi_synchronize(n) barrier() -+#endif /* CONFIG_SMP */ -+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) -+static inline void _kc_napi_complete(struct napi_struct *napi) -+{ -+#ifdef NETIF_F_GRO -+ napi_gro_flush(napi); -+#endif -+ netif_rx_complete(napi_to_poll_dev(napi)); -+} -+#define napi_complete _kc_napi_complete -+#else /* NAPI */ -+ -+/* The following definitions are only used if we don't support NAPI at all. 
*/ -+ -+static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, -+ int (*poll)(struct napi_struct *, int), int weight) -+{ -+ dev->poll = poll; -+ dev->weight = weight; -+ napi->poll = poll; -+ napi->weight = weight; -+ napi->dev = dev; -+} -+#define netif_napi_del(_a) do {} while (0) -+#endif /* NAPI */ -+ -+#undef dev_get_by_name -+#define dev_get_by_name(_a, _b) dev_get_by_name(_b) -+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) -+#ifndef DMA_BIT_MASK -+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) -+#endif -+ -+#ifdef NETIF_F_TSO6 -+#define skb_is_gso_v6 _kc_skb_is_gso_v6 -+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) -+{ -+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; -+} -+#endif /* NETIF_F_TSO6 */ -+ -+#ifndef KERN_CONT -+#define KERN_CONT "" -+#endif -+#ifndef pr_err -+#define pr_err(fmt, arg...) \ -+ printk(KERN_ERR fmt, ##arg) -+#endif -+ -+#ifndef rounddown_pow_of_two -+#define rounddown_pow_of_two(n) \ -+ __builtin_constant_p(n) ? ( \ -+ (n == 1) ? 0 : \ -+ (1UL << ilog2(n))) : \ -+ (1UL << (fls_long(n) - 1)) -+#endif -+ -+#ifndef BIT -+#define BIT(nr) (1UL << (nr)) -+#endif -+ -+#else /* < 2.6.24 */ -+#define HAVE_ETHTOOL_GET_SSET_COUNT -+#define HAVE_NETDEV_NAPI_LIST -+#endif /* < 2.6.24 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) -+#define INCLUDE_PM_QOS_PARAMS_H -+#include -+#else /* >= 3.2.0 */ -+#include -+#endif /* else >= 3.2.0 */ -+#endif /* > 2.6.24 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) -+#define PM_QOS_CPU_DMA_LATENCY 1 -+ -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) -+#include -+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY -+#define pm_qos_add_requirement(pm_qos_class, name, value) \ -+ set_acceptable_latency(name, value) -+#define pm_qos_remove_requirement(pm_qos_class, name) \ -+ remove_acceptable_latency(name) -+#define pm_qos_update_requirement(pm_qos_class, name, value) \ -+ modify_acceptable_latency(name, value) -+#else -+#define PM_QOS_DEFAULT_VALUE -1 -+#define pm_qos_add_requirement(pm_qos_class, name, value) -+#define pm_qos_remove_requirement(pm_qos_class, name) -+#define pm_qos_update_requirement(pm_qos_class, name, value) { \ -+ if (value != PM_QOS_DEFAULT_VALUE) { \ -+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ -+ pci_name(adapter->pdev)); \ -+ } \ -+} -+ -+#endif /* > 2.6.18 */ -+ -+#define pci_enable_device_mem(pdev) pci_enable_device(pdev) -+ -+#ifndef DEFINE_PCI_DEVICE_TABLE -+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] -+#endif /* DEFINE_PCI_DEVICE_TABLE */ -+ -+#ifndef strict_strtol -+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) -+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) -+{ -+ /* adapted from strict_strtoul() in 2.6.25 */ -+ char *tail; -+ long val; -+ size_t len; -+ -+ *res = 0; -+ len = strlen(buf); -+ if (!len) -+ return -EINVAL; -+ val = simple_strtol(buf, &tail, base); -+ if (tail == buf) -+ return -EINVAL; -+ if ((*tail == '\0') || -+ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { -+ *res = val; -+ return 0; -+ } -+ -+ return -EINVAL; -+} -+#endif -+ -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) -+#ifndef IGB_PROCFS -+#define IGB_PROCFS -+#endif /* IGB_PROCFS 
*/ -+#endif /* >= 2.6.0 */ -+ -+#else /* < 2.6.25 */ -+ -+#if IS_ENABLED(CONFIG_HWMON) -+#ifndef IGB_HWMON -+#define IGB_HWMON -+#endif /* IGB_HWMON */ -+#endif /* CONFIG_HWMON */ -+ -+#endif /* < 2.6.25 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) -+#ifndef clamp_t -+#define clamp_t(type, val, min, max) ({ \ -+ type __val = (val); \ -+ type __min = (min); \ -+ type __max = (max); \ -+ __val = __val < __min ? __min : __val; \ -+ __val > __max ? __max : __val; }) -+#endif /* clamp_t */ -+#undef kzalloc_node -+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) -+ -+extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); -+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) -+#else /* < 2.6.26 */ -+#define NETDEV_CAN_SET_GSO_MAX_SIZE -+#include -+#define HAVE_NETDEV_VLAN_FEATURES -+#ifndef PCI_EXP_LNKCAP_ASPMS -+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ -+#endif /* PCI_EXP_LNKCAP_ASPMS */ -+#endif /* < 2.6.26 */ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) -+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, -+ __u32 speed) -+{ -+ ep->speed = (__u16)speed; -+ /* ep->speed_hi = (__u16)(speed >> 16); */ -+} -+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set -+ -+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) -+{ -+ /* no speed_hi before 2.6.27, and probably no need for it yet */ -+ return (__u32)ep->speed; -+} -+#define ethtool_cmd_speed _kc_ethtool_cmd_speed -+ -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) -+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) -+#define ANCIENT_PM 1 -+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ -+ defined(CONFIG_PM_SLEEP)) -+#define NEWER_PM 1 -+#endif -+#if defined(ANCIENT_PM) || defined(NEWER_PM) -+#undef device_set_wakeup_enable -+#define device_set_wakeup_enable(dev, val) \ -+ do { \ -+ u16 pmc = 0; \ -+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ -+ if (pm) { \ -+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ -+ &pmc); \ -+ } \ -+ (dev)->power.can_wakeup = !!(pmc >> 11); \ -+ (dev)->power.should_wakeup = (val && (pmc >> 11)); \ -+ } while (0) -+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ -+#endif /* 2.6.15 through 2.6.27 */ -+#ifndef netif_napi_del -+#define netif_napi_del(_a) do {} while (0) -+#ifdef NAPI -+#ifdef CONFIG_NETPOLL -+#undef netif_napi_del -+#define netif_napi_del(_a) list_del(&(_a)->dev_list); -+#endif -+#endif -+#endif /* netif_napi_del */ -+#ifdef dma_mapping_error -+#undef dma_mapping_error -+#endif -+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) -+ -+#ifdef CONFIG_NETDEVICES_MULTIQUEUE -+#define HAVE_TX_MQ -+#endif -+ -+#ifdef HAVE_TX_MQ -+extern void _kc_netif_tx_stop_all_queues(struct net_device *); -+extern void _kc_netif_tx_wake_all_queues(struct net_device *); -+extern void _kc_netif_tx_start_all_queues(struct net_device *); -+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) -+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) -+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) -+#undef netif_stop_subqueue -+#define netif_stop_subqueue(_ndev,_qi) do { \ -+ if (netif_is_multiqueue((_ndev))) \ -+ 
netif_stop_subqueue((_ndev), (_qi)); \ -+ else \ -+ netif_stop_queue((_ndev)); \ -+ } while (0) -+#undef netif_start_subqueue -+#define netif_start_subqueue(_ndev,_qi) do { \ -+ if (netif_is_multiqueue((_ndev))) \ -+ netif_start_subqueue((_ndev), (_qi)); \ -+ else \ -+ netif_start_queue((_ndev)); \ -+ } while (0) -+#else /* HAVE_TX_MQ */ -+#define netif_tx_stop_all_queues(a) netif_stop_queue(a) -+#define netif_tx_wake_all_queues(a) netif_wake_queue(a) -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) -+#define netif_tx_start_all_queues(a) netif_start_queue(a) -+#else -+#define netif_tx_start_all_queues(a) do {} while (0) -+#endif -+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) -+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) -+#endif /* HAVE_TX_MQ */ -+#ifndef NETIF_F_MULTI_QUEUE -+#define NETIF_F_MULTI_QUEUE 0 -+#define netif_is_multiqueue(a) 0 -+#define netif_wake_subqueue(a, b) -+#endif /* NETIF_F_MULTI_QUEUE */ -+ -+#ifndef __WARN_printf -+extern void __kc_warn_slowpath(const char *file, const int line, -+ const char *fmt, ...) __attribute__((format(printf, 3, 4))); -+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) -+#endif /* __WARN_printf */ -+ -+#ifndef WARN -+#define WARN(condition, format...) ({ \ -+ int __ret_warn_on = !!(condition); \ -+ if (unlikely(__ret_warn_on)) \ -+ __WARN_printf(format); \ -+ unlikely(__ret_warn_on); \ -+}) -+#endif /* WARN */ -+#undef HAVE_IXGBE_DEBUG_FS -+#undef HAVE_IGB_DEBUG_FS -+#else /* < 2.6.27 */ -+#define HAVE_TX_MQ -+#define HAVE_NETDEV_SELECT_QUEUE -+#ifdef CONFIG_DEBUG_FS -+#define HAVE_IXGBE_DEBUG_FS -+#define HAVE_IGB_DEBUG_FS -+#endif /* CONFIG_DEBUG_FS */ -+#endif /* < 2.6.27 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) -+#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ -+ pci_resource_len(pdev, bar)) -+#define pci_wake_from_d3 _kc_pci_wake_from_d3 -+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep -+extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); -+extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); -+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) -+#ifndef __skb_queue_head_init -+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) -+{ -+ list->prev = list->next = (struct sk_buff *)list; -+ list->qlen = 0; -+} -+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) -+#endif -+ -+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ -+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ -+ -+#endif /* < 2.6.28 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) -+#ifndef swap -+#define swap(a, b) \ -+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) -+#endif -+#define pci_request_selected_regions_exclusive(pdev, bars, name) \ -+ pci_request_selected_regions(pdev, bars, name) -+#ifndef CONFIG_NR_CPUS -+#define CONFIG_NR_CPUS 1 -+#endif /* CONFIG_NR_CPUS */ -+#ifndef pcie_aspm_enabled -+#define pcie_aspm_enabled() (1) -+#endif /* pcie_aspm_enabled */ -+ -+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ -+ -+#ifndef PCI_EXP_LNKSTA_CLS -+#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ -+#endif -+#ifndef PCI_EXP_LNKSTA_NLW -+#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ -+#endif -+ -+#ifndef pci_clear_master -+extern void _kc_pci_clear_master(struct 
pci_dev *dev); -+#define pci_clear_master(dev) _kc_pci_clear_master(dev) -+#endif -+ -+#ifndef PCI_EXP_LNKCTL_ASPMC -+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ -+#endif -+#else /* < 2.6.29 */ -+#ifndef HAVE_NET_DEVICE_OPS -+#define HAVE_NET_DEVICE_OPS -+#endif -+#ifdef CONFIG_DCB -+#define HAVE_PFC_MODE_ENABLE -+#endif /* CONFIG_DCB */ -+#endif /* < 2.6.29 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) -+#define NO_PTP_SUPPORT -+#define skb_rx_queue_recorded(a) false -+#define skb_get_rx_queue(a) 0 -+#define skb_record_rx_queue(a, b) do {} while (0) -+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) -+#ifndef CONFIG_PCI_IOV -+#undef pci_enable_sriov -+#define pci_enable_sriov(a, b) -ENOTSUPP -+#undef pci_disable_sriov -+#define pci_disable_sriov(a) do {} while (0) -+#endif /* CONFIG_PCI_IOV */ -+#ifndef pr_cont -+#define pr_cont(fmt, ...) \ -+ printk(KERN_CONT fmt, ##__VA_ARGS__) -+#endif /* pr_cont */ -+static inline void _kc_synchronize_irq(unsigned int a) -+{ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) -+ synchronize_irq(); -+#else /* < 2.5.28 */ -+ synchronize_irq(a); -+#endif /* < 2.5.28 */ -+} -+#undef synchronize_irq -+#define synchronize_irq(a) _kc_synchronize_irq(a) -+ -+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ -+ -+#ifdef nr_cpus_node -+#undef nr_cpus_node -+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) -+#endif -+ -+#else /* < 2.6.30 */ -+#define HAVE_ASPM_QUIRKS -+#endif /* < 2.6.30 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) -+#define ETH_P_1588 0x88F7 -+#define ETH_P_FIP 0x8914 -+#ifndef netdev_uc_count -+#define netdev_uc_count(dev) ((dev)->uc_count) -+#endif -+#ifndef netdev_for_each_uc_addr -+#define netdev_for_each_uc_addr(uclist, dev) \ -+ for (uclist = dev->uc_list; uclist; uclist = uclist->next) -+#endif -+#ifndef PORT_OTHER -+#define PORT_OTHER 0xff -+#endif -+#ifndef MDIO_PHY_ID_PRTAD -+#define MDIO_PHY_ID_PRTAD 0x03e0 -+#endif -+#ifndef MDIO_PHY_ID_DEVAD -+#define MDIO_PHY_ID_DEVAD 0x001f -+#endif -+#ifndef skb_dst -+#define skb_dst(s) ((s)->dst) -+#endif -+ -+#ifndef SUPPORTED_1000baseKX_Full -+#define SUPPORTED_1000baseKX_Full (1 << 17) -+#endif -+#ifndef SUPPORTED_10000baseKX4_Full -+#define SUPPORTED_10000baseKX4_Full (1 << 18) -+#endif -+#ifndef SUPPORTED_10000baseKR_Full -+#define SUPPORTED_10000baseKR_Full (1 << 19) -+#endif -+ -+#ifndef ADVERTISED_1000baseKX_Full -+#define ADVERTISED_1000baseKX_Full (1 << 17) -+#endif -+#ifndef ADVERTISED_10000baseKX4_Full -+#define ADVERTISED_10000baseKX4_Full (1 << 18) -+#endif -+#ifndef ADVERTISED_10000baseKR_Full -+#define ADVERTISED_10000baseKR_Full (1 << 19) -+#endif -+ -+#else /* < 2.6.31 */ -+#ifndef HAVE_NETDEV_STORAGE_ADDRESS -+#define HAVE_NETDEV_STORAGE_ADDRESS -+#endif -+#ifndef HAVE_NETDEV_HW_ADDR -+#define HAVE_NETDEV_HW_ADDR -+#endif -+#ifndef HAVE_TRANS_START_IN_QUEUE -+#define HAVE_TRANS_START_IN_QUEUE -+#endif -+#ifndef HAVE_INCLUDE_LINUX_MDIO_H -+#define HAVE_INCLUDE_LINUX_MDIO_H -+#endif -+#include -+#endif /* < 2.6.31 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) -+#undef netdev_tx_t -+#define netdev_tx_t int -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -+static inline int _kc_pm_runtime_get_sync() -+{ -+ return 1; -+} -+#define 
pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() -+#else /* 2.6.0 => 2.6.32 */ -+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) -+{ -+ return 1; -+} -+#ifndef pm_runtime_get_sync -+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) -+#endif -+#endif /* 2.6.0 => 2.6.32 */ -+#ifndef pm_runtime_put -+#define pm_runtime_put(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_put_sync -+#define pm_runtime_put_sync(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_resume -+#define pm_runtime_resume(dev) do {} while (0) -+#endif -+#ifndef pm_schedule_suspend -+#define pm_schedule_suspend(dev, t) do {} while (0) -+#endif -+#ifndef pm_runtime_set_suspended -+#define pm_runtime_set_suspended(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_disable -+#define pm_runtime_disable(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_put_noidle -+#define pm_runtime_put_noidle(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_set_active -+#define pm_runtime_set_active(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_enable -+#define pm_runtime_enable(dev) do {} while (0) -+#endif -+#ifndef pm_runtime_get_noresume -+#define pm_runtime_get_noresume(dev) do {} while (0) -+#endif -+#else /* < 2.6.32 */ -+#if (RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -+#define HAVE_RHEL6_NET_DEVICE_EXTENDED -+#endif /* RHEL >= 6.2 && RHEL < 7.0 */ -+#if (RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT -+#define HAVE_NDO_SET_FEATURES -+#endif /* RHEL >= 6.6 && RHEL < 7.0 */ -+#ifdef CONFIG_DCB -+#ifndef HAVE_DCBNL_OPS_GETAPP -+#define HAVE_DCBNL_OPS_GETAPP -+#endif -+#endif /* CONFIG_DCB */ -+#include -+/* IOV bad DMA target work arounds require at least this kernel rev support */ -+#define HAVE_PCIE_TYPE -+#endif /* < 2.6.32 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) -+#ifndef pci_pcie_cap -+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) -+#endif -+#ifndef IPV4_FLOW -+#define IPV4_FLOW 0x10 -+#endif /* IPV4_FLOW */ -+#ifndef IPV6_FLOW -+#define IPV6_FLOW 0x11 -+#endif /* IPV6_FLOW */ -+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ -+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ -+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) -+#endif /* RHEL6 or SLES11 SP1 */ -+#ifndef __percpu -+#define __percpu -+#endif /* __percpu */ -+#ifndef PORT_DA -+#define PORT_DA PORT_OTHER -+#endif -+#ifndef PORT_NONE -+#define PORT_NONE PORT_OTHER -+#endif -+ -+#if ((RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) -+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) -+#undef DEFINE_DMA_UNMAP_ADDR -+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME -+#undef DEFINE_DMA_UNMAP_LEN -+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME -+#undef dma_unmap_addr -+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) -+#undef dma_unmap_addr_set -+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) -+#undef dma_unmap_len -+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) -+#undef dma_unmap_len_set -+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = 
(VAL)) -+#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ -+#endif /* RHEL_RELEASE_CODE */ -+ -+#if (!(RHEL_RELEASE_CODE && \ -+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ -+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) -+static inline bool pci_is_pcie(struct pci_dev *dev) -+{ -+ return !!pci_pcie_cap(dev); -+} -+#endif /* RHEL_RELEASE_CODE */ -+ -+#if (!(RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) -+#define sk_tx_queue_get(_sk) (-1) -+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) -+#endif /* !(RHEL >= 6.2) */ -+ -+#if (RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT -+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE -+#define HAVE_ETHTOOL_SET_PHYS_ID -+#define HAVE_ETHTOOL_GET_TS_INFO -+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) -+#define HAVE_ETHTOOL_GSRSSH -+#define HAVE_RHEL6_SRIOV_CONFIGURE -+#define HAVE_RXFH_NONCONST -+#endif /* RHEL > 6.5 */ -+#endif /* RHEL >= 6.4 && RHEL < 7.0 */ -+ -+#else /* < 2.6.33 */ -+#endif /* < 2.6.33 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) -+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) -+#ifndef pci_num_vf -+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) -+extern int _kc_pci_num_vf(struct pci_dev *dev); -+#endif -+#endif /* RHEL_RELEASE_CODE */ -+ -+#ifndef ETH_FLAG_NTUPLE -+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE -+#endif -+ -+#ifndef netdev_mc_count -+#define netdev_mc_count(dev) ((dev)->mc_count) -+#endif -+#ifndef netdev_mc_empty -+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) -+#endif -+#ifndef netdev_for_each_mc_addr -+#define netdev_for_each_mc_addr(mclist, dev) \ -+ for (mclist = dev->mc_list; mclist; mclist = mclist->next) -+#endif -+#ifndef netdev_uc_count -+#define netdev_uc_count(dev) ((dev)->uc.count) -+#endif -+#ifndef netdev_uc_empty -+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) -+#endif -+#ifndef netdev_for_each_uc_addr -+#define netdev_for_each_uc_addr(ha, dev) \ -+ list_for_each_entry(ha, &dev->uc.list, list) -+#endif -+#ifndef dma_set_coherent_mask -+#define dma_set_coherent_mask(dev,mask) \ -+ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) -+#endif -+#ifndef pci_dev_run_wake -+#define pci_dev_run_wake(pdev) (0) -+#endif -+ -+/* netdev logging taken from include/linux/netdevice.h */ -+#ifndef netdev_name -+static inline const char *_kc_netdev_name(const struct net_device *dev) -+{ -+ if (dev->reg_state != NETREG_REGISTERED) -+ return "(unregistered net_device)"; -+ return dev->name; -+} -+#define netdev_name(netdev) _kc_netdev_name(netdev) -+#endif /* netdev_name */ -+ -+#undef netdev_printk -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -+#define netdev_printk(level, netdev, format, args...) \ -+do { \ -+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ -+ printk(level "%s: " format, pci_name(pdev), ##args); \ -+} while(0) -+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) -+#define netdev_printk(level, netdev, format, args...) 
\ -+do { \ -+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ -+ struct device *dev = pci_dev_to_dev(pdev); \ -+ dev_printk(level, dev, "%s: " format, \ -+ netdev_name(netdev), ##args); \ -+} while(0) -+#else /* 2.6.21 => 2.6.34 */ -+#define netdev_printk(level, netdev, format, args...) \ -+ dev_printk(level, (netdev)->dev.parent, \ -+ "%s: " format, \ -+ netdev_name(netdev), ##args) -+#endif /* <2.6.0 <2.6.21 <2.6.34 */ -+#undef netdev_emerg -+#define netdev_emerg(dev, format, args...) \ -+ netdev_printk(KERN_EMERG, dev, format, ##args) -+#undef netdev_alert -+#define netdev_alert(dev, format, args...) \ -+ netdev_printk(KERN_ALERT, dev, format, ##args) -+#undef netdev_crit -+#define netdev_crit(dev, format, args...) \ -+ netdev_printk(KERN_CRIT, dev, format, ##args) -+#undef netdev_err -+#define netdev_err(dev, format, args...) \ -+ netdev_printk(KERN_ERR, dev, format, ##args) -+#undef netdev_warn -+#define netdev_warn(dev, format, args...) \ -+ netdev_printk(KERN_WARNING, dev, format, ##args) -+#undef netdev_notice -+#define netdev_notice(dev, format, args...) \ -+ netdev_printk(KERN_NOTICE, dev, format, ##args) -+#undef netdev_info -+#define netdev_info(dev, format, args...) \ -+ netdev_printk(KERN_INFO, dev, format, ##args) -+#undef netdev_dbg -+#if defined(DEBUG) -+#define netdev_dbg(__dev, format, args...) \ -+ netdev_printk(KERN_DEBUG, __dev, format, ##args) -+#elif defined(CONFIG_DYNAMIC_DEBUG) -+#define netdev_dbg(__dev, format, args...) \ -+do { \ -+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ -+ netdev_name(__dev), ##args); \ -+} while (0) -+#else /* DEBUG */ -+#define netdev_dbg(__dev, format, args...) \ -+({ \ -+ if (0) \ -+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ -+ 0; \ -+}) -+#endif /* DEBUG */ -+ -+#undef netif_printk -+#define netif_printk(priv, type, level, dev, fmt, args...) \ -+do { \ -+ if (netif_msg_##type(priv)) \ -+ netdev_printk(level, (dev), fmt, ##args); \ -+} while (0) -+ -+#undef netif_emerg -+#define netif_emerg(priv, type, dev, fmt, args...) \ -+ netif_level(emerg, priv, type, dev, fmt, ##args) -+#undef netif_alert -+#define netif_alert(priv, type, dev, fmt, args...) \ -+ netif_level(alert, priv, type, dev, fmt, ##args) -+#undef netif_crit -+#define netif_crit(priv, type, dev, fmt, args...) \ -+ netif_level(crit, priv, type, dev, fmt, ##args) -+#undef netif_err -+#define netif_err(priv, type, dev, fmt, args...) \ -+ netif_level(err, priv, type, dev, fmt, ##args) -+#undef netif_warn -+#define netif_warn(priv, type, dev, fmt, args...) \ -+ netif_level(warn, priv, type, dev, fmt, ##args) -+#undef netif_notice -+#define netif_notice(priv, type, dev, fmt, args...) \ -+ netif_level(notice, priv, type, dev, fmt, ##args) -+#undef netif_info -+#define netif_info(priv, type, dev, fmt, args...) \ -+ netif_level(info, priv, type, dev, fmt, ##args) -+#undef netif_dbg -+#define netif_dbg(priv, type, dev, fmt, args...) 
\ -+ netif_level(dbg, priv, type, dev, fmt, ##args) -+ -+#ifdef SET_SYSTEM_SLEEP_PM_OPS -+#define HAVE_SYSTEM_SLEEP_PM_OPS -+#endif -+ -+#ifndef for_each_set_bit -+#define for_each_set_bit(bit, addr, size) \ -+ for ((bit) = find_first_bit((addr), (size)); \ -+ (bit) < (size); \ -+ (bit) = find_next_bit((addr), (size), (bit) + 1)) -+#endif /* for_each_set_bit */ -+ -+#ifndef DEFINE_DMA_UNMAP_ADDR -+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR -+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN -+#define dma_unmap_addr pci_unmap_addr -+#define dma_unmap_addr_set pci_unmap_addr_set -+#define dma_unmap_len pci_unmap_len -+#define dma_unmap_len_set pci_unmap_len_set -+#endif /* DEFINE_DMA_UNMAP_ADDR */ -+ -+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) -+#ifdef IGB_HWMON -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+#define sysfs_attr_init(attr) \ -+ do { \ -+ static struct lock_class_key __key; \ -+ (attr)->key = &__key; \ -+ } while (0) -+#else -+#define sysfs_attr_init(attr) do {} while (0) -+#endif /* CONFIG_DEBUG_LOCK_ALLOC */ -+#endif /* IGB_HWMON */ -+#endif /* RHEL_RELEASE_CODE */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) -+static inline bool _kc_pm_runtime_suspended() -+{ -+ return false; -+} -+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() -+#else /* 2.6.0 => 2.6.34 */ -+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) -+{ -+ return false; -+} -+#ifndef pm_runtime_suspended -+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) -+#endif -+#endif /* 2.6.0 => 2.6.34 */ -+ -+#ifndef pci_bus_speed -+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ -+enum _kc_pci_bus_speed { -+ _KC_PCIE_SPEED_2_5GT = 0x14, -+ _KC_PCIE_SPEED_5_0GT = 0x15, -+ _KC_PCIE_SPEED_8_0GT = 0x16, -+ _KC_PCI_SPEED_UNKNOWN = 0xff, -+}; -+#define pci_bus_speed _kc_pci_bus_speed -+#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT -+#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT -+#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT -+#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN -+#endif /* pci_bus_speed */ -+ -+#else /* < 2.6.34 */ -+#define HAVE_SYSTEM_SLEEP_PM_OPS -+#ifndef HAVE_SET_RX_MODE -+#define HAVE_SET_RX_MODE -+#endif -+ -+#endif /* < 2.6.34 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) -+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, -+ const void __user *from, size_t count); -+#define simple_write_to_buffer _kc_simple_write_to_buffer -+ -+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT -+#define PCI_EXP_LNKSTA_NLW_SHIFT 4 -+#endif -+ -+#ifndef numa_node_id -+#define numa_node_id() 0 -+#endif -+#ifndef numa_mem_id -+#define numa_mem_id numa_node_id -+#endif -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) -+#ifdef HAVE_TX_MQ -+#include -+#ifndef CONFIG_NETDEVICES_MULTIQUEUE -+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); -+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ -+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, -+ unsigned int txq) -+{ -+ dev->egress_subqueue_count = txq; -+ return 0; -+} -+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ -+#else /* HAVE_TX_MQ */ -+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, -+ unsigned int __always_unused txq) -+{ -+ return 0; -+} -+#endif /* HAVE_TX_MQ */ -+#define netif_set_real_num_tx_queues(dev, txq) \ -+ _kc_netif_set_real_num_tx_queues(dev, txq) 
-+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ -+#ifndef ETH_FLAG_RXHASH -+#define ETH_FLAG_RXHASH (1<<28) -+#endif /* ETH_FLAG_RXHASH */ -+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) -+#define HAVE_IRQ_AFFINITY_HINT -+#endif -+#else /* < 2.6.35 */ -+#define HAVE_PM_QOS_REQUEST_LIST -+#define HAVE_IRQ_AFFINITY_HINT -+#endif /* < 2.6.35 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) -+extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); -+#define ethtool_op_set_flags _kc_ethtool_op_set_flags -+extern u32 _kc_ethtool_op_get_flags(struct net_device *); -+#define ethtool_op_get_flags _kc_ethtool_op_get_flags -+ -+enum { -+ WQ_UNBOUND = 0, -+ WQ_RESCUER = 0, -+}; -+ -+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -+#ifdef NET_IP_ALIGN -+#undef NET_IP_ALIGN -+#endif -+#define NET_IP_ALIGN 0 -+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ -+ -+#ifdef NET_SKB_PAD -+#undef NET_SKB_PAD -+#endif -+ -+#if (L1_CACHE_BYTES > 32) -+#define NET_SKB_PAD L1_CACHE_BYTES -+#else -+#define NET_SKB_PAD 32 -+#endif -+ -+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, -+ unsigned int length) -+{ -+ struct sk_buff *skb; -+ -+ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); -+ if (skb) { -+#if (NET_IP_ALIGN + NET_SKB_PAD) -+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); -+#endif -+ skb->dev = dev; -+ } -+ return skb; -+} -+ -+#ifdef netdev_alloc_skb_ip_align -+#undef netdev_alloc_skb_ip_align -+#endif -+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) -+ -+#undef netif_level -+#define netif_level(level, priv, type, dev, fmt, args...) \ -+do { \ -+ if (netif_msg_##type(priv)) \ -+ netdev_##level(dev, fmt, ##args); \ -+} while (0) -+ -+#undef usleep_range -+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) -+ -+#define u64_stats_update_begin(a) do { } while(0) -+#define u64_stats_update_end(a) do { } while(0) -+#define u64_stats_fetch_begin(a) do { } while(0) -+#define u64_stats_fetch_retry_bh(a,b) (0) -+#define u64_stats_fetch_begin_bh(a) (0) -+ -+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) -+#define HAVE_8021P_SUPPORT -+#endif -+ -+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ -+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ -+ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) -+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) -+{ -+ return; -+} -+#endif -+ -+#else /* < 2.6.36 */ -+ -+#define HAVE_PM_QOS_REQUEST_ACTIVE -+#define HAVE_8021P_SUPPORT -+#define HAVE_NDO_GET_STATS64 -+#endif /* < 2.6.36 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) -+#define HAVE_NON_CONST_PCI_DRIVER_NAME -+#ifndef netif_set_real_num_tx_queues -+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, -+ unsigned int txq) -+{ -+ netif_set_real_num_tx_queues(dev, txq); -+ return 0; -+} -+#define netif_set_real_num_tx_queues(dev, txq) \ -+ _kc_netif_set_real_num_tx_queues(dev, txq) -+#endif -+#ifndef netif_set_real_num_rx_queues -+static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, -+ unsigned int __always_unused rxq) -+{ -+ return 0; -+} -+#define netif_set_real_num_rx_queues(dev, rxq) \ -+ __kc_netif_set_real_num_rx_queues((dev), (rxq)) -+#endif -+#ifndef 
ETHTOOL_RXNTUPLE_ACTION_CLEAR -+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) -+#endif -+#ifndef VLAN_N_VID -+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN -+#endif /* VLAN_N_VID */ -+#ifndef ETH_FLAG_TXVLAN -+#define ETH_FLAG_TXVLAN (1 << 7) -+#endif /* ETH_FLAG_TXVLAN */ -+#ifndef ETH_FLAG_RXVLAN -+#define ETH_FLAG_RXVLAN (1 << 8) -+#endif /* ETH_FLAG_RXVLAN */ -+ -+#define WQ_MEM_RECLAIM WQ_RESCUER -+ -+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) -+{ -+ WARN_ON(skb->ip_summed != CHECKSUM_NONE); -+} -+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) -+ -+static inline void *_kc_vzalloc_node(unsigned long size, int node) -+{ -+ void *addr = vmalloc_node(size, node); -+ if (addr) -+ memset(addr, 0, size); -+ return addr; -+} -+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) -+ -+static inline void *_kc_vzalloc(unsigned long size) -+{ -+ void *addr = vmalloc(size); -+ if (addr) -+ memset(addr, 0, size); -+ return addr; -+} -+#define vzalloc(_size) _kc_vzalloc(_size) -+ -+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ -+ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) -+static inline __be16 vlan_get_protocol(const struct sk_buff *skb) -+{ -+ if (vlan_tx_tag_present(skb) || -+ skb->protocol != cpu_to_be16(ETH_P_8021Q)) -+ return skb->protocol; -+ -+ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) -+ return 0; -+ -+ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; -+} -+#endif /* !RHEL5.7+ || RHEL6.0 */ -+ -+#ifdef HAVE_HW_TIME_STAMP -+#define SKBTX_HW_TSTAMP (1 << 0) -+#define SKBTX_IN_PROGRESS (1 << 2) -+#define SKB_SHARED_TX_IS_UNION -+#endif -+ -+#ifndef device_wakeup_enable -+#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) -+#endif -+ -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) -+#ifndef HAVE_VLAN_RX_REGISTER -+#define HAVE_VLAN_RX_REGISTER -+#endif -+#endif /* > 2.4.18 */ -+#endif /* < 2.6.37 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) -+#define skb_checksum_start_offset(skb) skb_transport_offset(skb) -+#else /* 2.6.22 -> 2.6.37 */ -+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) -+{ -+ return skb->csum_start - skb_headroom(skb); -+} -+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) -+#endif /* 2.6.22 -> 2.6.37 */ -+#if IS_ENABLED(CONFIG_DCB) -+#ifndef IEEE_8021QAZ_MAX_TCS -+#define IEEE_8021QAZ_MAX_TCS 8 -+#endif -+#ifndef DCB_CAP_DCBX_HOST -+#define DCB_CAP_DCBX_HOST 0x01 -+#endif -+#ifndef DCB_CAP_DCBX_LLD_MANAGED -+#define DCB_CAP_DCBX_LLD_MANAGED 0x02 -+#endif -+#ifndef DCB_CAP_DCBX_VER_CEE -+#define DCB_CAP_DCBX_VER_CEE 0x04 -+#endif -+#ifndef DCB_CAP_DCBX_VER_IEEE -+#define DCB_CAP_DCBX_VER_IEEE 0x08 -+#endif -+#ifndef DCB_CAP_DCBX_STATIC -+#define DCB_CAP_DCBX_STATIC 0x10 -+#endif -+#endif /* CONFIG_DCB */ -+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) -+#define CONFIG_XPS -+#endif /* RHEL_RELEASE_VERSION(6,2) */ -+#endif /* < 2.6.38 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) -+#ifndef TC_BITMASK -+#define TC_BITMASK 15 -+#endif -+#ifndef NETIF_F_RXCSUM -+#define NETIF_F_RXCSUM (1 << 29) -+#endif -+#ifndef skb_queue_reverse_walk_safe -+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ -+ for (skb = (queue)->prev, tmp = skb->prev; \ -+ skb != 
(struct sk_buff *)(queue); \ -+ skb = tmp, tmp = skb->prev) -+#endif -+ -+#ifndef udp_csum -+#define udp_csum __kc_udp_csum -+static inline __wsum __kc_udp_csum(struct sk_buff *skb) -+{ -+ __wsum csum = csum_partial(skb_transport_header(skb), -+ sizeof(struct udphdr), skb->csum); -+ -+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { -+ csum = csum_add(csum, skb->csum); -+ } -+ return csum; -+} -+#endif /* udp_csum */ -+#else /* < 2.6.39 */ -+#ifndef HAVE_MQPRIO -+#define HAVE_MQPRIO -+#endif -+#ifndef HAVE_SETUP_TC -+#define HAVE_SETUP_TC -+#endif -+#ifdef CONFIG_DCB -+#ifndef HAVE_DCBNL_IEEE -+#define HAVE_DCBNL_IEEE -+#endif -+#endif /* CONFIG_DCB */ -+#ifndef HAVE_NDO_SET_FEATURES -+#define HAVE_NDO_SET_FEATURES -+#endif -+#endif /* < 2.6.39 */ -+ -+/*****************************************************************************/ -+/* use < 2.6.40 because of a Fedora 15 kernel update where they -+ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features -+ * like set_phys_id for ethtool. -+ */ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) -+#ifdef ETHTOOL_GRXRINGS -+#ifndef FLOW_EXT -+#define FLOW_EXT 0x80000000 -+union _kc_ethtool_flow_union { -+ struct ethtool_tcpip4_spec tcp_ip4_spec; -+ struct ethtool_usrip4_spec usr_ip4_spec; -+ __u8 hdata[60]; -+}; -+struct _kc_ethtool_flow_ext { -+ __be16 vlan_etype; -+ __be16 vlan_tci; -+ __be32 data[2]; -+}; -+struct _kc_ethtool_rx_flow_spec { -+ __u32 flow_type; -+ union _kc_ethtool_flow_union h_u; -+ struct _kc_ethtool_flow_ext h_ext; -+ union _kc_ethtool_flow_union m_u; -+ struct _kc_ethtool_flow_ext m_ext; -+ __u64 ring_cookie; -+ __u32 location; -+}; -+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec -+#endif /* FLOW_EXT */ -+#endif -+ -+#define pci_disable_link_state_locked pci_disable_link_state -+ -+#ifndef PCI_LTR_VALUE_MASK -+#define PCI_LTR_VALUE_MASK 0x000003ff -+#endif -+#ifndef PCI_LTR_SCALE_MASK -+#define PCI_LTR_SCALE_MASK 0x00001c00 -+#endif -+#ifndef PCI_LTR_SCALE_SHIFT -+#define PCI_LTR_SCALE_SHIFT 10 -+#endif -+ -+#else /* < 2.6.40 */ -+#define HAVE_ETHTOOL_SET_PHYS_ID -+#endif /* < 2.6.40 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) -+#define USE_LEGACY_PM_SUPPORT -+#ifndef kfree_rcu -+#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) -+#endif /* kfree_rcu */ -+#ifndef kstrtol_from_user -+#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) -+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, -+ unsigned int base, long *res) -+{ -+ /* sign, base 2 representation, newline, terminator */ -+ char buf[1 + sizeof(long) * 8 + 1 + 1]; -+ -+ count = min(count, sizeof(buf) - 1); -+ if (copy_from_user(buf, s, count)) -+ return -EFAULT; -+ buf[count] = '\0'; -+ return strict_strtol(buf, base, res); -+} -+#endif -+ -+/* 20000base_blah_full Supported and Advertised Registers */ -+#define SUPPORTED_20000baseMLD2_Full (1 << 21) -+#define SUPPORTED_20000baseKR2_Full (1 << 22) -+#define ADVERTISED_20000baseMLD2_Full (1 << 21) -+#define ADVERTISED_20000baseKR2_Full (1 << 22) -+#endif /* < 3.0.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) -+#ifndef __netdev_alloc_skb_ip_align -+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) -+#endif /* __netdev_alloc_skb_ip_align */ -+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) -+#define 
dcb_ieee_delapp(dev, app) 0 -+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) -+ -+/* 1000BASE-T Control register */ -+#define CTL1000_AS_MASTER 0x0800 -+#define CTL1000_ENABLE_MASTER 0x1000 -+ -+/* kernels less than 3.0.0 don't have this */ -+#ifndef ETH_P_8021AD -+#define ETH_P_8021AD 0x88A8 -+#endif -+#else /* < 3.1.0 */ -+#ifndef HAVE_DCBNL_IEEE_DELAPP -+#define HAVE_DCBNL_IEEE_DELAPP -+#endif -+#endif /* < 3.1.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) -+#ifndef dma_zalloc_coherent -+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) -+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, -+ dma_addr_t *dma_handle, gfp_t flag) -+{ -+ void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); -+ if (ret) -+ memset(ret, 0, size); -+ return ret; -+} -+#endif -+#ifdef ETHTOOL_GRXRINGS -+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS -+#endif /* ETHTOOL_GRXRINGS */ -+ -+#ifndef skb_frag_size -+#define skb_frag_size(frag) _kc_skb_frag_size(frag) -+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) -+{ -+ return frag->size; -+} -+#endif /* skb_frag_size */ -+ -+#ifndef skb_frag_size_sub -+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) -+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) -+{ -+ frag->size -= delta; -+} -+#endif /* skb_frag_size_sub */ -+ -+#ifndef skb_frag_page -+#define skb_frag_page(frag) _kc_skb_frag_page(frag) -+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) -+{ -+ return frag->page; -+} -+#endif /* skb_frag_page */ -+ -+#ifndef skb_frag_address -+#define skb_frag_address(frag) _kc_skb_frag_address(frag) -+static inline void *_kc_skb_frag_address(const skb_frag_t *frag) -+{ -+ return page_address(skb_frag_page(frag)) + frag->page_offset; -+} -+#endif /* skb_frag_address */ -+ -+#ifndef skb_frag_dma_map -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) -+#include -+#endif -+#define skb_frag_dma_map(dev,frag,offset,size,dir) \ -+ _kc_skb_frag_dma_map(dev,frag,offset,size,dir) -+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, -+ const skb_frag_t *frag, -+ size_t offset, size_t size, -+ enum dma_data_direction dir) -+{ -+ return dma_map_page(dev, skb_frag_page(frag), -+ frag->page_offset + offset, size, dir); -+} -+#endif /* skb_frag_dma_map */ -+ -+#ifndef __skb_frag_unref -+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) -+static inline void __kc_skb_frag_unref(skb_frag_t *frag) -+{ -+ put_page(skb_frag_page(frag)); -+} -+#endif /* __skb_frag_unref */ -+ -+#ifndef SPEED_UNKNOWN -+#define SPEED_UNKNOWN -1 -+#endif -+#ifndef DUPLEX_UNKNOWN -+#define DUPLEX_UNKNOWN 0xff -+#endif -+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ -+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) -+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED -+#define HAVE_PCI_DEV_FLAGS_ASSIGNED -+#endif -+#endif -+#else /* < 3.2.0 */ -+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED -+#define HAVE_PCI_DEV_FLAGS_ASSIGNED -+#define HAVE_VF_SPOOFCHK_CONFIGURE -+#endif -+#ifndef HAVE_SKB_L4_RXHASH -+#define HAVE_SKB_L4_RXHASH -+#endif -+#define HAVE_IOMMU_PRESENT -+#define HAVE_PM_QOS_REQUEST_LIST_NEW -+#endif /* < 3.2.0 */ -+ -+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) -+#undef ixgbe_get_netdev_tc_txq -+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) -+#endif 
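The hunks above all follow the same kcompat backport pattern: probe for an upstream symbol with #ifndef (or a RHEL/SLE/kernel version check), supply a static inline fallback under a _kc_ or __kc_ prefix, and then #define the upstream name to that fallback so the driver proper can call the modern API unconditionally. A minimal sketch of the pattern, using a hypothetical skb_frag_off() shim that is not part of this patch:

/*
 * Illustrative kcompat shim (assumed example, not taken from this patch):
 * older kernels expose the fragment offset as a plain struct member, so
 * emulate the newer accessor and redirect the upstream name to it.
 */
#ifndef skb_frag_off
static inline unsigned int _kc_skb_frag_off(const skb_frag_t *frag)
{
	return frag->page_offset;
}
#define skb_frag_off(frag) _kc_skb_frag_off(frag)
#endif /* skb_frag_off */

Driver code can then use skb_frag_off() everywhere; on kernels that already provide the accessor, the #ifndef guard makes the shim disappear at compile time.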
-+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) -+/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than -+ * alloc_workqueue() to avoid compiler warning from -Wvarargs -+ */ -+static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) -+_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, -+ const char *fmt, ...) -+{ -+ struct workqueue_struct *wq; -+ va_list args, temp; -+ unsigned int len; -+ char *p; -+ -+ va_start(args, fmt); -+ va_copy(temp, args); -+ len = vsnprintf(NULL, 0, fmt, temp); -+ va_end(temp); -+ -+ p = kmalloc(len + 1, GFP_KERNEL); -+ if (!p) { -+ va_end(args); -+ return NULL; -+ } -+ -+ vsnprintf(p, len + 1, fmt, args); -+ va_end(args); -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) -+ wq = create_workqueue(p); -+#else -+ wq = alloc_workqueue(p, flags, max_active); -+#endif -+ kfree(p); -+ -+ return wq; -+} -+#ifdef alloc_workqueue -+#undef alloc_workqueue -+#endif -+#define alloc_workqueue(fmt, flags, max_active, args...) \ -+ _kc_alloc_workqueue(flags, max_active, fmt, ##args) -+ -+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) -+typedef u32 netdev_features_t; -+#endif -+#undef PCI_EXP_TYPE_RC_EC -+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ -+#ifndef CONFIG_BQL -+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) -+#define netdev_completed_queue(_n, _p, _b) do {} while (0) -+#define netdev_tx_sent_queue(_q, _b) do {} while (0) -+#define netdev_sent_queue(_n, _b) do {} while (0) -+#define netdev_tx_reset_queue(_q) do {} while (0) -+#define netdev_reset_queue(_n) do {} while (0) -+#endif -+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE -+#endif /* SLE_VERSION(11,3,0) */ -+#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) -+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) -+static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, -+ u8 *nexthdrp, -+ __be16 __always_unused *frag_offp) -+{ -+ return ipv6_skip_exthdr(skb, start, nexthdrp); -+} -+#undef ipv6_skip_exthdr -+#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) -+#endif /* !SLES11sp4 or greater */ -+ -+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ -+ !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) -+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) -+{ -+ return index % n_rx_rings; -+} -+#endif -+ -+#else /* ! 
< 3.3.0 */ -+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE -+#define HAVE_INT_NDO_VLAN_RX_ADD_VID -+#ifdef ETHTOOL_SRXNTUPLE -+#undef ETHTOOL_SRXNTUPLE -+#endif -+#endif /* < 3.3.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) -+#ifndef NETIF_F_RXFCS -+#define NETIF_F_RXFCS 0 -+#endif /* NETIF_F_RXFCS */ -+#ifndef NETIF_F_RXALL -+#define NETIF_F_RXALL 0 -+#endif /* NETIF_F_RXALL */ -+ -+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -+#define NUMTCS_RETURNS_U8 -+ -+int _kc_simple_open(struct inode *inode, struct file *file); -+#define simple_open _kc_simple_open -+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ -+ -+#ifndef skb_add_rx_frag -+#define skb_add_rx_frag _kc_skb_add_rx_frag -+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, -+ int, int, unsigned int); -+#endif -+#ifdef NET_ADDR_RANDOM -+#define eth_hw_addr_random(N) do { \ -+ eth_random_addr(N->dev_addr); \ -+ N->addr_assign_type |= NET_ADDR_RANDOM; \ -+ } while (0) -+#else /* NET_ADDR_RANDOM */ -+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) -+#endif /* NET_ADDR_RANDOM */ -+ -+#ifndef for_each_set_bit_from -+#define for_each_set_bit_from(bit, addr, size) \ -+ for ((bit) = find_next_bit((addr), (size), (bit)); \ -+ (bit) < (size); \ -+ (bit) = find_next_bit((addr), (size), (bit) + 1)) -+#endif /* for_each_set_bit_from */ -+ -+#else /* < 3.4.0 */ -+#include -+#endif /* >= 3.4.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ -+ ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) -+#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) -+#define HAVE_PTP_1588_CLOCK -+#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ -+#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) -+ -+#ifndef ether_addr_equal -+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) -+{ -+ return !compare_ether_addr(addr1, addr2); -+} -+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) -+#endif -+ -+#else -+#define HAVE_FDB_OPS -+#define HAVE_ETHTOOL_GET_TS_INFO -+#endif /* < 3.5.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) -+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ -+ -+#ifndef MDIO_EEE_100TX -+#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ -+#endif -+#ifndef MDIO_EEE_1000T -+#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ -+#endif -+#ifndef MDIO_EEE_10GT -+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ -+#endif -+#ifndef MDIO_EEE_1000KX -+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ -+#endif -+#ifndef MDIO_EEE_10GKX4 -+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ -+#endif -+#ifndef MDIO_EEE_10GKR -+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ -+#endif -+ -+#ifndef __GFP_MEMALLOC -+#define __GFP_MEMALLOC 0 -+#endif -+ -+#ifndef eth_random_addr -+#define eth_random_addr _kc_eth_random_addr -+static inline void _kc_eth_random_addr(u8 *addr) -+{ -+ get_random_bytes(addr, ETH_ALEN); -+ addr[0] &= 0xfe; /* clear multicast */ -+ addr[0] |= 0x02; /* set local assignment */ -+} -+#endif /* eth_random_addr */ -+#else /* < 3.6.0 */ -+#define HAVE_STRUCT_PAGE_PFMEMALLOC 
-+#endif /* < 3.6.0 */ -+ -+/******************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) -+#ifndef ADVERTISED_40000baseKR4_Full -+/* these defines were all added in one commit, so should be safe -+ * to trigger activiation on one define -+ */ -+#define SUPPORTED_40000baseKR4_Full (1 << 23) -+#define SUPPORTED_40000baseCR4_Full (1 << 24) -+#define SUPPORTED_40000baseSR4_Full (1 << 25) -+#define SUPPORTED_40000baseLR4_Full (1 << 26) -+#define ADVERTISED_40000baseKR4_Full (1 << 23) -+#define ADVERTISED_40000baseCR4_Full (1 << 24) -+#define ADVERTISED_40000baseSR4_Full (1 << 25) -+#define ADVERTISED_40000baseLR4_Full (1 << 26) -+#endif -+ -+#ifndef mmd_eee_cap_to_ethtool_sup_t -+/** -+ * mmd_eee_cap_to_ethtool_sup_t -+ * @eee_cap: value of the MMD EEE Capability register -+ * -+ * A small helper function that translates MMD EEE Capability (3.20) bits -+ * to ethtool supported settings. -+ */ -+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) -+{ -+ u32 supported = 0; -+ -+ if (eee_cap & MDIO_EEE_100TX) -+ supported |= SUPPORTED_100baseT_Full; -+ if (eee_cap & MDIO_EEE_1000T) -+ supported |= SUPPORTED_1000baseT_Full; -+ if (eee_cap & MDIO_EEE_10GT) -+ supported |= SUPPORTED_10000baseT_Full; -+ if (eee_cap & MDIO_EEE_1000KX) -+ supported |= SUPPORTED_1000baseKX_Full; -+ if (eee_cap & MDIO_EEE_10GKX4) -+ supported |= SUPPORTED_10000baseKX4_Full; -+ if (eee_cap & MDIO_EEE_10GKR) -+ supported |= SUPPORTED_10000baseKR_Full; -+ -+ return supported; -+} -+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ -+ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) -+#endif /* mmd_eee_cap_to_ethtool_sup_t */ -+ -+#ifndef mmd_eee_adv_to_ethtool_adv_t -+/** -+ * mmd_eee_adv_to_ethtool_adv_t -+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers -+ * -+ * A small helper function that translates the MMD EEE Advertisment (7.60) -+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement -+ * settings. -+ */ -+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) -+{ -+ u32 adv = 0; -+ -+ if (eee_adv & MDIO_EEE_100TX) -+ adv |= ADVERTISED_100baseT_Full; -+ if (eee_adv & MDIO_EEE_1000T) -+ adv |= ADVERTISED_1000baseT_Full; -+ if (eee_adv & MDIO_EEE_10GT) -+ adv |= ADVERTISED_10000baseT_Full; -+ if (eee_adv & MDIO_EEE_1000KX) -+ adv |= ADVERTISED_1000baseKX_Full; -+ if (eee_adv & MDIO_EEE_10GKX4) -+ adv |= ADVERTISED_10000baseKX4_Full; -+ if (eee_adv & MDIO_EEE_10GKR) -+ adv |= ADVERTISED_10000baseKR_Full; -+ -+ return adv; -+} -+ -+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ -+ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) -+#endif /* mmd_eee_adv_to_ethtool_adv_t */ -+ -+#ifndef ethtool_adv_to_mmd_eee_adv_t -+/** -+ * ethtool_adv_to_mmd_eee_adv_t -+ * @adv: the ethtool advertisement settings -+ * -+ * A small helper function that translates ethtool advertisement settings -+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and -+ * MMD EEE Link Partner Ability (7.61) registers. 
-+ */ -+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) -+{ -+ u16 reg = 0; -+ -+ if (adv & ADVERTISED_100baseT_Full) -+ reg |= MDIO_EEE_100TX; -+ if (adv & ADVERTISED_1000baseT_Full) -+ reg |= MDIO_EEE_1000T; -+ if (adv & ADVERTISED_10000baseT_Full) -+ reg |= MDIO_EEE_10GT; -+ if (adv & ADVERTISED_1000baseKX_Full) -+ reg |= MDIO_EEE_1000KX; -+ if (adv & ADVERTISED_10000baseKX4_Full) -+ reg |= MDIO_EEE_10GKX4; -+ if (adv & ADVERTISED_10000baseKR_Full) -+ reg |= MDIO_EEE_10GKR; -+ -+ return reg; -+} -+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) -+#endif /* ethtool_adv_to_mmd_eee_adv_t */ -+ -+#ifndef pci_pcie_type -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) -+static inline u8 pci_pcie_type(struct pci_dev *pdev) -+{ -+ int pos; -+ u16 reg16; -+ -+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); -+ BUG_ON(!pos); -+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); -+ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; -+} -+#else /* < 2.6.24 */ -+#define pci_pcie_type(x) (x)->pcie_type -+#endif /* < 2.6.24 */ -+#endif /* pci_pcie_type */ -+ -+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ -+ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ -+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) -+#define ptp_clock_register(caps, args...) ptp_clock_register(caps) -+#endif -+ -+#ifndef pcie_capability_read_word -+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); -+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) -+#endif /* pcie_capability_read_word */ -+ -+#ifndef pcie_capability_write_word -+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); -+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) -+#endif /* pcie_capability_write_word */ -+ -+#ifndef pcie_capability_clear_and_set_word -+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, -+ u16 clear, u16 set); -+#define pcie_capability_clear_and_set_word(d,p,c,s) \ -+ __kc_pcie_capability_clear_and_set_word(d,p,c,s) -+#endif /* pcie_capability_clear_and_set_word */ -+ -+#ifndef pcie_capability_clear_word -+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, -+ u16 clear); -+#define pcie_capability_clear_word(d, p, c) \ -+ __kc_pcie_capability_clear_word(d, p, c) -+#endif /* pcie_capability_clear_word */ -+ -+#ifndef PCI_EXP_LNKSTA2 -+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ -+#endif -+ -+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) -+#define USE_CONST_DEV_UC_CHAR -+#endif -+ -+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) -+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) -+#endif /* !RHEL6.8+ */ -+ -+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) -+#include -+#else -+ -+#define DEFINE_HASHTABLE(name, bits) \ -+ struct hlist_head name[1 << (bits)] = \ -+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } -+ -+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ -+ struct hlist_head name[1 << (bits)] __read_mostly = \ -+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } -+ -+#define DECLARE_HASHTABLE(name, bits) \ -+ struct hlist_head name[1 << (bits)] -+ -+#define HASH_SIZE(name) (ARRAY_SIZE(name)) -+#define HASH_BITS(name) ilog2(HASH_SIZE(name)) -+ -+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ -+#define hash_min(val, bits) \ -+ (sizeof(val) <= 4 ? 
hash_32(val, bits) : hash_long(val, bits)) -+ -+static inline void __hash_init(struct hlist_head *ht, unsigned int sz) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < sz; i++) -+ INIT_HLIST_HEAD(&ht[i]); -+} -+ -+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) -+ -+#define hash_add(hashtable, node, key) \ -+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) -+ -+static inline bool hash_hashed(struct hlist_node *node) -+{ -+ return !hlist_unhashed(node); -+} -+ -+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < sz; i++) -+ if (!hlist_empty(&ht[i])) -+ return false; -+ -+ return true; -+} -+ -+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) -+ -+static inline void hash_del(struct hlist_node *node) -+{ -+ hlist_del_init(node); -+} -+#endif /* RHEL >= 6.6 */ -+ -+#else /* >= 3.7.0 */ -+#include -+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS -+#define USE_CONST_DEV_UC_CHAR -+#endif /* >= 3.7.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) -+#ifndef pci_sriov_set_totalvfs -+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) -+{ -+ return 0; -+} -+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) -+#endif -+#ifndef PCI_EXP_LNKCTL_ASPM_L0S -+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ -+#endif -+#ifndef PCI_EXP_LNKCTL_ASPM_L1 -+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ -+#endif -+#define HAVE_CONFIG_HOTPLUG -+/* Reserved Ethernet Addresses per IEEE 802.1Q */ -+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { -+ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -+ -+#ifndef is_link_local_ether_addr -+static inline bool __kc_is_link_local_ether_addr(const u8 *addr) -+{ -+ __be16 *a = (__be16 *)addr; -+ static const __be16 *b = (const __be16 *)eth_reserved_addr_base; -+ static const __be16 m = cpu_to_be16(0xfff0); -+ -+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; -+} -+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) -+#endif /* is_link_local_ether_addr */ -+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, -+ int target, unsigned short *fragoff, int *flags); -+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) -+ -+#ifndef FLOW_MAC_EXT -+#define FLOW_MAC_EXT 0x40000000 -+#endif /* FLOW_MAC_EXT */ -+ -+#else /* >= 3.8.0 */ -+#ifndef __devinit -+#define __devinit -+#endif -+ -+#ifndef __devinitdata -+#define __devinitdata -+#endif -+ -+#ifndef __devinitconst -+#define __devinitconst -+#endif -+ -+#ifndef __devexit -+#define __devexit -+#endif -+ -+#ifndef __devexit_p -+#define __devexit_p -+#endif -+ -+#ifndef HAVE_ENCAP_CSUM_OFFLOAD -+#define HAVE_ENCAP_CSUM_OFFLOAD -+#endif -+ -+#ifndef HAVE_GRE_ENCAP_OFFLOAD -+#define HAVE_GRE_ENCAP_OFFLOAD -+#endif -+ -+#ifndef HAVE_SRIOV_CONFIGURE -+#define HAVE_SRIOV_CONFIGURE -+#endif -+ -+#define HAVE_BRIDGE_ATTRIBS -+#ifndef BRIDGE_MODE_VEB -+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ -+#endif /* BRIDGE_MODE_VEB */ -+#ifndef BRIDGE_MODE_VEPA -+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ -+#endif /* BRIDGE_MODE_VEPA */ -+#endif /* >= 3.8.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) -+ -+#undef 
BUILD_BUG_ON -+#ifdef __CHECKER__ -+#define BUILD_BUG_ON(condition) (0) -+#else /* __CHECKER__ */ -+#ifndef __compiletime_warning -+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) -+#define __compiletime_warning(message) __attribute__((warning(message))) -+#else /* __GNUC__ */ -+#define __compiletime_warning(message) -+#endif /* __GNUC__ */ -+#endif /* __compiletime_warning */ -+#ifndef __compiletime_error -+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) -+#define __compiletime_error(message) __attribute__((error(message))) -+#define __compiletime_error_fallback(condition) do { } while (0) -+#else /* __GNUC__ */ -+#define __compiletime_error(message) -+#define __compiletime_error_fallback(condition) \ -+ do { ((void)sizeof(char[1 - 2 * condition])); } while (0) -+#endif /* __GNUC__ */ -+#else /* __compiletime_error */ -+#define __compiletime_error_fallback(condition) do { } while (0) -+#endif /* __compiletime_error */ -+#define __compiletime_assert(condition, msg, prefix, suffix) \ -+ do { \ -+ bool __cond = !(condition); \ -+ extern void prefix ## suffix(void) __compiletime_error(msg); \ -+ if (__cond) \ -+ prefix ## suffix(); \ -+ __compiletime_error_fallback(__cond); \ -+ } while (0) -+ -+#define _compiletime_assert(condition, msg, prefix, suffix) \ -+ __compiletime_assert(condition, msg, prefix, suffix) -+#define compiletime_assert(condition, msg) \ -+ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) -+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) -+#ifndef __OPTIMIZE__ -+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -+#else /* __OPTIMIZE__ */ -+#define BUILD_BUG_ON(condition) \ -+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) -+#endif /* __OPTIMIZE__ */ -+#endif /* __CHECKER__ */ -+ -+#undef hlist_entry -+#define hlist_entry(ptr, type, member) container_of(ptr,type,member) -+ -+#undef hlist_entry_safe -+#define hlist_entry_safe(ptr, type, member) \ -+ ({ typeof(ptr) ____ptr = (ptr); \ -+ ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ -+ }) -+ -+#undef hlist_for_each_entry -+#define hlist_for_each_entry(pos, head, member) \ -+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ -+ pos; \ -+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) -+ -+#undef hlist_for_each_entry_safe -+#define hlist_for_each_entry_safe(pos, n, head, member) \ -+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ -+ pos && ({ n = pos->member.next; 1; }); \ -+ pos = hlist_entry_safe(n, typeof(*pos), member)) -+ -+#undef hash_for_each -+#define hash_for_each(name, bkt, obj, member) \ -+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ -+ (bkt)++)\ -+ hlist_for_each_entry(obj, &name[bkt], member) -+ -+#undef hash_for_each_safe -+#define hash_for_each_safe(name, bkt, tmp, obj, member) \ -+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ -+ (bkt)++)\ -+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) -+ -+#undef hash_for_each_possible -+#define hash_for_each_possible(name, obj, member, key) \ -+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) -+ -+#undef hash_for_each_possible_safe -+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ -+ hlist_for_each_entry_safe(obj, tmp,\ -+ &name[hash_min(key, HASH_BITS(name))], member) -+ -+#ifdef CONFIG_XPS -+extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16); -+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) -+#else /* CONFIG_XPS */ -+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) -+#endif /* CONFIG_XPS */ -+ -+#ifdef HAVE_NETDEV_SELECT_QUEUE -+#define _kc_hashrnd 0xd631614b /* not so random hash salt */ -+extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); -+#define __netdev_pick_tx __kc_netdev_pick_tx -+#endif /* HAVE_NETDEV_SELECT_QUEUE */ -+#else -+#define HAVE_BRIDGE_FILTER -+#define HAVE_FDB_DEL_NLATTR -+#endif /* < 3.9.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) -+#ifndef NAPI_POLL_WEIGHT -+#define NAPI_POLL_WEIGHT 64 -+#endif -+#ifdef CONFIG_PCI_IOV -+extern int __kc_pci_vfs_assigned(struct pci_dev *dev); -+#else -+static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) -+{ -+ return 0; -+} -+#endif -+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) -+ -+#ifndef list_first_entry_or_null -+#define list_first_entry_or_null(ptr, type, member) \ -+ (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) -+#endif -+ -+#ifndef VLAN_TX_COOKIE_MAGIC -+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, -+ u16 vlan_tci) -+{ -+#ifdef VLAN_TAG_PRESENT -+ vlan_tci |= VLAN_TAG_PRESENT; -+#endif -+ skb->vlan_tci = vlan_tci; -+ return skb; -+} -+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ -+ __kc__vlan_hwaccel_put_tag(skb, vlan_tci) -+#endif -+ -+#ifdef HAVE_FDB_OPS -+#ifdef USE_CONST_DEV_UC_CHAR -+extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *dev, -+ const unsigned char *addr, u16 flags); -+#ifdef HAVE_FDB_DEL_NLATTR -+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], -+ struct net_device *dev, -+ const unsigned char *addr); -+#else -+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, -+ const unsigned char *addr); -+#endif -+#else -+extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, -+ unsigned char *addr, u16 flags); -+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, -+ unsigned char *addr); -+#endif -+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add -+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del -+#endif /* HAVE_FDB_OPS */ -+ -+#ifndef PCI_DEVID -+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) -+#endif -+#else /* >= 3.10.0 */ -+#define HAVE_ENCAP_TSO_OFFLOAD -+#define USE_DEFAULT_FDB_DEL_DUMP -+#define HAVE_SKB_INNER_NETWORK_HEADER -+#endif /* >= 3.10.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) -+#define netdev_notifier_info_to_dev(ptr) ptr -+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ -+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) -+#define HAVE_NDO_SET_VF_LINK_STATE -+#endif -+#else /* >= 3.11.0 */ -+#define HAVE_NDO_SET_VF_LINK_STATE -+#define HAVE_SKB_INNER_PROTOCOL -+#define HAVE_MPLS_FEATURES -+#endif /* >= 3.11.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) -+extern int __kc_pcie_get_minimum_link(struct pci_dev *dev, -+ enum pci_bus_speed *speed, -+ enum pcie_link_width *width); -+#ifndef pcie_get_minimum_link -+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) -+#endif -+#else /* >= 3.12.0 */ -+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) -+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -+#endif -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) -+#define HAVE_VXLAN_RX_OFFLOAD -+#endif /* < 4.8.0 */ -+#define HAVE_NDO_GET_PHYS_PORT_ID -+#endif /* >= 3.12.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) -+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) -+extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); -+#ifndef u64_stats_init -+#define u64_stats_init(a) do { } while(0) -+#endif -+#ifndef BIT_ULL -+#define BIT_ULL(n) (1ULL << (n)) -+#endif -+ -+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) -+#undef HAVE_STRUCT_PAGE_PFMEMALLOC -+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT -+#endif -+#ifndef list_next_entry -+#define list_next_entry(pos, member) \ -+ list_entry((pos)->member.next, typeof(*(pos)), member) -+#endif -+ -+#else /* >= 3.13.0 */ -+#define HAVE_VXLAN_CHECKS -+#if 
(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) -+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -+#else -+#define HAVE_NDO_SELECT_QUEUE_ACCEL -+#endif -+#define HAVE_NET_GET_RANDOM_ONCE -+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS -+#endif -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) -+ -+#ifndef U16_MAX -+#define U16_MAX ((u16)~0U) -+#endif -+ -+#ifndef U32_MAX -+#define U32_MAX ((u32)~0U) -+#endif -+ -+#define dev_consume_skb_any(x) dev_kfree_skb_any(x) -+ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ -+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) -+ -+/* it isn't expected that this would be a #define unless we made it so */ -+#ifndef skb_set_hash -+ -+#define PKT_HASH_TYPE_NONE 0 -+#define PKT_HASH_TYPE_L2 1 -+#define PKT_HASH_TYPE_L3 2 -+#define PKT_HASH_TYPE_L4 3 -+ -+#define skb_set_hash __kc_skb_set_hash -+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, -+ u32 __maybe_unused hash, -+ int __maybe_unused type) -+{ -+#ifdef HAVE_SKB_L4_RXHASH -+ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); -+#endif -+#ifdef NETIF_F_RXHASH -+ skb->rxhash = hash; -+#endif -+} -+#endif /* !skb_set_hash */ -+ -+#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ -+ -+#ifndef HAVE_VXLAN_RX_OFFLOAD -+#define HAVE_VXLAN_RX_OFFLOAD -+#endif /* HAVE_VXLAN_RX_OFFLOAD */ -+ -+#ifndef HAVE_VXLAN_CHECKS -+#define HAVE_VXLAN_CHECKS -+#endif /* HAVE_VXLAN_CHECKS */ -+#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ -+ -+#ifndef pci_enable_msix_range -+extern int __kc_pci_enable_msix_range(struct pci_dev *dev, -+ struct msix_entry *entries, -+ int minvec, int maxvec); -+#define pci_enable_msix_range __kc_pci_enable_msix_range -+#endif -+ -+#ifndef ether_addr_copy -+#define ether_addr_copy __kc_ether_addr_copy -+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) -+{ -+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) -+ *(u32 *)dst = *(const u32 *)src; -+ *(u16 *)(dst + 4) = *(const u16 *)(src + 4); -+#else -+ u16 *a = (u16 *)dst; -+ const u16 *b = (const u16 *)src; -+ -+ a[0] = b[0]; -+ a[1] = b[1]; -+ a[2] = b[2]; -+#endif -+} -+#endif /* ether_addr_copy */ -+ -+#else /* >= 3.14.0 */ -+ -+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ -+#ifndef HAVE_NDO_DFWD_OPS -+#define HAVE_NDO_DFWD_OPS -+#endif -+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK -+#endif /* 3.14.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) -+ -+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ -+ !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) -+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh -+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh -+#endif -+ -+#else -+#define HAVE_PTP_1588_CLOCK_PINS -+#define HAVE_NETDEV_PORT -+#endif /* 3.15.0 */ -+ -+/*****************************************************************************/ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) -+#ifndef smp_mb__before_atomic -+#define smp_mb__before_atomic() smp_mb() -+#define smp_mb__after_atomic() smp_mb() -+#endif -+#ifndef __dev_uc_sync -+#ifdef HAVE_SET_RX_MODE -+#ifdef NETDEV_HW_ADDR_T_UNICAST -+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, -+ struct net_device *dev, -+ int (*sync)(struct net_device *, 
const unsigned char *), -+ int (*unsync)(struct net_device *, const unsigned char *)); -+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, -+ struct net_device *dev, -+ int (*unsync)(struct net_device *, const unsigned char *)); -+#endif -+#ifndef NETDEV_HW_ADDR_T_MULTICAST -+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, -+ struct net_device *dev, -+ int (*sync)(struct net_device *, const unsigned char *), -+ int (*unsync)(struct net_device *, const unsigned char *)); -+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, -+ struct net_device *dev, -+ int (*unsync)(struct net_device *, const unsigned char *)); -+#endif -+#endif /* HAVE_SET_RX_MODE */ -+ -+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, -+ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), -+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -+{ -+#ifdef NETDEV_HW_ADDR_T_UNICAST -+ return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); -+#elif defined(HAVE_SET_RX_MODE) -+ return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, -+ dev, sync, unsync); -+#else -+ return 0; -+#endif -+} -+#define __dev_uc_sync __kc_dev_uc_sync -+ -+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, -+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -+{ -+#ifdef HAVE_SET_RX_MODE -+#ifdef NETDEV_HW_ADDR_T_UNICAST -+ __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); -+#else /* NETDEV_HW_ADDR_T_MULTICAST */ -+ __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); -+#endif /* NETDEV_HW_ADDR_T_UNICAST */ -+#endif /* HAVE_SET_RX_MODE */ -+} -+#define __dev_uc_unsync __kc_dev_uc_unsync -+ -+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, -+ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), -+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -+{ -+#ifdef NETDEV_HW_ADDR_T_MULTICAST -+ return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); -+#elif defined(HAVE_SET_RX_MODE) -+ return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, -+ dev, sync, unsync); -+#else -+ return 0; -+#endif -+ -+} -+#define __dev_mc_sync __kc_dev_mc_sync -+ -+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, -+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) -+{ -+#ifdef HAVE_SET_RX_MODE -+#ifdef NETDEV_HW_ADDR_T_MULTICAST -+ __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); -+#else /* NETDEV_HW_ADDR_T_MULTICAST */ -+ __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); -+#endif /* NETDEV_HW_ADDR_T_MULTICAST */ -+#endif /* HAVE_SET_RX_MODE */ -+} -+#define __dev_mc_unsync __kc_dev_mc_unsync -+#endif /* __dev_uc_sync */ -+ -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) -+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -+#endif -+ -+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM -+/* if someone backports this, hopefully they backport as a #define. 
-+ * declare it as zero on older kernels so that if it get's or'd in -+ * it won't effect anything, therefore preventing core driver changes -+ */ -+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 -+#define SKB_GSO_UDP_TUNNEL_CSUM 0 -+#endif -+ -+#else -+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY -+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE -+#endif /* 3.16.0 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) -+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ -+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ -+ !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) -+#ifndef timespec64 -+#define timespec64 timespec -+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) -+{ -+ return ts; -+} -+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) -+{ -+ return ts64; -+} -+#define timespec64_equal timespec_equal -+#define timespec64_compare timespec_compare -+#define set_normalized_timespec64 set_normalized_timespec -+#define timespec64_add_safe timespec_add_safe -+#define timespec64_add timespec_add -+#define timespec64_sub timespec_sub -+#define timespec64_valid timespec_valid -+#define timespec64_valid_strict timespec_valid_strict -+#define timespec64_to_ns timespec_to_ns -+#define ns_to_timespec64 ns_to_timespec -+#define ktime_to_timespec64 ktime_to_timespec -+#define timespec64_add_ns timespec_add_ns -+#endif /* timespec64 */ -+#endif /* !(RHEL6.8 -+extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); -+extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, -+ struct skb_shared_hwtstamps *hwtstamps); -+#define skb_clone_sk __kc_skb_clone_sk -+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp -+#endif -+extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); -+#define eth_get_headlen __kc_eth_get_headlen -+#ifndef ETH_P_XDSA -+#define ETH_P_XDSA 0x00F8 -+#endif -+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) -+#define HAVE_SKBUFF_CSUM_LEVEL -+#endif /* >= RH 7.1 */ -+ -+#undef GENMASK -+#define GENMASK(h, l) \ -+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) -+#undef GENMASK_ULL -+#define GENMASK_ULL(h, l) \ -+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) -+ -+#else /* 3.18.0 */ -+#define HAVE_SKBUFF_CSUM_LEVEL -+#define HAVE_SKB_XMIT_MORE -+#define HAVE_SKB_INNER_PROTOCOL_TYPE -+#endif /* 3.18.0 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) -+#else -+#define HAVE_NDO_FEATURES_CHECK -+#endif /* 3.18.4 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) -+/* netdev_phys_port_id renamed to netdev_phys_item_id */ -+#define netdev_phys_item_id netdev_phys_port_id -+ -+static inline void _kc_napi_complete_done(struct napi_struct *napi, -+ int __always_unused work_done) { -+ napi_complete(napi); -+} -+#define napi_complete_done _kc_napi_complete_done -+ -+#ifndef NETDEV_RSS_KEY_LEN -+#define NETDEV_RSS_KEY_LEN (13 * 4) -+#endif -+#if ( !(RHEL_RELEASE_CODE && \ -+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ) -+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) -+#endif /* RHEL_RELEASE_CODE */ -+extern void __kc_netdev_rss_key_fill(void *buffer, size_t len); -+#define SPEED_20000 20000 -+#define SPEED_40000 40000 -+#ifndef dma_rmb -+#define dma_rmb() rmb() -+#endif -+#ifndef dev_alloc_pages -+#define dev_alloc_pages(_order) 
alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) -+#endif -+#ifndef dev_alloc_page -+#define dev_alloc_page() dev_alloc_pages(0) -+#endif -+#if !defined(eth_skb_pad) && !defined(skb_put_padto) -+/** -+ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size -+ * @skb: buffer to pad -+ * @len: minimal length -+ * -+ * Pads up a buffer to ensure the trailing bytes exist and are -+ * blanked. If the buffer already contains sufficient data it -+ * is untouched. Otherwise it is extended. Returns zero on -+ * success. The skb is freed on error. -+ */ -+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) -+{ -+ unsigned int size = skb->len; -+ -+ if (unlikely(size < len)) { -+ len -= size; -+ if (skb_pad(skb, len)) -+ return -ENOMEM; -+ __skb_put(skb, len); -+ } -+ return 0; -+} -+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) -+ -+static inline int __kc_eth_skb_pad(struct sk_buff *skb) -+{ -+ return __kc_skb_put_padto(skb, ETH_ZLEN); -+} -+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) -+#endif /* eth_skb_pad && skb_put_padto */ -+ -+#ifndef SKB_ALLOC_NAPI -+/* RHEL 7.2 backported napi_alloc_skb and friends */ -+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) -+{ -+ return netdev_alloc_skb_ip_align(napi->dev, length); -+} -+#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) -+#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) -+#endif /* SKB_ALLOC_NAPI */ -+#define HAVE_CONFIG_PM_RUNTIME -+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ -+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) -+#define HAVE_RXFH_HASHFUNC -+#endif /* 6.7 < RHEL < 7.0 */ -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) -+#define HAVE_RXFH_HASHFUNC -+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS -+#endif /* RHEL > 7.1 */ -+#ifndef napi_schedule_irqoff -+#define napi_schedule_irqoff napi_schedule -+#endif -+#ifndef READ_ONCE -+#define READ_ONCE(_x) ACCESS_ONCE(_x) -+#endif -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -+#define HAVE_NDO_FDB_ADD_VID -+#endif -+#else /* 3.19.0 */ -+#define HAVE_NDO_FDB_ADD_VID -+#define HAVE_RXFH_HASHFUNC -+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS -+#endif /* 3.19.0 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) -+/* vlan_tx_xx functions got renamed to skb_vlan */ -+#ifndef skb_vlan_tag_get -+#define skb_vlan_tag_get vlan_tx_tag_get -+#endif -+#ifndef skb_vlan_tag_present -+#define skb_vlan_tag_present vlan_tx_tag_present -+#endif -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) -+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H -+#endif -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS -+#endif -+#else -+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H -+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS -+#endif /* 3.20.0 */ -+ -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) -+#ifndef NO_PTP_SUPPORT -+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H -+#include -+#else -+#include -+#endif -+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) -+{ -+ tc->nsec += delta; -+} -+#define timecounter_adjtime __kc_timecounter_adjtime -+#endif -+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) -+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -+#endif -+#else -+#define HAVE_PTP_CLOCK_INFO_GETTIME64 -+#define 
HAVE_NDO_BRIDGE_GETLINK_NLFLAGS -+#define HAVE_PASSTHRU_FEATURES_CHECK -+#define HAVE_NDO_SET_VF_RSS_QUERY_EN -+#endif /* 4,1,0 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) -+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) && \ -+ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) -+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) -+{ -+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC -+ return page->pfmemalloc; -+#else -+ return false; -+#endif -+} -+#endif /* !SLES12sp1 */ -+#else -+#undef HAVE_STRUCT_PAGE_PFMEMALLOC -+#endif /* 4.1.9 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) -+#else -+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT -+#endif /* 4.2.0 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) -+#ifndef CONFIG_64BIT -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) -+#include /* 32-bit readq/writeq */ -+#else /* 3.3.0 => 4.3.x */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) -+#include -+#endif /* 2.6.26 => 3.3.0 */ -+#ifndef readq -+static inline __u64 readq(const volatile void __iomem *addr) -+{ -+ const volatile u32 __iomem *p = addr; -+ u32 low, high; -+ -+ low = readl(p); -+ high = readl(p + 1); -+ -+ return low + ((u64)high << 32); -+} -+#define readq readq -+#endif -+ -+#ifndef writeq -+static inline void writeq(__u64 val, volatile void __iomem *addr) -+{ -+ writel(val, addr); -+ writel(val >> 32, addr + 4); -+} -+#define writeq writeq -+#endif -+#endif /* < 3.3.0 */ -+#endif /* !CONFIG_64BIT */ -+#else -+#define HAVE_NDO_SET_VF_TRUST -+ -+#ifndef CONFIG_64BIT -+#include /* 32-bit readq/writeq */ -+#endif /* !CONFIG_64BIT */ -+#endif /* 4.4.0 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) -+/* protect against a likely backport */ -+#ifndef NETIF_F_CSUM_MASK -+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM -+#endif /* NETIF_F_CSUM_MASK */ -+#ifndef NETIF_F_SCTP_CRC -+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM -+#endif /* NETIF_F_SCTP_CRC */ -+#else -+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) -+#define HAVE_GENEVE_RX_OFFLOAD -+#endif /* < 4.8.0 */ -+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD -+#endif /* 4.5.0 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) -+#if !(UBUNTU_VERSION_CODE && \ -+ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ -+ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) -+static inline void napi_consume_skb(struct sk_buff *skb, -+ int __always_unused budget) -+{ -+ dev_consume_skb_any(skb); -+} -+ -+#endif /* UBUNTU_VERSION(4,4,0,21) */ -+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) -+{ -+ * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); -+} -+ -+#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) -+static inline void page_ref_inc(struct page *page) -+{ -+ atomic_inc(&page->_count); -+} -+ -+#endif -+ -+#endif /* 4.6.0 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) -+#else -+#define HAVE_NETIF_TRANS_UPDATE -+#endif /* 4.7.0 */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) -+enum udp_parsable_tunnel_type { -+ UDP_TUNNEL_TYPE_VXLAN, -+ UDP_TUNNEL_TYPE_GENEVE, -+}; -+struct udp_tunnel_info { -+ unsigned short type; -+ sa_family_t sa_family; -+ __be16 port; -+}; -+#else -+#define HAVE_UDP_ENC_RX_OFFLOAD -+#endif /* 4.8.0 */ -+ -+#endif /* _KCOMPAT_H_ */ -diff -Nu a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c ---- a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c 1970-01-01 
00:00:00.000000000 +0000 -+++ b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c 2016-11-14 14:32:08.583567168 +0000 -@@ -0,0 +1,1169 @@ -+/******************************************************************************* -+ -+ Intel(R) Gigabit Ethernet Linux driver -+ Copyright(c) 2007-2015 Intel Corporation. -+ -+ This program is free software; you can redistribute it and/or modify it -+ under the terms and conditions of the GNU General Public License, -+ version 2, as published by the Free Software Foundation. -+ -+ This program is distributed in the hope it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ The full GNU General Public License is included in this distribution in -+ the file called "COPYING". -+ -+ Contact Information: -+ Linux NICS -+ e1000-devel Mailing List -+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -+ -+*******************************************************************************/ -+ -+/* -+ * net/core/ethtool.c - Ethtool ioctl handler -+ * Copyright (c) 2003 Matthew Wilcox -+ * -+ * This file is where we call all the ethtool_ops commands to get -+ * the information ethtool needs. We fall back to calling do_ioctl() -+ * for drivers which haven't been converted to ethtool_ops yet. -+ * -+ * It's GPL, stupid. -+ * -+ * Modification by sfeldma@pobox.com to work as backward compat -+ * solution for pre-ethtool_ops kernels. -+ * - copied struct ethtool_ops from ethtool.h -+ * - defined SET_ETHTOOL_OPS -+ * - put in some #ifndef NETIF_F_xxx wrappers -+ * - changes refs to dev->ethtool_ops to ethtool_ops -+ * - changed dev_ethtool to ethtool_ioctl -+ * - remove EXPORT_SYMBOL()s -+ * - added _kc_ prefix in built-in ethtool_op_xxx ops. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "kcompat.h" -+ -+#undef SUPPORTED_10000baseT_Full -+#define SUPPORTED_10000baseT_Full (1 << 12) -+#undef ADVERTISED_10000baseT_Full -+#define ADVERTISED_10000baseT_Full (1 << 12) -+#undef SPEED_10000 -+#define SPEED_10000 10000 -+ -+#undef ethtool_ops -+#define ethtool_ops _kc_ethtool_ops -+ -+struct _kc_ethtool_ops { -+ int (*get_settings)(struct net_device *, struct ethtool_cmd *); -+ int (*set_settings)(struct net_device *, struct ethtool_cmd *); -+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); -+ int (*get_regs_len)(struct net_device *); -+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); -+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); -+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); -+ u32 (*get_msglevel)(struct net_device *); -+ void (*set_msglevel)(struct net_device *, u32); -+ int (*nway_reset)(struct net_device *); -+ u32 (*get_link)(struct net_device *); -+ int (*get_eeprom_len)(struct net_device *); -+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); -+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); -+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); -+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); -+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); -+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); -+ void (*get_pauseparam)(struct net_device *, -+ struct ethtool_pauseparam*); -+ int (*set_pauseparam)(struct net_device *, -+ struct ethtool_pauseparam*); -+ u32 (*get_rx_csum)(struct net_device *); -+ int (*set_rx_csum)(struct net_device *, u32); -+ u32 (*get_tx_csum)(struct net_device *); -+ int (*set_tx_csum)(struct net_device *, u32); -+ u32 (*get_sg)(struct net_device *); -+ int (*set_sg)(struct net_device *, u32); -+ u32 (*get_tso)(struct net_device *); -+ int (*set_tso)(struct net_device *, u32); -+ int (*self_test_count)(struct net_device *); -+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); -+ void (*get_strings)(struct net_device *, u32 stringset, u8 *); -+ int (*phys_id)(struct net_device *, u32); -+ int (*get_stats_count)(struct net_device *); -+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, -+ u64 *); -+} *ethtool_ops = NULL; -+ -+#undef SET_ETHTOOL_OPS -+#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) -+ -+/* -+ * Some useful ethtool_ops methods that are device independent. If we find that -+ * all drivers want to do the same thing here, we can turn these into dev_() -+ * function calls. -+ */ -+ -+#undef ethtool_op_get_link -+#define ethtool_op_get_link _kc_ethtool_op_get_link -+u32 _kc_ethtool_op_get_link(struct net_device *dev) -+{ -+ return netif_carrier_ok(dev) ? 
1 : 0; -+} -+ -+#undef ethtool_op_get_tx_csum -+#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum -+u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) -+{ -+#ifdef NETIF_F_IP_CSUM -+ return (dev->features & NETIF_F_IP_CSUM) != 0; -+#else -+ return 0; -+#endif -+} -+ -+#undef ethtool_op_set_tx_csum -+#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum -+int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) -+{ -+#ifdef NETIF_F_IP_CSUM -+ if (data) -+#ifdef NETIF_F_IPV6_CSUM -+ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); -+ else -+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); -+#else -+ dev->features |= NETIF_F_IP_CSUM; -+ else -+ dev->features &= ~NETIF_F_IP_CSUM; -+#endif -+#endif -+ -+ return 0; -+} -+ -+#undef ethtool_op_get_sg -+#define ethtool_op_get_sg _kc_ethtool_op_get_sg -+u32 _kc_ethtool_op_get_sg(struct net_device *dev) -+{ -+#ifdef NETIF_F_SG -+ return (dev->features & NETIF_F_SG) != 0; -+#else -+ return 0; -+#endif -+} -+ -+#undef ethtool_op_set_sg -+#define ethtool_op_set_sg _kc_ethtool_op_set_sg -+int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) -+{ -+#ifdef NETIF_F_SG -+ if (data) -+ dev->features |= NETIF_F_SG; -+ else -+ dev->features &= ~NETIF_F_SG; -+#endif -+ -+ return 0; -+} -+ -+#undef ethtool_op_get_tso -+#define ethtool_op_get_tso _kc_ethtool_op_get_tso -+u32 _kc_ethtool_op_get_tso(struct net_device *dev) -+{ -+#ifdef NETIF_F_TSO -+ return (dev->features & NETIF_F_TSO) != 0; -+#else -+ return 0; -+#endif -+} -+ -+#undef ethtool_op_set_tso -+#define ethtool_op_set_tso _kc_ethtool_op_set_tso -+int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) -+{ -+#ifdef NETIF_F_TSO -+ if (data) -+ dev->features |= NETIF_F_TSO; -+ else -+ dev->features &= ~NETIF_F_TSO; -+#endif -+ -+ return 0; -+} -+ -+/* Handlers for each ethtool command */ -+ -+static int ethtool_get_settings(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_cmd cmd = { ETHTOOL_GSET }; -+ int err; -+ -+ if (!ethtool_ops->get_settings) -+ return -EOPNOTSUPP; -+ -+ err = ethtool_ops->get_settings(dev, &cmd); -+ if (err < 0) -+ return err; -+ -+ if (copy_to_user(useraddr, &cmd, sizeof(cmd))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_settings(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_cmd cmd; -+ -+ if (!ethtool_ops->set_settings) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&cmd, useraddr, sizeof(cmd))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_settings(dev, &cmd); -+} -+ -+static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_drvinfo info; -+ struct ethtool_ops *ops = ethtool_ops; -+ -+ if (!ops->get_drvinfo) -+ return -EOPNOTSUPP; -+ -+ memset(&info, 0, sizeof(info)); -+ info.cmd = ETHTOOL_GDRVINFO; -+ ops->get_drvinfo(dev, &info); -+ -+ if (ops->self_test_count) -+ info.testinfo_len = ops->self_test_count(dev); -+ if (ops->get_stats_count) -+ info.n_stats = ops->get_stats_count(dev); -+ if (ops->get_regs_len) -+ info.regdump_len = ops->get_regs_len(dev); -+ if (ops->get_eeprom_len) -+ info.eedump_len = ops->get_eeprom_len(dev); -+ -+ if (copy_to_user(useraddr, &info, sizeof(info))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_get_regs(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_regs regs; -+ struct ethtool_ops *ops = ethtool_ops; -+ void *regbuf; -+ int reglen, ret; -+ -+ if (!ops->get_regs || !ops->get_regs_len) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(®s, useraddr, sizeof(regs))) -+ return -EFAULT; 
-+ -+ reglen = ops->get_regs_len(dev); -+ if (regs.len > reglen) -+ regs.len = reglen; -+ -+ regbuf = kmalloc(reglen, GFP_USER); -+ if (!regbuf) -+ return -ENOMEM; -+ -+ ops->get_regs(dev, ®s, regbuf); -+ -+ ret = -EFAULT; -+ if (copy_to_user(useraddr, ®s, sizeof(regs))) -+ goto out; -+ useraddr += offsetof(struct ethtool_regs, data); -+ if (copy_to_user(useraddr, regbuf, reglen)) -+ goto out; -+ ret = 0; -+ -+out: -+ kfree(regbuf); -+ return ret; -+} -+ -+static int ethtool_get_wol(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; -+ -+ if (!ethtool_ops->get_wol) -+ return -EOPNOTSUPP; -+ -+ ethtool_ops->get_wol(dev, &wol); -+ -+ if (copy_to_user(useraddr, &wol, sizeof(wol))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_wol(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_wolinfo wol; -+ -+ if (!ethtool_ops->set_wol) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&wol, useraddr, sizeof(wol))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_wol(dev, &wol); -+} -+ -+static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata = { ETHTOOL_GMSGLVL }; -+ -+ if (!ethtool_ops->get_msglevel) -+ return -EOPNOTSUPP; -+ -+ edata.data = ethtool_ops->get_msglevel(dev); -+ -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata; -+ -+ if (!ethtool_ops->set_msglevel) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&edata, useraddr, sizeof(edata))) -+ return -EFAULT; -+ -+ ethtool_ops->set_msglevel(dev, edata.data); -+ return 0; -+} -+ -+static int ethtool_nway_reset(struct net_device *dev) -+{ -+ if (!ethtool_ops->nway_reset) -+ return -EOPNOTSUPP; -+ -+ return ethtool_ops->nway_reset(dev); -+} -+ -+static int ethtool_get_link(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_value edata = { ETHTOOL_GLINK }; -+ -+ if (!ethtool_ops->get_link) -+ return -EOPNOTSUPP; -+ -+ edata.data = ethtool_ops->get_link(dev); -+ -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_eeprom eeprom; -+ struct ethtool_ops *ops = ethtool_ops; -+ u8 *data; -+ int ret; -+ -+ if (!ops->get_eeprom || !ops->get_eeprom_len) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) -+ return -EFAULT; -+ -+ /* Check for wrap and zero */ -+ if (eeprom.offset + eeprom.len <= eeprom.offset) -+ return -EINVAL; -+ -+ /* Check for exceeding total eeprom len */ -+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) -+ return -EINVAL; -+ -+ data = kmalloc(eeprom.len, GFP_USER); -+ if (!data) -+ return -ENOMEM; -+ -+ ret = -EFAULT; -+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) -+ goto out; -+ -+ ret = ops->get_eeprom(dev, &eeprom, data); -+ if (ret) -+ goto out; -+ -+ ret = -EFAULT; -+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) -+ goto out; -+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) -+ goto out; -+ ret = 0; -+ -+out: -+ kfree(data); -+ return ret; -+} -+ -+static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_eeprom eeprom; -+ struct ethtool_ops *ops = ethtool_ops; -+ u8 *data; -+ int ret; -+ -+ if (!ops->set_eeprom || !ops->get_eeprom_len) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&eeprom, useraddr, 
sizeof(eeprom))) -+ return -EFAULT; -+ -+ /* Check for wrap and zero */ -+ if (eeprom.offset + eeprom.len <= eeprom.offset) -+ return -EINVAL; -+ -+ /* Check for exceeding total eeprom len */ -+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) -+ return -EINVAL; -+ -+ data = kmalloc(eeprom.len, GFP_USER); -+ if (!data) -+ return -ENOMEM; -+ -+ ret = -EFAULT; -+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) -+ goto out; -+ -+ ret = ops->set_eeprom(dev, &eeprom, data); -+ if (ret) -+ goto out; -+ -+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) -+ ret = -EFAULT; -+ -+out: -+ kfree(data); -+ return ret; -+} -+ -+static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; -+ -+ if (!ethtool_ops->get_coalesce) -+ return -EOPNOTSUPP; -+ -+ ethtool_ops->get_coalesce(dev, &coalesce); -+ -+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_coalesce coalesce; -+ -+ if (!ethtool_ops->get_coalesce) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_coalesce(dev, &coalesce); -+} -+ -+static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; -+ -+ if (!ethtool_ops->get_ringparam) -+ return -EOPNOTSUPP; -+ -+ ethtool_ops->get_ringparam(dev, &ringparam); -+ -+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_ringparam ringparam; -+ -+ if (!ethtool_ops->get_ringparam) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_ringparam(dev, &ringparam); -+} -+ -+static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; -+ -+ if (!ethtool_ops->get_pauseparam) -+ return -EOPNOTSUPP; -+ -+ ethtool_ops->get_pauseparam(dev, &pauseparam); -+ -+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_pauseparam pauseparam; -+ -+ if (!ethtool_ops->get_pauseparam) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_pauseparam(dev, &pauseparam); -+} -+ -+static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata = { ETHTOOL_GRXCSUM }; -+ -+ if (!ethtool_ops->get_rx_csum) -+ return -EOPNOTSUPP; -+ -+ edata.data = ethtool_ops->get_rx_csum(dev); -+ -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata; -+ -+ if (!ethtool_ops->set_rx_csum) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&edata, useraddr, sizeof(edata))) -+ return -EFAULT; -+ -+ ethtool_ops->set_rx_csum(dev, edata.data); -+ return 0; -+} -+ -+static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata = { ETHTOOL_GTXCSUM }; -+ -+ if (!ethtool_ops->get_tx_csum) -+ 
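/*
 * The two range checks above ("wrap and zero" and "exceeding total eeprom
 * len") reject a zero-length request, a 32-bit overflow of offset + len, and
 * any access running past the device EEPROM with just two comparisons.  A
 * standalone illustration of the same arithmetic (values are examples only,
 * not part of the patch):
 */
#include <stdint.h>
#include <stdio.h>

static int eeprom_range_ok(uint32_t offset, uint32_t len, uint32_t total_len)
{
	if (offset + len <= offset)        /* len == 0, or offset + len wrapped */
		return 0;
	if (offset + len > total_len)      /* would read/write past the end */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", eeprom_range_ok(0x10, 0x20, 0x100));        /* 1: in range */
	printf("%d\n", eeprom_range_ok(0x10, 0x00, 0x100));        /* 0: zero length */
	printf("%d\n", eeprom_range_ok(0xfffffff0u, 0x20, 0x100)); /* 0: 32-bit wrap */
	printf("%d\n", eeprom_range_ok(0xf0, 0x20, 0x100));        /* 0: past the end */
	return 0;
}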
return -EOPNOTSUPP; -+ -+ edata.data = ethtool_ops->get_tx_csum(dev); -+ -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata; -+ -+ if (!ethtool_ops->set_tx_csum) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&edata, useraddr, sizeof(edata))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_tx_csum(dev, edata.data); -+} -+ -+static int ethtool_get_sg(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata = { ETHTOOL_GSG }; -+ -+ if (!ethtool_ops->get_sg) -+ return -EOPNOTSUPP; -+ -+ edata.data = ethtool_ops->get_sg(dev); -+ -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_sg(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata; -+ -+ if (!ethtool_ops->set_sg) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&edata, useraddr, sizeof(edata))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_sg(dev, edata.data); -+} -+ -+static int ethtool_get_tso(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata = { ETHTOOL_GTSO }; -+ -+ if (!ethtool_ops->get_tso) -+ return -EOPNOTSUPP; -+ -+ edata.data = ethtool_ops->get_tso(dev); -+ -+ if (copy_to_user(useraddr, &edata, sizeof(edata))) -+ return -EFAULT; -+ return 0; -+} -+ -+static int ethtool_set_tso(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_value edata; -+ -+ if (!ethtool_ops->set_tso) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&edata, useraddr, sizeof(edata))) -+ return -EFAULT; -+ -+ return ethtool_ops->set_tso(dev, edata.data); -+} -+ -+static int ethtool_self_test(struct net_device *dev, char *useraddr) -+{ -+ struct ethtool_test test; -+ struct ethtool_ops *ops = ethtool_ops; -+ u64 *data; -+ int ret; -+ -+ if (!ops->self_test || !ops->self_test_count) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&test, useraddr, sizeof(test))) -+ return -EFAULT; -+ -+ test.len = ops->self_test_count(dev); -+ data = kmalloc(test.len * sizeof(u64), GFP_USER); -+ if (!data) -+ return -ENOMEM; -+ -+ ops->self_test(dev, &test, data); -+ -+ ret = -EFAULT; -+ if (copy_to_user(useraddr, &test, sizeof(test))) -+ goto out; -+ useraddr += sizeof(test); -+ if (copy_to_user(useraddr, data, test.len * sizeof(u64))) -+ goto out; -+ ret = 0; -+ -+out: -+ kfree(data); -+ return ret; -+} -+ -+static int ethtool_get_strings(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_gstrings gstrings; -+ struct ethtool_ops *ops = ethtool_ops; -+ u8 *data; -+ int ret; -+ -+ if (!ops->get_strings) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) -+ return -EFAULT; -+ -+ switch (gstrings.string_set) { -+ case ETH_SS_TEST: -+ if (!ops->self_test_count) -+ return -EOPNOTSUPP; -+ gstrings.len = ops->self_test_count(dev); -+ break; -+ case ETH_SS_STATS: -+ if (!ops->get_stats_count) -+ return -EOPNOTSUPP; -+ gstrings.len = ops->get_stats_count(dev); -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); -+ if (!data) -+ return -ENOMEM; -+ -+ ops->get_strings(dev, gstrings.string_set, data); -+ -+ ret = -EFAULT; -+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) -+ goto out; -+ useraddr += sizeof(gstrings); -+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) -+ goto out; -+ ret = 0; -+ -+out: -+ kfree(data); -+ return ret; -+} -+ -+static int ethtool_phys_id(struct 
net_device *dev, void *useraddr) -+{ -+ struct ethtool_value id; -+ -+ if (!ethtool_ops->phys_id) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&id, useraddr, sizeof(id))) -+ return -EFAULT; -+ -+ return ethtool_ops->phys_id(dev, id.data); -+} -+ -+static int ethtool_get_stats(struct net_device *dev, void *useraddr) -+{ -+ struct ethtool_stats stats; -+ struct ethtool_ops *ops = ethtool_ops; -+ u64 *data; -+ int ret; -+ -+ if (!ops->get_ethtool_stats || !ops->get_stats_count) -+ return -EOPNOTSUPP; -+ -+ if (copy_from_user(&stats, useraddr, sizeof(stats))) -+ return -EFAULT; -+ -+ stats.n_stats = ops->get_stats_count(dev); -+ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); -+ if (!data) -+ return -ENOMEM; -+ -+ ops->get_ethtool_stats(dev, &stats, data); -+ -+ ret = -EFAULT; -+ if (copy_to_user(useraddr, &stats, sizeof(stats))) -+ goto out; -+ useraddr += sizeof(stats); -+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) -+ goto out; -+ ret = 0; -+ -+out: -+ kfree(data); -+ return ret; -+} -+ -+/* The main entry point in this file. Called from net/core/dev.c */ -+ -+#define ETHTOOL_OPS_COMPAT -+int ethtool_ioctl(struct ifreq *ifr) -+{ -+ struct net_device *dev = __dev_get_by_name(ifr->ifr_name); -+ void *useraddr = (void *) ifr->ifr_data; -+ u32 ethcmd; -+ -+ /* -+ * XXX: This can be pushed down into the ethtool_* handlers that -+ * need it. Keep existing behavior for the moment. -+ */ -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ -+ if (!dev || !netif_device_present(dev)) -+ return -ENODEV; -+ -+ if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) -+ return -EFAULT; -+ -+ switch (ethcmd) { -+ case ETHTOOL_GSET: -+ return ethtool_get_settings(dev, useraddr); -+ case ETHTOOL_SSET: -+ return ethtool_set_settings(dev, useraddr); -+ case ETHTOOL_GDRVINFO: -+ return ethtool_get_drvinfo(dev, useraddr); -+ case ETHTOOL_GREGS: -+ return ethtool_get_regs(dev, useraddr); -+ case ETHTOOL_GWOL: -+ return ethtool_get_wol(dev, useraddr); -+ case ETHTOOL_SWOL: -+ return ethtool_set_wol(dev, useraddr); -+ case ETHTOOL_GMSGLVL: -+ return ethtool_get_msglevel(dev, useraddr); -+ case ETHTOOL_SMSGLVL: -+ return ethtool_set_msglevel(dev, useraddr); -+ case ETHTOOL_NWAY_RST: -+ return ethtool_nway_reset(dev); -+ case ETHTOOL_GLINK: -+ return ethtool_get_link(dev, useraddr); -+ case ETHTOOL_GEEPROM: -+ return ethtool_get_eeprom(dev, useraddr); -+ case ETHTOOL_SEEPROM: -+ return ethtool_set_eeprom(dev, useraddr); -+ case ETHTOOL_GCOALESCE: -+ return ethtool_get_coalesce(dev, useraddr); -+ case ETHTOOL_SCOALESCE: -+ return ethtool_set_coalesce(dev, useraddr); -+ case ETHTOOL_GRINGPARAM: -+ return ethtool_get_ringparam(dev, useraddr); -+ case ETHTOOL_SRINGPARAM: -+ return ethtool_set_ringparam(dev, useraddr); -+ case ETHTOOL_GPAUSEPARAM: -+ return ethtool_get_pauseparam(dev, useraddr); -+ case ETHTOOL_SPAUSEPARAM: -+ return ethtool_set_pauseparam(dev, useraddr); -+ case ETHTOOL_GRXCSUM: -+ return ethtool_get_rx_csum(dev, useraddr); -+ case ETHTOOL_SRXCSUM: -+ return ethtool_set_rx_csum(dev, useraddr); -+ case ETHTOOL_GTXCSUM: -+ return ethtool_get_tx_csum(dev, useraddr); -+ case ETHTOOL_STXCSUM: -+ return ethtool_set_tx_csum(dev, useraddr); -+ case ETHTOOL_GSG: -+ return ethtool_get_sg(dev, useraddr); -+ case ETHTOOL_SSG: -+ return ethtool_set_sg(dev, useraddr); -+ case ETHTOOL_GTSO: -+ return ethtool_get_tso(dev, useraddr); -+ case ETHTOOL_STSO: -+ return ethtool_set_tso(dev, useraddr); -+ case ETHTOOL_TEST: -+ return ethtool_self_test(dev, useraddr); -+ case ETHTOOL_GSTRINGS: -+ 
return ethtool_get_strings(dev, useraddr); -+ case ETHTOOL_PHYS_ID: -+ return ethtool_phys_id(dev, useraddr); -+ case ETHTOOL_GSTATS: -+ return ethtool_get_stats(dev, useraddr); -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return -EOPNOTSUPP; -+} -+ -+#define mii_if_info _kc_mii_if_info -+struct _kc_mii_if_info { -+ int phy_id; -+ int advertising; -+ int phy_id_mask; -+ int reg_num_mask; -+ -+ unsigned int full_duplex : 1; /* is full duplex? */ -+ unsigned int force_media : 1; /* is autoneg. disabled? */ -+ -+ struct net_device *dev; -+ int (*mdio_read) (struct net_device *dev, int phy_id, int location); -+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); -+}; -+ -+struct ethtool_cmd; -+struct mii_ioctl_data; -+ -+#undef mii_link_ok -+#define mii_link_ok _kc_mii_link_ok -+#undef mii_nway_restart -+#define mii_nway_restart _kc_mii_nway_restart -+#undef mii_ethtool_gset -+#define mii_ethtool_gset _kc_mii_ethtool_gset -+#undef mii_ethtool_sset -+#define mii_ethtool_sset _kc_mii_ethtool_sset -+#undef mii_check_link -+#define mii_check_link _kc_mii_check_link -+extern int _kc_mii_link_ok (struct mii_if_info *mii); -+extern int _kc_mii_nway_restart (struct mii_if_info *mii); -+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, -+ struct ethtool_cmd *ecmd); -+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, -+ struct ethtool_cmd *ecmd); -+extern void _kc_mii_check_link (struct mii_if_info *mii); -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) -+#undef generic_mii_ioctl -+#define generic_mii_ioctl _kc_generic_mii_ioctl -+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, -+ struct mii_ioctl_data *mii_data, int cmd, -+ unsigned int *duplex_changed); -+#endif /* > 2.4.6 */ -+ -+ -+struct _kc_pci_dev_ext { -+ struct pci_dev *dev; -+ void *pci_drvdata; -+ struct pci_driver *driver; -+}; -+ -+struct _kc_net_dev_ext { -+ struct net_device *dev; -+ unsigned int carrier; -+}; -+ -+ -+/**************************************/ -+/* mii support */ -+ -+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) -+{ -+ struct net_device *dev = mii->dev; -+ u32 advert, bmcr, lpa, nego; -+ -+ ecmd->supported = -+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | -+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | -+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); -+ -+ /* only supports twisted-pair */ -+ ecmd->port = PORT_MII; -+ -+ /* only supports internal transceiver */ -+ ecmd->transceiver = XCVR_INTERNAL; -+ -+ /* this isn't fully supported at higher layers */ -+ ecmd->phy_address = mii->phy_id; -+ -+ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; -+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); -+ if (advert & ADVERTISE_10HALF) -+ ecmd->advertising |= ADVERTISED_10baseT_Half; -+ if (advert & ADVERTISE_10FULL) -+ ecmd->advertising |= ADVERTISED_10baseT_Full; -+ if (advert & ADVERTISE_100HALF) -+ ecmd->advertising |= ADVERTISED_100baseT_Half; -+ if (advert & ADVERTISE_100FULL) -+ ecmd->advertising |= ADVERTISED_100baseT_Full; -+ -+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); -+ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); -+ if (bmcr & BMCR_ANENABLE) { -+ ecmd->advertising |= ADVERTISED_Autoneg; -+ ecmd->autoneg = AUTONEG_ENABLE; -+ -+ nego = mii_nway_result(advert & lpa); -+ if (nego == LPA_100FULL || nego == LPA_100HALF) -+ ecmd->speed = SPEED_100; -+ else -+ ecmd->speed = SPEED_10; -+ if (nego == LPA_100FULL || nego == LPA_10FULL) { -+ ecmd->duplex = DUPLEX_FULL; -+ mii->full_duplex = 1; -+ } 
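/*
 * How a driver is expected to consume this compatibility layer: it fills in
 * a _kc_ethtool_ops table (the redefined "struct ethtool_ops"), registers it
 * through the redefined SET_ETHTOOL_OPS(), which simply stores the pointer in
 * the file-scope ethtool_ops variable above, and forwards SIOCETHTOOL from
 * its old-style ioctl handler to ethtool_ioctl().  Sketch only -- the
 * "mydrv_*" names are hypothetical, and it assumes the old kernels this shim
 * targets, where net_device still has a do_ioctl member:
 */
static struct ethtool_ops mydrv_ethtool_ops = {
	.get_link    = ethtool_op_get_link,      /* generic helpers defined above */
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg      = ethtool_op_get_sg,
};

static int mydrv_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd == SIOCETHTOOL)
		return ethtool_ioctl(ifr);       /* compat dispatcher above */
	return -EOPNOTSUPP;
}

static void mydrv_setup(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &mydrv_ethtool_ops); /* stores the ops pointer */
	dev->do_ioctl = mydrv_do_ioctl;           /* old-style per-device ioctl hook */
}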
else { -+ ecmd->duplex = DUPLEX_HALF; -+ mii->full_duplex = 0; -+ } -+ } else { -+ ecmd->autoneg = AUTONEG_DISABLE; -+ -+ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; -+ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; -+ } -+ -+ /* ignore maxtxpkt, maxrxpkt for now */ -+ -+ return 0; -+} -+ -+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) -+{ -+ struct net_device *dev = mii->dev; -+ -+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) -+ return -EINVAL; -+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) -+ return -EINVAL; -+ if (ecmd->port != PORT_MII) -+ return -EINVAL; -+ if (ecmd->transceiver != XCVR_INTERNAL) -+ return -EINVAL; -+ if (ecmd->phy_address != mii->phy_id) -+ return -EINVAL; -+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) -+ return -EINVAL; -+ -+ /* ignore supported, maxtxpkt, maxrxpkt */ -+ -+ if (ecmd->autoneg == AUTONEG_ENABLE) { -+ u32 bmcr, advert, tmp; -+ -+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half | -+ ADVERTISED_10baseT_Full | -+ ADVERTISED_100baseT_Half | -+ ADVERTISED_100baseT_Full)) == 0) -+ return -EINVAL; -+ -+ /* advertise only what has been requested */ -+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); -+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); -+ if (ADVERTISED_10baseT_Half) -+ tmp |= ADVERTISE_10HALF; -+ if (ADVERTISED_10baseT_Full) -+ tmp |= ADVERTISE_10FULL; -+ if (ADVERTISED_100baseT_Half) -+ tmp |= ADVERTISE_100HALF; -+ if (ADVERTISED_100baseT_Full) -+ tmp |= ADVERTISE_100FULL; -+ if (advert != tmp) { -+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); -+ mii->advertising = tmp; -+ } -+ -+ /* turn on autonegotiation, and force a renegotiate */ -+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); -+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); -+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); -+ -+ mii->force_media = 0; -+ } else { -+ u32 bmcr, tmp; -+ -+ /* turn off auto negotiation, set speed and duplexity */ -+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); -+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); -+ if (ecmd->speed == SPEED_100) -+ tmp |= BMCR_SPEED100; -+ if (ecmd->duplex == DUPLEX_FULL) { -+ tmp |= BMCR_FULLDPLX; -+ mii->full_duplex = 1; -+ } else -+ mii->full_duplex = 0; -+ if (bmcr != tmp) -+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); -+ -+ mii->force_media = 1; -+ } -+ return 0; -+} -+ -+int _kc_mii_link_ok (struct mii_if_info *mii) -+{ -+ /* first, a dummy read, needed to latch some MII phys */ -+ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); -+ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) -+ return 1; -+ return 0; -+} -+ -+int _kc_mii_nway_restart (struct mii_if_info *mii) -+{ -+ int bmcr; -+ int r = -EINVAL; -+ -+ /* if autoneg is off, it's an error */ -+ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); -+ -+ if (bmcr & BMCR_ANENABLE) { -+ bmcr |= BMCR_ANRESTART; -+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); -+ r = 0; -+ } -+ -+ return r; -+} -+ -+void _kc_mii_check_link (struct mii_if_info *mii) -+{ -+ int cur_link = mii_link_ok(mii); -+ int prev_link = netif_carrier_ok(mii->dev); -+ -+ if (cur_link && !prev_link) -+ netif_carrier_on(mii->dev); -+ else if (prev_link && !cur_link) -+ netif_carrier_off(mii->dev); -+} -+ -+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) -+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, -+ struct mii_ioctl_data *mii_data, int cmd, -+ unsigned int *duplex_chg_out) -+{ 
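/*
 * Note on _kc_mii_ethtool_sset() above: as reproduced here, the four
 * "if (ADVERTISED_10baseT_Half)" style tests check constant macros, so every
 * advertisement bit is set regardless of what the caller requested.  The
 * intended logic presumably masks ecmd->advertising; a hypothetical helper
 * showing that construction (illustration only, not part of the patch):
 */
static u32 build_mii_advertise(u32 requested, u32 current_advert)
{
	u32 tmp = current_advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);

	if (requested & ADVERTISED_10baseT_Half)
		tmp |= ADVERTISE_10HALF;
	if (requested & ADVERTISED_10baseT_Full)
		tmp |= ADVERTISE_10FULL;
	if (requested & ADVERTISED_100baseT_Half)
		tmp |= ADVERTISE_100HALF;
	if (requested & ADVERTISED_100baseT_Full)
		tmp |= ADVERTISE_100FULL;
	return tmp;
}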
-+ int rc = 0; -+ unsigned int duplex_changed = 0; -+ -+ if (duplex_chg_out) -+ *duplex_chg_out = 0; -+ -+ mii_data->phy_id &= mii_if->phy_id_mask; -+ mii_data->reg_num &= mii_if->reg_num_mask; -+ -+ switch(cmd) { -+ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ -+ case SIOCGMIIPHY: -+ mii_data->phy_id = mii_if->phy_id; -+ /* fall through */ -+ -+ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ -+ case SIOCGMIIREG: -+ mii_data->val_out = -+ mii_if->mdio_read(mii_if->dev, mii_data->phy_id, -+ mii_data->reg_num); -+ break; -+ -+ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ -+ case SIOCSMIIREG: { -+ u16 val = mii_data->val_in; -+ -+ if (!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ -+ if (mii_data->phy_id == mii_if->phy_id) { -+ switch(mii_data->reg_num) { -+ case MII_BMCR: { -+ unsigned int new_duplex = 0; -+ if (val & (BMCR_RESET|BMCR_ANENABLE)) -+ mii_if->force_media = 0; -+ else -+ mii_if->force_media = 1; -+ if (mii_if->force_media && -+ (val & BMCR_FULLDPLX)) -+ new_duplex = 1; -+ if (mii_if->full_duplex != new_duplex) { -+ duplex_changed = 1; -+ mii_if->full_duplex = new_duplex; -+ } -+ break; -+ } -+ case MII_ADVERTISE: -+ mii_if->advertising = val; -+ break; -+ default: -+ /* do nothing */ -+ break; -+ } -+ } -+ -+ mii_if->mdio_write(mii_if->dev, mii_data->phy_id, -+ mii_data->reg_num, val); -+ break; -+ } -+ -+ default: -+ rc = -EOPNOTSUPP; -+ break; -+ } -+ -+ if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) -+ *duplex_chg_out = 1; -+ -+ return rc; -+} -+#endif /* > 2.4.6 */ -+ diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch deleted file mode 100644 index 5de8cb5b..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-support-intel-igb-bcm5461X-phy.patch +++ /dev/null @@ -1,242 +0,0 @@ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c ---- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 15:48:41.379628151 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-15 09:36:04.608478513 +0000 -@@ -302,6 +302,16 @@ - phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; - phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; - break; -+ case BCM5461S_PHY_ID: -+ phy->type = e1000_phy_bcm5461s; -+ phy->ops.check_polarity = NULL; -+ phy->ops.get_info = igb_get_phy_info_5461s; -+ phy->ops.get_cable_length = NULL; -+ phy->ops.force_speed_duplex = igb_e1000_phy_force_speed_duplex_82577; -+ break; -+ case BCM54616_E_PHY_ID: -+ phy->type = e1000_phy_bcm54616; -+ break; - default: - ret_val = -E1000_ERR_PHY; - goto out; -@@ -701,6 +711,17 @@ - break; - } - ret_val = e1000_get_phy_id(hw); -+ -+ if (ret_val && hw->mac.type == e1000_i354) { -+ /* we do a special check for bcm5461s phy by setting -+ * the phy->addr to 5 and doing the phy check again. 
This -+ * call will succeed and retrieve a valid phy id if we have -+ * the bcm5461s phy -+ */ -+ phy->addr = 5; -+ phy->type = e1000_phy_bcm5461s; -+ ret_val = e1000_get_phy_id(hw); -+ } - goto out; - } - -@@ -1148,6 +1169,9 @@ - (hw->phy.type == e1000_phy_igp_3)) - e1000_phy_init_script_igp3(hw); - -+ if (hw->phy.type == e1000_phy_bcm5461s) -+ igb_phy_init_script_5461s(hw); -+ - return E1000_SUCCESS; - } - -@@ -1557,6 +1581,7 @@ - case e1000_i350: - case e1000_i210: - case e1000_i211: -+ case e1000_i354: - phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); - phpm_reg &= ~E1000_82580_PM_GO_LINKD; - E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); -@@ -1602,6 +1627,10 @@ - case e1000_phy_82580: - ret_val = igb_e1000_copper_link_setup_82577(hw); - break; -+ case e1000_phy_bcm54616: -+ break; -+ case e1000_phy_bcm5461s: -+ break; - default: - ret_val = -E1000_ERR_PHY; - break; -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h ---- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 15:48:41.383628151 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 17:13:16.567695539 +0000 -@@ -1184,6 +1184,8 @@ - #define I350_I_PHY_ID 0x015403B0 - #define I210_I_PHY_ID 0x01410C00 - #define IGP04E1000_E_PHY_ID 0x02A80391 -+#define BCM54616_E_PHY_ID 0x3625D10 -+#define BCM5461S_PHY_ID 0x002060C0 - #define M88_VENDOR 0x0141 - - /* M88E1000 Specific Registers */ -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h ---- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 15:48:41.387628151 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 17:11:55.735694465 +0000 -@@ -133,6 +133,8 @@ - e1000_phy_82580, - e1000_phy_vf, - e1000_phy_i210, -+ e1000_phy_bcm54616, -+ e1000_phy_bcm5461s, - }; - - enum e1000_bus_type { -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c ---- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 15:48:41.403628151 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-15 09:48:09.668488140 +0000 -@@ -272,6 +272,13 @@ - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ -+ if (phy->type == e1000_phy_bcm5461s) { -+ mdic = E1000_READ_REG(hw, E1000_MDICNFG); -+ mdic &= ~E1000_MDICNFG_PHY_MASK; -+ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); -+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdic); -+ } -+ - mdic = ((offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); -@@ -331,6 +338,13 @@ - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ -+ if (phy->type == e1000_phy_bcm5461s) { -+ mdic = E1000_READ_REG(hw, E1000_MDICNFG); -+ mdic &= ~E1000_MDICNFG_PHY_MASK; -+ mdic |= (phy->addr << E1000_MDICNFG_PHY_SHIFT); -+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdic); -+ } -+ - mdic = (((u32)data) | - (offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | -@@ -1614,10 +1628,12 @@ - * depending on user settings. 
- */ - DEBUGOUT("Forcing Speed and Duplex\n"); -- ret_val = hw->phy.ops.force_speed_duplex(hw); -- if (ret_val) { -- DEBUGOUT("Error Forcing Speed and Duplex\n"); -- return ret_val; -+ if (hw->phy.ops.force_speed_duplex) { -+ ret_val = hw->phy.ops.force_speed_duplex(hw); -+ if (ret_val) { -+ DEBUGOUT("Error Forcing Speed and Duplex\n"); -+ return ret_val; -+ } - } - } - -@@ -3407,3 +3423,67 @@ - - return ready; - } -+ -+/** -+ * igb_phy_init_script_5461s - Inits the BCM5461S PHY -+ * @hw: pointer to the HW structure -+ * -+ * Initializes a Broadcom Gigabit PHY. -+ **/ -+s32 igb_phy_init_script_5461s(struct e1000_hw *hw) -+{ -+ u16 mii_reg_led = 0; -+ -+ /* 1. Speed LED (Set the Link LED mode), Shadow 00010, 0x1C.bit2=1 */ -+ hw->phy.ops.write_reg(hw, 0x1C, 0x0800); -+ hw->phy.ops.read_reg(hw, 0x1C, &mii_reg_led); -+ mii_reg_led |= 0x0004; -+ hw->phy.ops.write_reg(hw, 0x1C, mii_reg_led | 0x8000); -+ -+ /* 2. Active LED (Set the Link LED mode), Shadow 01001, 0x1C.bit4=1, 0x10.bit5=0 */ -+ hw->phy.ops.write_reg(hw, 0x1C, 0x2400); -+ hw->phy.ops.read_reg(hw, 0x1C, &mii_reg_led); -+ mii_reg_led |= 0x0010; -+ hw->phy.ops.write_reg(hw, 0x1C, mii_reg_led | 0x8000); -+ hw->phy.ops.read_reg(hw, 0x10, &mii_reg_led); -+ mii_reg_led &= 0xffdf; -+ hw->phy.ops.write_reg(hw, 0x10, mii_reg_led); -+ -+ return 0; -+} -+ -+ -+/** -+ * igb_get_phy_info_5461s - Retrieve 5461s PHY information -+ * @hw: pointer to the HW structure -+ * -+ * Read PHY status to determine if link is up. If link is up, then -+ * set/determine 10base-T extended distance and polarity correction. Read -+ * PHY port status to determine MDI/MDIx and speed. Based on the speed, -+ * determine on the cable length, local and remote receiver. -+ **/ -+s32 igb_get_phy_info_5461s(struct e1000_hw *hw) -+{ -+ struct e1000_phy_info *phy = &hw->phy; -+ s32 ret_val; -+ bool link; -+ -+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); -+ if (ret_val) -+ goto out; -+ -+ if (!link) { -+ ret_val = -E1000_ERR_CONFIG; -+ goto out; -+ } -+ -+ phy->polarity_correction = true; -+ -+ phy->is_mdix = true; -+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; -+ phy->local_rx = e1000_1000t_rx_status_ok; -+ phy->remote_rx = e1000_1000t_rx_status_ok; -+ -+out: -+ return ret_val; -+} -diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h ---- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 15:48:41.403628151 +0000 -+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 17:21:08.243701801 +0000 -@@ -74,6 +74,8 @@ - s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success); - s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); -+s32 igb_phy_init_script_5461s(struct e1000_hw *hw); -+s32 igb_get_phy_info_5461s(struct e1000_hw *hw); - enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); - s32 e1000_determine_phy_address(struct e1000_hw *hw); - s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); -diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c ---- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 15:48:41.411628151 +0000 -+++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 19:07:51.867786828 +0000 -@@ -8607,11 +8607,19 @@ - case SIOCGMIIREG: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; -+ adapter->hw.phy.addr = data->phy_id; - if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, - &data->val_out)) - return -EIO; - break; - case SIOCSMIIREG: -+ if 
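/*
 * The igb_phy_init_script_5461s() LED setup above uses Broadcom's shadow
 * register scheme on PHY register 0x1C: bits 14:10 select one of several
 * shadow registers, and bit 15 set on a write commits the value.  The writes
 * of 0x0800 and 0x2400 are shadow selectors 00010b and 01001b shifted into
 * bits 14:10.  A hypothetical helper capturing the select / read / modify /
 * write-back pattern (illustration only, not part of the patch):
 */
static s32 bcm_shadow_0x1c_rmw(struct e1000_hw *hw, u16 shadow_sel,
			       u16 set_bits, u16 clear_bits)
{
	u16 val = 0;

	/* select the shadow register: shadow_sel goes into bits 14:10 */
	hw->phy.ops.write_reg(hw, 0x1C, (u16)(shadow_sel << 10));
	hw->phy.ops.read_reg(hw, 0x1C, &val);

	val = (val | set_bits) & (u16)~clear_bits;

	/* bit 15 = write enable for the currently selected shadow register */
	return hw->phy.ops.write_reg(hw, 0x1C, val | 0x8000);
}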
(!capable(CAP_NET_ADMIN)) -+ return -EPERM; -+ adapter->hw.phy.addr = data->phy_id; -+ if (igb_e1000_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, -+ data->val_in)) -+ return -EIO; -+ break; - default: - return -EOPNOTSUPP; - } diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom-update.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom-update.patch deleted file mode 100644 index 3deb7cdb..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom-update.patch +++ /dev/null @@ -1,141 +0,0 @@ -Update SFF8436 EEPROM driver - -From: Shuotian Cheng - -Support newer kernel and remove eeprom_class dependency ---- - drivers/misc/eeprom/sff_8436_eeprom.c | 27 +++++++-------------------- - include/linux/i2c/sff-8436.h | 2 -- - 2 files changed, 7 insertions(+), 22 deletions(-) - -diff --git a/drivers/misc/eeprom/sff_8436_eeprom.c b/drivers/misc/eeprom/sff_8436_eeprom.c -index 0b6bf31..f5627bf 100644 ---- a/drivers/misc/eeprom/sff_8436_eeprom.c -+++ b/drivers/misc/eeprom/sff_8436_eeprom.c -@@ -82,7 +82,6 @@ - #include - #include - #include --#include - - #include - #include -@@ -116,7 +115,6 @@ struct sff_8436_data { - unsigned num_addresses; - - u8 data[SFF_8436_EEPROM_SIZE]; -- struct eeprom_device *eeprom_dev; - - struct i2c_client *client[]; - }; -@@ -421,10 +419,9 @@ static ssize_t sff_8436_eeprom_write(struct sff_8436_data *sff_8436, const char - { - struct i2c_client *client = sff_8436->client[0]; - struct i2c_msg msg; -- ssize_t status; - unsigned long timeout, write_time; - unsigned next_page; -- int i = 0; -+ int status, i = 0; - - /* write max is at most a page */ - if (count > sff_8436->write_max) -@@ -528,7 +525,7 @@ static ssize_t sff_8436_eeprom_update_client(struct sff_8436_data *sff_8436, - page = sff_8436_translate_offset(sff_8436, &phy_offset); - - dev_dbg(&client->dev, -- "sff_8436_eeprom_update_client off %lld page:%d phy_offset:%lld, count:%d, opcode:%d\n", -+ "sff_8436_eeprom_update_client off %lld page:%d phy_offset:%lld, count:%zu, opcode:%d\n", - off, page, phy_offset, count, opcode); - if (page > 0) { - ret = sff_8436_write_page_reg(sff_8436, page); -@@ -705,18 +702,18 @@ static ssize_t sff_8436_read_write(struct sff_8436_data *sff_8436, - pending_len = pending_len - page_len; - - dev_dbg(&client->dev, -- "sff_read off %lld len %d page_start_offset %lld page_offset %lld page_len %d pending_len %d\n", -+ "sff_read off %lld len %zu page_start_offset %lld page_offset %lld page_len %zu pending_len %zu\n", - off, len, page_start_offset, page_offset, page_len, pending_len); - - /* Refresh the data from offset for specified len */ - ret = sff_8436_eeprom_update_client(sff_8436, page_offset, page_len, opcode); - if (ret != page_len) { - if (err_timeout) { -- dev_dbg(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", -+ dev_dbg(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %zu failed %d!\n", - (page ? "Upper" : "Lower"), (page ? (page-1) : page), page_offset, page_len, ret); - goto err; - } else { -- dev_err(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", -+ dev_err(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %zu failed %d!\n", - (page ? "Upper" : "Lower"), (page ? 
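/*
 * With the SIOCGMIIREG/SIOCSMIIREG hunk above, the Broadcom PHY behind the
 * i354 MAC can be inspected from user space through the standard MII ioctls
 * (CAP_NET_ADMIN required).  A minimal user-space sketch; the interface name
 * and PHY address 5 are examples only, not part of the patch:
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* example interface */
	mii->phy_id  = 5;                             /* BCM5461S address used above */
	mii->reg_num = MII_BMSR;                      /* basic mode status register */

	if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
		printf("BMSR = 0x%04x\n", mii->val_out);

	close(fd);
	return 0;
}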
(page-1) : page), page_offset, page_len, ret); - } - } -@@ -780,15 +777,13 @@ static ssize_t sff_8436_macc_write(struct memory_accessor *macc, const char *buf - - /*-------------------------------------------------------------------------*/ - --static int __devexit sff_8436_remove(struct i2c_client *client) -+static int sff_8436_remove(struct i2c_client *client) - { - struct sff_8436_data *sff_8436; - - sff_8436 = i2c_get_clientdata(client); - sysfs_remove_bin_file(&client->dev.kobj, &sff_8436->bin); - -- eeprom_device_unregister(sff_8436->eeprom_dev); -- - kfree(sff_8436->writebuf); - kfree(sff_8436); - return 0; -@@ -821,7 +816,6 @@ static int sff_8436_eeprom_probe(struct i2c_client *client, - - chip.setup = NULL; - chip.context = NULL; -- chip.eeprom_data = NULL; - } - - if (!is_power_of_2(chip.byte_len)) -@@ -923,13 +917,6 @@ static int sff_8436_eeprom_probe(struct i2c_client *client, - if (err) - goto err_struct; - -- sff_8436->eeprom_dev = eeprom_device_register(&client->dev, chip.eeprom_data); -- if (IS_ERR(sff_8436->eeprom_dev)) { -- dev_err(&client->dev, "error registering eeprom device.\n"); -- err = PTR_ERR(sff_8436->eeprom_dev); -- goto err_sysfs_cleanup; -- } -- - i2c_set_clientdata(client, sff_8436); - - dev_info(&client->dev, "%zu byte %s EEPROM, %s\n", -@@ -968,7 +955,7 @@ static struct i2c_driver sff_8436_driver = { - .owner = THIS_MODULE, - }, - .probe = sff_8436_eeprom_probe, -- .remove = __devexit_p(sff_8436_remove), -+ .remove = sff_8436_remove, - .id_table = sff8436_ids, - }; - -diff --git a/include/linux/i2c/sff-8436.h b/include/linux/i2c/sff-8436.h -index cd46896..4df48ad 100644 ---- a/include/linux/i2c/sff-8436.h -+++ b/include/linux/i2c/sff-8436.h -@@ -3,7 +3,6 @@ - - #include - #include --#include - - /* - * As seen through Linux I2C, differences between the most common types of I2C -@@ -27,7 +26,6 @@ struct sff_8436_platform_data { - - void (*setup)(struct memory_accessor *, void *context); - void *context; -- struct eeprom_platform_data *eeprom_data; /* extra data for the eeprom_class */ - }; - - #endif /* _LINUX_SFF_8436_H */ diff --git a/packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom.patch b/packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom.patch deleted file mode 100644 index 86d8c3e0..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/driver-support-sff-8436-eeprom.patch +++ /dev/null @@ -1,1086 +0,0 @@ -Driver to expose eeprom information including DOM for QSFPs - -From: Cumulus Networks - - ---- - drivers/misc/eeprom/Kconfig | 12 - drivers/misc/eeprom/Makefile | 1 - drivers/misc/eeprom/sff_8436_eeprom.c | 995 +++++++++++++++++++++++++++++++++ - include/linux/i2c/sff-8436.h | 33 + - 4 files changed, 1041 insertions(+) - create mode 100644 drivers/misc/eeprom/sff_8436_eeprom.c - create mode 100644 include/linux/i2c/sff-8436.h - -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 9536852..484e3e1 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -96,6 +96,18 @@ config EEPROM_DIGSY_MTC_CFG - - If unsure, say N. - -+config EEPROM_SFF_8436 -+ tristate "SFF-8436 QSFP EEPROMs support" -+ depends on I2C && SYSFS -+ help -+ If you say yes here you get read-only support for the EEPROM of -+ the QSFPs which are implemented as per SFF-8436. -+ -+ All other features of this chip should be accessed via i2c-dev. -+ -+ This driver can also be built as a module. If so, the module -+ will be called sff_8436. 
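/*
 * The sff8436 driver added below binds by I2C device name.  A platform would
 * normally declare each QSFP EEPROM at the conventional 0x50 address on the
 * mux channel serving that port; the bus number here is only an example, and
 * devices can equally be instantiated at run time via the i2c sysfs
 * new_device interface.  Sketch only, not part of the patch:
 */
#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info example_qsfp_eeprom = {
	I2C_BOARD_INFO("sff8436", 0x50),   /* name must match sff8436_ids[] below */
};

static int __init example_qsfp_register(void)
{
	struct i2c_adapter *adap = i2c_get_adapter(2);  /* example bus number */
	struct i2c_client *client;

	if (!adap)
		return -ENODEV;
	client = i2c_new_device(adap, &example_qsfp_eeprom);
	i2c_put_adapter(adap);
	return client ? 0 : -ENODEV;
}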
-+ - config EEPROM_SUNXI_SID - tristate "Allwinner sunxi security ID support" - depends on ARCH_SUNXI && SYSFS -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 9507aec..235b5cc 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -6,3 +6,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o - obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o - obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o - obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o -+obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/sff_8436_eeprom.c b/drivers/misc/eeprom/sff_8436_eeprom.c -new file mode 100644 -index 0000000..0b6bf31 ---- /dev/null -+++ b/drivers/misc/eeprom/sff_8436_eeprom.c -@@ -0,0 +1,995 @@ -+/* -+ * sff_8436_eeprom.c - handle most SFF-8436 based QSFP EEPROMs -+ * -+ * Copyright (C) 2014 Cumulus networks Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Freeoftware Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ */ -+ -+/* -+ * Description: -+ * a) SFF 8436 based qsfp read/write transactions are just like the at24 eeproms -+ * b) The register/memory layout is up to 5 128 byte pages defined by a "pages valid" -+ * register and switched via a "page select" register as explained in below diagram. -+ * c) 256 bytes are mapped at a time. page 0 is always mapped to the first 128 bytes and -+ * the other 4 pages are selectively mapped to the second 128 bytes -+ * -+ * SFF 8436 based QSFP Memory Map -+ * -+ * 2-Wire Serial Address: 1010000x -+ * -+ * Lower Page 00h (128 bytes) -+ * ===================== -+ * | | -+ * | | -+ * | | -+ * | | -+ * | | -+ * | | -+ * | | -+ * | | -+ * | | -+ * | | -+ * |Page Select Byte(127)| -+ * ===================== -+ * | -+ * | -+ * | -+ * | -+ * V -+ * ----------------------------------------------------------------- -+ * | | | | -+ * | | | | -+ * | | | | -+ * | | | | -+ * | | | | -+ * | | | | -+ * | | | | -+ * | | | | -+ * | | | | -+ * V V V V -+ * ------------- ---------------- ----------------- -------------- -+ * | | | | | | | | -+ * | Upper | | Upper | | Upper | | Upper | -+ * | Page 00h | | Page 01h | | Page 02h | | Page 03h | -+ * | | | (Optional) | | (Optional) | | (Optional | -+ * | | | | | | | for Cable | -+ * | | | | | | | Assemblies) | -+ * | ID | | AST | | User | | | -+ * | Fields | | Table | | EEPROM Data | | | -+ * | | | | | | | | -+ * | | | | | | | | -+ * | | | | | | | | -+ * ------------- ---------------- ----------------- -------------- -+ * -+ * -+ **/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#define SFF_8436_EEPROM_SIZE 5*128 -+#define SFF_8436_MAX_PAGE_COUNT 5 -+#define SFF_8436_MMAP_SIZE 256 -+#define SFF_8436_PAGE_SELECT_REG 0x7F -+ -+#define SFF_8436_OPTION_4_OFFSET 0xC3 -+#define SFF_8436_PAGE_02_PRESENT (1 << 7) /* Memory Page 02 present */ -+#define SFF_8436_PAGE_01_PRESENT (1 << 6) /* Memory Page 01 present */ -+#define SFF_8436_STATUS_2_OFFSET 0x02 -+#define SFF_8436_STATUS_PAGE_03_PRESENT_L (1 << 2) /* Flat Memory:0- Paging, 1- Page 0 only */ -+ -+struct sff_8436_data { -+ struct sff_8436_platform_data chip; -+ struct memory_accessor macc; -+ int use_smbus; -+ -+ /* -+ * Lock protects against activities from other Linux tasks, -+ * but not from 
changes by other I2C masters. -+ */ -+ struct mutex lock; -+ struct bin_attribute bin; -+ -+ u8 *writebuf; -+ unsigned write_max; -+ -+ unsigned num_addresses; -+ -+ u8 data[SFF_8436_EEPROM_SIZE]; -+ struct eeprom_device *eeprom_dev; -+ -+ struct i2c_client *client[]; -+}; -+ -+typedef enum qsfp_opcode { -+ QSFP_READ_OP = 0, -+ QSFP_WRITE_OP = 1 -+} qsfp_opcode_e; -+ -+/* -+ * This parameter is to help this driver avoid blocking other drivers out -+ * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C -+ * clock, one 256 byte read takes about 1/43 second which is excessive; -+ * but the 1/170 second it takes at 400 kHz may be quite reasonable; and -+ * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible. -+ * -+ * This value is forced to be a power of two so that writes align on pages. -+ */ -+static unsigned io_limit = 128; -+ -+/* -+ *pecs often allow 5 msec for a page write, sometimes 20 msec; -+ * it's important to recover from write timeouts. -+ */ -+static unsigned write_timeout = 25; -+ -+#define SFF_8436_PAGE_SIZE 128 -+#define SFF_8436_SIZE_BYTELEN 5 -+#define SFF_8436_SIZE_FLAGS 8 -+ -+#define SFF_8436_BITMASK(x) (BIT(x) - 1) -+ -+ -+/* create non-zero magic value for given eeprom parameters */ -+#define SFF_8436_DEVICE_MAGIC(_len, _flags) \ -+ ((1 << SFF_8436_SIZE_FLAGS | (_flags)) \ -+ << SFF_8436_SIZE_BYTELEN | ilog2(_len)) -+ -+static const struct i2c_device_id sff8436_ids[] = { -+ { "sff8436",SFF_8436_DEVICE_MAGIC(2048 / 8, 0) }, -+ { /* END OF LIST */ } -+}; -+MODULE_DEVICE_TABLE(i2c, sff8436_ids); -+ -+/*-------------------------------------------------------------------------*/ -+/* -+ * This routine computes the addressing information to be used for a given r/w request. -+ * Assumes that sanity checks for offset happened at sysfs-layer. -+ * Offset within Lower Page 00h and Upper Page 00h are not recomputed -+ */ -+static uint8_t sff_8436_translate_offset(struct sff_8436_data *sff_8436, -+ loff_t *offset) -+{ -+ unsigned page = 0; -+ -+ if (*offset < SFF_8436_MMAP_SIZE) { -+ return 0; -+ } -+ -+ page = (*offset >> 7)-1; -+ -+ if (page > 0 ) { -+ *offset = 0x80 + (*offset & 0x7f); -+ } else { -+ *offset &= 0xff; -+ } -+ -+ return page; -+} -+ -+static int sff_8436_read_reg(struct sff_8436_data *sff_8436, -+ uint8_t reg, uint8_t *val) -+{ -+ int count = 1, i = 0; -+ struct i2c_client *client = sff_8436->client[0]; -+ struct i2c_msg msg[2]; -+ u8 msgbuf[2]; -+ ssize_t status; -+ unsigned long timeout, read_time; -+ -+ memset(msg, 0, sizeof(msg)); -+ -+ /* -+ * Writes fail if the previous one didn't complete yet. We may -+ * loop a few times until this one succeeds, waiting at least -+ * long enough for one entire page write to work. 
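/*
 * The memory-map comment above describes a 640-byte linear image: bytes
 * 0..255 are the always-addressable lower page 00h plus upper page 00h, and
 * each further 128-byte block is one of the selectable upper pages 01h-03h.
 * A standalone illustration of the arithmetic in sff_8436_translate_offset()
 * (example offsets only, not part of the patch):
 */
#include <stdio.h>

static unsigned translate(unsigned long *offset)
{
	unsigned page;

	if (*offset < 256)                    /* lower 00h + upper 00h: unchanged */
		return 0;
	page = (unsigned)(*offset >> 7) - 1;  /* 256..383 -> 1, 384..511 -> 2, ... */
	*offset = 0x80 + (*offset & 0x7f);    /* always maps into the upper half */
	return page;
}

int main(void)
{
	unsigned long offsets[] = { 100, 200, 300, 520 };
	unsigned i;

	for (i = 0; i < 4; i++) {
		unsigned long dev_off = offsets[i];
		unsigned page = translate(&dev_off);

		/* e.g. 100 -> page 0, 0x64; 300 -> page 1, 0xac; 520 -> page 3, 0x88 */
		printf("linear %3lu -> page %u, device offset 0x%02lx\n",
		       offsets[i], page, dev_off);
	}
	return 0;
}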
-+ */ -+ timeout = jiffies + msecs_to_jiffies(write_timeout); -+ do { -+ read_time = jiffies; -+ switch (sff_8436->use_smbus) { -+ case I2C_SMBUS_I2C_BLOCK_DATA: -+ status = i2c_smbus_read_i2c_block_data(client, -+ reg, count, val); -+ break; -+ case I2C_SMBUS_WORD_DATA: -+ status = i2c_smbus_read_word_data(client, reg); -+ -+ if (status >= 0) { -+ *val = status & 0xff; -+ status = count; -+ } -+ break; -+ case I2C_SMBUS_BYTE_DATA: -+ status = i2c_smbus_read_byte_data(client, reg); -+ -+ if (status >= 0) { -+ *val = status; -+ status = count; -+ } -+ break; -+ -+ default: -+ i = 0; -+ msgbuf[i++] = reg; -+ -+ msg[0].addr = client->addr; -+ msg[0].buf = msgbuf; -+ msg[0].len = i; -+ -+ msg[1].addr = client->addr; -+ msg[1].flags = I2C_M_RD; -+ msg[1].buf = val; -+ msg[1].len = count; -+ -+ status = i2c_transfer(client->adapter, msg, 2); -+ if (status == 2) -+ status = count; -+ break; -+ } -+ dev_dbg(&client->dev, "read (using smbus %d) %d@%d --> %zd (%ld)\n", -+ sff_8436->use_smbus, count, reg, status, jiffies); -+ -+ if (status == count) -+ return count; -+ -+ /* REVISIT: at HZ=100, this is sloooow */ -+ msleep(1); -+ } while (time_before(read_time, timeout)); -+ -+ return -ETIMEDOUT; -+} -+ -+static int sff_8436_write_reg(struct sff_8436_data *sff_8436, -+ uint8_t reg, uint8_t val) -+{ -+ uint8_t data[2] = { reg, val }; -+ int count = 1; -+ struct i2c_client *client = sff_8436->client[0]; -+ struct i2c_msg msg; -+ ssize_t status; -+ unsigned long timeout, write_time; -+ -+ /* -+ * Writes fail if the previous one didn't complete yet. We may -+ * loop a few times until this one succeeds, waiting at least -+ * long enough for one entire page write to work. -+ */ -+ timeout = jiffies + msecs_to_jiffies(write_timeout); -+ do { -+ write_time = jiffies; -+ switch (sff_8436->use_smbus) { -+ case I2C_SMBUS_I2C_BLOCK_DATA: -+ status = i2c_smbus_write_i2c_block_data(client, -+ reg, count, &val); -+ if (status == 0) -+ status = count; -+ break; -+ case I2C_SMBUS_WORD_DATA: -+ case I2C_SMBUS_BYTE_DATA: -+ status = i2c_smbus_write_byte_data(client, reg, val); -+ -+ if (status == 0) -+ status = count; -+ break; -+ default: -+ msg.addr = client->addr; -+ msg.flags = 0; -+ msg.len = sizeof(data); -+ msg.buf = (char *) data; -+ -+ status = i2c_transfer(client->adapter, &msg, 1); -+ if (status == 1) -+ status = count; -+ break; -+ } -+ dev_dbg(&client->dev, "write (using smbus %d) %d@%d --> %zd (%ld)\n", -+ sff_8436->use_smbus, count, reg, status, jiffies); -+ -+ if (status == count) -+ return count; -+ -+ /* REVISIT: at HZ=100, this is sloooow */ -+ msleep(1); -+ } while (time_before(write_time, timeout)); -+ -+ return -ETIMEDOUT; -+} -+ -+static int sff_8436_write_page_reg(struct sff_8436_data *sff_8436, -+ uint8_t val) -+{ -+ return sff_8436_write_reg(sff_8436, SFF_8436_PAGE_SELECT_REG, val); -+} -+ -+static ssize_t sff_8436_eeprom_read(struct sff_8436_data *sff_8436, char *buf, -+ unsigned offset, size_t count) -+{ -+ struct i2c_msg msg[2]; -+ u8 msgbuf[2]; -+ struct i2c_client *client = sff_8436->client[0]; -+ unsigned long timeout, read_time; -+ int status, i; -+ -+ memset(msg, 0, sizeof(msg)); -+ -+ switch (sff_8436->use_smbus) { -+ case I2C_SMBUS_I2C_BLOCK_DATA: -+ /*smaller eeproms can work given some SMBus extension calls */ -+ if (count > I2C_SMBUS_BLOCK_MAX) -+ count = I2C_SMBUS_BLOCK_MAX; -+ break; -+ case I2C_SMBUS_WORD_DATA: -+ /* Check for odd length transaction */ -+ count = (count == 1) ? 
1 : 2; -+ break; -+ case I2C_SMBUS_BYTE_DATA: -+ count = 1; -+ break; -+ default: -+ /* -+ * When we have a better choice than SMBus calls, use a -+ * combined I2C message. Write address; then read up to -+ * io_limit data bytes. Note that read page rollover helps us -+ * here (unlike writes). msgbuf is u8 and will cast to our -+ * needs. -+ */ -+ i = 0; -+ msgbuf[i++] = offset; -+ -+ msg[0].addr = client->addr; -+ msg[0].buf = msgbuf; -+ msg[0].len = i; -+ -+ msg[1].addr = client->addr; -+ msg[1].flags = I2C_M_RD; -+ msg[1].buf = buf; -+ msg[1].len = count; -+ } -+ -+ /* -+ * Reads fail if the previous write didn't complete yet. We may -+ * loop a few times until this one succeeds, waiting at least -+ * long enough for one entire page write to work. -+ */ -+ timeout = jiffies + msecs_to_jiffies(write_timeout); -+ do { -+ read_time = jiffies; -+ -+ switch (sff_8436->use_smbus) { -+ case I2C_SMBUS_I2C_BLOCK_DATA: -+ status = i2c_smbus_read_i2c_block_data(client, offset, -+ count, buf); -+ break; -+ case I2C_SMBUS_WORD_DATA: -+ status = i2c_smbus_read_word_data(client, offset); -+ if (status >= 0) { -+ buf[0] = status & 0xff; -+ if (count == 2) -+ buf[1] = status >> 8; -+ status = count; -+ } -+ break; -+ case I2C_SMBUS_BYTE_DATA: -+ status = i2c_smbus_read_byte_data(client, offset); -+ if (status >= 0) { -+ buf[0] = status; -+ status = count; -+ } -+ break; -+ default: -+ status = i2c_transfer(client->adapter, msg, 2); -+ if (status == 2) -+ status = count; -+ } -+ -+ dev_dbg(&client->dev, "eeprom read %zu@%d --> %d (%ld)\n", -+ count, offset, status, jiffies); -+ -+ if (status == count) -+ return count; -+ -+ /* REVISIT: at HZ=100, this is sloooow */ -+ msleep(1); -+ } while (time_before(read_time, timeout)); -+ -+ return -ETIMEDOUT; -+} -+ -+static ssize_t sff_8436_eeprom_write(struct sff_8436_data *sff_8436, const char *buf, -+ unsigned offset, size_t count) -+{ -+ struct i2c_client *client = sff_8436->client[0]; -+ struct i2c_msg msg; -+ ssize_t status; -+ unsigned long timeout, write_time; -+ unsigned next_page; -+ int i = 0; -+ -+ /* write max is at most a page */ -+ if (count > sff_8436->write_max) -+ count = sff_8436->write_max; -+ -+ /* Never roll over backwards, to the start of this page */ -+ next_page = roundup(offset + 1, SFF_8436_PAGE_SIZE); -+ if (offset + count > next_page) -+ count = next_page - offset; -+ -+ switch (sff_8436->use_smbus) { -+ case I2C_SMBUS_I2C_BLOCK_DATA: -+ /*smaller eeproms can work given some SMBus extension calls */ -+ if (count > I2C_SMBUS_BLOCK_MAX) -+ count = I2C_SMBUS_BLOCK_MAX; -+ break; -+ case I2C_SMBUS_WORD_DATA: -+ /* Check for odd length transaction */ -+ count = (count == 1) ? 1 : 2; -+ break; -+ case I2C_SMBUS_BYTE_DATA: -+ count = 1; -+ break; -+ default: -+ /* If we'll use I2C calls for I/O, set up the message */ -+ msg.addr = client->addr; -+ msg.flags = 0; -+ -+ /* msg.buf is u8 and casts will mask the values */ -+ msg.buf = sff_8436->writebuf; -+ -+ msg.buf[i++] = offset; -+ memcpy(&msg.buf[i], buf, count); -+ msg.len = i + count; -+ break; -+ } -+ -+ /* -+ * Reads fail if the previous write didn't complete yet. We may -+ * loop a few times until this one succeeds, waiting at least -+ * long enough for one entire page write to work. 
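/*
 * The clamp at the top of sff_8436_eeprom_write() above keeps every transfer
 * inside a single 128-byte page: the request is first limited to write_max,
 * then cut at the next page boundary, and the caller's loop issues whatever
 * remains as follow-up transfers.  Standalone illustration (sizes are
 * examples only, not part of the patch):
 */
#include <stdio.h>

#define SFF_PAGE_SIZE 128

/* round x up to the next multiple of step (step is a power of two) */
static unsigned roundup_pow2(unsigned x, unsigned step)
{
	return (x + step - 1) & ~(step - 1);
}

static unsigned clamp_write(unsigned offset, unsigned count, unsigned write_max)
{
	unsigned next_page;

	if (count > write_max)
		count = write_max;
	next_page = roundup_pow2(offset + 1, SFF_PAGE_SIZE);
	if (offset + count > next_page)
		count = next_page - offset;     /* stop at the page boundary */
	return count;
}

int main(void)
{
	/* 40-byte write at offset 120: first chunk is 8 bytes (120..127) */
	printf("%u\n", clamp_write(120, 40, 128));
	/* 200-byte write at offset 0 with write_max 128: first chunk is 128 */
	printf("%u\n", clamp_write(0, 200, 128));
	return 0;
}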
-+ */ -+ timeout = jiffies + msecs_to_jiffies(write_timeout); -+ do { -+ write_time = jiffies; -+ -+ switch (sff_8436->use_smbus) { -+ case I2C_SMBUS_I2C_BLOCK_DATA: -+ status = i2c_smbus_write_i2c_block_data(client, -+ offset, count, buf); -+ if (status == 0) -+ status = count; -+ break; -+ case I2C_SMBUS_WORD_DATA: -+ if (count == 2) { -+ status = i2c_smbus_write_word_data( -+ client,offset,(u16)((buf[0]) | -+ (buf[1] << 8))); -+ } else { -+ /* count = 1 */ -+ status = i2c_smbus_write_byte_data( -+ client, offset, buf[0]); -+ } -+ if (status == 0) -+ status = count; -+ break; -+ case I2C_SMBUS_BYTE_DATA: -+ status = i2c_smbus_write_byte_data(client, offset, buf[0]); -+ if (status == 0) -+ status = count; -+ break; -+ default: -+ status = i2c_transfer(client->adapter, &msg, 1); -+ if (status == 1) -+ status = count; -+ break; -+ } -+ -+ dev_dbg(&client->dev, "eeprom write %zu@%d --> %d (%ld)\n", -+ count, offset, status, jiffies); -+ -+ if (status == count) -+ return count; -+ -+ /* REVISIT: at HZ=100, this is sloooow */ -+ msleep(1); -+ } while (time_before(write_time, timeout)); -+ -+ return -ETIMEDOUT; -+} -+ -+static ssize_t sff_8436_eeprom_update_client(struct sff_8436_data *sff_8436, -+ loff_t off, size_t count, qsfp_opcode_e opcode) -+{ -+ struct i2c_client *client = sff_8436->client[0]; -+ ssize_t retval = 0; -+ u8 page = 0; -+ loff_t phy_offset = off; -+ int ret = 0; -+ -+ page = sff_8436_translate_offset(sff_8436, &phy_offset); -+ -+ dev_dbg(&client->dev, -+ "sff_8436_eeprom_update_client off %lld page:%d phy_offset:%lld, count:%d, opcode:%d\n", -+ off, page, phy_offset, count, opcode); -+ if (page > 0) { -+ ret = sff_8436_write_page_reg(sff_8436, page); -+ if (ret < 0) { -+ dev_err(&client->dev, -+ "sff_8436_write_page_reg for page %d failed ret:%d!\n", -+ page, ret); -+ return ret; -+ } -+ } -+ -+ while (count) { -+ ssize_t status; -+ -+ if (opcode == QSFP_READ_OP) { -+ status = sff_8436_eeprom_read(sff_8436, (char *)(&sff_8436->data[off]), phy_offset, count); -+ } else { -+ status = sff_8436_eeprom_write(sff_8436, (char *)(&sff_8436->data[off]), phy_offset, count); -+ } -+ if (status <= 0) { -+ if (retval == 0) -+ retval = status; -+ break; -+ } -+ phy_offset += status; -+ off += status; -+ count -= status; -+ retval += status; -+ } -+ -+ -+ if (page > 0) { -+ ret = sff_8436_write_page_reg(sff_8436, 0); -+ if (ret < 0) { -+ dev_err(&client->dev, -+ "sff_8436_write_page_reg for page 0 failed ret:%d!\n", ret); -+ return ret; -+ } -+ } -+ return retval; -+} -+ -+static ssize_t sff_8436_read_write(struct sff_8436_data *sff_8436, -+ char *buf, loff_t off, size_t len, qsfp_opcode_e opcode) -+{ -+ struct i2c_client *client = sff_8436->client[0]; -+ u8 page; -+ u8 refresh_page = 0; -+ int ret = 0; -+ u8 val = 0; -+ int err_timeout = 0; -+ size_t pending_len = 0, page_len = 0; -+ loff_t page_offset = 0, page_start_offset = 0; -+ -+ if (unlikely(!len)) -+ return len; -+ -+ if (off > SFF_8436_EEPROM_SIZE) -+ return 0; -+ -+ if (off + len > SFF_8436_EEPROM_SIZE) -+ len = SFF_8436_EEPROM_SIZE - off; -+ -+ if (opcode == QSFP_READ_OP) { -+ memset(sff_8436->data, 0xff, SFF_8436_EEPROM_SIZE); -+ } else if (opcode == QSFP_WRITE_OP) { -+ memcpy(&sff_8436->data[off], buf, len); -+ } -+ -+ /* -+ * Read data from chip, protecting against concurrent updates -+ * from this host, but not from other I2C masters. 
-+ */ -+ mutex_lock(&sff_8436->lock); -+ -+ /* -+ * Refresh pages which covers the requested data -+ * from offset to off + len -+ * Only refresh pages which contain requested bytes -+ * -+ */ -+ -+ pending_len = len; -+ -+ for (page = off >> 7; page <= (off + len - 1) >> 7; page++) { -+ refresh_page = 0; -+ switch (page) { -+ case 0: -+ /* Lower page 00h */ -+ refresh_page = 1; -+ err_timeout = 1; -+ break; -+ case 1: -+ /* Upper page 00h */ -+ refresh_page = 1; -+ err_timeout = 1; -+ break; -+ case 2: -+ /* Upper page 01h */ -+ ret = sff_8436_read_reg(sff_8436, SFF_8436_OPTION_4_OFFSET, &val); -+ if (ret < 0) { -+ dev_dbg(&client->dev, -+ "sff_8436_read_reg for page 01h status failed %d!\n", ret); -+ goto err; -+ } -+ if (val & SFF_8436_PAGE_01_PRESENT) { -+ refresh_page = 1; -+ } -+ break; -+ case 3: -+ /* Upper page 02h */ -+ ret = sff_8436_read_reg(sff_8436, SFF_8436_OPTION_4_OFFSET, &val); -+ if (ret < 0) { -+ dev_dbg(&client->dev, -+ "sff_8436_read_reg for page 02h status failed %d!\n", ret); -+ goto err; -+ } -+ if (val & SFF_8436_PAGE_02_PRESENT) { -+ refresh_page = 1; -+ } -+ break; -+ case 4: -+ /* Upper page 03h */ -+ ret = sff_8436_read_reg(sff_8436, SFF_8436_STATUS_2_OFFSET, &val); -+ if (ret < 0) { -+ dev_dbg(&client->dev, -+ "sff_8436_read_reg for page 03h status failed %d!\n", ret); -+ goto err; -+ } -+ if (!(val & SFF_8436_STATUS_PAGE_03_PRESENT_L)) { -+ refresh_page = 1; -+ } -+ break; -+ default: -+ /* Invalid page index */ -+ dev_err(&client->dev, "Invalid page %d!\n", page); -+ ret = -EINVAL; -+ goto err; -+ } -+ -+ if (!refresh_page) { -+ /* if page is not valid or already refreshed */ -+ continue; -+ } -+ -+ /* -+ * Compute the offset and number of bytes to be read/write -+ * w.r.t requested page -+ * -+ * 1. start at offset 0 (within the page), and read/write the entire page -+ * 2. start at offset 0 (within the page) and read/write less than entire page -+ * 3. start at an offset not equal to 0 and read/write the rest of the page -+ * 4. start at an offset not equal to 0 and read/write less than (end of page - offset) -+ * -+ */ -+ page_start_offset = page * SFF_8436_PAGE_SIZE; -+ -+ if (page_start_offset < off) { -+ page_offset = off; -+ if (off + pending_len < page_start_offset + SFF_8436_PAGE_SIZE) { -+ page_len = pending_len; -+ } else { -+ page_len = SFF_8436_PAGE_SIZE - off; -+ } -+ } else { -+ page_offset = page_start_offset; -+ if (pending_len > SFF_8436_PAGE_SIZE) { -+ page_len = SFF_8436_PAGE_SIZE; -+ } else { -+ page_len = pending_len; -+ } -+ } -+ -+ pending_len = pending_len - page_len; -+ -+ dev_dbg(&client->dev, -+ "sff_read off %lld len %d page_start_offset %lld page_offset %lld page_len %d pending_len %d\n", -+ off, len, page_start_offset, page_offset, page_len, pending_len); -+ -+ /* Refresh the data from offset for specified len */ -+ ret = sff_8436_eeprom_update_client(sff_8436, page_offset, page_len, opcode); -+ if (ret != page_len) { -+ if (err_timeout) { -+ dev_dbg(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", -+ (page ? "Upper" : "Lower"), (page ? (page-1) : page), page_offset, page_len, ret); -+ goto err; -+ } else { -+ dev_err(&client->dev, "sff_8436_update_client for %s page %d page_offset %lld page_len %d failed %d!\n", -+ (page ? "Upper" : "Lower"), (page ? 
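/*
 * The page loop above walks every 128-byte page touched by the request and,
 * following the four cases listed in the comment, derives the sub-range
 * (page_offset, page_len) to refresh on each pass.  The standalone snippet
 * below reproduces that split for an example read of 200 bytes starting at
 * linear offset 100 (pages 0, 1 and 2 get 28, 128 and 44 bytes).  Example
 * values only, not part of the patch:
 */
#include <stdio.h>

int main(void)
{
	unsigned long off = 100, len = 200;        /* example request */
	unsigned long pending = len, page;

	for (page = off >> 7; page <= (off + len - 1) >> 7; page++) {
		unsigned long start = page * 128, page_offset, page_len;

		if (start < off) {                 /* request begins inside this page */
			page_offset = off;
			page_len = (off + pending < start + 128)
					? pending : 128 - (off - start);
		} else {                           /* request covers page from its start */
			page_offset = start;
			page_len = (pending > 128) ? 128 : pending;
		}
		pending -= page_len;
		printf("page %lu: offset %lu len %lu (pending %lu)\n",
		       page, page_offset, page_len, pending);
	}
	return 0;
}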
(page-1) : page), page_offset, page_len, ret); -+ } -+ } -+ } -+ mutex_unlock(&sff_8436->lock); -+ -+ if (opcode == QSFP_READ_OP) { -+ memcpy(buf, &sff_8436->data[off], len); -+ } -+ return len; -+ -+err: -+ mutex_unlock(&sff_8436->lock); -+ -+ return ret; -+} -+ -+static ssize_t sff_8436_bin_read(struct file *filp, struct kobject *kobj, -+ struct bin_attribute *attr, -+ char *buf, loff_t off, size_t count) -+{ -+ struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); -+ struct sff_8436_data *sff_8436 = i2c_get_clientdata(client); -+ -+ return sff_8436_read_write(sff_8436, buf, off, count, QSFP_READ_OP); -+} -+ -+ -+static ssize_t sff_8436_bin_write(struct file *filp, struct kobject *kobj, -+ struct bin_attribute *attr, -+ char *buf, loff_t off, size_t count) -+{ -+ struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); -+ struct sff_8436_data *sff_8436 = i2c_get_clientdata(client); -+ -+ return sff_8436_read_write(sff_8436, buf, off, count, QSFP_WRITE_OP); -+} -+/*-------------------------------------------------------------------------*/ -+ -+/* -+ * This lets other kernel code access the eeprom data. For example, it -+ * might hold a board's Ethernet address, or board-specific calibration -+ * data generated on the manufacturing floor. -+ */ -+ -+static ssize_t sff_8436_macc_read(struct memory_accessor *macc, char *buf, -+ off_t offset, size_t count) -+{ -+ struct sff_8436_data *sff_8436 = container_of(macc, struct sff_8436_data, macc); -+ -+ return sff_8436_read_write(sff_8436, buf, offset, count, QSFP_READ_OP); -+} -+ -+static ssize_t sff_8436_macc_write(struct memory_accessor *macc, const char *buf, -+ off_t offset, size_t count) -+{ -+ struct sff_8436_data *sff_8436 = container_of(macc, struct sff_8436_data, macc); -+ -+ return sff_8436_read_write(sff_8436, buf, offset, count, QSFP_WRITE_OP); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static int __devexit sff_8436_remove(struct i2c_client *client) -+{ -+ struct sff_8436_data *sff_8436; -+ -+ sff_8436 = i2c_get_clientdata(client); -+ sysfs_remove_bin_file(&client->dev.kobj, &sff_8436->bin); -+ -+ eeprom_device_unregister(sff_8436->eeprom_dev); -+ -+ kfree(sff_8436->writebuf); -+ kfree(sff_8436); -+ return 0; -+} -+static int sff_8436_eeprom_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ int err; -+ int use_smbus = 0; -+ struct sff_8436_platform_data chip; -+ struct sff_8436_data *sff_8436; -+ kernel_ulong_t magic; -+ -+ if (client->dev.platform_data) { -+ chip = *(struct sff_8436_platform_data *)client->dev.platform_data; -+ } else { -+ /* -+ * SFF-8436 MMAP is 256 bytes long -+ */ -+ magic = SFF_8436_DEVICE_MAGIC(2048 / 8, 0); -+ chip.byte_len = BIT(magic & SFF_8436_BITMASK(SFF_8436_SIZE_BYTELEN)); -+ magic >>= SFF_8436_SIZE_BYTELEN; -+ chip.flags = magic & SFF_8436_BITMASK(SFF_8436_SIZE_FLAGS); -+ /* -+ * This is slow, but we can't know all eeproms, so we better -+ * play safe.pecifying custom eeprom-types via platform_data -+ * is recommended anyhow. 
-+ */ -+ chip.page_size = 1; -+ -+ chip.setup = NULL; -+ chip.context = NULL; -+ chip.eeprom_data = NULL; -+ } -+ -+ if (!is_power_of_2(chip.byte_len)) -+ dev_warn(&client->dev, -+ "byte_len looks suspicious (no power of 2)!\n"); -+ -+ if (!chip.page_size) { -+ dev_err(&client->dev, "page_size must not be 0!\n"); -+ err = -EINVAL; -+ goto exit; -+ } -+ if (!is_power_of_2(chip.page_size)) -+ dev_warn(&client->dev, -+ "page_size looks suspicious (no power of 2)!\n"); -+ -+ /* Use I2C operations unless we're stuck with SMBus extensions. */ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { -+ if (i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { -+ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA; -+ } else if (i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_READ_WORD_DATA)) { -+ use_smbus = I2C_SMBUS_WORD_DATA; -+ } else if (i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) { -+ use_smbus = I2C_SMBUS_BYTE_DATA; -+ } else { -+ err = -EPFNOSUPPORT; -+ goto exit; -+ } -+ } -+ -+ if (!(sff_8436 = kzalloc(sizeof(struct sff_8436_data) + sizeof(struct i2c_client *), GFP_KERNEL))) { -+ err = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&sff_8436->lock); -+ sff_8436->use_smbus = use_smbus; -+ sff_8436->chip = chip; -+ -+ /* -+ * Export the EEPROM bytes through sysfs, since that's convenient. -+ * By default, only root should see the data (maybe passwords etc) -+ */ -+ sysfs_bin_attr_init(&sff_8436->bin); -+ sff_8436->bin.attr.name = "eeprom"; -+ sff_8436->bin.attr.mode = SFF_8436_FLAG_IRUGO; -+ sff_8436->bin.read = sff_8436_bin_read; -+ sff_8436->bin.size = SFF_8436_EEPROM_SIZE; -+ -+ sff_8436->macc.read = sff_8436_macc_read; -+ -+ if (!use_smbus || -+ (i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) || -+ i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_WRITE_WORD_DATA) || -+ i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) { -+ //unsigned write_max = chip.page_size; -+ /* -+ * NOTE: AN-2079 -+ * Finisar recommends that the host implement 1 byte writes only, -+ * since this module only supports 32 byte page boundaries. -+ * 2 byte writes are acceptable for PE and Vout changes per -+ * Application Note AN-2071. 
-+ */ -+ unsigned write_max = 1; -+ -+ sff_8436->macc.write = sff_8436_macc_write; -+ -+ sff_8436->bin.write = sff_8436_bin_write; -+ sff_8436->bin.attr.mode |= S_IWUSR; -+ -+ if (write_max > io_limit) -+ write_max = io_limit; -+ if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX) -+ write_max = I2C_SMBUS_BLOCK_MAX; -+ sff_8436->write_max = write_max; -+ -+ /* buffer (data + address at the beginning) */ -+ sff_8436->writebuf = kmalloc(write_max + 2, GFP_KERNEL); -+ if (!sff_8436->writebuf) { -+ err = -ENOMEM; -+ goto exit_kfree; -+ } -+ } else { -+ dev_warn(&client->dev, -+ "cannot write due to controller restrictions."); -+ } -+ -+ memset(sff_8436->data, 0xff, SFF_8436_EEPROM_SIZE); -+ -+ sff_8436->client[0] = client; -+ -+ /* create the sysfs eeprom file */ -+ err = sysfs_create_bin_file(&client->dev.kobj, &sff_8436->bin); -+ if (err) -+ goto err_struct; -+ -+ sff_8436->eeprom_dev = eeprom_device_register(&client->dev, chip.eeprom_data); -+ if (IS_ERR(sff_8436->eeprom_dev)) { -+ dev_err(&client->dev, "error registering eeprom device.\n"); -+ err = PTR_ERR(sff_8436->eeprom_dev); -+ goto err_sysfs_cleanup; -+ } -+ -+ i2c_set_clientdata(client, sff_8436); -+ -+ dev_info(&client->dev, "%zu byte %s EEPROM, %s\n", -+ sff_8436->bin.size, client->name, -+ "read-only"); -+ -+ if (use_smbus == I2C_SMBUS_WORD_DATA || -+ use_smbus == I2C_SMBUS_BYTE_DATA) { -+ dev_notice(&client->dev, "Falling back to %s reads, " -+ "performance will suffer\n", use_smbus == -+ I2C_SMBUS_WORD_DATA ? "word" : "byte"); -+ } -+ -+ if (chip.setup) -+ chip.setup(&sff_8436->macc, chip.context); -+ -+ return 0; -+ -+err_sysfs_cleanup: -+ sysfs_remove_bin_file(&client->dev.kobj, &sff_8436->bin); -+err_struct: -+ kfree(sff_8436->writebuf); -+exit_kfree: -+ kfree(sff_8436); -+exit: -+ dev_dbg(&client->dev, "probe error %d\n", err); -+ -+ return err; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static struct i2c_driver sff_8436_driver = { -+ .driver = { -+ .name = "sff8436", -+ .owner = THIS_MODULE, -+ }, -+ .probe = sff_8436_eeprom_probe, -+ .remove = __devexit_p(sff_8436_remove), -+ .id_table = sff8436_ids, -+}; -+ -+static int __init sff_8436_init(void) -+{ -+ if (!io_limit) { -+ pr_err("sff_8436: io_limit must not be 0!\n"); -+ return -EINVAL; -+ } -+ -+ io_limit = rounddown_pow_of_two(io_limit); -+ return i2c_add_driver(&sff_8436_driver); -+} -+module_init(sff_8436_init); -+ -+static void __exit sff_8436_exit(void) -+{ -+ i2c_del_driver(&sff_8436_driver); -+} -+module_exit(sff_8436_exit); -+ -+MODULE_DESCRIPTION("Driver for SFF-8436 based QSFP EEPROMs"); -+MODULE_AUTHOR("VIDYA RAVIPATI "); -+MODULE_LICENSE("GPL"); -diff --git a/include/linux/i2c/sff-8436.h b/include/linux/i2c/sff-8436.h -new file mode 100644 -index 0000000..cd46896 ---- /dev/null -+++ b/include/linux/i2c/sff-8436.h -@@ -0,0 +1,33 @@ -+#ifndef _LINUX_SFF_8436_H -+#define _LINUX_SFF_8436_H -+ -+#include -+#include -+#include -+ -+/* -+ * As seen through Linux I2C, differences between the most common types of I2C -+ * memory include: -+ * - How much memory is available (usually specified in bit)? -+ * - What write page size does it support? -+ * - Special flags (read_only, world readable...)? -+ * -+ * If you set up a custom eeprom type, please double-check the parameters. -+ * Especially page_size needs extra care, as you risk data loss if your value -+ * is bigger than what the chip actually supports! 
-+ */ -+ -+struct sff_8436_platform_data { -+ u32 byte_len; /* size (sum of all addr) */ -+ u16 page_size; /* for writes */ -+ u8 flags; -+#define SFF_8436_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ -+#define SFF_8436_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ -+#define SFF_8436_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ -+ -+ void (*setup)(struct memory_accessor *, void *context); -+ void *context; -+ struct eeprom_platform_data *eeprom_data; /* extra data for the eeprom_class */ -+}; -+ -+#endif /* _LINUX_SFF_8436_H */ diff --git a/packages/base/any/kernels/3.16+deb8/patches/series b/packages/base/any/kernels/3.16+deb8/patches/series deleted file mode 100644 index 24a54939..00000000 --- a/packages/base/any/kernels/3.16+deb8/patches/series +++ /dev/null @@ -1,16 +0,0 @@ -driver-at24-fix-odd-length-two-byte-access.patch -driver-hwmon-max6620.patch -driver-hwmon-max6620-fix-rpm-calc.patch -driver-hwmon-max6620-update.patch -driver-hwmon-pmbus-dni_dps460.patch -driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch -driver-i2c-bus-intel-ismt-add-delay-param.patch -driver-support-sff-8436-eeprom.patch -driver-support-sff-8436-eeprom-update.patch -driver-hwmon-pmbus-add-dps460-support.patch -driver-hwmon-pmbus-ucd9200-mlnx.patch -driver-arista-piix4-mux-patch.patch -3.16-fs-overlayfs.patch -driver-igb-version-5.3.54.patch -driver-support-intel-igb-bcm5461X-phy.patch -driver-i2c-bus-intel-ismt-enable-param.patch From 05686810cd94c9c94523713ba2a710e330378686 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 22 Dec 2016 16:43:27 +0000 Subject: [PATCH 184/255] This version has been superceded by the standard 3.16 LTS version. --- .../kernel-3.16+deb8-x86-64-all/Makefile | 1 - .../kernel-3.16+deb8-x86-64-all/PKG.yml | 22 ------------------- .../builds/.gitignore | 2 -- .../builds/Makefile | 19 ---------------- 4 files changed, 44 deletions(-) delete mode 100644 packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/Makefile delete mode 100644 packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml delete mode 100644 packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/.gitignore delete mode 100644 packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/Makefile diff --git a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/Makefile b/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/Makefile deleted file mode 100644 index 003238cf..00000000 --- a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/Makefile +++ /dev/null @@ -1 +0,0 @@ -include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml deleted file mode 100644 index cf68e414..00000000 --- a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/PKG.yml +++ /dev/null @@ -1,22 +0,0 @@ - -common: - arch: amd64 - version: 1.0.0 - copyright: Copyright 2013, 2014, 2015 Big Switch Networks - maintainer: support@bigswitch.com - support: opennetworklinux@googlegroups.com - -packages: - - name: onl-kernel-3.16+deb8-x86-64-all - version: 1.0.0 - summary: Open Network Linux Kernel 3.16-deb8 for X86_64 Platforms. 
- - files: - builds/kernel-3.16+deb8-x86_64-all : $$PKG_INSTALL/ - builds/linux-3.16.7-ckt25-mbuild : $$PKG_INSTALL/mbuilds - - changelog: Change changes changes., - - - - diff --git a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/.gitignore b/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/.gitignore deleted file mode 100644 index ef51fa80..00000000 --- a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -linux-* -kernel-* diff --git a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/Makefile deleted file mode 100644 index 38555fac..00000000 --- a/packages/base/amd64/kernels/kernel-3.16+deb8-x86-64-all/builds/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# -*- Makefile -*- -############################################################ -# -# -# Copyright 2013, 2014 BigSwitch Networks, Inc. -# -# -# -# -############################################################ -THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) - -include $(ONL)/make/config.mk - -kernel: - $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16+deb8/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) - -clean: - rm -rf linux-3.16* kernel-3.16* From dadfed1e35c9347a4ec7714f565b06a312f4837e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 22 Dec 2016 18:12:58 +0000 Subject: [PATCH 185/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 84454401..73e54109 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 84454401a4d88f0d1b37e72ce670c2bbe75511b2 +Subproject commit 73e5410960dd945d49a27af3e53690988661d462 From d332bcc21b012e668d5266f5a88d9340baa17d31 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 22 Dec 2016 18:13:20 +0000 Subject: [PATCH 186/255] Spelling. --- tools/scripts/apply-patches.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/scripts/apply-patches.sh b/tools/scripts/apply-patches.sh index cb921001..e82d3156 100755 --- a/tools/scripts/apply-patches.sh +++ b/tools/scripts/apply-patches.sh @@ -37,7 +37,7 @@ if [ -f "${PATCH_SERIES}" ]; then if [[ $p = \#* ]]; then continue; fi - echo "*** Appying ${p}..." + echo "*** Applying ${p}..." if [ -x "${PATCHDIR}/${p}" ]; then "${PATCHDIR}/${p}" "${KERNDIR}" else From 604af0c9b3dc9504870c30273ab22f2fb62746c3 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Thu, 22 Dec 2016 18:13:22 +0000 Subject: [PATCH 187/255] The 3.16+deb8 package has been replaced by the 3.16 LTS package. 
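
After this change the existing kernel-3.16 anchor in
platform-config-defaults-x86-64.yml points at the LTS package. Sketch of the
resulting entry, as read from the hunk below (indentation approximate):

    kernel-3.16: &kernel-3-16
      =: kernel-3.16-lts-x86_64-all
      package: onl-kernel-3.16-lts-x86-64-all:amd64

Platform configs that reference *kernel-3-16 therefore now resolve to
onl-kernel-3.16-lts-x86-64-all:amd64, the separate kernel-3.16-lts alias is
dropped, and the amd64 upgrade package swaps its 3.16+deb8 prerequisite for
the LTS kernel and drops the 3.16+deb8 image from its kernel list.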
--- .../src/lib/platform-config-defaults-x86-64.yml | 4 ---- packages/base/amd64/upgrade/PKG.yml | 2 +- packages/base/amd64/upgrade/builds/Makefile | 1 - 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml b/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml index 7588a9e2..114ca762 100644 --- a/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml +++ b/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-x86-64.yml @@ -32,10 +32,6 @@ default: package: onl-kernel-3.18-x86-64-all:amd64 kernel-3.16: &kernel-3-16 - =: kernel-3.16+deb8-x86_64-all - package: onl-kernel-3.16+deb8-x86-64-all:amd64 - - kernel-3.16-lts: &kernel-3-16-lts =: kernel-3.16-lts-x86_64-all package: onl-kernel-3.16-lts-x86-64-all:amd64 diff --git a/packages/base/amd64/upgrade/PKG.yml b/packages/base/amd64/upgrade/PKG.yml index 213c0b3b..a905fbb7 100644 --- a/packages/base/amd64/upgrade/PKG.yml +++ b/packages/base/amd64/upgrade/PKG.yml @@ -3,7 +3,7 @@ prerequisites: - onl-kernel-3.9.6-x86-64-all:amd64 - onl-kernel-3.2-deb7-x86-64-all:amd64 - onl-kernel-3.18-x86-64-all:amd64 - - onl-kernel-3.16+deb8-x86-64-all:amd64 + - onl-kernel-3.16-lts-x86-64-all:amd64 - onl-loader-initrd:amd64 common: diff --git a/packages/base/amd64/upgrade/builds/Makefile b/packages/base/amd64/upgrade/builds/Makefile index 890edcf7..f735b07f 100644 --- a/packages/base/amd64/upgrade/builds/Makefile +++ b/packages/base/amd64/upgrade/builds/Makefile @@ -4,7 +4,6 @@ include $(ONL)/make/config.amd64.mk KERNELS := $(shell $(ONLPM) --find-file onl-kernel-3.9.6-x86-64-all:amd64 kernel-3.9.6-x86-64-all) \ $(shell $(ONLPM) --find-file onl-kernel-3.2-deb7-x86-64-all:amd64 kernel-3.2-deb7-x86_64-all) \ $(shell $(ONLPM) --find-file onl-kernel-3.18-x86-64-all:amd64 kernel-3.18-x86_64-all) \ - $(shell $(ONLPM) --find-file onl-kernel-3.16+deb8-x86-64-all:amd64 kernel-3.16+deb8-x86_64-all) \ $(shell $(ONLPM) --find-file onl-kernel-3.16-lts-x86-64-all:amd64 kernel-3.16-lts-x86_64-all) \ From 02653d58029453ba4ee7e5307fdc30e3bbcea729 Mon Sep 17 00:00:00 2001 From: brandonchuang Date: Fri, 23 Dec 2016 14:36:23 +0800 Subject: [PATCH 188/255] [as7712] Support DC12V(PSU-12V-650)/DC48V(YM-2651V) power supply --- ...orm-accton-as7712_32x-device-drivers.patch | 245 +++++++++++++----- .../onlp/builds/src/module/src/fani.c | 92 +++++-- .../onlp/builds/src/module/src/platform_lib.c | 73 ++++-- .../onlp/builds/src/module/src/platform_lib.h | 7 +- .../onlp/builds/src/module/src/psui.c | 85 +++++- 5 files changed, 381 insertions(+), 121 deletions(-) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch index 8bba8fc5..41ff38bc 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch @@ -44,10 +44,10 @@ index f8ee399..ea97f4a 100644 obj-$(CONFIG_SENSORS_AD7414) += ad7414.o diff --git a/drivers/hwmon/accton_as7712_32x_fan.c b/drivers/hwmon/accton_as7712_32x_fan.c new file mode 100644 -index 0000000..f2246e3 +index 0000000..74c577d --- /dev/null +++ b/drivers/hwmon/accton_as7712_32x_fan.c -@@ -0,0 +1,452 @@ +@@ -0,0 +1,491 @@ +/* + * A hwmon driver for the Accton as7712 32x fan + * @@ -93,6 +93,7 @@ index 
0000000..f2246e3 + */ +static const u8 fan_reg[] = { + 0x0F, /* fan 1-6 present status */ ++ 0x10, /* fan 1-6 direction(0:B2F 1:F2B) */ + 0x11, /* fan PWM(for all fan) */ + 0x12, /* front fan 1 speed(rpm) */ + 0x13, /* front fan 2 speed(rpm) */ @@ -128,6 +129,7 @@ index 0000000..f2246e3 + +enum sysfs_fan_attributes { + FAN_PRESENT_REG, ++ FAN_DIRECTION_REG, + FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ + FAN1_FRONT_SPEED_RPM, + FAN2_FRONT_SPEED_RPM, @@ -141,6 +143,12 @@ index 0000000..f2246e3 + FAN4_REAR_SPEED_RPM, + FAN5_REAR_SPEED_RPM, + FAN6_REAR_SPEED_RPM, ++ FAN1_DIRECTION, ++ FAN2_DIRECTION, ++ FAN3_DIRECTION, ++ FAN4_DIRECTION, ++ FAN5_DIRECTION, ++ FAN6_DIRECTION, + FAN1_PRESENT, + FAN2_PRESENT, + FAN3_PRESENT, @@ -200,6 +208,13 @@ index 0000000..f2246e3 +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); ++/* 6 fan direction attribute in this platform */ ++DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(1); ++DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(2); ++DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(3); ++DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(4); ++DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(5); ++DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(6); +/* 1 fan duty cycle attribute in this platform */ +DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); + @@ -223,6 +238,12 @@ index 0000000..f2246e3 + DECLARE_FAN_PRESENT_ATTR(4), + DECLARE_FAN_PRESENT_ATTR(5), + DECLARE_FAN_PRESENT_ATTR(6), ++ DECLARE_FAN_DIRECTION_ATTR(1), ++ DECLARE_FAN_DIRECTION_ATTR(2), ++ DECLARE_FAN_DIRECTION_ATTR(3), ++ DECLARE_FAN_DIRECTION_ATTR(4), ++ DECLARE_FAN_DIRECTION_ATTR(5), ++ DECLARE_FAN_DIRECTION_ATTR(6), + DECLARE_FAN_DUTY_CYCLE_ATTR(), + NULL +}; @@ -259,6 +280,14 @@ index 0000000..f2246e3 + return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; +} + ++static u8 reg_val_to_direction(u8 reg_val, enum fan_id id) ++{ ++ u8 mask = (1 << id); ++ ++ reg_val &= mask; ++ ++ return reg_val ? 
1 : 0; ++} +static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) +{ + u8 mask = (1 << id); @@ -349,6 +378,16 @@ index 0000000..f2246e3 + case FAN6_FAULT: + ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); + break; ++ case FAN1_DIRECTION: ++ case FAN2_DIRECTION: ++ case FAN3_DIRECTION: ++ case FAN4_DIRECTION: ++ case FAN5_DIRECTION: ++ case FAN6_DIRECTION: ++ ret = sprintf(buf, "%d\n", ++ reg_val_to_direction(data->reg_val[FAN_DIRECTION_REG], ++ attr->index - FAN1_DIRECTION)); ++ break; + default: + break; + } @@ -480,10 +519,10 @@ index 0000000..f2246e3 + +static int __init as7712_32x_fan_init(void) +{ -+ extern int platform_accton_as7712_32x(void); -+ if (!platform_accton_as7712_32x()) { -+ return -ENODEV; -+ } ++ extern int platform_accton_as7712_32x(void); ++ if (!platform_accton_as7712_32x()) { ++ return -ENODEV; ++ } + + return i2c_add_driver(&as7712_32x_fan_driver); +} @@ -502,10 +541,10 @@ index 0000000..f2246e3 + diff --git a/drivers/hwmon/accton_as7712_32x_psu.c b/drivers/hwmon/accton_as7712_32x_psu.c new file mode 100644 -index 0000000..4574150 +index 0000000..f1f11f5 --- /dev/null +++ b/drivers/hwmon/accton_as7712_32x_psu.c -@@ -0,0 +1,293 @@ +@@ -0,0 +1,384 @@ +/* + * An hwmon driver for accton as7712_32x Power Module + * @@ -542,14 +581,18 @@ index 0000000..4574150 +#include +#include + ++#define MAX_MODEL_NAME 16 ++ ++#define DC12V_FAN_DIR_OFFSET 0x34 ++#define DC12V_FAN_DIR_LEN 3 ++ +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ -+static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; ++static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; + +/* Each client has this additional data + */ @@ -560,27 +603,32 @@ index 0000000..4574150 + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[9]; /* Model name, read from eeprom */ ++ char model_name[MAX_MODEL_NAME]; /* Model name, read from eeprom */ ++ char fan_dir[DC12V_FAN_DIR_LEN+1]; /* DC12V fan direction */ +}; + ++static ssize_t show_string(struct device *dev, struct device_attribute *da, char *buf); +static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev); + +enum as7712_32x_psu_sysfs_attributes { + PSU_PRESENT, + PSU_MODEL_NAME, -+ PSU_POWER_GOOD ++ PSU_POWER_GOOD, ++ PSU_FAN_DIR /* For DC12V only */ +}; + +/* sysfs attributes for hwmon + */ -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); ++static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); ++static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_string, NULL, PSU_MODEL_NAME); ++static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); ++static SENSOR_DEVICE_ATTR(psu_fan_dir, S_IRUGO, show_string, NULL, PSU_FAN_DIR); + +static struct attribute *as7712_32x_psu_attributes[] = { + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + 
&sensor_dev_attr_psu_power_good.dev_attr.attr, ++ &sensor_dev_attr_psu_fan_dir.dev_attr.attr, + NULL +}; + @@ -591,6 +639,10 @@ index 0000000..4574150 + struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); + u8 status = 0; + ++ if (!data->valid) { ++ return -EIO; ++ } ++ + if (attr->index == PSU_PRESENT) { + status = !(data->status >> (1-data->index) & 0x1); + } @@ -601,12 +653,25 @@ index 0000000..4574150 + return sprintf(buf, "%d\n", status); +} + -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, ++static ssize_t show_string(struct device *dev, struct device_attribute *da, + char *buf) +{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s\n", data->model_name); ++ char *ptr = NULL; ++ ++ if (!data->valid) { ++ return -EIO; ++ } ++ ++ if (attr->index == PSU_MODEL_NAME) { ++ ptr = data->model_name; ++ } ++ else { /* PSU_FAN_DIR */ ++ ptr = data->fan_dir; ++ } ++ ++ return sprintf(buf, "%s\n", ptr); +} + +static const struct attribute_group as7712_32x_psu_group = { @@ -703,30 +768,83 @@ index 0000000..4574150 +{ + int result = 0; + int retry_count = 5; -+ -+ while (retry_count) { -+ retry_count--; -+ -+ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) { -+ msleep(10); -+ continue; -+ } -+ -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ msleep(10); ++ ++ while (retry_count) { ++ retry_count--; ++ ++ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); ++ ++ if (unlikely(result < 0)) { ++ msleep(10); + continue; + } -+ -+ result = 0; -+ break; -+ } -+ ++ ++ if (unlikely(result != data_len)) { ++ result = -EIO; ++ msleep(10); ++ continue; ++ } ++ ++ result = 0; ++ break; ++ } ++ + return result; +} + ++enum psu_type { ++ PSU_TYPE_AC_110V, ++ PSU_TYPE_DC_48V, ++ PSU_TYPE_DC_12V ++}; ++ ++struct model_name_info { ++ enum psu_type type; ++ u8 offset; ++ u8 length; ++ char* model_name; ++}; ++ ++struct model_name_info models[] = { ++{PSU_TYPE_AC_110V, 0x20, 8, "YM-2651Y"}, ++{PSU_TYPE_DC_48V, 0x20, 8, "YM-2651V"}, ++{PSU_TYPE_DC_12V, 0x00, 11, "PSU-12V-750"}, ++}; ++ ++static int as7712_32x_psu_model_name_get(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as7712_32x_psu_data *data = i2c_get_clientdata(client); ++ int i, status; ++ ++ for (i = 0; i < ARRAY_SIZE(models); i++) { ++ memset(data->model_name, 0, sizeof(data->model_name)); ++ ++ status = as7712_32x_psu_read_block(client, models[i].offset, ++ data->model_name, models[i].length); ++ if (status < 0) { ++ data->model_name[0] = '\0'; ++ dev_dbg(&client->dev, "unable to read model name from (0x%x) offset(0x%x)\n", ++ client->addr, models[i].offset); ++ return status; ++ } ++ else { ++ data->model_name[models[i].length] = '\0'; ++ } ++ ++ /* Determine if the model name is known, if not, read next index ++ */ ++ if (strncmp(data->model_name, models[i].model_name, models[i].length) == 0) { ++ return 0; ++ } ++ else { ++ data->model_name[0] = '\0'; ++ } ++ } ++ ++ return -ENODATA; ++} ++ +static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); @@ -737,8 +855,9 @@ index 0000000..4574150 + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status; -+ int power_good = 0; ++ int power_good = 0; + ++ data->valid = 0; + dev_dbg(&client->dev, "Starting 
as7712_32x update\n"); + + /* Read psu status */ @@ -746,25 +865,35 @@ index 0000000..4574150 + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); ++ goto exit; + } + else { + data->status = status; + } -+ ++ + /* Read model name */ + memset(data->model_name, 0, sizeof(data->model_name)); ++ memset(data->fan_dir, 0, sizeof(data->fan_dir)); + power_good = (data->status >> (3-data->index) & 0x1); -+ -+ if (power_good) { -+ status = as7712_32x_psu_read_block(client, 0x20, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); + -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); ++ if (power_good) { ++ if (as7712_32x_psu_model_name_get(dev) < 0) { ++ goto exit; + } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; ++ ++ if (strncmp(data->model_name, ++ models[PSU_TYPE_DC_12V].model_name, ++ models[PSU_TYPE_DC_12V].length) == 0) { ++ /* Read fan direction */ ++ status = as7712_32x_psu_read_block(client, DC12V_FAN_DIR_OFFSET, ++ data->fan_dir, DC12V_FAN_DIR_LEN); ++ ++ if (status < 0) { ++ data->fan_dir[0] = '\0'; ++ dev_dbg(&client->dev, "unable to read fan direction from (0x%x) offset(0x%x)\n", ++ client->addr, DC12V_FAN_DIR_OFFSET); ++ goto exit; ++ } + } + } + @@ -772,6 +901,7 @@ index 0000000..4574150 + data->valid = 1; + } + ++exit: + mutex_unlock(&data->update_lock); + + return data; @@ -779,10 +909,10 @@ index 0000000..4574150 + +static int __init as7712_32x_psu_init(void) +{ -+ extern int platform_accton_as7712_32x(void); -+ if (!platform_accton_as7712_32x()) { -+ return -ENODEV; -+ } ++ extern int platform_accton_as7712_32x(void); ++ if (!platform_accton_as7712_32x()) { ++ return -ENODEV; ++ } + + return i2c_add_driver(&as7712_32x_psu_driver); +} @@ -803,7 +933,7 @@ diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c index 96e3490..3aeb08d 100644 --- a/drivers/hwmon/accton_i2c_cpld.c +++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -201,6 +201,29 @@ int platform_accton_as7512_32x(void) +@@ -201,6 +201,22 @@ int platform_accton_as7512_32x(void) } EXPORT_SYMBOL(platform_accton_as7512_32x); @@ -814,14 +944,7 @@ index 96e3490..3aeb08d 100644 + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), + }, -+ }, -+ { -+ .ident = "Accton AS7712", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), -+ }, -+ }, ++ } +}; + +int platform_accton_as7712_32x(void) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/fani.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/fani.c index cae38872..7e48d6da 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/fani.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/fani.c @@ -49,21 +49,23 @@ typedef struct fan_path_S { + char present[LEN_FILE_NAME]; char status[LEN_FILE_NAME]; char speed[LEN_FILE_NAME]; + char direction[LEN_FILE_NAME]; char ctrl_speed[LEN_FILE_NAME]; char r_speed[LEN_FILE_NAME]; }fan_path_T; #define _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) \ - { #prj"fan"#id"_fault", #prj"fan"#id"_front_speed_rpm", \ - #prj"fan_duty_cycle_percentage", #prj"fan"#id"_rear_speed_rpm" } + { #prj"fan"#id"_present", #prj"fan"#id"_fault", #prj"fan"#id"_front_speed_rpm", \ + #prj"fan"#id"_direction", #prj"fan"#id"_duty_cycle_percentage", #prj"fan"#id"_rear_speed_rpm" } #define 
MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) _MAKE_FAN_PATH_ON_MAIN_BOARD(prj,id) #define MAKE_FAN_PATH_ON_PSU(folder) \ - {#folder"/psu_fan1_fault", #folder"/psu_fan1_speed_rpm", \ - #folder"/psu_fan1_duty_cycle_percentage", "" } + {"", #folder"/psu_fan1_fault", #folder"/psu_fan1_speed_rpm", \ + "", #folder"/psu_fan1_duty_cycle_percentage", "" } static fan_path_T fan_path[] = /* must map with onlp_fan_id */ { @@ -129,6 +131,39 @@ onlp_fan_info_t linfo[] = { if (close(fd) == -1) \ return ONLP_STATUS_E_INTERNAL +static uint32_t +_onlp_fani_info_get_psu_fan_direction(void) +{ + /* Try to read direction from PSU1. + * If PSU1 is not valid, read from PSU2 + */ + int i = 0; + + for (i = PSU1_ID; i <= PSU2_ID; i++) { + psu_type_t psu_type; + psu_type = get_psu_type(i, NULL, 0); + + if (psu_type == PSU_TYPE_UNKNOWN) { + continue; + } + + switch (psu_type) { + case PSU_TYPE_AC_F2B: + case PSU_TYPE_DC_48V_F2B: + case PSU_TYPE_DC_12V_F2B: + return ONLP_FAN_STATUS_F2B; + case PSU_TYPE_AC_B2F: + case PSU_TYPE_DC_48V_B2F: + case PSU_TYPE_DC_12V_B2F: + return ONLP_FAN_STATUS_B2F; + default: + return 0; + }; + } + + return 0; +} + static int _onlp_fani_info_get_fan(int local_id, onlp_fan_info_t* info) { @@ -136,14 +171,34 @@ _onlp_fani_info_get_fan(int local_id, onlp_fan_info_t* info) char r_data[10] = {0}; char fullpath[65] = {0}; + /* check if fan is present + */ + sprintf(fullpath, "%s%s", PREFIX_PATH_ON_MAIN_BOARD, fan_path[local_id].present); + OPEN_READ_FILE(fd,fullpath,r_data,nbytes,len); + if (atoi(r_data) == 0) { + return ONLP_STATUS_OK; + } + info->status |= ONLP_FAN_STATUS_PRESENT; + /* get fan fault status (turn on when any one fails) */ sprintf(fullpath, "%s%s", PREFIX_PATH_ON_MAIN_BOARD, fan_path[local_id].status); OPEN_READ_FILE(fd,fullpath,r_data,nbytes,len); if (atoi(r_data) > 0) { info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; } + /* get fan/fanr direction (both : the same) + */ + sprintf(fullpath, "%s%s", PREFIX_PATH_ON_MAIN_BOARD, fan_path[local_id].direction); + OPEN_READ_FILE(fd,fullpath,r_data,nbytes,len); + + if (atoi(r_data) == 0) /*B2F*/ + info->status |= ONLP_FAN_STATUS_B2F; + else + info->status |= ONLP_FAN_STATUS_F2B; + /* get fan speed (take the min from two speeds) */ sprintf(fullpath, "%s%s", PREFIX_PATH_ON_MAIN_BOARD, fan_path[local_id].speed); @@ -159,16 +214,11 @@ _onlp_fani_info_get_fan(int local_id, onlp_fan_info_t* info) /* get speed percentage from rpm */ info->percentage = (info->rpm * 100)/MAX_FAN_SPEED; - /* check present */ - if (info->rpm > 0) { - info->status |= ONLP_FAN_STATUS_PRESENT; - } - return ONLP_STATUS_OK; } static int -_onlp_fani_info_get_fan_on_psu(int local_id, int psu_id, onlp_fan_info_t* info) +_onlp_fani_info_get_fan_on_psu(int local_id, onlp_fan_info_t* info) { int fd, len, nbytes = 10; char r_data[10] = {0}; @@ -176,17 +226,7 @@ _onlp_fani_info_get_fan_on_psu(int local_id, int psu_id, onlp_fan_info_t* info) /* get fan direction */ - switch(get_psu_type(psu_id, NULL, 0)) - { - case PSU_TYPE_AC_F2B: - info->status |= ONLP_FAN_STATUS_F2B; - break; - case PSU_TYPE_AC_B2F: - info->status |= ONLP_FAN_STATUS_B2F; - break; - default: - break; - } + info->status |= _onlp_fani_info_get_psu_fan_direction(); /* get fan fault status */ @@ -227,12 +267,10 @@ onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info) *info = linfo[local_id]; switch (local_id) - { - case FAN_1_ON_PSU1: - rc = _onlp_fani_info_get_fan_on_psu(local_id, PSU1_ID, info); - break; + { + case FAN_1_ON_PSU1: case FAN_1_ON_PSU2: - rc = _onlp_fani_info_get_fan_on_psu(local_id, 
PSU2_ID, info); + rc = _onlp_fani_info_get_fan_on_psu(local_id, info); break; case FAN_1_ON_MAIN_BOARD: case FAN_2_ON_MAIN_BOARD: @@ -245,7 +283,7 @@ onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info) default: rc = ONLP_STATUS_E_INVALID; break; - } + } return rc; } diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.c index 324d6075..b4ef2d04 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.c @@ -115,7 +115,7 @@ int deviceNodeReadString(char *filename, char *buffer, int buf_size, int data_le return ret; } -#define I2C_PSU_MODEL_NAME_LEN 9 +#define I2C_PSU_MODEL_NAME_LEN 11 #define I2C_PSU_FAN_DIR_LEN 3 #include psu_type_t get_psu_type(int id, char* modelname, int modelname_len) @@ -124,7 +124,6 @@ psu_type_t get_psu_type(int id, char* modelname, int modelname_len) char model_name[I2C_PSU_MODEL_NAME_LEN + 1] = {0}; char fan_dir[I2C_PSU_FAN_DIR_LEN + 1] = {0}; - /* Check AC model name */ node = (id == PSU1_ID) ? PSU1_AC_HWMON_NODE(psu_model_name) : PSU2_AC_HWMON_NODE(psu_model_name); @@ -132,32 +131,70 @@ psu_type_t get_psu_type(int id, char* modelname, int modelname_len) return PSU_TYPE_UNKNOWN; } - - if (strncmp(model_name, "YM-2651Y", strlen("YM-2651Y")) != 0) { - return PSU_TYPE_UNKNOWN; - } - if(isspace(model_name[strlen(model_name)-1])) { model_name[strlen(model_name)-1] = 0; } - if (modelname) { - strncpy(modelname, model_name, modelname_len-1); + if (strncmp(model_name, "YM-2651Y", 8) == 0) { + if (modelname) { + strncpy(modelname, model_name, 8); + } + + node = (id == PSU1_ID) ? PSU1_AC_PMBUS_NODE(psu_fan_dir) : PSU2_AC_PMBUS_NODE(psu_fan_dir); + if (deviceNodeReadString(node, fan_dir, sizeof(fan_dir), 0) != 0) { + return PSU_TYPE_UNKNOWN; + } + + if (strncmp(fan_dir, "F2B", strlen("F2B")) == 0) { + return PSU_TYPE_AC_F2B; + } + + if (strncmp(fan_dir, "B2F", strlen("B2F")) == 0) { + return PSU_TYPE_AC_B2F; + } } - node = (id == PSU1_ID) ? PSU1_AC_PMBUS_NODE(psu_fan_dir) : PSU2_AC_PMBUS_NODE(psu_fan_dir); + if (strncmp(model_name, "YM-2651V", 8) == 0) { + if (modelname) { + strncpy(modelname, model_name, 8); + } - if (deviceNodeReadString(node, fan_dir, sizeof(fan_dir), 0) != 0) { - return PSU_TYPE_UNKNOWN; + node = (id == PSU1_ID) ? PSU1_AC_PMBUS_NODE(psu_fan_dir) : PSU2_AC_PMBUS_NODE(psu_fan_dir); + if (deviceNodeReadString(node, fan_dir, sizeof(fan_dir), 0) != 0) { + return PSU_TYPE_UNKNOWN; + } + + if (strncmp(fan_dir, "F2B", strlen("F2B")) == 0) { + return PSU_TYPE_DC_48V_F2B; + } + + if (strncmp(fan_dir, "B2F", strlen("B2F")) == 0) { + return PSU_TYPE_DC_48V_B2F; + } } - if (strncmp(fan_dir, "F2B", strlen("F2B")) == 0) { - return PSU_TYPE_AC_F2B; - } + if (strncmp(model_name, "PSU-12V-750", 11) == 0) { + if (modelname) { + strncpy(modelname, model_name, 11); + } - if (strncmp(fan_dir, "B2F", strlen("B2F")) == 0) { - return PSU_TYPE_AC_B2F; - } + node = (id == PSU1_ID) ? 
PSU1_AC_HWMON_NODE(psu_fan_dir) : PSU2_AC_HWMON_NODE(psu_fan_dir); + if (deviceNodeReadString(node, fan_dir, sizeof(fan_dir), 0) != 0) { + return PSU_TYPE_UNKNOWN; + } + + if (strncmp(fan_dir, "F2B", 3) == 0) { + return PSU_TYPE_DC_12V_F2B; + } + + if (strncmp(fan_dir, "B2F", 3) == 0) { + return PSU_TYPE_DC_12V_B2F; + } + + if (strncmp(fan_dir, "NON", 3) == 0) { + return PSU_TYPE_DC_12V_FANLESS; + } + } return PSU_TYPE_UNKNOWN; } diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.h index d2a032b4..10c3f8d6 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.h +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/platform_lib.h @@ -55,7 +55,12 @@ int deviceNodeReadString(char *filename, char *buffer, int buf_size, int data_le typedef enum psu_type { PSU_TYPE_UNKNOWN, PSU_TYPE_AC_F2B, - PSU_TYPE_AC_B2F + PSU_TYPE_AC_B2F, + PSU_TYPE_DC_48V_F2B, + PSU_TYPE_DC_48V_B2F, + PSU_TYPE_DC_12V_FANLESS, + PSU_TYPE_DC_12V_F2B, + PSU_TYPE_DC_12V_B2F } psu_type_t; psu_type_t get_psu_type(int id, char* modelname, int modelname_len); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/psui.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/psui.c index 3277077c..88c02f03 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/psui.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/psui.c @@ -68,7 +68,7 @@ psu_status_info_get(int id, char *node, int *value) } static int -psu_ym2651y_pmbus_info_get(int id, char *node, int *value) +psu_ym2651_pmbus_info_get(int id, char *node, int *value) { int ret = 0; char buf[PSU_NODE_MAX_INT_LEN + 1] = {0}; @@ -99,35 +99,31 @@ onlp_psui_init(void) } static int -psu_ym2651y_info_get(onlp_psu_info_t* info) +psu_ym2651_info_get(onlp_psu_info_t* info) { int val = 0; int index = ONLP_OID_ID_GET(info->hdr.id); - - /* Set capability - */ - info->caps = ONLP_PSU_CAPS_AC; - - if (info->status & ONLP_PSU_STATUS_FAILED) { - return ONLP_STATUS_OK; - } + + if (info->status & ONLP_PSU_STATUS_FAILED) { + return ONLP_STATUS_OK; + } /* Set the associated oid_table */ info->hdr.coids[0] = ONLP_FAN_ID_CREATE(index + CHASSIS_FAN_COUNT); info->hdr.coids[1] = ONLP_THERMAL_ID_CREATE(index + CHASSIS_THERMAL_COUNT); /* Read voltage, current and power */ - if (psu_ym2651y_pmbus_info_get(index, "psu_v_out", &val) == 0) { + if (psu_ym2651_pmbus_info_get(index, "psu_v_out", &val) == 0) { info->mvout = val; info->caps |= ONLP_PSU_CAPS_VOUT; } - if (psu_ym2651y_pmbus_info_get(index, "psu_i_out", &val) == 0) { + if (psu_ym2651_pmbus_info_get(index, "psu_i_out", &val) == 0) { info->miout = val; info->caps |= ONLP_PSU_CAPS_IOUT; } - if (psu_ym2651y_pmbus_info_get(index, "psu_p_out", &val) == 0) { + if (psu_ym2651_pmbus_info_get(index, "psu_p_out", &val) == 0) { info->mpout = val; info->caps |= ONLP_PSU_CAPS_POUT; } @@ -135,6 +131,56 @@ psu_ym2651y_info_get(onlp_psu_info_t* info) return ONLP_STATUS_OK; } +#include +#define DC12V_750_REG_TO_CURRENT(low, high) (((low << 4 | high >> 4) * 20 * 1000) / 754) +#define DC12V_750_REG_TO_VOLTAGE(low, high) ((low << 4 | high >> 4) * 25) + +static int +psu_dc12v_750_info_get(onlp_psu_info_t* info) +{ + int pid = ONLP_OID_ID_GET(info->hdr.id); + int 
bus = (PSU1_ID == pid) ? 11 : 10; + int iout_low, iout_high; + int vout_low, vout_high; + + /* Set capability + */ + info->caps = ONLP_PSU_CAPS_DC12; + + if (info->status & ONLP_PSU_STATUS_FAILED) { + return ONLP_STATUS_OK; + } + + /* Get current + */ + iout_low = onlp_i2c_readb(bus, 0x6f, 0x0, ONLP_I2C_F_FORCE); + iout_high = onlp_i2c_readb(bus, 0x6f, 0x1, ONLP_I2C_F_FORCE); + + if ((iout_low >= 0) && (iout_high >= 0)) { + info->miout = DC12V_750_REG_TO_CURRENT(iout_low, iout_high); + info->caps |= ONLP_PSU_CAPS_IOUT; + } + + /* Get voltage + */ + vout_low = onlp_i2c_readb(bus, 0x6f, 0x2, ONLP_I2C_F_FORCE); + vout_high = onlp_i2c_readb(bus, 0x6f, 0x3, ONLP_I2C_F_FORCE); + + if ((vout_low >= 0) && (vout_high >= 0)) { + info->mvout = DC12V_750_REG_TO_VOLTAGE(vout_low, vout_high); + info->caps |= ONLP_PSU_CAPS_VOUT; + } + + /* Get power based on current and voltage + */ + if ((info->caps & ONLP_PSU_CAPS_IOUT) && (info->caps & ONLP_PSU_CAPS_VOUT)) { + info->mpout = (info->miout * info->mvout) / 1000; + info->caps |= ONLP_PSU_CAPS_POUT; + } + + return ONLP_STATUS_OK; +} + /* * Get all information about the given PSU oid. */ @@ -191,7 +237,18 @@ onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) switch (psu_type) { case PSU_TYPE_AC_F2B: case PSU_TYPE_AC_B2F: - ret = psu_ym2651y_info_get(info); + info->caps = ONLP_PSU_CAPS_AC; + ret = psu_ym2651_info_get(info); + break; + case PSU_TYPE_DC_48V_F2B: + case PSU_TYPE_DC_48V_B2F: + info->caps = ONLP_PSU_CAPS_DC48; + ret = psu_ym2651_info_get(info); + break; + case PSU_TYPE_DC_12V_F2B: + case PSU_TYPE_DC_12V_B2F: + case PSU_TYPE_DC_12V_FANLESS: + ret = psu_dc12v_750_info_get(info); break; case PSU_TYPE_UNKNOWN: /* User insert a unknown PSU or unplugged.*/ info->status |= ONLP_PSU_STATUS_UNPLUGGED; From 031be2cd46ea83145702030512d311a20b1b21f3 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 23 Dec 2016 18:19:01 +0000 Subject: [PATCH 189/255] Set default delay to zero. --- .../patches/driver-i2c-bus-intel-ismt-add-delay-param.patch | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch index bf6c4fc7..3ccb64c2 100644 --- a/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch +++ b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch @@ -27,12 +27,12 @@ index d9ee43c..b2b3856 100644 /* Bus speed control bits for slow debuggers - refer to the docs for usage */ -static unsigned int bus_speed; +static unsigned int bus_speed = 100; -+static unsigned int delay = 1000; ++static unsigned int delay = 0; module_param(bus_speed, uint, S_IRUGO); -MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)"); +MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (1000 by default)"); +module_param(delay, uint, S_IRUGO); -+MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)"); ++MODULE_PARM_DESC(delay, "Delay in microsecs before access (0 by default)"); /** * __ismt_desc_dump() - dump the contents of a specific descriptor From e119c797e89a22a8204c78dc31e79ea371e2a3a1 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 23 Dec 2016 18:19:18 +0000 Subject: [PATCH 190/255] This patch provides the option to skip I2C probes on the SCH primary bus adapters. 
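
Usage sketch (the "probe" parameter below is the one added by this patch; the
load-time syntax is the standard module-parameter mechanism, shown here as an
illustration rather than taken from this tree):

    modprobe i2c-isch probe=0

or, equivalently, i2c_isch.probe=0 on the kernel command line. The default
probe=1 keeps the current behaviour; with probe=0 the driver clears the
adapter's class before registration, so i2c client drivers will not auto-probe
addresses on the SCH bus, while explicitly instantiated devices are unaffected.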
--- ...vers-i2c-busses-i2c-isch-probe-param.patch | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 packages/base/any/kernels/3.16-lts/patches/drivers-i2c-busses-i2c-isch-probe-param.patch diff --git a/packages/base/any/kernels/3.16-lts/patches/drivers-i2c-busses-i2c-isch-probe-param.patch b/packages/base/any/kernels/3.16-lts/patches/drivers-i2c-busses-i2c-isch-probe-param.patch new file mode 100644 index 00000000..269323eb --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/drivers-i2c-busses-i2c-isch-probe-param.patch @@ -0,0 +1,25 @@ +--- a/drivers/i2c/busses/i2c-isch.c 2016-11-20 01:17:41.000000000 +0000 ++++ b/drivers/i2c/busses/i2c-isch.c 2016-12-22 23:34:15.908744426 +0000 +@@ -62,6 +62,10 @@ static int backbone_speed = 33000; /* ba + module_param(backbone_speed, int, S_IRUSR | S_IWUSR); + MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)"); + ++static int probe = 1; ++module_param(probe, int, S_IRUSR | S_IWUSR); ++MODULE_PARM_DESC(probe, "Enable or disable i2c adapter probing. Default = 1"); ++ + /* + * Start the i2c transaction -- the i2c_access will prepare the transaction + * and this function will execute it. +@@ -291,6 +295,11 @@ static int smbus_sch_probe(struct platfo + snprintf(sch_adapter.name, sizeof(sch_adapter.name), + "SMBus SCH adapter at %04x", sch_smba); + ++ if(!probe) { ++ sch_adapter.class = 0; ++ dev_warn(&dev->dev, "bus probing disabled by module parameter.\n"); ++ } ++ + retval = i2c_add_adapter(&sch_adapter); + if (retval) { + dev_err(&dev->dev, "Couldn't register adapter!\n"); From 40ebb1080b96f91292cd877f01e802ed8a5716fd Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 23 Dec 2016 18:20:16 +0000 Subject: [PATCH 191/255] Platform support patch for the Dell S6000. --- ...rivers-platform-x86-dell-s6000-s1220.patch | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch diff --git a/packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch b/packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch new file mode 100644 index 00000000..1fe1d928 --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch @@ -0,0 +1,157 @@ +diff -urpN a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +--- a/drivers/platform/x86/Kconfig 2016-11-20 01:17:41.000000000 +0000 ++++ b/drivers/platform/x86/Kconfig 2016-12-22 20:28:22.048502394 +0000 +@@ -838,4 +838,10 @@ config PVPANIC + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. + ++config DELL_S6000_S1220 ++ tristate "Platform Driver for the DELL S6000" ++ ---help--- ++ Support the Dell S6000. 
++ ++ + endif # X86_PLATFORM_DEVICES +diff -urpN a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile +--- a/drivers/platform/x86/Makefile 2016-11-20 01:17:41.000000000 +0000 ++++ b/drivers/platform/x86/Makefile 2016-12-22 20:29:50.024504303 +0000 +@@ -57,3 +57,4 @@ obj-$(CONFIG_INTEL_SMARTCONNECT) += inte + + obj-$(CONFIG_PVPANIC) += pvpanic.o + obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o ++obj-$(CONFIG_DELL_S6000_S1220) += dell_s6000_s1220.o +diff -urpN a/drivers/platform/x86/dell_s6000_s1220.c b/drivers/platform/x86/dell_s6000_s1220.c +--- a/drivers/platform/x86/dell_s6000_s1220.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/platform/x86/dell_s6000_s1220.c 2016-12-22 20:26:50.728500412 +0000 +@@ -0,0 +1,131 @@ ++/** ++ * Dell S6000 Platform Support. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++ ++/************************************************************* ++ * ++ * I2C Bus 0 on the S6000 is muxed via gpio1 and gpio2. ++ * ++ ************************************************************/ ++static const unsigned s6000_gpiomux_gpios[] = { ++ 1, 2 ++}; ++ ++static const unsigned s6000_gpiomux_values[] = { ++ 0, 1, 2, 3 ++}; ++ ++static struct i2c_mux_gpio_platform_data s6000_i2cmux_data = { ++ /* ++ * i2c Bus 0 ++ */ ++ .parent = 0, ++ ++ /* ++ * Start the bus numbers at 10. The first digit ++ * will represent the different bus numbers based ++ * the gpio selector (00, 01, 10, 11): ++ * ++ * i2c-10 --> i2c-0, gpios = 00 ++ * i2c-11 --> i2c-0, gpios = 01 ++ * i2c-12 --> i2c-0, gpios = 10 ++ * i2c-13 --> i2c-0, gpios = 11 ++ */ ++ .base_nr = 10, ++ ++ .values = s6000_gpiomux_values, ++ .n_values = ARRAY_SIZE(s6000_gpiomux_values), ++ .gpios = s6000_gpiomux_gpios, ++ .n_gpios = ARRAY_SIZE(s6000_gpiomux_gpios), ++ .idle = 0, ++}; ++ ++static struct platform_device s6000_i2cmux = { ++ .name = "i2c-mux-gpio", ++ .id = 12, ++ .dev = { ++ .platform_data = &s6000_i2cmux_data, ++ }, ++}; ++ ++/************************************************************* ++ * ++ * Sensors on i2c-11 (See mux data above). ++ * ++ ************************************************************/ ++static struct i2c_board_info s6000_i2c_11_board_info[] = { ++ { I2C_BOARD_INFO("lm75", 0x4c) }, ++ { I2C_BOARD_INFO("lm75", 0x4d) }, ++ { I2C_BOARD_INFO("lm75", 0x4e) }, ++ { I2C_BOARD_INFO("ltc4215", 0x42) }, ++ { I2C_BOARD_INFO("ltc4215", 0x40) }, ++ { I2C_BOARD_INFO("max6620", 0x29) }, ++ { I2C_BOARD_INFO("max6620", 0x2A) }, ++ { I2C_BOARD_INFO("24c02", 0x51) }, ++ { I2C_BOARD_INFO("24c02", 0x52) }, ++ { I2C_BOARD_INFO("24c02", 0x53) }, ++}; ++ ++static int __init dell_s6000_s1220_init(void) ++{ ++ int i; ++ int rv = 0; ++ char const *vendor, *product; ++ struct i2c_adapter * i2ca; ++ ++ vendor = dmi_get_system_info(DMI_SYS_VENDOR); ++ product = dmi_get_system_info(DMI_PRODUCT_NAME); ++ ++ if(strcmp(vendor, "Dell Inc") || ++ (strcmp(product, "S6000 (SI)") && strcmp(product, "S6000-ON") && ++ strcmp(product, "S6000-ON (SI)"))) { ++ /* Not the S6000 */ ++ return -ENODEV; ++ } ++ ++ /** ++ * Register the GPIO mux for bus 0. 
++ */ ++ rv = platform_device_register(&s6000_i2cmux); ++ if(rv < 0) { ++ pr_err("%s: platform_device_register() failed: %d", __FUNCTION__, rv); ++ return rv; ++ } ++ ++ /** ++ * Register I2C devices on new buses ++ */ ++ i2ca = i2c_get_adapter(11); ++ for(i = 0; i < ARRAY_SIZE(s6000_i2c_11_board_info); i++) { ++ if(i2c_new_device(i2ca, s6000_i2c_11_board_info+i) == NULL) { ++ pr_err("%s: i2c_new_device for bus 11:0x%x failed.", ++ __FUNCTION__, s6000_i2c_11_board_info[i].addr); ++ } ++ } ++ ++ return 0; ++ ++} ++ ++static void __exit dell_s6000_s1220_cleanup(void) ++{ ++ platform_device_unregister(&s6000_i2cmux); ++} ++ ++module_init(dell_s6000_s1220_init); ++module_exit(dell_s6000_s1220_cleanup); ++ ++MODULE_AUTHOR("Big Switch Networks (support@bigswitch.com)"); ++MODULE_VERSION("1.0"); ++MODULE_DESCRIPTION("Dell S6000"); ++MODULE_LICENSE("GPL"); From 1e6799374c61c03615f497d8fe1a71ee611259c6 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 23 Dec 2016 18:20:33 +0000 Subject: [PATCH 192/255] New patches. --- packages/base/any/kernels/3.16-lts/patches/series | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/base/any/kernels/3.16-lts/patches/series b/packages/base/any/kernels/3.16-lts/patches/series index e72c4459..e72aafa3 100644 --- a/packages/base/any/kernels/3.16-lts/patches/series +++ b/packages/base/any/kernels/3.16-lts/patches/series @@ -14,3 +14,5 @@ driver-arista-piix4-mux-patch.patch driver-igb-version-5.3.54.patch driver-support-intel-igb-bcm5461X-phy.patch driver-i2c-bus-intel-ismt-enable-param.patch +drivers-platform-x86-dell-s6000-s1220.patch +drivers-i2c-busses-i2c-isch-probe-param.patch From fd3b396222f446df8af9419c7675cd1df2d275d7 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Fri, 23 Dec 2016 18:22:42 +0000 Subject: [PATCH 193/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 73e54109..4ac99ea3 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 73e5410960dd945d49a27af3e53690988661d462 +Subproject commit 4ac99ea3ba7233b620c8ac269a59a9bd75d3bba0 From cfbb1a672954bfc0a40ac165611fac28b2cfac4c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 24 Dec 2016 15:02:01 +0000 Subject: [PATCH 194/255] Regenerated. --- .../configs/x86_64-all/x86_64-all.config | 21 ++----------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config index 94506877..6a33acd7 100644 --- a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/x86_64 3.16.7-ckt25 Kernel Configuration +# Linux/x86_64 3.16.39 Kernel Configuration # CONFIG_64BIT=y CONFIG_X86_64=y @@ -151,7 +151,6 @@ CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_CPUACCT=y CONFIG_RESOURCE_COUNTERS=y CONFIG_MEMCG=y -# CONFIG_MEMCG_DISABLED is not set CONFIG_MEMCG_SWAP=y CONFIG_MEMCG_SWAP_ENABLED=y CONFIG_MEMCG_KMEM=y @@ -1542,7 +1541,6 @@ CONFIG_MDIO=y # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set # CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_XGENE is not set CONFIG_NET_VENDOR_ARC=y # CONFIG_NET_VENDOR_ATHEROS is not set CONFIG_NET_VENDOR_BROADCOM=y @@ -2463,7 +2461,6 @@ CONFIG_USB_DEFAULT_PERSIST=y # # CONFIG_USB_C67X00_HCD is not set CONFIG_USB_XHCI_HCD=y -# CONFIG_USB_XHCI_PLATFORM is not set CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y @@ -2838,6 +2835,7 @@ CONFIG_X86_PLATFORM_DEVICES=y # CONFIG_INTEL_RST is not set # CONFIG_INTEL_SMARTCONNECT is not set # CONFIG_PVPANIC is not set +CONFIG_DELL_S6000_S1220=y # CONFIG_CHROME_PLATFORMS is not set # @@ -2887,7 +2885,6 @@ CONFIG_GENERIC_PHY=y # CONFIG_PHY_SAMSUNG_USB2 is not set # CONFIG_POWERCAP is not set # CONFIG_MCB is not set -# CONFIG_THUNDERBOLT is not set # # Firmware Drivers @@ -3035,20 +3032,6 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_UFS_FS is not set # CONFIG_EXOFS_FS is not set # CONFIG_F2FS_FS is not set -CONFIG_AUFS_FS=y -CONFIG_AUFS_BRANCH_MAX_127=y -# CONFIG_AUFS_BRANCH_MAX_511 is not set -# CONFIG_AUFS_BRANCH_MAX_1023 is not set -# CONFIG_AUFS_BRANCH_MAX_32767 is not set -CONFIG_AUFS_SBILIST=y -# CONFIG_AUFS_HNOTIFY is not set -# CONFIG_AUFS_EXPORT is not set -# CONFIG_AUFS_FHSM is not set -# CONFIG_AUFS_RDU is not set -# CONFIG_AUFS_SHWH is not set -# CONFIG_AUFS_BR_RAMFS is not set -CONFIG_AUFS_BDEV_LOOP=y -# CONFIG_AUFS_DEBUG is not set CONFIG_ORE=y CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y From bb4d8c84d57729dbe04908de7a61108d5704fcb7 Mon Sep 17 00:00:00 2001 From: Shengzhou Liu Date: Wed, 7 Sep 2016 14:40:27 +0800 Subject: [PATCH 195/255] Add NXP arm64 LS2088ARDB platform support --- .../3.18.25/configs/arm64-all/Makefile | 2 +- ...add-nxp-arm64-ls2088ardb-device-tree.patch | 1116 +++++++++++++++++ .../any/kernels/3.18.25/patches/series.arm64 | 2 +- .../arm64/arm64-nxp-ls2080ardb/onlp/PKG.yml | 28 +- .../arm64_nxp_ls2080ardb/module/src/sysi.c | 67 + .../platform-config/r1/Makefile | 1 + .../platform-config/r1/PKG.yml | 1 + .../r1/src/lib/arm64-nxp-ls2088ardb-r1.yml | 44 + .../arm64_nxp_ls2088ardb_r1/__init__.py | 8 + 9 files changed, 1266 insertions(+), 3 deletions(-) create mode 100644 packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch create mode 100644 packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/builds/src/arm64_nxp_ls2080ardb/module/src/sysi.c create mode 100644 packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/Makefile create mode 100644 packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/PKG.yml create mode 100644 packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/lib/arm64-nxp-ls2088ardb-r1.yml create mode 100644 packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/python/arm64_nxp_ls2088ardb_r1/__init__.py diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile b/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile index acb673d5..f8c7663c 100644 --- a/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile +++ 
b/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile @@ -15,7 +15,7 @@ K_PATCH_SERIES=series.arm64 include ../../kconfig.mk K_CONFIG := arm64-all.config -K_BUILD_TARGET := Image Image.gz arm64-nxp-ls2080ardb-r0.dtb +K_BUILD_TARGET := Image Image.gz arm64-nxp-ls2080ardb-r0.dtb arm64-nxp-ls2088ardb-r1.dtb K_COPY_SRC := arch/arm64/boot/Image K_COPY_GZIP := 1 ifndef K_COPY_DST diff --git a/packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch b/packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch new file mode 100644 index 00000000..28aff876 --- /dev/null +++ b/packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch @@ -0,0 +1,1116 @@ +From 0b8911d6263d5b70d41fd741bcead8b68a48ed2b Mon Sep 17 00:00:00 2001 +From: Shengzhou Liu +Date: Wed, 24 Aug 2016 16:16:16 +0800 +Subject: [PATCH] add nxp arm64 ls2088ardb device tree + +--- + arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts | 256 ++++++++ + arch/arm64/boot/dts/fsl-ls2088a.dtsi | 833 ++++++++++++++++++++++++ + 2 files changed, 1089 insertions(+) + create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts + create mode 100644 arch/arm64/boot/dts/fsl-ls2088a.dtsi + +diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts +new file mode 100644 +index 0000000..3e72718 +--- /dev/null ++++ b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts +@@ -0,0 +1,256 @@ ++/* ++ * Device Tree file for NXP LS2088a RDB board ++ * ++ * Copyright (C) 2016, Freescale Semiconductor ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls2088a.dtsi" ++ ++/ { ++ model = "arm64-nxp-ls2088ardb-r1"; ++ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; ++}; ++ ++&esdhc { ++ status = "okay"; ++}; ++ ++&ifc { ++ status = "okay"; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 ++ 0x2 0x0 0x5 0x30000000 0x00010000 ++ 0x3 0x0 0x5 0x20000000 0x00010000>; ++ ++ nor@0,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "cfi-flash"; ++ reg = <0x0 0x0 0x8000000>; ++ bank-width = <2>; ++ device-width = <1>; ++ ++ partition@0 { ++ /* SoC RCW, this location must not be altered */ ++ reg = <0x0 0x100000>; ++ label = "rcw (RO)"; ++ read-only; ++ }; ++ ++ partition@1 { ++ /* U-Boot image */ ++ reg = <0x100000 0x100000>; ++ label = "uboot"; ++ }; ++ ++ partition@2 { ++ /* U-Boot environment varialbes, 1MB */ ++ reg = <0x200000 0x100000>; ++ label = "uboot-env"; ++ env_size = <0x20000>; ++ }; ++ ++ partition@3 { ++ /* MC firmware, 4MB*/ ++ reg = <0x300000 0x400000>; ++ label = "mc_firmware"; ++ }; ++ ++ partition@4 { ++ /* MC DPL Blob, 1MB */ ++ reg = <0x700000 0x100000>; ++ label = "mc_dpl_blob"; ++ }; ++ ++ partition@5 { ++ /* MC DPC Blob, 1MB */ ++ reg = <0x800000 0x100000>; ++ label = "mc_dpc_blob"; ++ }; ++ ++ partition@6 { ++ /* AIOP FW, 4MB */ ++ reg = <0x900000 0x400000>; ++ label = "aiop_fw"; ++ }; ++ ++ partition@7 { ++ /* DebugServerFW, 2MB */ ++ reg = <0xd00000 0x200000>; ++ label = "DebugServer_fw"; ++ }; ++ }; ++ ++ nand@2,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "fsl,ifc-nand"; ++ reg = <0x2 0x0 0x10000>; ++ }; ++ ++ cpld@3,0 { ++ reg = <0x3 0x0 0x10000>; ++ compatible = "fsl,ls2088a-rdb-qixis", "fsl,fpga-qixis"; ++ }; ++}; ++ ++&ftm0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ pca9547@75 { ++ compatible = "nxp,pca9547"; ++ reg = <0x75>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ i2c-mux-never-disable; ++ i2c@1 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x01>; ++ rtc@68 { ++ compatible = "dallas,ds3232"; ++ reg = <0x68>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ adt7481@4c { ++ compatible = "adi,adt7461"; ++ reg = <0x4c>; ++ }; ++ }; ++ }; ++}; ++ ++&i2c1 { ++ status = "disabled"; ++}; ++ ++&i2c2 { ++ status = "disabled"; ++}; ++ ++&i2c3 { ++ status = "disabled"; ++}; ++ ++&dspi { ++ status = "okay"; ++ dflash0: n25q512a { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <0>; ++ }; ++}; ++ ++&qspi { ++ status = "disabled"; ++}; ++ ++&sata0 { ++ status = "okay"; ++}; ++ ++&sata1 { ++ status = "okay"; ++}; ++ ++&usb0 { ++ status = "okay"; ++}; ++ ++&usb1 { ++ status = "okay"; ++}; ++ ++&emdio1 { ++ /* CS4340 PHYs */ ++ mdio1_phy1: emdio1_phy@1 { ++ reg = <0x10>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy2: emdio1_phy@2 { ++ reg = <0x11>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy3: emdio1_phy@3 { ++ reg = <0x12>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy4: emdio1_phy@4 { ++ reg = <0x13>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++&emdio2 { ++ /* AQR405 PHYs */ ++ mdio2_phy1: emdio2_phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 1 0x4>; /* Level high type */ ++ reg = <0x0>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy2: emdio2_phy@2 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 2 0x4>; /* Level high type */ ++ reg = <0x1>; ++ phy-connection-type = "xfi"; ++ }; ++ 
mdio2_phy3: emdio2_phy@3 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 4 0x4>; /* Level high type */ ++ reg = <0x2>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy4: emdio2_phy@4 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 5 0x4>; /* Level high type */ ++ reg = <0x3>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++/* Update DPMAC connections to external PHYs, under the assumption of ++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. ++ */ ++&dpmac1 { ++ phy-handle = <&mdio1_phy1>; ++}; ++&dpmac2 { ++ phy-handle = <&mdio1_phy2>; ++}; ++&dpmac3 { ++ phy-handle = <&mdio1_phy3>; ++}; ++&dpmac4 { ++ phy-handle = <&mdio1_phy4>; ++}; ++&dpmac5 { ++ phy-handle = <&mdio2_phy1>; ++}; ++&dpmac6 { ++ phy-handle = <&mdio2_phy2>; ++}; ++&dpmac7 { ++ phy-handle = <&mdio2_phy3>; ++}; ++&dpmac8 { ++ phy-handle = <&mdio2_phy4>; ++}; +diff --git a/arch/arm64/boot/dts/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/fsl-ls2088a.dtsi +new file mode 100644 +index 0000000..892d426 +--- /dev/null ++++ b/arch/arm64/boot/dts/fsl-ls2088a.dtsi +@@ -0,0 +1,833 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-2088A family SoC. ++ * ++ * Copyright (C) 2016, Freescale Semiconductor ++ * ++ * Abhimanyu Saini ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++ ++/memreserve/ 0x80000000 0x00010000; ++ ++/ { ++ compatible = "fsl,ls2088a"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x0>; ++ clocks = <&clockgen 1 0>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x1>; ++ clocks = <&clockgen 1 0>; ++ }; ++ ++ cpu2: cpu@100 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x100>; ++ clocks = <&clockgen 1 1>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu3: cpu@101 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x101>; ++ clocks = <&clockgen 1 1>; ++ }; ++ ++ cpu4: cpu@200 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x200>; ++ clocks = <&clockgen 1 2>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu5: cpu@201 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x201>; ++ clocks = <&clockgen 1 2>; ++ }; ++ ++ cpu6: cpu@300 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x300>; ++ clocks = <&clockgen 1 3>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu7: cpu@301 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a72"; ++ reg = <0x0 0x301>; ++ clocks = <&clockgen 1 3>; ++ }; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ ++ }; ++ ++ gic: interrupt-controller@6000000 { ++ compatible = "arm,gic-v3"; ++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ ++ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ ++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ ++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ ++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ interrupts = <1 9 0x4>; ++ ++ its: gic-its@6020000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x6020000 0 0x20000>; ++ }; ++ }; ++ ++ sysclk: sysclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <100000000>; ++ clock-output-names = "sysclk"; ++ }; ++ ++ clockgen: clocking@1300000 { ++ compatible = "fsl,ls2088a-clockgen"; ++ reg = <0 0x1300000 0 0xa0000>; ++ #clock-cells = <2>; ++ clocks = <&sysclk>; ++ }; ++ ++ tmu: tmu@1f80000 { ++ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu", "fsl,ls2088a-tmu"; ++ reg = <0x0 0x1f80000 0x0 0x10000>; ++ interrupts = <0 23 0x4>; ++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; ++ fsl,tmu-calibration = <0x00000000 0x00000026 ++ 0x00000001 0x0000002d ++ 0x00000002 0x00000032 ++ 0x00000003 0x00000039 ++ 0x00000004 0x0000003f ++ 0x00000005 0x00000046 ++ 0x00000006 0x0000004d ++ 0x00000007 0x00000054 ++ 0x00000008 0x0000005a ++ 0x00000009 0x00000061 ++ 0x0000000a 0x0000006a ++ 0x0000000b 0x00000071 ++ ++ 0x00010000 0x00000025 ++ 0x00010001 0x0000002c ++ 0x00010002 0x00000035 ++ 0x00010003 0x0000003d ++ 0x00010004 0x00000045 ++ 0x00010005 0x0000004e ++ 0x00010006 0x00000057 ++ 0x00010007 0x00000061 ++ 0x00010008 0x0000006b ++ 0x00010009 0x00000076 ++ ++ 0x00020000 0x00000029 ++ 0x00020001 0x00000033 ++ 0x00020002 0x0000003d ++ 0x00020003 0x00000049 ++ 0x00020004 0x00000056 ++ 0x00020005 0x00000061 ++ 0x00020006 0x0000006d ++ ++ 0x00030000 0x00000021 ++ 0x00030001 0x0000002a ++ 0x00030002 0x0000003c ++ 0x00030003 0x0000004e>; ++ little-endian; ++ #thermal-sensor-cells = 
<1>; ++ }; ++ ++ thermal-zones { ++ cpu_thermal: cpu-thermal { ++ polling-delay-passive = <1000>; ++ polling-delay = <5000>; ++ ++ thermal-sensors = <&tmu 4>; ++ ++ trips { ++ cpu_alert: cpu-alert { ++ temperature = <75000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ cpu_crit: cpu-crit { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu0 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map1 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu2 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map2 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu4 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map3 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu6 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ }; ++ ++ serial0: serial@21c0500 { ++ device_type = "serial"; ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21c0500 0x0 0x100>; ++ clocks = <&clockgen 4 3>; ++ interrupts = <0 32 0x4>; /* Level high type */ ++ }; ++ ++ serial1: serial@21c0600 { ++ device_type = "serial"; ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21c0600 0x0 0x100>; ++ clocks = <&clockgen 4 3>; ++ interrupts = <0 32 0x4>; /* Level high type */ ++ }; ++ cluster1_core0_watchdog: wdt@c000000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc000000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster1_core1_watchdog: wdt@c010000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc010000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster2_core0_watchdog: wdt@c100000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc100000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster2_core1_watchdog: wdt@c110000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc110000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster3_core0_watchdog: wdt@c200000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc200000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster3_core1_watchdog: wdt@c210000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc210000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster4_core0_watchdog: wdt@c300000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc300000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ cluster4_core1_watchdog: wdt@c310000 { ++ compatible = "arm,sp805-wdt", "arm,primecell"; ++ reg = <0x0 0xc310000 0x0 0x1000>; ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "apb_pclk", "wdog_clk"; ++ }; ++ ++ gpio0: gpio@2300000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2300000 0x0 0x10000>; ++ interrupts = <0 36 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio1: gpio@2310000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2310000 0x0 0x10000>; ++ interrupts = <0 36 0x4>; /* Level high type */ ++ gpio-controller; ++ 
little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio2: gpio@2320000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2320000 0x0 0x10000>; ++ interrupts = <0 37 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio3: gpio@2330000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2330000 0x0 0x10000>; ++ interrupts = <0 37 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ /* TODO: WRIOP (CCSR?) */ ++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B96000 0x0 0x1000>; ++ device_type = "mdio"; /* TODO: is this necessary? */ ++ little-endian; /* force the driver in LE mode */ ++ ++ /* Not necessary on the QDS, but needed on the RDB */ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B97000 0x0 0x1000>; ++ device_type = "mdio"; /* TODO: is this necessary? */ ++ little-endian; /* force the driver in LE mode */ ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ ifc: ifc@2240000 { ++ compatible = "fsl,ifc", "simple-bus"; ++ reg = <0x0 0x2240000 0x0 0x20000>; ++ interrupts = <0 21 0x4>; /* Level high type */ ++ little-endian; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ++ ranges = <0 0 0x5 0x80000000 0x08000000 ++ 2 0 0x5 0x30000000 0x00010000 ++ 3 0 0x5 0x20000000 0x00010000>; ++ }; ++ ++ esdhc: esdhc@2140000 { ++ compatible = "fsl,ls2088a-esdhc", "fsl,ls2080a-esdhc", ++ "fsl,esdhc"; ++ reg = <0x0 0x2140000 0x0 0x10000>; ++ interrupts = <0 28 0x4>; /* Level high type */ ++ clock-frequency = <0>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ little-endian; ++ bus-width = <4>; ++ }; ++ ++ ftm0: ftm0@2800000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x2800000 0x0 0x10000>; ++ interrupts = <0 44 4>; ++ }; ++ ++ reset: reset@1E60000 { ++ compatible = "fsl,ls-reset"; ++ reg = <0x0 0x1E60000 0x0 0x10000>; ++ }; ++ ++ dspi: dspi@2100000 { ++ compatible = "fsl,ls2088a-dspi", "fsl,ls2085a-dspi", ++ "fsl,ls2080a-dspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2100000 0x0 0x10000>; ++ interrupts = <0 26 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ clock-names = "dspi"; ++ spi-num-chipselects = <5>; ++ bus-num = <0>; ++ }; ++ ++ i2c0: i2c@2000000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2000000 0x0 0x10000>; ++ interrupts = <0 34 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c1: i2c@2010000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2010000 0x0 0x10000>; ++ interrupts = <0 34 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c2: i2c@2020000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2020000 0x0 0x10000>; ++ interrupts = <0 35 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c3: i2c@2030000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2030000 0x0 0x10000>; ++ interrupts = <0 35 0x4>; /* Level high type */ ++ 
clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ qspi: quadspi@20c0000 { ++ compatible = "fsl,ls2088a-qspi", "fsl,ls2080a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20c0000 0x0 0x10000>, ++ <0x0 0x20000000 0x0 0x10000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = <0 25 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "qspi_en", "qspi"; ++ }; ++ ++ pcie1: pcie@3400000 { ++ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", ++ "fsl,ls2085a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ ++ 0x20 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 108 0x4>; /* Level high type */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ fsl,lut_diff; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, ++ <0000 0 0 2 &gic 0 0 0 110 4>, ++ <0000 0 0 3 &gic 0 0 0 111 4>, ++ <0000 0 0 4 &gic 0 0 0 112 4>; ++ }; ++ ++ pcie2: pcie@3500000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", ++ "fsl,ls2085a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ ++ 0x28 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 113 0x4>; /* Level high type */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ fsl,lut_diff; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, ++ <0000 0 0 2 &gic 0 0 0 115 4>, ++ <0000 0 0 3 &gic 0 0 0 116 4>, ++ <0000 0 0 4 &gic 0 0 0 117 4>; ++ }; ++ ++ pcie3: pcie@3600000 { ++ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", ++ "fsl,ls2085a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ ++ 0x30 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 118 0x4>; /* Level high type */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ fsl,lut_diff; ++ num-lanes = <8>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, ++ <0000 0 0 2 &gic 0 0 0 120 4>, ++ <0000 0 0 3 &gic 0 0 0 121 4>, ++ <0000 0 0 4 &gic 0 0 0 122 4>; ++ }; ++ ++ pcie4: pcie@3700000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", ++ "fsl,ls2085a-pcie", "snps,dw-pcie"; ++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ ++ 0x38 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", 
"config"; ++ interrupts = <0 123 0x4>; /* Level high type */ ++ interrupt-names = "aer"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ dma-coherent; ++ fsl,lut_diff; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, ++ <0000 0 0 2 &gic 0 0 0 125 4>, ++ <0000 0 0 3 &gic 0 0 0 126 4>, ++ <0000 0 0 4 &gic 0 0 0 127 4>; ++ }; ++ ++ sata0: sata@3200000 { ++ status = "disabled"; ++ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; ++ reg = <0x0 0x3200000 0x0 0x10000>; ++ interrupts = <0 133 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ sata1: sata@3210000 { ++ status = "disabled"; ++ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; ++ reg = <0x0 0x3210000 0x0 0x10000>; ++ interrupts = <0 136 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ usb0: usb3@3100000 { ++ status = "disabled"; ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3100000 0x0 0x10000>; ++ interrupts = <0 80 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ configure-gfladj; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ usb1: usb3@3110000 { ++ status = "disabled"; ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3110000 0x0 0x10000>; ++ interrupts = <0 81 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ configure-gfladj; ++ snps,dis_rxdet_inp3_quirk; ++ }; ++ ++ smmu: iommu@5000000 { ++ compatible = "arm,mmu-500"; ++ reg = <0 0x5000000 0 0x800000>; ++ #global-interrupts = <12>; ++ interrupts = <0 13 4>, /* global secure fault */ ++ <0 14 4>, /* combined secure interrupt */ ++ <0 15 4>, /* global non-secure fault */ ++ <0 16 4>, /* combined non-secure interrupt */ ++ /* performance counter interrupts 0-7 */ ++ <0 211 4>, ++ <0 212 4>, ++ <0 213 4>, ++ <0 214 4>, ++ <0 215 4>, ++ <0 216 4>, ++ <0 217 4>, ++ <0 218 4>, ++ /* per context interrupt, 64 interrupts */ ++ <0 146 4>, ++ <0 147 4>, ++ <0 148 4>, ++ <0 149 4>, ++ <0 150 4>, ++ <0 151 4>, ++ <0 152 4>, ++ <0 153 4>, ++ <0 154 4>, ++ <0 155 4>, ++ <0 156 4>, ++ <0 157 4>, ++ <0 158 4>, ++ <0 159 4>, ++ <0 160 4>, ++ <0 161 4>, ++ <0 162 4>, ++ <0 163 4>, ++ <0 164 4>, ++ <0 165 4>, ++ <0 166 4>, ++ <0 167 4>, ++ <0 168 4>, ++ <0 169 4>, ++ <0 170 4>, ++ <0 171 4>, ++ <0 172 4>, ++ <0 173 4>, ++ <0 174 4>, ++ <0 175 4>, ++ <0 176 4>, ++ <0 177 4>, ++ <0 178 4>, ++ <0 179 4>, ++ <0 180 4>, ++ <0 181 4>, ++ <0 182 4>, ++ <0 183 4>, ++ <0 184 4>, ++ <0 185 4>, ++ <0 186 4>, ++ <0 187 4>, ++ <0 188 4>, ++ <0 189 4>, ++ <0 190 4>, ++ <0 191 4>, ++ <0 192 4>, ++ <0 193 4>, ++ <0 194 4>, ++ <0 195 4>, ++ <0 196 4>, ++ <0 197 4>, ++ <0 198 4>, ++ <0 199 4>, ++ <0 200 4>, ++ <0 201 4>, ++ <0 202 4>, ++ <0 203 4>, ++ <0 204 4>, ++ <0 205 4>, ++ <0 206 4>, ++ <0 207 4>, ++ <0 208 4>, ++ <0 209 4>; ++ mmu-masters = <&fsl_mc 0x300 0>; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ ++ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ ++ <1 11 0x1>, /* Virtual PPI, edge triggered */ ++ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ ++ arm,reread-timer; ++ fsl,erratum-a008585; ++ }; ++ ++ fsl_mc: fsl-mc@80c000000 { ++ compatible = "fsl,qoriq-mc"; ++ #stream-id-cells = <2>; ++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal 
base */ ++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ ++ msi-parent = <&its>; ++ #address-cells = <3>; ++ #size-cells = <1>; ++ ++ /* ++ * Region type 0x0 - MC portals ++ * Region type 0x1 - QBMAN portals ++ */ ++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 ++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; ++ ++ /* ++ * Define the maximum number of MACs present on the SoC. ++ * They won't necessarily be all probed, since the ++ * Data Path Layout file and the MC firmware can put fewer ++ * actual DPMAC objects on the MC bus. ++ */ ++ dpmacs { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ dpmac1: dpmac@1 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <1>; ++ }; ++ dpmac2: dpmac@2 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <2>; ++ }; ++ dpmac3: dpmac@3 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <3>; ++ }; ++ dpmac4: dpmac@4 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <4>; ++ }; ++ dpmac5: dpmac@5 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <5>; ++ }; ++ dpmac6: dpmac@6 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <6>; ++ }; ++ dpmac7: dpmac@7 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <7>; ++ }; ++ dpmac8: dpmac@8 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <8>; ++ }; ++ dpmac9: dpmac@9 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <9>; ++ }; ++ dpmac10: dpmac@10 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xa>; ++ }; ++ dpmac11: dpmac@11 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xb>; ++ }; ++ dpmac12: dpmac@12 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xc>; ++ }; ++ dpmac13: dpmac@13 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xd>; ++ }; ++ dpmac14: dpmac@14 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xe>; ++ }; ++ dpmac15: dpmac@15 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xf>; ++ }; ++ dpmac16: dpmac@16 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0x10>; ++ }; ++ }; ++ }; ++ ++ ccn@4000000 { ++ compatible = "arm,ccn-504"; ++ reg = <0x0 0x04000000 0x0 0x01000000>; ++ interrupts = <0 12 4>; ++ }; ++ ++ memory@80000000 { ++ device_type = "memory"; ++ reg = <0x00000000 0x80000000 0 0x80000000>; ++ /* DRAM space 1 - 2 GB DRAM */ ++ }; ++}; +-- +2.1.0.27.g96db324 + diff --git a/packages/base/any/kernels/3.18.25/patches/series.arm64 b/packages/base/any/kernels/3.18.25/patches/series.arm64 index 77384486..0983a8d1 100644 --- a/packages/base/any/kernels/3.18.25/patches/series.arm64 +++ b/packages/base/any/kernels/3.18.25/patches/series.arm64 @@ -1,4 +1,4 @@ aufs.patch driver-support-intel-igb-bcm54616-phy.patch add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch - +add-nxp-arm64-ls2088ardb-device-tree.patch diff --git a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/PKG.yml b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/PKG.yml index 7131f0ba..02107aee 100644 --- a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/PKG.yml +++ b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/PKG.yml @@ -1 +1,27 @@ -!include $ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=arm64-nxp-ls2080ardb ARCH=arm64 TOOLCHAIN=aarch64-linux-gnu +variables: + platformbase: arm64-nxp-ls2080ardb + r0_install: /lib/platform-config/arm64-nxp-ls2080ardb-r0/onl + r1_install: /lib/platform-config/arm64-nxp-ls2088ardb-r1/onl + +common: + version: 1.0.0 + arch: arm64 + copyright: Copyright 2016 NXP Semiconductor, Inc. 
+ maintainer: support@bigswitch.com + changelog: Change changes changes., + + +packages: + - name: onlp-arm64-nxp-ls2080ardb-r0 + summary: ONLP Package for the arm64-nxp-ls2080ardb-r0 platform. + + files: + builds/lib/$BUILD_DIR/aarch64-linux-gnu/bin/libonlp-${platformbase}.so : ${r0_install}/lib/libonlp-${platformbase}.so + builds/onlpdump/$BUILD_DIR/aarch64-linux-gnu/bin/onlpdump : ${r0_install}/bin/ + + - name: onlp-arm64-nxp-ls2088ardb-r1 + summary: ONLP Package for the arm64-nxp-ls2088ardb-r1 platform. + + files: + builds/lib/$BUILD_DIR/aarch64-linux-gnu/bin/libonlp-${platformbase}.so : ${r1_install}/lib/libonlp-${platformbase}.so + builds/onlpdump/$BUILD_DIR/aarch64-linux-gnu/bin/onlpdump : ${r1_install}/bin/ diff --git a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/builds/src/arm64_nxp_ls2080ardb/module/src/sysi.c b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/builds/src/arm64_nxp_ls2080ardb/module/src/sysi.c new file mode 100644 index 00000000..cded44b4 --- /dev/null +++ b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/onlp/builds/src/arm64_nxp_ls2080ardb/module/src/sysi.c @@ -0,0 +1,67 @@ +/************************************************************ + * + * + * Copyright 2016 NXP Semiconductor, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include "arm64_nxp_ls2080ardb_log.h" + +const char* +onlp_sysi_platform_get(void) +{ + return "arm64-nxp-ls2088ardb-r1"; +} + +int +onlp_sysi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_sysi_onie_data_get(uint8_t** data, int* size) +{ + //TODO + return ONLP_STATUS_OK; +} + +void +onlp_sysi_onie_data_free(uint8_t* data) +{ + /* + * We returned a static array in onlp_sysi_onie_data_get() + * so no free operation is required. 
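+ * (Should onlp_sysi_onie_data_get() ever be changed to return a dynamically
+ * allocated buffer, this callback is where it would need to be freed.)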
+ */ +} + +void +onlp_sysi_platform_manage(void) +{ +} + +int +onlp_sysi_oids_get(onlp_oid_t* table, int max) +{ + memset(table, 0, max*sizeof(onlp_oid_t)); + return 0; +} diff --git a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/Makefile b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/PKG.yml b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/PKG.yml new file mode 100644 index 00000000..fae4a10c --- /dev/null +++ b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=arm64 VENDOR=nxp PLATFORM=arm64-nxp-ls2088ardb-r1 diff --git a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/lib/arm64-nxp-ls2088ardb-r1.yml b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/lib/arm64-nxp-ls2088ardb-r1.yml new file mode 100644 index 00000000..9abc04ba --- /dev/null +++ b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/lib/arm64-nxp-ls2088ardb-r1.yml @@ -0,0 +1,44 @@ +--- + +###################################################################### +# +# platform-config for LS2088ARDB +# +###################################################################### + +arm64-nxp-ls2088ardb-r1: + flat_image_tree: + kernel: + <<: *arm64-kernel + dtb: + =: arm64-nxp-ls2088ardb-r1.dtb + <<: *arm64-kernel-package + itb: + <<: *arm64-itb + + loader: + device: /dev/mmcblk0 + ##partition: /dev/mmcblk0p1 + loadaddr: 0xa0000000 + nos_bootcmds: *mmc_bootcmds + + environment: + - device: /dev/mtd2 + env_offset: 0x00000000 + env_size: 0x00020000 + sector_size: 0x00020000 + + installer: + - ONL-BOOT: + =: 128MiB + format: ext2 + ##format: raw + - ONL-CONFIG: + =: 128MiB + format: ext4 + - ONL-IMAGES: + =: 1GiB + format: ext4 + - ONL-DATA: + =: 100% + format: ext4 diff --git a/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/python/arm64_nxp_ls2088ardb_r1/__init__.py b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/python/arm64_nxp_ls2088ardb_r1/__init__.py new file mode 100644 index 00000000..e7ff9a4a --- /dev/null +++ b/packages/platforms/nxp/arm64/arm64-nxp-ls2080ardb/platform-config/r1/src/python/arm64_nxp_ls2088ardb_r1/__init__.py @@ -0,0 +1,8 @@ +from onl.platform.base import * +from onl.platform.nxp import * + +class OnlPlatform_arm64_nxp_ls2088ardb_r1(OnlPlatformNxp, + OnlPlatformPortConfig_8x1_8x10): + PLATFORM='arm64-nxp-ls2088ardb-r1' + MODEL="LS2088ARDB" + SYS_OBJECT_ID=".2088" From a9702b1888d43ce404d8e4eadecc9423fd8e78d7 Mon Sep 17 00:00:00 2001 From: Shengzhou Liu Date: Mon, 26 Sep 2016 22:37:43 +0800 Subject: [PATCH 196/255] Add support for NXP DPAA2.0 networking based on 3.18.25 --- ...-and-fsl-mc-support-based-on-3.18.25.patch | 35045 ++++++++++++++++ ...some-kernel-patches-based-on-3.18.25.patch | 11095 +++++ .../any/kernels/3.18.25/patches/series.arm64 | 2 + 3 files changed, 46142 insertions(+) create mode 100644 packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch create mode 100644 packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch diff --git 
a/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch b/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch new file mode 100644 index 00000000..5d493c1d --- /dev/null +++ b/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch @@ -0,0 +1,35045 @@ +From 340daa3e4a9851ab640062065eff4501e6f7cc61 Mon Sep 17 00:00:00 2001 +From: Shengzhou Liu +Date: Fri, 23 Sep 2016 13:45:59 +0800 +Subject: [PATCH 1/2] Add fsl-dpaa2 and fsl-mc support based on 3.18.25 + +This patch integrated a ton of patches to support DPAA2.0 & MC +networking which is used on LS2080A/LS2088A RDB. +--- + MAINTAINERS | 27 + + arch/arm64/include/asm/io.h | 1 + + arch/arm64/include/asm/pgtable.h | 1 + + drivers/net/ethernet/freescale/Kconfig | 8 +- + drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 +- + drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 2 +- + .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 4 +- + .../net/ethernet/freescale/fs_enet/mii-bitbang.c | 2 +- + drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4 +- + drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2 +- + drivers/net/ethernet/freescale/gianfar.c | 2 +- + drivers/net/ethernet/freescale/gianfar_ptp.c | 2 +- + drivers/net/ethernet/freescale/ucc_geth.c | 2 +- + drivers/net/ethernet/freescale/xgmac_mdio.c | 194 +- + drivers/net/phy/Kconfig | 5 + + drivers/net/phy/Makefile | 1 + + drivers/net/phy/aquantia.c | 201 ++ + drivers/net/phy/fsl_10gkr.c | 1467 ++++++++++ + drivers/net/phy/teranetics.c | 135 + + drivers/staging/Kconfig | 4 + + drivers/staging/Makefile | 2 + + drivers/staging/fsl-dpaa2/Kconfig | 12 + + drivers/staging/fsl-dpaa2/Makefile | 6 + + drivers/staging/fsl-dpaa2/ethernet/Kconfig | 36 + + drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 + + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 317 +++ + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 + + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2836 ++++++++++++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 377 +++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 861 ++++++ + drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++ + drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 ++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 +++++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 ++++++++++++++++++ + drivers/staging/fsl-dpaa2/mac/Kconfig | 24 + + drivers/staging/fsl-dpaa2/mac/Makefile | 10 + + drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++ + drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 +++ + drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++ + drivers/staging/fsl-dpaa2/mac/mac.c | 694 +++++ + drivers/staging/fsl-mc/Kconfig | 1 + + drivers/staging/fsl-mc/Makefile | 2 + + drivers/staging/fsl-mc/TODO | 13 + + drivers/staging/fsl-mc/bus/Kconfig | 45 + + drivers/staging/fsl-mc/bus/Makefile | 24 + + drivers/staging/fsl-mc/bus/dpbp.c | 459 ++++ + drivers/staging/fsl-mc/bus/dpcon.c | 407 +++ + drivers/staging/fsl-mc/bus/dpio/Makefile | 9 + + drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 401 +++ + drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 + + drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 ++++ + drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 ++++++ + drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 ++++ + drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 ++ + drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 + + 
drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 ++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 + + drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 +++++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 ++ + drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 ++ + drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 +++ + drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 + + drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++ + drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 56 + + drivers/staging/fsl-mc/bus/dpmcp.c | 318 +++ + drivers/staging/fsl-mc/bus/dpmcp.h | 323 +++ + drivers/staging/fsl-mc/bus/dpmng-cmd.h | 47 + + drivers/staging/fsl-mc/bus/dpmng.c | 85 + + drivers/staging/fsl-mc/bus/dprc-cmd.h | 87 + + drivers/staging/fsl-mc/bus/dprc-driver.c | 1084 ++++++++ + drivers/staging/fsl-mc/bus/dprc.c | 1218 +++++++++ + drivers/staging/fsl-mc/bus/mc-allocator.c | 716 +++++ + drivers/staging/fsl-mc/bus/mc-bus.c | 1347 ++++++++++ + drivers/staging/fsl-mc/bus/mc-ioctl.h | 25 + + drivers/staging/fsl-mc/bus/mc-restool.c | 312 +++ + drivers/staging/fsl-mc/bus/mc-sys.c | 677 +++++ + drivers/staging/fsl-mc/include/dpbp-cmd.h | 62 + + drivers/staging/fsl-mc/include/dpbp.h | 438 +++ + drivers/staging/fsl-mc/include/dpcon-cmd.h | 162 ++ + drivers/staging/fsl-mc/include/dpcon.h | 407 +++ + drivers/staging/fsl-mc/include/dpmac-cmd.h | 192 ++ + drivers/staging/fsl-mc/include/dpmac.h | 528 ++++ + drivers/staging/fsl-mc/include/dpmng.h | 80 + + drivers/staging/fsl-mc/include/dprc.h | 990 +++++++ + drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 ++++++ + drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 +++++ + drivers/staging/fsl-mc/include/mc-cmd.h | 133 + + drivers/staging/fsl-mc/include/mc-private.h | 168 ++ + drivers/staging/fsl-mc/include/mc-sys.h | 128 + + drivers/staging/fsl-mc/include/mc.h | 244 ++ + drivers/staging/fsl-mc/include/net.h | 481 ++++ + scripts/Makefile.dtbinst | 51 + + 94 files changed, 33975 insertions(+), 84 deletions(-) + create mode 100644 drivers/net/phy/aquantia.c + create mode 100644 drivers/net/phy/fsl_10gkr.c + create mode 100644 drivers/net/phy/teranetics.c + create mode 100644 drivers/staging/fsl-dpaa2/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c + create mode 100644 drivers/staging/fsl-mc/Kconfig 
+ create mode 100644 drivers/staging/fsl-mc/Makefile + create mode 100644 drivers/staging/fsl-mc/TODO + create mode 100644 drivers/staging/fsl-mc/bus/Kconfig + create mode 100644 drivers/staging/fsl-mc/bus/Makefile + create mode 100644 drivers/staging/fsl-mc/bus/dpbp.c + create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c + create mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.c + create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h + create mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpmng.c + create mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dprc-driver.c + create mode 100644 drivers/staging/fsl-mc/bus/dprc.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-allocator.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-bus.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h + create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-sys.c + create mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpbp.h + create mode 100644 drivers/staging/fsl-mc/include/dpcon-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpcon.h + create mode 100644 drivers/staging/fsl-mc/include/dpmac-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpmac.h + create mode 100644 drivers/staging/fsl-mc/include/dpmng.h + create mode 100644 drivers/staging/fsl-mc/include/dprc.h + create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h + create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h + create mode 100644 drivers/staging/fsl-mc/include/mc-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/mc-private.h + create mode 100644 drivers/staging/fsl-mc/include/mc-sys.h + create mode 100644 drivers/staging/fsl-mc/include/mc.h + create mode 100644 drivers/staging/fsl-mc/include/net.h + create mode 100644 scripts/Makefile.dtbinst + +diff --git a/MAINTAINERS b/MAINTAINERS +index 1ae7362..63a796c 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -3973,6 +3973,33 @@ F: sound/soc/fsl/fsl* + F: sound/soc/fsl/imx* + F: sound/soc/fsl/mpc8610_hpcd.c + ++FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER ++M: J. 
German Rivera ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-mc/ ++ ++FREESCALE DPAA2 ETH DRIVER ++M: Ioana Radulescu ++M: Bogdan Hamciuc ++M: Cristian Sovaiala ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-dpaa2/ethernet/ ++ ++FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER ++M: Lijun Pan ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-mc/bus/mc-ioctl.h ++F: drivers/staging/fsl-mc/bus/mc-restool.c ++ ++FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER ++M: Alex Marginean ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-dpaa2/mac/ ++ + FREEVXFS FILESYSTEM + M: Christoph Hellwig + W: ftp://ftp.openlinux.org/pub/people/hch/vxfs +diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h +index 75825b6..f58e31a 100644 +--- a/arch/arm64/include/asm/io.h ++++ b/arch/arm64/include/asm/io.h +@@ -249,6 +249,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); + #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) + #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) + #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) ++#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS)) + #define iounmap __iounmap + + #define ARCH_HAS_IOREMAP_WC +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 41a43bf..009f690 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); + #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) + #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) + #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) ++#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) + + #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) +diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig +index 2703083..0c1c97d 100644 +--- a/drivers/net/ethernet/freescale/Kconfig ++++ b/drivers/net/ethernet/freescale/Kconfig +@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE + default y + depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ + M523x || M527x || M5272 || M528x || M520x || M532x || \ +- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) ++ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ ++ ARCH_LAYERSCAPE + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from +@@ -58,18 +59,17 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig" + + config FSL_PQ_MDIO + tristate "Freescale PQ MDIO" +- depends on FSL_SOC + select PHYLIB + ---help--- + This driver supports the MDIO bus used by the gianfar and UCC drivers. + + config FSL_XGMAC_MDIO + tristate "Freescale XGMAC MDIO" +- depends on FSL_SOC + select PHYLIB + select OF_MDIO + ---help--- +- This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 
++ This driver supports the MDIO bus on the Fman 10G Ethernet MACs and ++ on mEMAC (which supports both Clauses 22 and 45) + + config UCC_GETH + tristate "Freescale QE Gigabit Ethernet" +diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c +index ff55fbb..76ff046 100644 +--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c ++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c +@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op) + } + #endif + +-static struct of_device_id mpc52xx_fec_match[] = { ++static const struct of_device_id mpc52xx_fec_match[] = { + { .compatible = "fsl,mpc5200b-fec", }, + { .compatible = "fsl,mpc5200-fec", }, + { .compatible = "mpc5200-fec", }, +diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +index e052890..1e647be 100644 +--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c ++++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) + return 0; + } + +-static struct of_device_id mpc52xx_fec_mdio_match[] = { ++static const struct of_device_id mpc52xx_fec_mdio_match[] = { + { .compatible = "fsl,mpc5200b-mdio", }, + { .compatible = "fsl,mpc5200-mdio", }, + { .compatible = "mpc5200b-fec-phy", }, +diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +index c92c3b7..dc0da6c 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c ++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +@@ -886,7 +886,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { + #endif + }; + +-static struct of_device_id fs_enet_match[]; ++static const struct of_device_id fs_enet_match[]; + static int fs_enet_probe(struct platform_device *ofdev) + { + const struct of_device_id *match; +@@ -1047,7 +1047,7 @@ static int fs_enet_remove(struct platform_device *ofdev) + return 0; + } + +-static struct of_device_id fs_enet_match[] = { ++static const struct of_device_id fs_enet_match[] = { + #ifdef CONFIG_FS_ENET_HAS_SCC + { + .compatible = "fsl,cpm1-scc-enet", +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +index 3d3fde6..9ec396b 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) + return 0; + } + +-static struct of_device_id fs_enet_mdio_bb_match[] = { ++static const struct of_device_id fs_enet_mdio_bb_match[] = { + { + .compatible = "fsl,cpm2-mdio-bitbang", + }, +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +index ebf5d64..72205b0 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, + + } + +-static struct of_device_id fs_enet_mdio_fec_match[]; ++static const struct of_device_id fs_enet_mdio_fec_match[]; + static int fs_enet_mdio_probe(struct platform_device *ofdev) + { + const struct of_device_id *match; +@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) + return 0; + } + +-static struct of_device_id fs_enet_mdio_fec_match[] = { ++static const struct of_device_id fs_enet_mdio_fec_match[] = { + 
{ + .compatible = "fsl,pq1-fec-mdio", + }, +diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c +index 964c6bf..f94fa63 100644 +--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c ++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c +@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) + + #endif + +-static struct of_device_id fsl_pq_mdio_match[] = { ++static const struct of_device_id fsl_pq_mdio_match[] = { + #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) + { + .compatible = "fsl,gianfar-tbi", +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index 4fdf0aa..a4a7396 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -3455,7 +3455,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) + phy_print_status(phydev); + } + +-static struct of_device_id gfar_match[] = ++static const struct of_device_id gfar_match[] = + { + { + .type = "network", +diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c +index bb56800..c7c75de 100644 +--- a/drivers/net/ethernet/freescale/gianfar_ptp.c ++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c +@@ -554,7 +554,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) + return 0; + } + +-static struct of_device_id match_table[] = { ++static const struct of_device_id match_table[] = { + { .compatible = "fsl,etsec-ptp" }, + {}, + }; +diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c +index 3cf0478..741a7d4 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -3930,7 +3930,7 @@ static int ucc_geth_remove(struct platform_device* ofdev) + return 0; + } + +-static struct of_device_id ucc_geth_match[] = { ++static const struct of_device_id ucc_geth_match[] = { + { + .type = "network", + .compatible = "ucc_geth", +diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c +index 6e7db66..7b8fe86 100644 +--- a/drivers/net/ethernet/freescale/xgmac_mdio.c ++++ b/drivers/net/ethernet/freescale/xgmac_mdio.c +@@ -32,31 +32,62 @@ struct tgec_mdio_controller { + __be32 mdio_addr; /* MDIO address */ + } __packed; + ++#define MDIO_STAT_ENC BIT(6) + #define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) +-#define MDIO_STAT_BSY (1 << 0) +-#define MDIO_STAT_RD_ER (1 << 1) ++#define MDIO_STAT_BSY BIT(0) ++#define MDIO_STAT_RD_ER BIT(1) + #define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) + #define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) +-#define MDIO_CTL_PRE_DIS (1 << 10) +-#define MDIO_CTL_SCAN_EN (1 << 11) +-#define MDIO_CTL_POST_INC (1 << 14) +-#define MDIO_CTL_READ (1 << 15) ++#define MDIO_CTL_PRE_DIS BIT(10) ++#define MDIO_CTL_SCAN_EN BIT(11) ++#define MDIO_CTL_POST_INC BIT(14) ++#define MDIO_CTL_READ BIT(15) + + #define MDIO_DATA(x) (x & 0xffff) +-#define MDIO_DATA_BSY (1 << 31) ++#define MDIO_DATA_BSY BIT(31) ++ ++struct mdio_fsl_priv { ++ struct tgec_mdio_controller __iomem *mdio_base; ++ bool is_little_endian; ++}; ++ ++static u32 xgmac_read32(void __iomem *regs, ++ bool is_little_endian) ++{ ++ if (is_little_endian) ++ return ioread32(regs); ++ else ++ return ioread32be(regs); ++} ++ ++static void xgmac_write32(u32 value, ++ void __iomem *regs, ++ bool is_little_endian) ++{ ++ if (is_little_endian) ++ iowrite32(value, regs); ++ else ++ iowrite32be(value, regs); ++} + 
+ /* + * Wait until the MDIO bus is free + */ + static int xgmac_wait_until_free(struct device *dev, +- struct tgec_mdio_controller __iomem *regs) ++ struct tgec_mdio_controller __iomem *regs, ++ bool is_little_endian) + { +- uint32_t status; ++ unsigned int timeout; + + /* Wait till the bus is free */ +- status = spin_event_timeout( +- !((in_be32(®s->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); +- if (!status) { ++ timeout = TIMEOUT; ++ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & ++ MDIO_STAT_BSY) && timeout) { ++ cpu_relax(); ++ timeout--; ++ } ++ ++ if (!timeout) { + dev_err(dev, "timeout waiting for bus to be free\n"); + return -ETIMEDOUT; + } +@@ -68,14 +99,20 @@ static int xgmac_wait_until_free(struct device *dev, + * Wait till the MDIO read or write operation is complete + */ + static int xgmac_wait_until_done(struct device *dev, +- struct tgec_mdio_controller __iomem *regs) ++ struct tgec_mdio_controller __iomem *regs, ++ bool is_little_endian) + { +- uint32_t status; ++ unsigned int timeout; + + /* Wait till the MDIO write is complete */ +- status = spin_event_timeout( +- !((in_be32(®s->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); +- if (!status) { ++ timeout = TIMEOUT; ++ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & ++ MDIO_STAT_BSY) && timeout) { ++ cpu_relax(); ++ timeout--; ++ } ++ ++ if (!timeout) { + dev_err(dev, "timeout waiting for operation to complete\n"); + return -ETIMEDOUT; + } +@@ -90,32 +127,47 @@ static int xgmac_wait_until_done(struct device *dev, + */ + static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) + { +- struct tgec_mdio_controller __iomem *regs = bus->priv; +- uint16_t dev_addr = regnum >> 16; ++ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; ++ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; ++ uint16_t dev_addr; ++ u32 mdio_ctl, mdio_stat; + int ret; ++ bool endian = priv->is_little_endian; ++ ++ mdio_stat = xgmac_read32(®s->mdio_stat, endian); ++ if (regnum & MII_ADDR_C45) { ++ /* Clause 45 (ie 10G) */ ++ dev_addr = (regnum >> 16) & 0x1f; ++ mdio_stat |= MDIO_STAT_ENC; ++ } else { ++ /* Clause 22 (ie 1G) */ ++ dev_addr = regnum & 0x1f; ++ mdio_stat &= ~MDIO_STAT_ENC; ++ } + +- /* Setup the MII Mgmt clock speed */ +- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); ++ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the port and dev addr */ +- out_be32(®s->mdio_ctl, +- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); ++ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); ++ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Set the register address */ +- out_be32(®s->mdio_addr, regnum & 0xffff); ++ if (regnum & MII_ADDR_C45) { ++ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); +- if (ret) +- return ret; ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); ++ if (ret) ++ return ret; ++ } + + /* Write the value to the register */ +- out_be32(®s->mdio_data, MDIO_DATA(value)); ++ xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); + +- ret = xgmac_wait_until_done(&bus->dev, regs); ++ ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + +@@ -129,74 +181,70 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val + */ + static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) 
+ { +- struct tgec_mdio_controller __iomem *regs = bus->priv; +- uint16_t dev_addr = regnum >> 16; ++ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; ++ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; ++ uint16_t dev_addr; ++ uint32_t mdio_stat; + uint32_t mdio_ctl; + uint16_t value; + int ret; ++ bool endian = priv->is_little_endian; ++ ++ mdio_stat = xgmac_read32(®s->mdio_stat, endian); ++ if (regnum & MII_ADDR_C45) { ++ dev_addr = (regnum >> 16) & 0x1f; ++ mdio_stat |= MDIO_STAT_ENC; ++ } else { ++ dev_addr = regnum & 0x1f; ++ mdio_stat &= ~MDIO_STAT_ENC; ++ } + +- /* Setup the MII Mgmt clock speed */ +- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); ++ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the Port and Device Addrs */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); +- out_be32(®s->mdio_ctl, mdio_ctl); ++ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Set the register address */ +- out_be32(®s->mdio_addr, regnum & 0xffff); ++ if (regnum & MII_ADDR_C45) { ++ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); +- if (ret) +- return ret; ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); ++ if (ret) ++ return ret; ++ } + + /* Initiate the read */ +- out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); ++ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); + +- ret = xgmac_wait_until_done(&bus->dev, regs); ++ ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Return all Fs if nothing was there */ +- if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) { ++ if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + dev_err(&bus->dev, + "Error while reading PHY%d reg at %d.%hhu\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + +- value = in_be32(®s->mdio_data) & 0xffff; ++ value = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", value); + + return value; + } + +-/* Reset the MIIM registers, and wait for the bus to free */ +-static int xgmac_mdio_reset(struct mii_bus *bus) +-{ +- struct tgec_mdio_controller __iomem *regs = bus->priv; +- int ret; +- +- mutex_lock(&bus->mdio_lock); +- +- /* Setup the MII Mgmt clock speed */ +- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); +- +- ret = xgmac_wait_until_free(&bus->dev, regs); +- +- mutex_unlock(&bus->mdio_lock); +- +- return ret; +-} +- + static int xgmac_mdio_probe(struct platform_device *pdev) + { + struct device_node *np = pdev->dev.of_node; + struct mii_bus *bus; + struct resource res; ++ struct mdio_fsl_priv *priv; + int ret; + + ret = of_address_to_resource(np, 0, &res); +@@ -205,25 +253,30 @@ static int xgmac_mdio_probe(struct platform_device *pdev) + return ret; + } + +- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); ++ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale XGMAC MDIO Bus"; + bus->read = xgmac_mdio_read; + bus->write = xgmac_mdio_write; +- bus->reset = xgmac_mdio_reset; +- bus->irq = bus->priv; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); + + /* Set the PHY base address */ +- bus->priv = of_iomap(np, 0); +- if (!bus->priv) { ++ priv = bus->priv; ++ priv->mdio_base = of_iomap(np, 0); ++ if (!priv->mdio_base) { + ret = -ENOMEM; + goto err_ioremap; + } + 
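++	/* The optional "little-endian" device-tree property checked below
++	 * selects the byte order used by the xgmac_read32()/xgmac_write32()
++	 * accessors for this MDIO register block.
++	 */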
++ if (of_get_property(pdev->dev.of_node, ++ "little-endian", NULL)) ++ priv->is_little_endian = true; ++ else ++ priv->is_little_endian = false; ++ + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "cannot register MDIO bus\n"); +@@ -235,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) + return 0; + + err_registration: +- iounmap(bus->priv); ++ iounmap(priv->mdio_base); + + err_ioremap: + mdiobus_free(bus); +@@ -254,10 +307,13 @@ static int xgmac_mdio_remove(struct platform_device *pdev) + return 0; + } + +-static struct of_device_id xgmac_mdio_match[] = { ++static const struct of_device_id xgmac_mdio_match[] = { + { + .compatible = "fsl,fman-xmdio", + }, ++ { ++ .compatible = "fsl,fman-memac-mdio", ++ }, + {}, + }; + MODULE_DEVICE_TABLE(of, xgmac_mdio_match); +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig +index 75472cf..2973c60 100644 +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -14,6 +14,11 @@ if PHYLIB + + comment "MII PHY device drivers" + ++config AQUANTIA_PHY ++ tristate "Drivers for the Aquantia PHYs" ++ ---help--- ++ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 ++ + config AT803X_PHY + tristate "Drivers for Atheros AT803X PHYs" + ---help--- +diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile +index eb3b18b..b5c8f9f 100644 +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -3,6 +3,7 @@ + libphy-objs := phy.o phy_device.o mdio_bus.o + + obj-$(CONFIG_PHYLIB) += libphy.o ++obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o + obj-$(CONFIG_MARVELL_PHY) += marvell.o + obj-$(CONFIG_DAVICOM_PHY) += davicom.o + obj-$(CONFIG_CICADA_PHY) += cicada.o +diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c +new file mode 100644 +index 0000000..d6111af +--- /dev/null ++++ b/drivers/net/phy/aquantia.c +@@ -0,0 +1,201 @@ ++/* ++ * Driver for Aquantia PHY ++ * ++ * Author: Shaohui Xie ++ * ++ * Copyright 2015 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public License ++ * version 2. This program is licensed "as is" without any warranty of any ++ * kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PHY_ID_AQ1202 0x03a1b445 ++#define PHY_ID_AQ2104 0x03a1b460 ++#define PHY_ID_AQR105 0x03a1b4a2 ++#define PHY_ID_AQR405 0x03a1b4b0 ++ ++#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ ++ SUPPORTED_1000baseT_Full | \ ++ SUPPORTED_100baseT_Full | \ ++ PHY_DEFAULT_FEATURES) ++ ++static int aquantia_config_aneg(struct phy_device *phydev) ++{ ++ phydev->supported = PHY_AQUANTIA_FEATURES; ++ phydev->advertising = phydev->supported; ++ ++ return 0; ++} ++ ++static int aquantia_aneg_done(struct phy_device *phydev) ++{ ++ int reg; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); ++} ++ ++static int aquantia_config_intr(struct phy_device *phydev) ++{ ++ int err; ++ ++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { ++ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001); ++ } else { ++ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0); ++ } ++ ++ return err; ++} ++ ++static int aquantia_ack_interrupt(struct phy_device *phydev) ++{ ++ int reg; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01); ++ return (reg < 0) ? reg : 0; ++} ++ ++static int aquantia_read_status(struct phy_device *phydev) ++{ ++ int reg; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ if (reg & MDIO_STAT1_LSTATUS) ++ phydev->link = 1; ++ else ++ phydev->link = 0; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); ++ mdelay(10); ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); ++ ++ switch (reg) { ++ case 0x9: ++ phydev->speed = SPEED_2500; ++ break; ++ case 0x5: ++ phydev->speed = SPEED_1000; ++ break; ++ case 0x3: ++ phydev->speed = SPEED_100; ++ break; ++ case 0x7: ++ default: ++ phydev->speed = SPEED_10000; ++ break; ++ } ++ phydev->duplex = DUPLEX_FULL; ++ ++ return 0; ++} ++ ++static struct phy_driver aquantia_driver[] = { ++{ ++ .phy_id = PHY_ID_AQ1202, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQ1202", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ ++ .phy_id = PHY_ID_AQ2104, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQ2104", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ ++ .phy_id = PHY_ID_AQR105, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR105", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ ++ .phy_id = PHY_ID_AQR405, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR405", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++}; ++ ++static int __init aquantia_init(void) ++{ ++ return phy_drivers_register(aquantia_driver, ++ ARRAY_SIZE(aquantia_driver)); ++} ++ ++static void __exit aquantia_exit(void) ++{ ++ return phy_drivers_unregister(aquantia_driver, ++ ARRAY_SIZE(aquantia_driver)); ++} ++ 
++module_init(aquantia_init); ++module_exit(aquantia_exit); ++ ++static struct mdio_device_id __maybe_unused aquantia_tbl[] = { ++ { PHY_ID_AQ1202, 0xfffffff0 }, ++ { PHY_ID_AQ2104, 0xfffffff0 }, ++ { PHY_ID_AQR105, 0xfffffff0 }, ++ { PHY_ID_AQR405, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, aquantia_tbl); ++ ++MODULE_DESCRIPTION("Aquantia PHY driver"); ++MODULE_AUTHOR("Shaohui Xie "); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/net/phy/fsl_10gkr.c b/drivers/net/phy/fsl_10gkr.c +new file mode 100644 +index 0000000..3713726 +--- /dev/null ++++ b/drivers/net/phy/fsl_10gkr.c +@@ -0,0 +1,1467 @@ ++/* Freescale XFI 10GBASE-KR driver. ++ * Author: Shaohui Xie ++ * ++ * Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * Licensed under the GPL-2 or later. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define FSL_XFI_PCS_PHY_ID 0x7C000012 ++#define FSL_XFI_PCS_PHY_ID2 0x0083e400 ++ ++/* Freescale XFI PCS MMD */ ++#define FSL_XFI_PMD 0x1 ++#define FSL_XFI_PCS 0x3 ++#define FSL_XFI_AN 0x7 ++#define FSL_XFI_VS1 0x1e ++ ++/* Freescale XFI PMD registers */ ++#define FSL_XFI_PMD_CTRL 0x0 ++#define FSL_XFI_KR_PMD_CTRL 0x0096 ++#define FSL_XFI_KR_PMD_STATUS 0x0097 ++#define FSL_XFI_KR_LP_CU 0x0098 ++#define FSL_XFI_KR_LP_STATUS 0x0099 ++#define FSL_XFI_KR_LD_CU 0x009a ++#define FSL_XFI_KR_LD_STATUS 0x009b ++ ++/* PMD define */ ++#define PMD_RESET 0x1 ++#define PMD_STATUS_SUP_STAT 0x4 ++#define PMD_STATUS_FRAME_LOCK 0x2 ++#define TRAIN_EN 0x3 ++#define TRAIN_DISABLE 0x1 ++#define RX_STAT 0x1 ++ ++/* Freescale XFI PCS registers */ ++#define FSL_XFI_PCS_CTRL 0x0 ++#define FSL_XFI_PCS_STATUS 0x1 ++ ++/* Freescale XFI Auto-Negotiation Registers */ ++#define FSL_XFI_AN_CTRL 0x0000 ++#define FSL_XFI_LNK_STATUS 0x0001 ++#define FSL_XFI_AN_AD_1 0x0011 ++#define FSL_XFI_BP_STATUS 0x0030 ++ ++#define XFI_AN_AD1 0x85 ++#define XF_AN_RESTART 0x1200 ++#define XFI_AN_LNK_STAT_UP 0x4 ++ ++/* Freescale XFI Vendor-Specific 1 Registers */ ++#define FSL_XFI_PCS_INTR_EVENT 0x0002 ++#define FSL_XFI_PCS_INTR_MASK 0x0003 ++#define FSL_XFI_AN_INTR_EVENT 0x0004 ++#define FSL_XFI_AN_INTR_MASK 0x0005 ++#define FSL_XFI_LT_INTR_EVENT 0x0006 ++#define FSL_XFI_LT_INTR_MASK 0x0007 ++ ++/* C(-1) */ ++#define BIN_M1 0 ++/* C(1) */ ++#define BIN_LONG 1 ++#define BIN_M1_SEL 6 ++#define BIN_Long_SEL 7 ++#define CDR_SEL_MASK 0x00070000 ++#define BIN_SNAPSHOT_NUM 5 ++#define BIN_M1_THRESHOLD 3 ++#define BIN_LONG_THRESHOLD 2 ++ ++#define PRE_COE_MASK 0x03c00000 ++#define POST_COE_MASK 0x001f0000 ++#define ZERO_COE_MASK 0x00003f00 ++#define PRE_COE_SHIFT 22 ++#define POST_COE_SHIFT 16 ++#define ZERO_COE_SHIFT 8 ++ ++#define PRE_COE_MAX 0x0 ++#define PRE_COE_MIN 0x8 ++#define POST_COE_MAX 0x0 ++#define POST_COE_MIN 0x10 ++#define ZERO_COE_MAX 0x30 ++#define ZERO_COE_MIN 0x0 ++ ++#define TECR0_INIT 0x24200000 ++#define RATIO_PREQ 0x3 ++#define RATIO_PST1Q 0xd ++#define RATIO_EQ 0x20 ++ ++#define GCR1_CTL_SNP_START_MASK 0x00002000 ++#define GCR1_SNP_START_MASK 0x00000040 ++#define RECR1_SNP_DONE_MASK 0x00000004 ++#define RECR1_CTL_SNP_DONE_MASK 0x00000002 ++#define TCSR1_SNP_DATA_MASK 0x0000ffc0 ++#define TCSR1_SNP_DATA_SHIFT 6 ++#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100 ++ ++#define RECR1_GAINK2_MASK 0x0f000000 ++#define RECR1_GAINK2_SHIFT 24 ++#define RECR1_GAINK3_MASK 0x000f0000 ++#define RECR1_GAINK3_SHIFT 16 ++#define RECR1_OFFSET_MASK 0x00003f80 ++#define RECR1_OFFSET_SHIFT 7 ++#define RECR1_BLW_MASK 0x00000f80 ++#define RECR1_BLW_SHIFT 7 ++#define 
EYE_CTRL_SHIFT 12 ++#define BASE_WAND_SHIFT 10 ++ ++#define XGKR_TIMEOUT 1050 ++#define AN_ABILITY_MASK 0x9 ++#define AN_10GKR_MASK 0x8 ++#define LT_10GKR_MASK 0x4 ++#define TRAIN_FAIL 0x8 ++ ++#define INCREMENT 1 ++#define DECREMENT 2 ++#define TIMEOUT_LONG 3 ++#define TIMEOUT_M1 3 ++ ++#define RX_READY_MASK 0x8000 ++#define PRESET_MASK 0x2000 ++#define INIT_MASK 0x1000 ++#define COP1_MASK 0x30 ++#define COP1_SHIFT 4 ++#define COZ_MASK 0xc ++#define COZ_SHIFT 2 ++#define COM1_MASK 0x3 ++#define COM1_SHIFT 0 ++#define REQUEST_MASK 0x3f ++#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \ ++ COP1_MASK | COZ_MASK | COM1_MASK) ++ ++#define FSL_SERDES_INSTANCE1_BASE 0xffe0ea000 ++#define FSL_SERDES_INSTANCE2_BASE 0xffe0eb000 ++#define FSL_LANE_A_BASE 0x800 ++#define FSL_LANE_B_BASE 0x840 ++#define FSL_LANE_C_BASE 0x880 ++#define FSL_LANE_D_BASE 0x8C0 ++#define FSL_LANE_E_BASE 0x900 ++#define FSL_LANE_F_BASE 0x940 ++#define FSL_LANE_G_BASE 0x980 ++#define FSL_LANE_H_BASE 0x9C0 ++#define GCR0_RESET_MASK 0x600000 ++ ++#define NEW_ALGORITHM_TRAIN_TX ++#ifdef NEW_ALGORITHM_TRAIN_TX ++#define FORCE_INC_COP1_NUMBER 0 ++#define FORCE_INC_COM1_NUMBER 1 ++#endif ++ ++enum fsl_xgkr_driver { ++ FSL_XGKR_REV1, ++ FSL_XGKR_REV2, ++ FSL_XGKR_INV ++}; ++ ++static struct phy_driver fsl_xgkr_driver[FSL_XGKR_INV]; ++ ++enum coe_filed { ++ COE_COP1, ++ COE_COZ, ++ COE_COM ++}; ++ ++enum coe_update { ++ COE_NOTUPDATED, ++ COE_UPDATED, ++ COE_MIN, ++ COE_MAX, ++ COE_INV ++}; ++ ++enum serdes_inst { ++ SERDES_1, ++ SERDES_2, ++ SERDES_MAX ++}; ++ ++enum lane_inst { ++ LANE_A, ++ LANE_B, ++ LANE_C, ++ LANE_D, ++ LANE_E, ++ LANE_F, ++ LANE_G, ++ LANE_H, ++ LANE_MAX ++}; ++ ++struct serdes_map { ++ const char *serdes_name; ++ unsigned long serdes_base; ++}; ++ ++struct lane_map { ++ const char *lane_name; ++ unsigned long lane_base; ++}; ++ ++const struct serdes_map s_map[SERDES_MAX] = { ++ {"serdes-1", FSL_SERDES_INSTANCE1_BASE}, ++ {"serdes-2", FSL_SERDES_INSTANCE2_BASE} ++}; ++ ++const struct lane_map l_map[LANE_MAX] = { ++ {"lane-a", FSL_LANE_A_BASE}, ++ {"lane-b", FSL_LANE_B_BASE}, ++ {"lane-c", FSL_LANE_C_BASE}, ++ {"lane-d", FSL_LANE_D_BASE}, ++ {"lane-e", FSL_LANE_E_BASE}, ++ {"lane-f", FSL_LANE_F_BASE}, ++ {"lane-g", FSL_LANE_G_BASE}, ++ {"lane-h", FSL_LANE_H_BASE} ++}; ++ ++struct per_lane_ctrl_status { ++ __be32 gcr0; /* 0x.000 - General Control Register 0 */ ++ __be32 gcr1; /* 0x.004 - General Control Register 1 */ ++ __be32 gcr2; /* 0x.008 - General Control Register 2 */ ++ __be32 resv1; /* 0x.00C - Reserved */ ++ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */ ++ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */ ++ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */ ++ __be32 resv2; /* 0x.01C - Reserved */ ++ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */ ++ __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */ ++ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */ ++ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */ ++ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */ ++ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */ ++ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */ ++ __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */ ++}; ++ ++struct training_state_machine { ++ bool bin_m1_late_early; ++ bool bin_long_late_early; ++ bool bin_m1_stop; ++ bool bin_long_stop; ++ bool tx_complete; ++ bool an_ok; ++ bool link_up; ++ bool running; ++ bool sent_init; ++ int m1_min_max_cnt; ++ int long_min_max_cnt; 
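++	/* When NEW_ALGORITHM_TRAIN_TX is defined, pre_inc/post_inc below count
++	 * the forced C(-1)/C(+1) increment requests (FORCE_INC_COM1_NUMBER and
++	 * FORCE_INC_COP1_NUMBER) that train_tx() issues before the normal
++	 * BinM1/BinLong coefficient search.
++	 */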
++#ifdef NEW_ALGORITHM_TRAIN_TX ++ int pre_inc; ++ int post_inc; ++#endif ++}; ++ ++struct fsl_xgkr_inst { ++ void *reg_base; ++ struct mii_bus *bus; ++ struct phy_device *phydev; ++ struct training_state_machine t_s_m; ++ u32 ld_update; ++ u32 ld_status; ++ u32 ratio_preq; ++ u32 ratio_pst1q; ++ u32 adpt_eq; ++}; ++ ++struct fsl_xgkr_wk { ++ struct work_struct xgkr_wk; ++ struct list_head xgkr_list; ++ struct fsl_xgkr_inst *xgkr_inst; ++}; ++ ++LIST_HEAD(fsl_xgkr_list); ++ ++static struct timer_list xgkr_timer; ++static int fire_timer; ++static struct workqueue_struct *xgkr_wq; ++ ++static void init_state_machine(struct training_state_machine *s_m) ++{ ++ s_m->bin_m1_late_early = true; ++ s_m->bin_long_late_early = false; ++ s_m->bin_m1_stop = false; ++ s_m->bin_long_stop = false; ++ s_m->tx_complete = false; ++ s_m->an_ok = false; ++ s_m->link_up = false; ++ s_m->running = false; ++ s_m->sent_init = false; ++ s_m->m1_min_max_cnt = 0; ++ s_m->long_min_max_cnt = 0; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ s_m->pre_inc = FORCE_INC_COM1_NUMBER; ++ s_m->post_inc = FORCE_INC_COP1_NUMBER; ++#endif ++} ++ ++void tune_tecr0(struct fsl_xgkr_inst *inst) ++{ ++ struct per_lane_ctrl_status *reg_base; ++ u32 val; ++ ++ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; ++ ++ val = TECR0_INIT | ++ inst->adpt_eq << ZERO_COE_SHIFT | ++ inst->ratio_preq << PRE_COE_SHIFT | ++ inst->ratio_pst1q << POST_COE_SHIFT; ++ ++ /* reset the lane */ ++ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++ iowrite32be(val, ®_base->tecr0); ++ udelay(1); ++ /* unreset the lane */ ++ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++} ++ ++static void start_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_EN); ++} ++ ++static void stop_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); ++} ++ ++static void reset_gcr0(struct fsl_xgkr_inst *inst) ++{ ++ struct per_lane_ctrl_status *reg_base; ++ ++ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; ++ ++ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++} ++ ++static void reset_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_PMD_CTRL, PMD_RESET); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_CU, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_STATUS, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS, 0); ++} ++ ++static void start_an(struct phy_device *phydev) ++{ ++ reset_lt(phydev); ++ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_AD_1, XFI_AN_AD1); ++ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_CTRL, XF_AN_RESTART); ++} ++ ++static void ld_coe_status(struct fsl_xgkr_inst *inst) ++{ ++ phy_write_mmd(inst->phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_LD_STATUS, inst->ld_status); ++} ++ ++static void ld_coe_update(struct fsl_xgkr_inst *inst) ++{ ++ phy_write_mmd(inst->phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_LD_CU, inst->ld_update); ++} ++ ++static void init_inst(struct fsl_xgkr_inst *inst, int reset) ++{ ++ if (reset) { ++ inst->ratio_preq = RATIO_PREQ; ++ inst->ratio_pst1q = RATIO_PST1Q; ++ inst->adpt_eq = 
RATIO_EQ; ++ tune_tecr0(inst); ++ } ++ ++ inst->ld_status &= RX_READY_MASK; ++ ld_coe_status(inst); ++ ++ /* init state machine */ ++ init_state_machine(&inst->t_s_m); ++ ++ inst->ld_update = 0; ++ ld_coe_update(inst); ++ ++ inst->ld_status &= ~RX_READY_MASK; ++ ld_coe_status(inst); ++} ++ ++#ifdef NEW_ALGORITHM_TRAIN_TX ++static int get_median_gaink2(u32 *reg) ++{ ++ int gaink2_snap_shot[BIN_SNAPSHOT_NUM]; ++ u32 rx_eq_snp; ++ struct per_lane_ctrl_status *reg_base; ++ int timeout; ++ int i, j, tmp, pos; ++ ++ reg_base = (struct per_lane_ctrl_status *)reg; ++ ++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { ++ /* wait RECR1_CTL_SNP_DONE_MASK has cleared */ ++ timeout = 100; ++ while (ioread32be(®_base->recr1) & ++ RECR1_CTL_SNP_DONE_MASK) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* start snap shot */ ++ iowrite32be((ioread32be(®_base->gcr1) | ++ GCR1_CTL_SNP_START_MASK), ++ ®_base->gcr1); ++ ++ /* wait for SNP done */ ++ timeout = 100; ++ while (!(ioread32be(®_base->recr1) & ++ RECR1_CTL_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* read and save the snap shot */ ++ rx_eq_snp = ioread32be(®_base->recr1); ++ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >> ++ RECR1_GAINK2_SHIFT; ++ ++ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ ++ iowrite32be((ioread32be(®_base->gcr1) & ++ ~GCR1_CTL_SNP_START_MASK), ++ ®_base->gcr1); ++ } ++ ++ /* get median of the 5 snap shot */ ++ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) { ++ tmp = gaink2_snap_shot[i]; ++ pos = i; ++ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) { ++ if (gaink2_snap_shot[j] < tmp) { ++ tmp = gaink2_snap_shot[j]; ++ pos = j; ++ } ++ } ++ ++ gaink2_snap_shot[pos] = gaink2_snap_shot[i]; ++ gaink2_snap_shot[i] = tmp; ++ } ++ ++ return gaink2_snap_shot[2]; ++} ++#endif ++ ++static bool is_bin_early(int bin_sel, void __iomem *reg) ++{ ++ bool early = false; ++ int bin_snap_shot[BIN_SNAPSHOT_NUM]; ++ int i, negative_count = 0; ++ struct per_lane_ctrl_status *reg_base; ++ int timeout; ++ ++ reg_base = (struct per_lane_ctrl_status *)reg; ++ ++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { ++ /* wait RECR1_SNP_DONE_MASK has cleared */ ++ timeout = 100; ++ while ((ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* set TCSR1[CDR_SEL] to BinM1/BinLong */ ++ if (bin_sel == BIN_M1) { ++ iowrite32be((ioread32be(®_base->tcsr1) & ++ ~CDR_SEL_MASK) | BIN_M1_SEL, ++ ®_base->tcsr1); ++ } else { ++ iowrite32be((ioread32be(®_base->tcsr1) & ++ ~CDR_SEL_MASK) | BIN_Long_SEL, ++ ®_base->tcsr1); ++ } ++ ++ /* start snap shot */ ++ iowrite32be(ioread32be(®_base->gcr1) | GCR1_SNP_START_MASK, ++ ®_base->gcr1); ++ ++ /* wait for SNP done */ ++ timeout = 100; ++ while (!(ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* read and save the snap shot */ ++ bin_snap_shot[i] = (ioread32be(®_base->tcsr1) & ++ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT; ++ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK) ++ negative_count++; ++ ++ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ ++ iowrite32be(ioread32be(®_base->gcr1) & ~GCR1_SNP_START_MASK, ++ ®_base->gcr1); ++ } ++ ++ if (((bin_sel == BIN_M1) && negative_count > BIN_M1_THRESHOLD) || ++ ((bin_sel == BIN_LONG && negative_count > BIN_LONG_THRESHOLD))) { ++ early = true; ++ } ++ ++ return early; ++} ++ ++static void train_tx(struct fsl_xgkr_inst *inst) ++{ ++ struct phy_device 
*phydev = inst->phydev; ++ struct training_state_machine *s_m = &inst->t_s_m; ++ bool bin_m1_early, bin_long_early; ++ u32 lp_status, old_ld_update; ++ u32 status_cop1, status_coz, status_com1; ++ u32 req_cop1, req_coz, req_com1, req_preset, req_init; ++ u32 temp; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ u32 median_gaink2; ++#endif ++ ++recheck: ++ if (s_m->bin_long_stop && s_m->bin_m1_stop) { ++ s_m->tx_complete = true; ++ inst->ld_status |= RX_READY_MASK; ++ ld_coe_status(inst); ++ /* tell LP we are ready */ ++ phy_write_mmd(phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_PMD_STATUS, RX_STAT); ++ return; ++ } ++ ++ /* We start by checking the current LP status. If we got any responses, ++ * we can clear up the appropriate update request so that the ++ * subsequent code may easily issue new update requests if needed. ++ */ ++ lp_status = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS) & ++ REQUEST_MASK; ++ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT; ++ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT; ++ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT; ++ ++ old_ld_update = inst->ld_update; ++ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT; ++ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT; ++ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT; ++ req_preset = old_ld_update & PRESET_MASK; ++ req_init = old_ld_update & INIT_MASK; ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.1 ++ * We may clear PRESET when all coefficients show UPDATED or MAX. ++ */ ++ if (req_preset) { ++ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) && ++ (status_coz == COE_UPDATED || status_coz == COE_MAX) && ++ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) { ++ inst->ld_update &= ~PRESET_MASK; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.2 ++ * We may clear INITIALIZE when no coefficients show NOT UPDATED. ++ */ ++ if (req_init) { ++ if (status_cop1 != COE_NOTUPDATED && ++ status_coz != COE_NOTUPDATED && ++ status_com1 != COE_NOTUPDATED) { ++ inst->ld_update &= ~INIT_MASK; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.2 ++ * we send initialize to the other side to ensure default settings ++ * for the LP. Naturally, we should do this only once. ++ */ ++ if (!s_m->sent_init) { ++ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) { ++ inst->ld_update |= INIT_MASK; ++ s_m->sent_init = true; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.3 ++ * We set coefficient requests to HOLD when we get the information ++ * about any updates On clearing our prior response, we also update ++ * our internal status. 
++ */ ++ if (status_cop1 != COE_NOTUPDATED) { ++ if (req_cop1) { ++ inst->ld_update &= ~COP1_MASK; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (s_m->post_inc) { ++ if (req_cop1 == INCREMENT && ++ status_cop1 == COE_MAX) { ++ s_m->post_inc = 0; ++ s_m->bin_long_stop = true; ++ s_m->bin_m1_stop = true; ++ } else { ++ s_m->post_inc -= 1; ++ } ++ ++ ld_coe_update(inst); ++ goto recheck; ++ } ++#endif ++ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) || ++ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) { ++ s_m->long_min_max_cnt++; ++ if (s_m->long_min_max_cnt >= TIMEOUT_LONG) { ++ s_m->bin_long_stop = true; ++ ld_coe_update(inst); ++ goto recheck; ++ } ++ } ++ } ++ } ++ ++ if (status_coz != COE_NOTUPDATED) { ++ if (req_coz) ++ inst->ld_update &= ~COZ_MASK; ++ } ++ ++ if (status_com1 != COE_NOTUPDATED) { ++ if (req_com1) { ++ inst->ld_update &= ~COM1_MASK; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (s_m->pre_inc) { ++ if (req_com1 == INCREMENT && ++ status_com1 == COE_MAX) ++ s_m->pre_inc = 0; ++ else ++ s_m->pre_inc -= 1; ++ ++ ld_coe_update(inst); ++ goto recheck; ++ } ++#endif ++ /* Stop If we have reached the limit for a parameter. */ ++ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) || ++ (req_com1 == INCREMENT && status_com1 == COE_MAX)) { ++ s_m->m1_min_max_cnt++; ++ if (s_m->m1_min_max_cnt >= TIMEOUT_M1) { ++ s_m->bin_m1_stop = true; ++ ld_coe_update(inst); ++ goto recheck; ++ } ++ } ++ } ++ } ++ ++ if (old_ld_update != inst->ld_update) { ++ ld_coe_update(inst); ++ /* Redo these status checks and updates until we have no more ++ * changes, to speed up the overall process. ++ */ ++ goto recheck; ++ } ++ ++ /* Do nothing if we have pending request. */ ++ if ((req_coz || req_com1 || req_cop1)) ++ return; ++ else if (lp_status) ++ /* No pending request but LP status was not reverted to ++ * not updated. ++ */ ++ return; ++ ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { ++ if (s_m->pre_inc) { ++ inst->ld_update = INCREMENT << COM1_SHIFT; ++ ld_coe_update(inst); ++ return; ++ } ++ ++ if (status_cop1 != COE_MAX) { ++ median_gaink2 = get_median_gaink2(inst->reg_base); ++ if (median_gaink2 == 0xf) { ++ s_m->post_inc = 1; ++ } else { ++ /* Gaink2 median lower than "F" */ ++ s_m->bin_m1_stop = true; ++ s_m->bin_long_stop = true; ++ goto recheck; ++ } ++ } else { ++ /* C1 MAX */ ++ s_m->bin_m1_stop = true; ++ s_m->bin_long_stop = true; ++ goto recheck; ++ } ++ ++ if (s_m->post_inc) { ++ inst->ld_update = INCREMENT << COP1_SHIFT; ++ ld_coe_update(inst); ++ return; ++ } ++ } ++#endif ++ ++ /* snapshot and select bin */ ++ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base); ++ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base); ++ ++ if (!s_m->bin_m1_stop && !s_m->bin_m1_late_early && bin_m1_early) { ++ s_m->bin_m1_stop = true; ++ goto recheck; ++ } ++ ++ if (!s_m->bin_long_stop && ++ s_m->bin_long_late_early && !bin_long_early) { ++ s_m->bin_long_stop = true; ++ goto recheck; ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.3 ++ * We only request coefficient updates when no PRESET/INITIALIZE is ++ * pending! We also only request coefficient updates when the ++ * corresponding status is NOT UPDATED and nothing is pending. ++ */ ++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { ++ if (!s_m->bin_long_stop) { ++ /* BinM1 correction means changing COM1 */ ++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { ++ /* Avoid BinM1Late by requesting an ++ * immediate decrement. 
++ */ ++ if (!bin_m1_early) { ++ /* request decrement c(-1) */ ++ temp = DECREMENT << COM1_SHIFT; ++ inst->ld_update |= temp; ++ ld_coe_update(inst); ++ s_m->bin_m1_late_early = bin_m1_early; ++ return; ++ } ++ } ++ ++ /* BinLong correction means changing COP1 */ ++ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) { ++ /* Locate BinLong transition point (if any) ++ * while avoiding BinM1Late. ++ */ ++ if (bin_long_early) { ++ /* request increment c(1) */ ++ temp = INCREMENT << COP1_SHIFT; ++ inst->ld_update |= temp; ++ } else { ++ /* request decrement c(1) */ ++ temp = DECREMENT << COP1_SHIFT; ++ inst->ld_update |= temp; ++ } ++ ++ ld_coe_update(inst); ++ s_m->bin_long_late_early = bin_long_early; ++ } ++ /* We try to finish BinLong before we do BinM1 */ ++ return; ++ } ++ ++ if (!s_m->bin_m1_stop) { ++ /* BinM1 correction means changing COM1 */ ++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { ++ /* Locate BinM1 transition point (if any) */ ++ if (bin_m1_early) { ++ /* request increment c(-1) */ ++ temp = INCREMENT << COM1_SHIFT; ++ inst->ld_update |= temp; ++ } else { ++ /* request decrement c(-1) */ ++ temp = DECREMENT << COM1_SHIFT; ++ inst->ld_update |= temp; ++ } ++ ++ ld_coe_update(inst); ++ s_m->bin_m1_late_early = bin_m1_early; ++ } ++ } ++ } ++} ++ ++static int check_an_link(struct phy_device *phydev) ++{ ++ int val; ++ int timeout = 100; ++ ++ while (timeout--) { ++ val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); ++ if (val & XFI_AN_LNK_STAT_UP) ++ return 1; ++ usleep_range(100, 500); ++ } ++ ++ return 0; ++} ++ ++static int is_link_training_fail(struct phy_device *phydev) ++{ ++ int val; ++ ++ val = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS); ++ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) { ++ /* check LNK_STAT for sure */ ++ if (check_an_link(phydev)) ++ return 0; ++ return 1; ++ } ++ return 1; ++} ++ ++static int check_rx(struct phy_device *phydev) ++{ ++ return phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS) & ++ RX_READY_MASK; ++} ++ ++/* Coefficient values have hardware restrictions */ ++static int is_ld_valid(u32 *ld_coe) ++{ ++ u32 ratio_pst1q = *ld_coe; ++ u32 adpt_eq = *(ld_coe + 1); ++ u32 ratio_preq = *(ld_coe + 2); ++ ++ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48) ++ return 0; ++ ++ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >= ++ ((adpt_eq - ratio_pst1q - ratio_preq) * 17)) ++ return 0; ++ ++ if (ratio_preq > ratio_pst1q) ++ return 0; ++ ++ if (ratio_preq > 8) ++ return 0; ++ ++ if (adpt_eq < 26) ++ return 0; ++ ++ if (ratio_pst1q > 16) ++ return 0; ++ ++ return 1; ++} ++ ++#define VAL_INVALID 0xff ++ ++static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5, ++ 0x7, 0x9, 0xb, 0xc, VAL_INVALID}; ++static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, ++ 0x7, 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID}; ++ ++static int is_value_allowed(const u32 *val_table, u32 val) ++{ ++ int i; ++ ++ for (i = 0;; i++) { ++ if (*(val_table + i) == VAL_INVALID) ++ return 0; ++ if (*(val_table + i) == val) ++ return 1; ++ } ++} ++ ++static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request) ++{ ++ u32 ld_limit[3], ld_coe[3], step[3]; ++ ++ ld_coe[0] = inst->ratio_pst1q; ++ ld_coe[1] = inst->adpt_eq; ++ ld_coe[2] = inst->ratio_preq; ++ ++ /* Information specific to the Freescale SerDes for 10GBase-KR: ++ * Incrementing C(+1) means *decrementing* RATIO_PST1Q ++ * Incrementing C(0) means incrementing ADPT_EQ ++ * Incrementing C(-1) means *decrementing* RATIO_PREQ ++ */ ++ step[0] = -1; ++ step[1] = 1; ++ step[2] = -1; ++ ++ switch 
(request) { ++ case INCREMENT: ++ ld_limit[0] = POST_COE_MAX; ++ ld_limit[1] = ZERO_COE_MAX; ++ ld_limit[2] = PRE_COE_MAX; ++ if (ld_coe[field] != ld_limit[field]) ++ ld_coe[field] += step[field]; ++ else ++ /* MAX */ ++ return 2; ++ break; ++ case DECREMENT: ++ ld_limit[0] = POST_COE_MIN; ++ ld_limit[1] = ZERO_COE_MIN; ++ ld_limit[2] = PRE_COE_MIN; ++ if (ld_coe[field] != ld_limit[field]) ++ ld_coe[field] -= step[field]; ++ else ++ /* MIN */ ++ return 1; ++ break; ++ default: ++ break; ++ } ++ ++ if (is_ld_valid(ld_coe)) { ++ /* accept new ld */ ++ inst->ratio_pst1q = ld_coe[0]; ++ inst->adpt_eq = ld_coe[1]; ++ inst->ratio_preq = ld_coe[2]; ++ /* only some values for preq and pst1q can be used. ++ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc. ++ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10. ++ */ ++ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) { ++ dev_dbg(&inst->phydev->dev, ++ "preq skipped value: %d.\n", ld_coe[2]); ++ return 0; ++ } ++ ++ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) { ++ dev_dbg(&inst->phydev->dev, ++ "pst1q skipped value: %d.\n", ld_coe[0]); ++ return 0; ++ } ++ ++ tune_tecr0(inst); ++ } else { ++ if (request == DECREMENT) ++ /* MIN */ ++ return 1; ++ if (request == INCREMENT) ++ /* MAX */ ++ return 2; ++ } ++ ++ return 0; ++} ++ ++static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld) ++{ ++ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX}; ++ u32 mask, val; ++ ++ switch (field) { ++ case COE_COP1: ++ mask = COP1_MASK; ++ val = ld_coe[new_ld] << COP1_SHIFT; ++ break; ++ case COE_COZ: ++ mask = COZ_MASK; ++ val = ld_coe[new_ld] << COZ_SHIFT; ++ break; ++ case COE_COM: ++ mask = COM1_MASK; ++ val = ld_coe[new_ld] << COM1_SHIFT; ++ break; ++ default: ++ return; ++ break; ++ } ++ ++ inst->ld_status &= ~mask; ++ inst->ld_status |= val; ++} ++ ++static void check_request(struct fsl_xgkr_inst *inst, int request) ++{ ++ int cop1_req, coz_req, com_req; ++ int old_status, new_ld_sta; ++ ++ cop1_req = (request & COP1_MASK) >> COP1_SHIFT; ++ coz_req = (request & COZ_MASK) >> COZ_SHIFT; ++ com_req = (request & COM1_MASK) >> COM1_SHIFT; ++ ++ /* IEEE802.3-2008, 72.6.10.2.5 ++ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED! ++ */ ++ old_status = inst->ld_status; ++ ++ if (cop1_req && !(inst->ld_status & COP1_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req); ++ min_max_updated(inst, COE_COP1, new_ld_sta); ++ } ++ ++ if (coz_req && !(inst->ld_status & COZ_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COZ, coz_req); ++ min_max_updated(inst, COE_COZ, new_ld_sta); ++ } ++ ++ if (com_req && !(inst->ld_status & COM1_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COM, com_req); ++ min_max_updated(inst, COE_COM, new_ld_sta); ++ } ++ ++ if (old_status != inst->ld_status) ++ ld_coe_status(inst); ++ ++} ++ ++static void preset(struct fsl_xgkr_inst *inst) ++{ ++ /* These are all MAX values from the IEEE802.3 perspective! 
*/ ++ inst->ratio_pst1q = POST_COE_MAX; ++ inst->adpt_eq = ZERO_COE_MAX; ++ inst->ratio_preq = PRE_COE_MAX; ++ ++ tune_tecr0(inst); ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ inst->ld_status |= COE_MAX << COP1_SHIFT | ++ COE_MAX << COZ_SHIFT | ++ COE_MAX << COM1_SHIFT; ++ ld_coe_status(inst); ++} ++ ++static void initialize(struct fsl_xgkr_inst *inst) ++{ ++ inst->ratio_preq = RATIO_PREQ; ++ inst->ratio_pst1q = RATIO_PST1Q; ++ inst->adpt_eq = RATIO_EQ; ++ ++ tune_tecr0(inst); ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ inst->ld_status |= COE_UPDATED << COP1_SHIFT | ++ COE_UPDATED << COZ_SHIFT | ++ COE_UPDATED << COM1_SHIFT; ++ ld_coe_status(inst); ++} ++ ++static void train_rx(struct fsl_xgkr_inst *inst) ++{ ++ struct phy_device *phydev = inst->phydev; ++ int request, old_ld_status; ++ ++ /* get request from LP */ ++ request = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU) & ++ (LD_ALL_MASK); ++ old_ld_status = inst->ld_status; ++ ++ /* IEEE802.3-2008, 72.6.10.2.5 ++ * Ensure we always go to NOT UDPATED for status reporting in ++ * response to HOLD requests. ++ * IEEE802.3-2008, 72.6.10.2.3.1/2 ++ * ... but only if PRESET/INITIALIZE are not active to ensure ++ * we keep status until they are released! ++ */ ++ if (!(request & (PRESET_MASK | INIT_MASK))) { ++ if (!(request & COP1_MASK)) ++ inst->ld_status &= ~COP1_MASK; ++ ++ if (!(request & COZ_MASK)) ++ inst->ld_status &= ~COZ_MASK; ++ ++ if (!(request & COM1_MASK)) ++ inst->ld_status &= ~COM1_MASK; ++ ++ if (old_ld_status != inst->ld_status) ++ ld_coe_status(inst); ++ ++ } ++ ++ /* As soon as the LP shows ready, no need to do any more updates. */ ++ if (check_rx(phydev)) { ++ /* LP receiver is ready */ ++ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) { ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ ld_coe_status(inst); ++ } ++ } else { ++ /* IEEE802.3-2008, 72.6.10.2.3.1/2 ++ * only act on PRESET/INITIALIZE if all status is NOT UPDATED. ++ */ ++ if (request & (PRESET_MASK | INIT_MASK)) { ++ if (!(inst->ld_status & ++ (COP1_MASK | COZ_MASK | COM1_MASK))) { ++ if (request & PRESET_MASK) ++ preset(inst); ++ ++ if (request & INIT_MASK) ++ initialize(inst); ++ } ++ } ++ ++ /* LP Coefficient are not in HOLD */ ++ if (request & REQUEST_MASK) ++ check_request(inst, request & REQUEST_MASK); ++ } ++} ++ ++static void xgkr_wq_state_machine(struct work_struct *work) ++{ ++ struct fsl_xgkr_wk *wk = container_of(work, ++ struct fsl_xgkr_wk, xgkr_wk); ++ struct fsl_xgkr_inst *inst = wk->xgkr_inst; ++ struct training_state_machine *s_m = &inst->t_s_m; ++ struct phy_device *phydev = inst->phydev; ++ int val = 0, i; ++ int an_state, lt_state; ++ unsigned long dead_line; ++ int rx_ok, tx_ok; ++ ++ if (s_m->link_up) { ++ /* check abnormal link down events when link is up, for ex. ++ * the cable is pulled out or link partner is down. 
++ */ ++ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); ++ if (!(an_state & XFI_AN_LNK_STAT_UP)) { ++ dev_info(&phydev->dev, ++ "Detect hotplug, restart training!\n"); ++ init_inst(inst, 1); ++ start_an(phydev); ++ } ++ s_m->running = false; ++ return; ++ } ++ ++ if (!s_m->an_ok) { ++ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_BP_STATUS); ++ if (!(an_state & AN_10GKR_MASK)) { ++ s_m->running = false; ++ return; ++ } else ++ s_m->an_ok = true; ++ } ++ ++ dev_info(&phydev->dev, "is training.\n"); ++ ++ start_lt(phydev); ++ for (i = 0; i < 2;) { ++ /* i < 1 also works, but start one more try immediately when ++ * failed can adjust our training frequency to match other ++ * devices. This can help the link being established more ++ * quickly. ++ */ ++ dead_line = jiffies + msecs_to_jiffies(500); ++ while (time_before(jiffies, dead_line)) { ++ val = phy_read_mmd(phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_PMD_STATUS); ++ if (val & TRAIN_FAIL) { ++ /* LT failed already, reset lane to avoid ++ * it run into hanging, then start LT again. ++ */ ++ reset_gcr0(inst); ++ start_lt(phydev); ++ } else if (val & PMD_STATUS_SUP_STAT && ++ val & PMD_STATUS_FRAME_LOCK) ++ break; ++ usleep_range(100, 500); ++ } ++ ++ if (!(val & PMD_STATUS_FRAME_LOCK && ++ val & PMD_STATUS_SUP_STAT)) { ++ i++; ++ continue; ++ } ++ ++ /* init process */ ++ rx_ok = tx_ok = false; ++ /* the LT should be finished in 500ms, failed or OK. */ ++ dead_line = jiffies + msecs_to_jiffies(500); ++ ++ while (time_before(jiffies, dead_line)) { ++ /* check if the LT is already failed */ ++ lt_state = phy_read_mmd(phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_PMD_STATUS); ++ if (lt_state & TRAIN_FAIL) { ++ reset_gcr0(inst); ++ break; ++ } ++ ++ rx_ok = check_rx(phydev); ++ tx_ok = s_m->tx_complete; ++ ++ if (rx_ok && tx_ok) ++ break; ++ ++ if (!rx_ok) ++ train_rx(inst); ++ ++ if (!tx_ok) ++ train_tx(inst); ++ usleep_range(100, 500); ++ } ++ ++ i++; ++ /* check LT result */ ++ if (is_link_training_fail(phydev)) { ++ /* reset state machine */ ++ init_inst(inst, 0); ++ continue; ++ } else { ++ stop_lt(phydev); ++ s_m->running = false; ++ s_m->link_up = true; ++ dev_info(&phydev->dev, "LT training is SUCCEEDED!\n"); ++ break; ++ } ++ } ++ ++ if (!s_m->link_up) { ++ /* reset state machine */ ++ init_inst(inst, 0); ++ } ++} ++ ++static void xgkr_timer_handle(unsigned long arg) ++{ ++ struct list_head *pos; ++ struct fsl_xgkr_wk *wk; ++ struct fsl_xgkr_inst *xgkr_inst; ++ struct phy_device *phydev; ++ struct training_state_machine *s_m; ++ ++ list_for_each(pos, &fsl_xgkr_list) { ++ wk = list_entry(pos, struct fsl_xgkr_wk, xgkr_list); ++ xgkr_inst = wk->xgkr_inst; ++ phydev = xgkr_inst->phydev; ++ s_m = &xgkr_inst->t_s_m; ++ ++ if (!s_m->running && (!s_m->an_ok || s_m->link_up)) { ++ s_m->running = true; ++ queue_work(xgkr_wq, (struct work_struct *)wk); ++ } ++ } ++ ++ if (!list_empty(&fsl_xgkr_list)) ++ mod_timer(&xgkr_timer, ++ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); ++} ++ ++static int fsl_xgkr_bind_serdes(const char *lane_name, ++ struct phy_device *phydev) ++{ ++ unsigned long serdes_base; ++ unsigned long lane_base; ++ int i; ++ ++ for (i = 0; i < SERDES_MAX; i++) { ++ if (strstr(lane_name, s_map[i].serdes_name)) { ++ serdes_base = s_map[i].serdes_base; ++ break; ++ } ++ } ++ ++ if (i == SERDES_MAX) ++ goto serdes_err; ++ ++ for (i = 0; i < LANE_MAX; i++) { ++ if (strstr(lane_name, l_map[i].lane_name)) { ++ lane_base = l_map[i].lane_base; ++ break; ++ } ++ } ++ ++ if (i == LANE_MAX) ++ goto lane_err; ++ ++ phydev->priv = 
ioremap(serdes_base + lane_base, ++ sizeof(struct per_lane_ctrl_status)); ++ if (!phydev->priv) ++ return -ENOMEM; ++ ++ return 0; ++ ++serdes_err: ++ dev_err(&phydev->dev, "Unknown SerDes name"); ++ return -EINVAL; ++lane_err: ++ dev_err(&phydev->dev, "Unknown Lane name"); ++ return -EINVAL; ++} ++ ++static int fsl_xgkr_probe(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_inst *xgkr_inst; ++ struct fsl_xgkr_wk *xgkr_wk; ++ struct device_node *child; ++ const char *lane_name; ++ int len; ++ ++ child = phydev->dev.of_node; ++ ++ /* if there is lane-instance property, 10G-KR need to run */ ++ lane_name = of_get_property(child, "lane-instance", &len); ++ if (!lane_name || (fsl_xgkr_bind_serdes(lane_name, phydev))) ++ return 0; ++ ++ xgkr_inst = kzalloc(sizeof(struct fsl_xgkr_inst), GFP_KERNEL); ++ if (!xgkr_inst) ++ goto mem_err1; ++ ++ xgkr_inst->reg_base = phydev->priv; ++ ++ xgkr_inst->bus = phydev->bus; ++ ++ xgkr_inst->phydev = phydev; ++ ++ init_inst(xgkr_inst, 1); ++ ++ xgkr_wk = kzalloc(sizeof(struct fsl_xgkr_wk), GFP_KERNEL); ++ if (!xgkr_wk) ++ goto mem_err2; ++ ++ xgkr_wk->xgkr_inst = xgkr_inst; ++ phydev->priv = xgkr_wk; ++ ++ list_add(&xgkr_wk->xgkr_list, &fsl_xgkr_list); ++ ++ if (!fire_timer) { ++ setup_timer(&xgkr_timer, xgkr_timer_handle, ++ (unsigned long)&fsl_xgkr_list); ++ mod_timer(&xgkr_timer, ++ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); ++ fire_timer = 1; ++ xgkr_wq = create_workqueue("fsl_xgkr"); ++ } ++ INIT_WORK((struct work_struct *)xgkr_wk, xgkr_wq_state_machine); ++ ++ /* start auto-negotiation to detect link partner */ ++ start_an(phydev); ++ ++ return 0; ++mem_err2: ++ kfree(xgkr_inst); ++mem_err1: ++ dev_err(&phydev->dev, "failed to allocate memory!\n"); ++ return -ENOMEM; ++} ++ ++static int fsl_xgkr_config_init(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int fsl_xgkr_config_aneg(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static void fsl_xgkr_remove(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_wk *wk = (struct fsl_xgkr_wk *)phydev->priv; ++ struct fsl_xgkr_inst *xgkr_inst = wk->xgkr_inst; ++ struct list_head *this, *next; ++ struct fsl_xgkr_wk *tmp; ++ ++ list_for_each_safe(this, next, &fsl_xgkr_list) { ++ tmp = list_entry(this, struct fsl_xgkr_wk, xgkr_list); ++ if (tmp == wk) { ++ cancel_work_sync((struct work_struct *)wk); ++ list_del(this); ++ } ++ } ++ ++ if (list_empty(&fsl_xgkr_list)) ++ del_timer(&xgkr_timer); ++ ++ if (xgkr_inst->reg_base) ++ iounmap(xgkr_inst->reg_base); ++ ++ kfree(xgkr_inst); ++ kfree(wk); ++} ++ ++static int fsl_xgkr_read_status(struct phy_device *phydev) ++{ ++ int val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); ++ ++ phydev->speed = SPEED_10000; ++ phydev->duplex = 1; ++ ++ if (val & XFI_AN_LNK_STAT_UP) ++ phydev->link = 1; ++ else ++ phydev->link = 0; ++ ++ return 0; ++} ++ ++static int fsl_xgkr_match_phy_device(struct phy_device *phydev) ++{ ++ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID; ++} ++ ++static int fsl_xgkr_match_phy_device2(struct phy_device *phydev) ++{ ++ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID2; ++} ++ ++static struct phy_driver fsl_xgkr_driver[] = { ++ { ++ .phy_id = FSL_XFI_PCS_PHY_ID, ++ .name = "Freescale 10G KR Rev1", ++ .phy_id_mask = 0xffffffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .probe = fsl_xgkr_probe, ++ .config_init = &fsl_xgkr_config_init, ++ .config_aneg = &fsl_xgkr_config_aneg, ++ .read_status = &fsl_xgkr_read_status, ++ .match_phy_device = fsl_xgkr_match_phy_device, ++ .remove = 
fsl_xgkr_remove, ++ .driver = { .owner = THIS_MODULE,}, ++ }, ++ { ++ .phy_id = FSL_XFI_PCS_PHY_ID2, ++ .name = "Freescale 10G KR Rev2", ++ .phy_id_mask = 0xffffffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .probe = fsl_xgkr_probe, ++ .config_init = &fsl_xgkr_config_init, ++ .config_aneg = &fsl_xgkr_config_aneg, ++ .read_status = &fsl_xgkr_read_status, ++ .match_phy_device = fsl_xgkr_match_phy_device2, ++ .remove = fsl_xgkr_remove, ++ .driver = { .owner = THIS_MODULE,}, ++ }, ++}; ++ ++static int __init fsl_xgkr_init(void) ++{ ++ return phy_drivers_register(fsl_xgkr_driver, ++ ARRAY_SIZE(fsl_xgkr_driver)); ++} ++ ++static void __exit fsl_xgkr_exit(void) ++{ ++ phy_drivers_unregister(fsl_xgkr_driver, ++ ARRAY_SIZE(fsl_xgkr_driver)); ++} ++ ++module_init(fsl_xgkr_init); ++module_exit(fsl_xgkr_exit); ++ ++static struct mdio_device_id __maybe_unused freescale_tbl[] = { ++ { FSL_XFI_PCS_PHY_ID, 0xffffffff }, ++ { FSL_XFI_PCS_PHY_ID2, 0xffffffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, freescale_tbl); +diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c +new file mode 100644 +index 0000000..91e1bec +--- /dev/null ++++ b/drivers/net/phy/teranetics.c +@@ -0,0 +1,135 @@ ++/* ++ * Driver for Teranetics PHY ++ * ++ * Author: Shaohui Xie ++ * ++ * Copyright 2015 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public License ++ * version 2. This program is licensed "as is" without any warranty of any ++ * kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++MODULE_DESCRIPTION("Teranetics PHY driver"); ++MODULE_AUTHOR("Shaohui Xie "); ++MODULE_LICENSE("GPL v2"); ++ ++#define PHY_ID_TN2020 0x00a19410 ++#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 ++#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 ++#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 ++#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 ++#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 ++ ++#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ ++ MDIO_PHYXS_LNSTAT_SYNC1 | \ ++ MDIO_PHYXS_LNSTAT_SYNC2 | \ ++ MDIO_PHYXS_LNSTAT_SYNC3 | \ ++ MDIO_PHYXS_LNSTAT_ALIGN) ++ ++static int teranetics_config_init(struct phy_device *phydev) ++{ ++ phydev->supported = SUPPORTED_10000baseT_Full; ++ phydev->advertising = SUPPORTED_10000baseT_Full; ++ ++ return 0; ++} ++ ++static int teranetics_soft_reset(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int teranetics_aneg_done(struct phy_device *phydev) ++{ ++ int reg; ++ ++ /* auto negotiation state can only be checked when using copper ++ * port, if using fiber port, just lie it's done. ++ */ ++ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); ++ } ++ ++ return 1; ++} ++ ++static int teranetics_config_aneg(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int teranetics_read_status(struct phy_device *phydev) ++{ ++ int reg; ++ ++ phydev->link = 1; ++ ++ phydev->speed = SPEED_10000; ++ phydev->duplex = DUPLEX_FULL; ++ ++ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { ++ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); ++ if (reg < 0 || ++ !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { ++ phydev->link = 0; ++ return 0; ++ } ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) ++ phydev->link = 0; ++ } ++ ++ return 0; ++} ++ ++static int teranetics_match_phy_device(struct phy_device *phydev) ++{ ++ return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; ++} ++ ++static struct phy_driver teranetics_driver[] = { ++{ ++ .phy_id = PHY_ID_TN2020, ++ .phy_id_mask = 0xffffffff, ++ .name = "Teranetics TN2020", ++ .soft_reset = teranetics_soft_reset, ++ .aneg_done = teranetics_aneg_done, ++ .config_init = teranetics_config_init, ++ .config_aneg = teranetics_config_aneg, ++ .read_status = teranetics_read_status, ++ .match_phy_device = teranetics_match_phy_device, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++}; ++ ++static int __init teranetics_init(void) ++{ ++ return phy_drivers_register(teranetics_driver, ++ ARRAY_SIZE(teranetics_driver)); ++} ++ ++static void __exit teranetics_exit(void) ++{ ++ return phy_drivers_unregister(teranetics_driver, ++ ARRAY_SIZE(teranetics_driver)); ++} ++ ++module_init(teranetics_init); ++module_exit(teranetics_exit); ++ ++static struct mdio_device_id __maybe_unused teranetics_tbl[] = { ++ { PHY_ID_TN2020, 0xffffffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, teranetics_tbl); +diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig +index 4690ae9..43ff2b5 100644 +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -108,4 +108,8 @@ source "drivers/staging/skein/Kconfig" + + source "drivers/staging/unisys/Kconfig" + ++source "drivers/staging/fsl-mc/Kconfig" ++ ++source "drivers/staging/fsl-dpaa2/Kconfig" ++ + endif # STAGING +diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile +index c780a0e..a9bd303 100644 +--- a/drivers/staging/Makefile ++++ b/drivers/staging/Makefile +@@ -46,3 +46,5 @@ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ + obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ + obj-$(CONFIG_CRYPTO_SKEIN) += skein/ + obj-$(CONFIG_UNISYSSPAR) += unisys/ ++obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ ++obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ +diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig +new file mode 100644 +index 0000000..3fe47bc +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/Kconfig +@@ -0,0 +1,12 @@ ++# ++# Freescale device configuration ++# ++ ++config FSL_DPAA2 ++ bool "Freescale DPAA2 devices" ++ depends on FSL_MC_BUS ++ ---help--- ++ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. ++# TODO move DPIO driver in-here? ++source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" ++source "drivers/staging/fsl-dpaa2/mac/Kconfig" +diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile +new file mode 100644 +index 0000000..bc687a1 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/Makefile +@@ -0,0 +1,6 @@ ++# ++# Makefile for the Freescale network device drivers. 
++# ++ ++obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ ++obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/Kconfig b/drivers/staging/fsl-dpaa2/ethernet/Kconfig +new file mode 100644 +index 0000000..df91da2 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig +@@ -0,0 +1,36 @@ ++# ++# Freescale DPAA Ethernet driver configuration ++# ++# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++ ++menuconfig FSL_DPAA2_ETH ++ tristate "Freescale DPAA2 Ethernet" ++ depends on FSL_DPAA2 && FSL_MC_BUS && FSL_MC_DPIO ++ select FSL_DPAA2_MAC ++ default y ++ ---help--- ++ Freescale Data Path Acceleration Architecture Ethernet ++ driver, using the Freescale MC bus driver. ++ ++if FSL_DPAA2_ETH ++ ++config FSL_DPAA2_ETH_USE_ERR_QUEUE ++ bool "Enable Rx error queue" ++ default n ++ ---help--- ++ Allow Rx error frames to be enqueued on an error queue ++ and processed by the driver (by default they are dropped ++ in hardware). ++ This may impact performance, recommended for debugging ++ purposes only. ++ ++config FSL_DPAA2_ETH_DEBUGFS ++ depends on DEBUG_FS && FSL_QBMAN_DEBUG ++ bool "Enable debugfs support" ++ default n ++ ---help--- ++ Enable advanced statistics through debugfs interface. ++endif +diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile +new file mode 100644 +index 0000000..74bff15 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile +@@ -0,0 +1,21 @@ ++# ++# Makefile for the Freescale DPAA Ethernet controllers ++# ++# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++ ++ccflags-y += -DVERSION=\"\" ++ ++obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o ++ ++fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o ++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o ++ ++#Needed by the tracing framework ++CFLAGS_dpaa2-eth.o := -I$(src) ++ ++ifeq ($(CONFIG_FSL_DPAA2_ETH_GCOV),y) ++ GCOV_PROFILE := y ++endif +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c +new file mode 100644 +index 0000000..c397983 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c +@@ -0,0 +1,317 @@ ++ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include "dpaa2-eth.h" ++#include "dpaa2-eth-debugfs.h" ++ ++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" ++ ++static struct dentry *dpaa2_dbg_root; ++ ++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; ++ struct rtnl_link_stats64 *stats; ++ struct dpaa2_eth_drv_stats *extras; ++ int i; ++ ++ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); ++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", ++ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", ++ "Tx SG", "Enq busy"); ++ ++ for_each_online_cpu(i) { ++ stats = per_cpu_ptr(priv->percpu_stats, i); ++ extras = per_cpu_ptr(priv->percpu_extras, i); ++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", ++ i, ++ stats->rx_packets, ++ stats->rx_errors, ++ extras->rx_sg_frames, ++ stats->tx_packets, ++ stats->tx_errors, ++ extras->tx_conf_frames, ++ extras->tx_sg_frames, ++ extras->tx_portal_busy); ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; ++ ++ err = single_open(file, dpaa2_dbg_cpu_show, priv); ++ if (err < 0) ++ netdev_err(priv->net_dev, "single_open() failed\n"); ++ ++ return err; ++} ++ ++static const struct file_operations dpaa2_dbg_cpu_ops = { ++ .open = dpaa2_dbg_cpu_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static char *fq_type_to_str(struct dpaa2_eth_fq *fq) ++{ ++ switch (fq->type) { ++ case DPAA2_RX_FQ: ++ return "Rx"; ++ case DPAA2_TX_CONF_FQ: ++ return "Tx conf"; ++ case DPAA2_RX_ERR_FQ: ++ return "Rx err"; ++ default: ++ return "N/A"; ++ } ++} ++ ++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; ++ struct dpaa2_eth_fq *fq; ++ u32 fcnt, bcnt; ++ int i, err; ++ ++ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); ++ seq_printf(file, "%s%16s%16s%16s%16s\n", ++ "VFQID", "CPU", "Type", "Frames", "Pending frames"); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); ++ if (err) ++ fcnt = 0; ++ ++ seq_printf(file, "%5d%16d%16s%16llu%16u\n", ++ fq->fqid, ++ fq->target_cpu, ++ fq_type_to_str(fq), ++ fq->stats.frames, ++ fcnt); ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; ++ ++ err = single_open(file, dpaa2_dbg_fqs_show, priv); ++ if (err < 0) ++ netdev_err(priv->net_dev, 
"single_open() failed\n"); ++ ++ return err; ++} ++ ++static const struct file_operations dpaa2_dbg_fq_ops = { ++ .open = dpaa2_dbg_fqs_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); ++ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", ++ "CHID", "CPU", "Deq busy", "Frames", "CDANs", ++ "Avg frm/CDAN"); ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n", ++ ch->ch_id, ++ ch->nctx.desired_cpu, ++ ch->stats.dequeue_portal_busy, ++ ch->stats.frames, ++ ch->stats.cdan, ++ ch->stats.frames / ch->stats.cdan); ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; ++ ++ err = single_open(file, dpaa2_dbg_ch_show, priv); ++ if (err < 0) ++ netdev_err(priv->net_dev, "single_open() failed\n"); ++ ++ return err; ++} ++ ++static const struct file_operations dpaa2_dbg_ch_ops = { ++ .open = dpaa2_dbg_ch_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *offset) ++{ ++ struct dpaa2_eth_priv *priv = file->private_data; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ struct dpaa2_eth_fq *fq; ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for_each_online_cpu(i) { ++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); ++ memset(percpu_stats, 0, sizeof(*percpu_stats)); ++ ++ percpu_extras = per_cpu_ptr(priv->percpu_extras, i); ++ memset(percpu_extras, 0, sizeof(*percpu_extras)); ++ } ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ memset(&fq->stats, 0, sizeof(fq->stats)); ++ } ++ ++ for_each_cpu(i, &priv->dpio_cpumask) { ++ ch = priv->channel[i]; ++ memset(&ch->stats, 0, sizeof(ch->stats)); ++ } ++ ++ return count; ++} ++ ++static const struct file_operations dpaa2_dbg_reset_ops = { ++ .open = simple_open, ++ .write = dpaa2_dbg_reset_write, ++}; ++ ++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) ++{ ++ if (!dpaa2_dbg_root) ++ return; ++ ++ /* Create a directory for the interface */ ++ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, ++ dpaa2_dbg_root); ++ if (!priv->dbg.dir) { ++ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); ++ return; ++ } ++ ++ /* per-cpu stats file */ ++ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", S_IRUGO, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_cpu_ops); ++ if (!priv->dbg.cpu_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_cpu_stats; ++ } ++ ++ /* per-fq stats file */ ++ priv->dbg.fq_stats = debugfs_create_file("fq_stats", S_IRUGO, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_fq_ops); ++ if (!priv->dbg.fq_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_fq_stats; ++ } ++ ++ /* per-fq stats file */ ++ priv->dbg.ch_stats = debugfs_create_file("ch_stats", S_IRUGO, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_ch_ops); ++ if (!priv->dbg.fq_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_ch_stats; ++ } ++ ++ /* reset stats */ ++ priv->dbg.reset_stats = 
debugfs_create_file("reset_stats", S_IWUSR, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_reset_ops); ++ if (!priv->dbg.reset_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_reset_stats; ++ } ++ ++ return; ++ ++err_reset_stats: ++ debugfs_remove(priv->dbg.ch_stats); ++err_ch_stats: ++ debugfs_remove(priv->dbg.fq_stats); ++err_fq_stats: ++ debugfs_remove(priv->dbg.cpu_stats); ++err_cpu_stats: ++ debugfs_remove(priv->dbg.dir); ++} ++ ++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) ++{ ++ debugfs_remove(priv->dbg.reset_stats); ++ debugfs_remove(priv->dbg.fq_stats); ++ debugfs_remove(priv->dbg.ch_stats); ++ debugfs_remove(priv->dbg.cpu_stats); ++ debugfs_remove(priv->dbg.dir); ++} ++ ++void dpaa2_eth_dbg_init(void) ++{ ++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); ++ if (!dpaa2_dbg_root) { ++ pr_err("DPAA2-ETH: debugfs create failed\n"); ++ return; ++ } ++ ++ pr_info("DPAA2-ETH: debugfs created\n"); ++} ++ ++void __exit dpaa2_eth_dbg_exit(void) ++{ ++ debugfs_remove(dpaa2_dbg_root); ++} ++ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h +new file mode 100644 +index 0000000..7ba706c +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h +@@ -0,0 +1,61 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef DPAA2_ETH_DEBUGFS_H ++#define DPAA2_ETH_DEBUGFS_H ++ ++#include ++#include "dpaa2-eth.h" ++ ++extern struct dpaa2_eth_priv *priv; ++ ++struct dpaa2_debugfs { ++ struct dentry *dir; ++ struct dentry *fq_stats; ++ struct dentry *ch_stats; ++ struct dentry *cpu_stats; ++ struct dentry *reset_stats; ++}; ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++void dpaa2_eth_dbg_init(void); ++void dpaa2_eth_dbg_exit(void); ++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); ++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); ++#else ++static inline void dpaa2_eth_dbg_init(void) {} ++static inline void dpaa2_eth_dbg_exit(void) {} ++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} ++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} ++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ ++ ++#endif /* DPAA2_ETH_DEBUGFS_H */ ++ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h +new file mode 100644 +index 0000000..3b040e8 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h +@@ -0,0 +1,185 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM dpaa2_eth ++ ++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _DPAA2_ETH_TRACE_H ++ ++#include ++#include ++#include "dpaa2-eth.h" ++#include ++ ++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" ++/* trace_printk format for raw buffer event class */ ++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" ++ ++/* This is used to declare a class of events. ++ * individual events of this type will be defined below. 
++ */ ++ ++/* Store details about a frame descriptor */ ++DECLARE_EVENT_CLASS(dpaa2_eth_fd, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, fd), ++ ++ /* A structure containing the relevant information we want ++ * to record. Declare name and type for each normal element, ++ * name, type and size for arrays. Use __string for variable ++ * length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(u64, fd_addr) ++ __field(u32, fd_len) ++ __field(u16, fd_offset) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared ++ * fields ++ */ ++ TP_fast_assign( ++ __entry->fd_addr = dpaa2_fd_get_addr(fd); ++ __entry->fd_len = dpaa2_fd_get_len(fd); ++ __entry->fd_offset = dpaa2_fd_get_offset(fd); ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is ++ * triggered. ++ */ ++ TP_printk(TR_FMT, ++ __get_str(name), ++ __entry->fd_addr, ++ __entry->fd_len, ++ __entry->fd_offset) ++); ++ ++/* Now declare events of the above type. Format is: ++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class ++ */ ++ ++/* Tx (egress) fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Rx fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Tx confirmation fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Log data about raw buffers. Useful for tracing DPBP content. */ ++TRACE_EVENT(dpaa2_eth_buf_seed, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ /* virtual address and size */ ++ void *vaddr, ++ size_t size, ++ /* dma map address and size */ ++ dma_addr_t dma_addr, ++ size_t map_size, ++ /* buffer pool id, if relevant */ ++ u16 bpid), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), ++ ++ /* A structure containing the relevant information we want ++ * to record. Declare name and type for each normal element, ++ * name, type and size for arrays. Use __string for variable ++ * length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(void *, vaddr) ++ __field(size_t, size) ++ __field(dma_addr_t, dma_addr) ++ __field(size_t, map_size) ++ __field(u16, bpid) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared ++ * fields ++ */ ++ TP_fast_assign( ++ __entry->vaddr = vaddr; ++ __entry->size = size; ++ __entry->dma_addr = dma_addr; ++ __entry->map_size = map_size; ++ __entry->bpid = bpid; ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is ++ * triggered. ++ */ ++ TP_printk(TR_BUF_FMT, ++ __get_str(name), ++ __entry->vaddr, ++ __entry->size, ++ &__entry->dma_addr, ++ __entry->map_size, ++ __entry->bpid) ++); ++ ++/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). ++ * The syntax is the same as for DECLARE_EVENT_CLASS(). ++ */ ++ ++#endif /* _DPAA2_ETH_TRACE_H */ ++ ++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ ++#undef TRACE_INCLUDE_PATH ++#define TRACE_INCLUDE_PATH . 
++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_FILE dpaa2-eth-trace ++#include +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +new file mode 100644 +index 0000000..27d1a91 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +@@ -0,0 +1,2836 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../fsl-mc/include/mc.h" ++#include "../../fsl-mc/include/mc-sys.h" ++#include "dpaa2-eth.h" ++ ++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files ++ * using trace events only need to #include ++ */ ++#define CREATE_TRACE_POINTS ++#include "dpaa2-eth-trace.h" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); ++ ++/* Oldest DPAA2 objects version we are compatible with */ ++#define DPAA2_SUPPORTED_DPNI_VERSION 6 ++#define DPAA2_SUPPORTED_DPBP_VERSION 2 ++#define DPAA2_SUPPORTED_DPCON_VERSION 2 ++ ++static void validate_rx_csum(struct dpaa2_eth_priv *priv, ++ u32 fd_status, ++ struct sk_buff *skb) ++{ ++ skb_checksum_none_assert(skb); ++ ++ /* HW checksum validation is disabled, nothing to do here */ ++ if (!(priv->net_dev->features & NETIF_F_RXCSUM)) ++ return; ++ ++ /* Read checksum validation bits */ ++ if (!((fd_status & DPAA2_FAS_L3CV) && ++ (fd_status & DPAA2_FAS_L4CV))) ++ return; ++ ++ /* Inform the stack there's no need to compute L3/L4 csum anymore */ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++/* Free a received FD. ++ * Not to be used for Tx conf FDs or on any other paths. 
++ */ ++static void free_rx_fd(struct dpaa2_eth_priv *priv, ++ const struct dpaa2_fd *fd, ++ void *vaddr) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ struct dpaa2_sg_entry *sgt; ++ void *sg_vaddr; ++ int i; ++ ++ /* If single buffer frame, just free the data buffer */ ++ if (fd_format == dpaa2_fd_single) ++ goto free_buf; ++ ++ /* For S/G frames, we first need to free all SG entries */ ++ sgt = vaddr + dpaa2_fd_get_offset(fd); ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { ++ dpaa2_sg_le_to_cpu(&sgt[i]); ++ ++ addr = dpaa2_sg_get_addr(&sgt[i]); ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ ++ sg_vaddr = phys_to_virt(addr); ++ put_page(virt_to_head_page(sg_vaddr)); ++ ++ if (dpaa2_sg_is_final(&sgt[i])) ++ break; ++ } ++ ++free_buf: ++ put_page(virt_to_head_page(vaddr)); ++} ++ ++/* Build a linear skb based on a single-buffer frame descriptor */ ++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ void *fd_vaddr) ++{ ++ struct sk_buff *skb = NULL; ++ u16 fd_offset = dpaa2_fd_get_offset(fd); ++ u32 fd_length = dpaa2_fd_get_len(fd); ++ ++ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ skb_reserve(skb, fd_offset); ++ skb_put(skb, fd_length); ++ ++ ch->buf_count--; ++ ++ return skb; ++} ++ ++/* Build a non linear (fragmented) skb based on a S/G table */ ++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ struct dpaa2_sg_entry *sgt) ++{ ++ struct sk_buff *skb = NULL; ++ struct device *dev = priv->net_dev->dev.parent; ++ void *sg_vaddr; ++ dma_addr_t sg_addr; ++ u16 sg_offset; ++ u32 sg_length; ++ struct page *page, *head_page; ++ int page_offset; ++ int i; ++ ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { ++ struct dpaa2_sg_entry *sge = &sgt[i]; ++ ++ dpaa2_sg_le_to_cpu(sge); ++ ++ /* NOTE: We only support SG entries in dpaa2_sg_single format, ++ * but this is the only format we may receive from HW anyway ++ */ ++ ++ /* Get the address and length from the S/G entry */ ++ sg_addr = dpaa2_sg_get_addr(sge); ++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ ++ sg_vaddr = phys_to_virt(sg_addr); ++ sg_length = dpaa2_sg_get_len(sge); ++ ++ if (i == 0) { ++ /* We build the skb around the first data buffer */ ++ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ sg_offset = dpaa2_sg_get_offset(sge); ++ skb_reserve(skb, sg_offset); ++ skb_put(skb, sg_length); ++ } else { ++ /* Rest of the data buffers are stored as skb frags */ ++ page = virt_to_page(sg_vaddr); ++ head_page = virt_to_head_page(sg_vaddr); ++ ++ /* Offset in page (which may be compound). ++ * Data in subsequent SG entries is stored from the ++ * beginning of the buffer, so we don't need to add the ++ * sg_offset. 
++ */ ++ page_offset = ((unsigned long)sg_vaddr & ++ (PAGE_SIZE - 1)) + ++ (page_address(page) - page_address(head_page)); ++ ++ skb_add_rx_frag(skb, i - 1, head_page, page_offset, ++ sg_length, DPAA2_ETH_RX_BUF_SIZE); ++ } ++ ++ if (dpaa2_sg_is_final(sge)) ++ break; ++ } ++ ++ /* Count all data buffers + SG table buffer */ ++ ch->buf_count -= i + 2; ++ ++ return skb; ++} ++ ++/* Main Rx frame processing routine */ ++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi) ++{ ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ void *vaddr; ++ struct sk_buff *skb; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpaa2_fas *fas; ++ u32 status = 0; ++ ++ /* Tracing point */ ++ trace_dpaa2_rx_fd(priv->net_dev, fd); ++ ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); ++ vaddr = phys_to_virt(addr); ++ ++ prefetch(vaddr + priv->buf_layout.private_data_size); ++ prefetch(vaddr + dpaa2_fd_get_offset(fd)); ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ ++ if (fd_format == dpaa2_fd_single) { ++ skb = build_linear_skb(priv, ch, fd, vaddr); ++ } else if (fd_format == dpaa2_fd_sg) { ++ struct dpaa2_sg_entry *sgt = ++ vaddr + dpaa2_fd_get_offset(fd); ++ skb = build_frag_skb(priv, ch, sgt); ++ put_page(virt_to_head_page(vaddr)); ++ percpu_extras->rx_sg_frames++; ++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); ++ } else { ++ /* We don't support any other format */ ++ goto err_frame_format; ++ } ++ ++ if (unlikely(!skb)) ++ goto err_build_skb; ++ ++ prefetch(skb->data); ++ ++ /* Get the timestamp value */ ++ if (priv->ts_rx_en) { ++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); ++ u64 *ns = (u64 *)(vaddr + ++ priv->buf_layout.private_data_size + ++ sizeof(struct dpaa2_fas)); ++ ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); ++ shhwtstamps->hwtstamp = ns_to_ktime(*ns); ++ } ++ ++ /* Check if we need to validate the L4 csum */ ++ if (likely(fd->simple.frc & DPAA2_FD_FRC_FASV)) { ++ fas = (struct dpaa2_fas *) ++ (vaddr + priv->buf_layout.private_data_size); ++ status = le32_to_cpu(fas->status); ++ validate_rx_csum(priv, status, skb); ++ } ++ ++ skb->protocol = eth_type_trans(skb, priv->net_dev); ++ ++ percpu_stats->rx_packets++; ++ percpu_stats->rx_bytes += skb->len; ++ ++ if (priv->net_dev->features & NETIF_F_GRO) ++ napi_gro_receive(napi, skb); ++ else ++ netif_receive_skb(skb); ++ ++ return; ++err_frame_format: ++err_build_skb: ++ free_rx_fd(priv, fd, vaddr); ++ percpu_stats->rx_dropped++; ++} ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++/* Processing of Rx frames received on the error FQ ++ * We check and print the error bits and then free the frame ++ */ ++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi __always_unused) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ void *vaddr; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_fas *fas; ++ u32 status = 0; ++ ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); ++ vaddr = phys_to_virt(addr); ++ ++ if (fd->simple.frc & DPAA2_FD_FRC_FASV) { ++ fas = (struct dpaa2_fas *) ++ (vaddr + 
priv->buf_layout.private_data_size); ++ status = le32_to_cpu(fas->status); ++ if (net_ratelimit()) ++ netdev_warn(priv->net_dev, "Rx frame error: 0x%08x\n", ++ status & DPAA2_ETH_RX_ERR_MASK); ++ } ++ free_rx_fd(priv, fd, vaddr); ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_stats->rx_errors++; ++} ++#endif ++ ++/* Consume all frames pull-dequeued into the store. This is the simplest way to ++ * make sure we don't accidentally issue another volatile dequeue which would ++ * overwrite (leak) frames already in the store. ++ * ++ * Observance of NAPI budget is not our concern, leaving that to the caller. ++ */ ++static int consume_frames(struct dpaa2_eth_channel *ch) ++{ ++ struct dpaa2_eth_priv *priv = ch->priv; ++ struct dpaa2_eth_fq *fq; ++ struct dpaa2_dq *dq; ++ const struct dpaa2_fd *fd; ++ int cleaned = 0; ++ int is_last; ++ ++ do { ++ dq = dpaa2_io_store_next(ch->store, &is_last); ++ if (unlikely(!dq)) { ++ /* If we're here, we *must* have placed a ++ * volatile dequeue comnmand, so keep reading through ++ * the store until we get some sort of valid response ++ * token (either a valid frame or an "empty dequeue") ++ */ ++ continue; ++ } ++ ++ fd = dpaa2_dq_fd(dq); ++ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); ++ fq->stats.frames++; ++ ++ fq->consume(priv, ch, fd, &ch->napi); ++ cleaned++; ++ } while (!is_last); ++ ++ return cleaned; ++} ++ ++/* Create a frame descriptor based on a fragmented skb */ ++static int build_sg_fd(struct dpaa2_eth_priv *priv, ++ struct sk_buff *skb, ++ struct dpaa2_fd *fd) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ void *sgt_buf = NULL; ++ dma_addr_t addr; ++ int nr_frags = skb_shinfo(skb)->nr_frags; ++ struct dpaa2_sg_entry *sgt; ++ int i, j, err; ++ int sgt_buf_size; ++ struct scatterlist *scl, *crt_scl; ++ int num_sg; ++ int num_dma_bufs; ++ struct dpaa2_eth_swa *swa; ++ ++ /* Create and map scatterlist. ++ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have ++ * to go beyond nr_frags+1. ++ * Note: We don't support chained scatterlists ++ */ ++ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) ++ return -EINVAL; ++ ++ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); ++ if (unlikely(!scl)) ++ return -ENOMEM; ++ ++ sg_init_table(scl, nr_frags + 1); ++ num_sg = skb_to_sgvec(skb, scl, 0, skb->len); ++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++ if (unlikely(!num_dma_bufs)) { ++ err = -ENOMEM; ++ goto dma_map_sg_failed; ++ } ++ ++ /* Prepare the HW SGT structure */ ++ sgt_buf_size = priv->tx_data_offset + ++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); ++ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); ++ if (unlikely(!sgt_buf)) { ++ err = -ENOMEM; ++ goto sgt_buf_alloc_failed; ++ } ++ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); ++ ++ /* PTA from egress side is passed as is to the confirmation side so ++ * we need to clear some fields here in order to find consistent values ++ * on TX confirmation. We are clearing FAS (Frame Annotation Status) ++ * field here. ++ */ ++ memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8); ++ ++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); ++ ++ /* Fill in the HW SGT structure. 
++ * ++ * sgt_buf is zeroed out, so the following fields are implicit ++ * in all sgt entries: ++ * - offset is 0 ++ * - format is 'dpaa2_sg_single' ++ */ ++ for_each_sg(scl, crt_scl, num_dma_bufs, i) { ++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); ++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); ++ } ++ dpaa2_sg_set_final(&sgt[i - 1], true); ++ ++ /* Store the skb backpointer in the SGT buffer. ++ * Fit the scatterlist and the number of buffers alongside the ++ * skb backpointer in the SWA. We'll need all of them on Tx Conf. ++ */ ++ swa = (struct dpaa2_eth_swa *)sgt_buf; ++ swa->skb = skb; ++ swa->scl = scl; ++ swa->num_sg = num_sg; ++ swa->num_dma_bufs = num_dma_bufs; ++ ++ /* Hardware expects the SG table to be in little endian format */ ++ for (j = 0; j < i; j++) ++ dpaa2_sg_cpu_to_le(&sgt[j]); ++ ++ /* Separately map the SGT buffer */ ++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE); ++ if (unlikely(dma_mapping_error(dev, addr))) { ++ err = -ENOMEM; ++ goto dma_map_single_failed; ++ } ++ dpaa2_fd_set_offset(fd, priv->tx_data_offset); ++ dpaa2_fd_set_format(fd, dpaa2_fd_sg); ++ dpaa2_fd_set_addr(fd, addr); ++ dpaa2_fd_set_len(fd, skb->len); ++ ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | ++ DPAA2_FD_CTRL_PTV1; ++ ++ return 0; ++ ++dma_map_single_failed: ++ kfree(sgt_buf); ++sgt_buf_alloc_failed: ++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++dma_map_sg_failed: ++ kfree(scl); ++ return err; ++} ++ ++/* Create a frame descriptor based on a linear skb */ ++static int build_single_fd(struct dpaa2_eth_priv *priv, ++ struct sk_buff *skb, ++ struct dpaa2_fd *fd) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u8 *buffer_start; ++ struct sk_buff **skbh; ++ dma_addr_t addr; ++ ++ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - ++ DPAA2_ETH_TX_BUF_ALIGN, ++ DPAA2_ETH_TX_BUF_ALIGN); ++ ++ /* PTA from egress side is passed as is to the confirmation side so ++ * we need to clear some fields here in order to find consistent values ++ * on TX confirmation. We are clearing FAS (Frame Annotation Status) ++ * field here. ++ */ ++ memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); ++ ++ /* Store a backpointer to the skb at the beginning of the buffer ++ * (in the private data area) such that we can release it ++ * on Tx confirm ++ */ ++ skbh = (struct sk_buff **)buffer_start; ++ *skbh = skb; ++ ++ addr = dma_map_single(dev, buffer_start, ++ skb_tail_pointer(skb) - buffer_start, ++ DMA_TO_DEVICE); ++ if (unlikely(dma_mapping_error(dev, addr))) ++ return -ENOMEM; ++ ++ dpaa2_fd_set_addr(fd, addr); ++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); ++ dpaa2_fd_set_len(fd, skb->len); ++ dpaa2_fd_set_format(fd, dpaa2_fd_single); ++ ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | ++ DPAA2_FD_CTRL_PTV1; ++ ++ return 0; ++} ++ ++/* FD freeing routine on the Tx path ++ * ++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb ++ * back-pointed to is also freed. ++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of ++ * dpaa2_eth_tx(). ++ * Optionally, return the frame annotation status word (FAS), which needs ++ * to be checked if we're on the confirmation path. 
++ */ ++static void free_tx_fd(const struct dpaa2_eth_priv *priv, ++ const struct dpaa2_fd *fd, ++ u32 *status) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t fd_addr; ++ struct sk_buff **skbh, *skb; ++ unsigned char *buffer_start; ++ int unmap_size; ++ struct scatterlist *scl; ++ int num_sg, num_dma_bufs; ++ struct dpaa2_eth_swa *swa; ++ bool fd_single; ++ struct dpaa2_fas *fas; ++ ++ fd_addr = dpaa2_fd_get_addr(fd); ++ skbh = phys_to_virt(fd_addr); ++ fd_single = (dpaa2_fd_get_format(fd) == dpaa2_fd_single); ++ ++ if (fd_single) { ++ skb = *skbh; ++ buffer_start = (unsigned char *)skbh; ++ /* Accessing the skb buffer is safe before dma unmap, because ++ * we didn't map the actual skb shell. ++ */ ++ dma_unmap_single(dev, fd_addr, ++ skb_tail_pointer(skb) - buffer_start, ++ DMA_TO_DEVICE); ++ } else { ++ swa = (struct dpaa2_eth_swa *)skbh; ++ skb = swa->skb; ++ scl = swa->scl; ++ num_sg = swa->num_sg; ++ num_dma_bufs = swa->num_dma_bufs; ++ ++ /* Unmap the scatterlist */ ++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++ kfree(scl); ++ ++ /* Unmap the SGT buffer */ ++ unmap_size = priv->tx_data_offset + ++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); ++ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE); ++ } ++ ++ /* Get the timestamp value */ ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { ++ struct skb_shared_hwtstamps shhwtstamps; ++ u64 *ns; ++ ++ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); ++ ++ ns = (u64 *)((void *)skbh + ++ priv->buf_layout.private_data_size + ++ sizeof(struct dpaa2_fas)); ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ shhwtstamps.hwtstamp = ns_to_ktime(*ns); ++ skb_tstamp_tx(skb, &shhwtstamps); ++ } ++ ++ /* Read the status from the Frame Annotation after we unmap the first ++ * buffer but before we free it. The caller function is responsible ++ * for checking the status value. ++ */ ++ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) { ++ fas = (struct dpaa2_fas *) ++ ((void *)skbh + priv->buf_layout.private_data_size); ++ *status = le32_to_cpu(fas->status); ++ } ++ ++ /* Free SGT buffer kmalloc'ed on tx */ ++ if (!fd_single) ++ kfree(skbh); ++ ++ /* Move on with skb release */ ++ dev_kfree_skb(skb); ++} ++ ++static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpaa2_fd fd; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ u16 queue_mapping, flow_id; ++ int err, i; ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ ++ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { ++ struct sk_buff *ns; ++ ++ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); ++ if (unlikely(!ns)) { ++ percpu_stats->tx_dropped++; ++ goto err_alloc_headroom; ++ } ++ dev_kfree_skb(skb); ++ skb = ns; ++ } ++ ++ /* We'll be holding a back-reference to the skb until Tx Confirmation; ++ * we don't want that overwritten by a concurrent Tx with a cloned skb. 
++ */ ++ skb = skb_unshare(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) { ++ /* skb_unshare() has already freed the skb */ ++ percpu_stats->tx_dropped++; ++ return NETDEV_TX_OK; ++ } ++ ++ /* Setup the FD fields */ ++ memset(&fd, 0, sizeof(fd)); ++ ++ if (skb_is_nonlinear(skb)) { ++ err = build_sg_fd(priv, skb, &fd); ++ percpu_extras->tx_sg_frames++; ++ percpu_extras->tx_sg_bytes += skb->len; ++ } else { ++ err = build_single_fd(priv, skb, &fd); ++ } ++ ++ if (unlikely(err)) { ++ percpu_stats->tx_dropped++; ++ goto err_build_fd; ++ } ++ ++ /* Tracing point */ ++ trace_dpaa2_tx_fd(net_dev, &fd); ++ ++ /* TxConf FQ selection primarily based on cpu affinity; this is ++ * non-migratable context, so it's safe to call smp_processor_id(). ++ */ ++ queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; ++ flow_id = priv->fq[queue_mapping].flowid; ++ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) { ++ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, ++ flow_id, &fd); ++ if (err != -EBUSY) ++ break; ++ } ++ percpu_extras->tx_portal_busy += i; ++ if (unlikely(err < 0)) { ++ percpu_stats->tx_errors++; ++ /* Clean up everything, including freeing the skb */ ++ free_tx_fd(priv, &fd, NULL); ++ } else { ++ percpu_stats->tx_packets++; ++ percpu_stats->tx_bytes += skb->len; ++ } ++ ++ return NETDEV_TX_OK; ++ ++err_build_fd: ++err_alloc_headroom: ++ dev_kfree_skb(skb); ++ ++ return NETDEV_TX_OK; ++} ++ ++/* Tx confirmation frame processing routine */ ++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi __always_unused) ++{ ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ u32 status = 0; ++ ++ /* Tracing point */ ++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd); ++ ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ percpu_extras->tx_conf_frames++; ++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); ++ ++ free_tx_fd(priv, fd, &status); ++ ++ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) { ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ /* Tx-conf logically pertains to the egress path. */ ++ percpu_stats->tx_errors++; ++ } ++} ++ ++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) ++{ ++ int err; ++ ++ err = dpni_set_l3_chksum_validation(priv->mc_io, 0, priv->mc_token, ++ enable); ++ if (err) { ++ netdev_err(priv->net_dev, ++ "dpni_set_l3_chksum_validation() failed\n"); ++ return err; ++ } ++ ++ err = dpni_set_l4_chksum_validation(priv->mc_io, 0, priv->mc_token, ++ enable); ++ if (err) { ++ netdev_err(priv->net_dev, ++ "dpni_set_l4_chksum_validation failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) ++{ ++ struct dpaa2_eth_fq *fq; ++ struct dpni_tx_flow_cfg tx_flow_cfg; ++ int err; ++ int i; ++ ++ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); ++ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN | ++ DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; ++ tx_flow_cfg.l3_chksum_gen = enable; ++ tx_flow_cfg.l4_chksum_gen = enable; ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ if (fq->type != DPAA2_TX_CONF_FQ) ++ continue; ++ ++ /* The Tx flowid is kept in the corresponding TxConf FQ. 
*/ ++ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, ++ &fq->flowid, &tx_flow_cfg); ++ if (err) { ++ netdev_err(priv->net_dev, "dpni_set_tx_flow failed\n"); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++/* Perform a single release command to add buffers ++ * to the specified buffer pool ++ */ ++static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; ++ void *buf; ++ dma_addr_t addr; ++ int i; ++ ++ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { ++ /* Allocate buffer visible to WRIOP + skb shared info + ++ * alignment padding ++ */ ++ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); ++ if (unlikely(!buf)) ++ goto err_alloc; ++ ++ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); ++ ++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ if (unlikely(dma_mapping_error(dev, addr))) ++ goto err_map; ++ ++ buf_array[i] = addr; ++ ++ /* tracing point */ ++ trace_dpaa2_eth_buf_seed(priv->net_dev, ++ buf, DPAA2_ETH_BUF_RAW_SIZE, ++ addr, DPAA2_ETH_RX_BUF_SIZE, ++ bpid); ++ } ++ ++release_bufs: ++ /* In case the portal is busy, retry until successful. ++ * The buffer release function would only fail if the QBMan portal ++ * was busy, which implies portal contention (i.e. more CPUs than ++ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, ++ * there is little we can realistically do, short of giving up - ++ * in which case we'd risk depleting the buffer pool and never again ++ * receiving the Rx interrupt which would kick-start the refill logic. ++ * So just keep retrying, at the risk of being moved to ksoftirqd. ++ */ ++ while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) ++ cpu_relax(); ++ return i; ++ ++err_map: ++ put_page(virt_to_head_page(buf)); ++err_alloc: ++ if (i) ++ goto release_bufs; ++ ++ return 0; ++} ++ ++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) ++{ ++ int i, j; ++ int new_count; ++ ++ /* This is the lazy seeding of Rx buffer pools. ++ * dpaa2_add_bufs() is also used on the Rx hotpath and calls ++ * napi_alloc_frag(). The trouble with that is that it in turn ends up ++ * calling this_cpu_ptr(), which mandates execution in atomic context. ++ * Rather than splitting up the code, do a one-off preempt disable. ++ */ ++ preempt_disable(); ++ for (j = 0; j < priv->num_channels; j++) { ++ for (i = 0; i < DPAA2_ETH_NUM_BUFS; ++ i += DPAA2_ETH_BUFS_PER_CMD) { ++ new_count = add_bufs(priv, bpid); ++ priv->channel[j]->buf_count += new_count; ++ ++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) { ++ preempt_enable(); ++ return -ENOMEM; ++ } ++ } ++ } ++ preempt_enable(); ++ ++ return 0; ++} ++ ++/** ++ * Drain the specified number of buffers from the DPNI's private buffer pool. 
++ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
++ */
++static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ void *vaddr;
++ int ret, i;
++
++ do {
++ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
++ buf_array, count);
++ if (ret < 0) {
++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
++ return;
++ }
++ for (i = 0; i < ret; i++) {
++ /* Same logic as on regular Rx path */
++ dma_unmap_single(dev, buf_array[i],
++ DPAA2_ETH_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++ vaddr = phys_to_virt(buf_array[i]);
++ put_page(virt_to_head_page(vaddr));
++ }
++ } while (ret);
++}
++
++static void drain_pool(struct dpaa2_eth_priv *priv)
++{
++ int i;
++
++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
++ drain_bufs(priv, 1);
++
++ for (i = 0; i < priv->num_channels; i++)
++ priv->channel[i]->buf_count = 0;
++}
++
++/* Function is called from softirq context only, so we don't need to guard
++ * the access to percpu count
++ */
++static int refill_pool(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ u16 bpid)
++{
++ int new_count;
++
++ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
++ return 0;
++
++ do {
++ new_count = add_bufs(priv, bpid);
++ if (unlikely(!new_count)) {
++ /* Out of memory; abort for now, we'll try later on */
++ break;
++ }
++ ch->buf_count += new_count;
++ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
++
++ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
++ return -ENOMEM;
++
++ return 0;
++}
++
++static int pull_channel(struct dpaa2_eth_channel *ch)
++{
++ int err;
++ int dequeues = -1;
++
++ /* Retry while portal is busy */
++ do {
++ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
++ dequeues++;
++ cpu_relax();
++ } while (err == -EBUSY);
++
++ ch->stats.dequeue_portal_busy += dequeues;
++ if (unlikely(err))
++ ch->stats.pull_err++;
++
++ return err;
++}
++
++/* NAPI poll routine
++ *
++ * Frames are dequeued from the QMan channel associated with this NAPI context.
++ * Rx, Tx confirmation and (if configured) Rx error frames all count
++ * towards the NAPI budget.
++ */ ++static int dpaa2_eth_poll(struct napi_struct *napi, int budget) ++{ ++ struct dpaa2_eth_channel *ch; ++ int cleaned = 0, store_cleaned; ++ struct dpaa2_eth_priv *priv; ++ int err; ++ ++ ch = container_of(napi, struct dpaa2_eth_channel, napi); ++ priv = ch->priv; ++ ++ while (cleaned < budget) { ++ err = pull_channel(ch); ++ if (unlikely(err)) ++ break; ++ ++ /* Refill pool if appropriate */ ++ refill_pool(priv, ch, priv->dpbp_attrs.bpid); ++ ++ store_cleaned = consume_frames(ch); ++ cleaned += store_cleaned; ++ ++ /* If we have enough budget left for a full store, ++ * try a new pull dequeue, otherwise we're done here ++ */ ++ if (store_cleaned == 0 || ++ cleaned > budget - DPAA2_ETH_STORE_SIZE) ++ break; ++ } ++ ++ if (cleaned < budget) { ++ napi_complete_done(napi, cleaned); ++ /* Re-enable data available notifications */ ++ do { ++ err = dpaa2_io_service_rearm(NULL, &ch->nctx); ++ cpu_relax(); ++ } while (err == -EBUSY); ++ } ++ ++ ch->stats.frames += cleaned; ++ ++ return cleaned; ++} ++ ++static void enable_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ napi_enable(&ch->napi); ++ } ++} ++ ++static void disable_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ napi_disable(&ch->napi); ++ } ++} ++ ++static int link_state_update(struct dpaa2_eth_priv *priv) ++{ ++ struct dpni_link_state state; ++ int err; ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (unlikely(err)) { ++ netdev_err(priv->net_dev, ++ "dpni_get_link_state() failed\n"); ++ return err; ++ } ++ ++ /* Chech link state; speed / duplex changes are not treated yet */ ++ if (priv->link_state.up == state.up) ++ return 0; ++ ++ priv->link_state = state; ++ if (state.up) { ++ netif_carrier_on(priv->net_dev); ++ netif_tx_start_all_queues(priv->net_dev); ++ } else { ++ netif_tx_stop_all_queues(priv->net_dev); ++ netif_carrier_off(priv->net_dev); ++ } ++ ++ netdev_info(priv->net_dev, "Link Event: state %s", ++ state.up ? "up" : "down"); ++ ++ return 0; ++} ++ ++static int dpaa2_eth_open(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = seed_pool(priv, priv->dpbp_attrs.bpid); ++ if (err) { ++ /* Not much to do; the buffer pool, though not filled up, ++ * may still contain some buffers which would enable us ++ * to limp on. ++ */ ++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", ++ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid); ++ } ++ ++ /* We'll only start the txqs when the link is actually ready; make sure ++ * we don't race against the link up notification, which may come ++ * immediately after dpni_enable(); ++ */ ++ netif_tx_stop_all_queues(net_dev); ++ enable_ch_napi(priv); ++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will ++ * return true and cause 'ip link show' to report the LOWER_UP flag, ++ * even though the link notification wasn't even received. ++ */ ++ netif_carrier_off(net_dev); ++ ++ err = dpni_enable(priv->mc_io, 0, priv->mc_token); ++ if (err < 0) { ++ netdev_err(net_dev, "dpni_enable() failed\n"); ++ goto enable_err; ++ } ++ ++ /* If the DPMAC object has already processed the link up interrupt, ++ * we have to learn the link state ourselves. 
++ */ ++ err = link_state_update(priv); ++ if (err < 0) { ++ netdev_err(net_dev, "Can't update link state\n"); ++ goto link_state_err; ++ } ++ ++ return 0; ++ ++link_state_err: ++enable_err: ++ disable_ch_napi(priv); ++ drain_pool(priv); ++ return err; ++} ++ ++/* The DPIO store must be empty when we call this, ++ * at the end of every NAPI cycle. ++ */ ++static u32 drain_channel(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch) ++{ ++ u32 drained = 0, total = 0; ++ ++ do { ++ pull_channel(ch); ++ drained = consume_frames(ch); ++ total += drained; ++ } while (drained); ++ ++ return total; ++} ++ ++static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ u32 drained = 0; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ drained += drain_channel(priv, ch); ++ } ++ ++ return drained; ++} ++ ++static int dpaa2_eth_stop(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int dpni_enabled; ++ int retries = 10; ++ u32 drained; ++ ++ netif_tx_stop_all_queues(net_dev); ++ netif_carrier_off(net_dev); ++ ++ /* Loop while dpni_disable() attempts to drain the egress FQs ++ * and confirm them back to us. ++ */ ++ do { ++ dpni_disable(priv->mc_io, 0, priv->mc_token); ++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); ++ if (dpni_enabled) ++ /* Allow the MC some slack */ ++ msleep(100); ++ } while (dpni_enabled && --retries); ++ if (!retries) { ++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); ++ /* Must go on and disable NAPI nonetheless, so we don't crash at ++ * the next "ifconfig up" ++ */ ++ } ++ ++ /* Wait for NAPI to complete on every core and disable it. ++ * In particular, this will also prevent NAPI from being rescheduled if ++ * a new CDAN is serviced, effectively discarding the CDAN. We therefore ++ * don't even need to disarm the channels, except perhaps for the case ++ * of a huge coalescing value. 
++ */ ++ disable_ch_napi(priv); ++ ++ /* Manually drain the Rx and TxConf queues */ ++ drained = drain_ingress_frames(priv); ++ if (drained) ++ netdev_dbg(net_dev, "Drained %d frames.\n", drained); ++ ++ /* Empty the buffer pool */ ++ drain_pool(priv); ++ ++ return 0; ++} ++ ++static int dpaa2_eth_init(struct net_device *net_dev) ++{ ++ u64 supported = 0; ++ u64 not_supported = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ u32 options = priv->dpni_attrs.options; ++ ++ /* Capabilities listing */ ++ supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI; ++ ++ if (options & DPNI_OPT_UNICAST_FILTER) ++ supported |= IFF_UNICAST_FLT; ++ else ++ not_supported |= IFF_UNICAST_FLT; ++ ++ if (options & DPNI_OPT_MULTICAST_FILTER) ++ supported |= IFF_MULTICAST; ++ else ++ not_supported |= IFF_MULTICAST; ++ ++ net_dev->priv_flags |= supported; ++ net_dev->priv_flags &= ~not_supported; ++ ++ /* Features */ ++ net_dev->features = NETIF_F_RXCSUM | ++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | ++ NETIF_F_SG | NETIF_F_HIGHDMA | ++ NETIF_F_LLTX; ++ net_dev->hw_features = net_dev->features; ++ ++ return 0; ++} ++ ++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; ++ int err; ++ ++ err = eth_mac_addr(net_dev, addr); ++ if (err < 0) { ++ dev_err(dev, "eth_mac_addr() failed with error %d\n", err); ++ return err; ++ } ++ ++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ net_dev->dev_addr); ++ if (err) { ++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++/** Fill in counters maintained by the GPP driver. These may be different from ++ * the hardware counters obtained by ethtool. ++ */ ++static struct rtnl_link_stats64 ++*dpaa2_eth_get_stats(struct net_device *net_dev, ++ struct rtnl_link_stats64 *stats) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct rtnl_link_stats64 *percpu_stats; ++ u64 *cpustats; ++ u64 *netstats = (u64 *)stats; ++ int i, j; ++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); ++ ++ for_each_possible_cpu(i) { ++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); ++ cpustats = (u64 *)percpu_stats; ++ for (j = 0; j < num; j++) ++ netstats[j] += cpustats[j]; ++ } ++ ++ return stats; ++} ++ ++static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ if (mtu < 68 || mtu > DPAA2_ETH_MAX_MTU) { ++ netdev_err(net_dev, "Invalid MTU %d. Valid range is: 68..%d\n", ++ mtu, DPAA2_ETH_MAX_MTU); ++ return -EINVAL; ++ } ++ ++ /* Set the maximum Rx frame length to match the transmit side; ++ * account for L2 headers when computing the MFL ++ */ ++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, ++ (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); ++ if (err) { ++ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); ++ return err; ++ } ++ ++ net_dev->mtu = mtu; ++ return 0; ++} ++ ++/* Copy mac unicast addresses from @net_dev to @priv. ++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
++ */ ++static void add_uc_hw_addr(const struct net_device *net_dev, ++ struct dpaa2_eth_priv *priv) ++{ ++ struct netdev_hw_addr *ha; ++ int err; ++ ++ netdev_for_each_uc_addr(ha, net_dev) { ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, ++ ha->addr); ++ if (err) ++ netdev_warn(priv->net_dev, ++ "Could not add ucast MAC %pM to the filtering table (err %d)\n", ++ ha->addr, err); ++ } ++} ++ ++/* Copy mac multicast addresses from @net_dev to @priv ++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. ++ */ ++static void add_mc_hw_addr(const struct net_device *net_dev, ++ struct dpaa2_eth_priv *priv) ++{ ++ struct netdev_hw_addr *ha; ++ int err; ++ ++ netdev_for_each_mc_addr(ha, net_dev) { ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, ++ ha->addr); ++ if (err) ++ netdev_warn(priv->net_dev, ++ "Could not add mcast MAC %pM to the filtering table (err %d)\n", ++ ha->addr, err); ++ } ++} ++ ++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int uc_count = netdev_uc_count(net_dev); ++ int mc_count = netdev_mc_count(net_dev); ++ u8 max_uc = priv->dpni_attrs.max_unicast_filters; ++ u8 max_mc = priv->dpni_attrs.max_multicast_filters; ++ u32 options = priv->dpni_attrs.options; ++ u16 mc_token = priv->mc_token; ++ struct fsl_mc_io *mc_io = priv->mc_io; ++ int err; ++ ++ /* Basic sanity checks; these probably indicate a misconfiguration */ ++ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0) ++ netdev_info(net_dev, ++ "max_unicast_filters=%d, DPNI_OPT_UNICAST_FILTER option must be enabled\n", ++ max_uc); ++ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0) ++ netdev_info(net_dev, ++ "max_multicast_filters=%d, DPNI_OPT_MULTICAST_FILTER option must be enabled\n", ++ max_mc); ++ ++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */ ++ if (uc_count > max_uc) { ++ netdev_info(net_dev, ++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", ++ uc_count, max_uc); ++ goto force_promisc; ++ } ++ if (mc_count > max_mc) { ++ netdev_info(net_dev, ++ "Multicast addr count reached %d, max allowed is %d; forcing promisc\n", ++ mc_count, max_mc); ++ goto force_mc_promisc; ++ } ++ ++ /* Adjust promisc settings due to flag combinations */ ++ if (net_dev->flags & IFF_PROMISC) ++ goto force_promisc; ++ if (net_dev->flags & IFF_ALLMULTI) { ++ /* First, rebuild unicast filtering table. This should be done ++ * in promisc mode, in order to avoid frame loss while we ++ * progressively add entries to the table. ++ * We don't know whether we had been in promisc already, and ++ * making an MC call to find it is expensive; so set uc promisc ++ * nonetheless. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set uc promisc\n"); ++ ++ /* Actual uc table reconstruction. */ ++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear uc filters\n"); ++ add_uc_hw_addr(net_dev, priv); ++ ++ /* Finally, clear uc promisc and set mc promisc as requested. */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear uc promisc\n"); ++ goto force_mc_promisc; ++ } ++ ++ /* Neither unicast, nor multicast promisc will be on... eventually. ++ * For now, rebuild mac filtering tables while forcing both of them on. 
++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); ++ ++ /* Actual mac filtering tables reconstruction */ ++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't clear mac filters\n"); ++ add_mc_hw_addr(net_dev, priv); ++ add_uc_hw_addr(net_dev, priv); ++ ++ /* Now we can clear both ucast and mcast promisc, without risking ++ * to drop legitimate frames anymore. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear ucast promisc\n"); ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear mcast promisc\n"); ++ ++ return; ++ ++force_promisc: ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set ucast promisc\n"); ++force_mc_promisc: ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set mcast promisc\n"); ++} ++ ++static int dpaa2_eth_set_features(struct net_device *net_dev, ++ netdev_features_t features) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ netdev_features_t changed = features ^ net_dev->features; ++ bool enable; ++ int err; ++ ++ if (changed & NETIF_F_RXCSUM) { ++ enable = !!(features & NETIF_F_RXCSUM); ++ err = set_rx_csum(priv, enable); ++ if (err) ++ return err; ++ } ++ ++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { ++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); ++ err = set_tx_csum(priv, enable); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(dev); ++ struct hwtstamp_config config; ++ ++ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) ++ return -EFAULT; ++ ++ switch (config.tx_type) { ++ case HWTSTAMP_TX_OFF: ++ priv->ts_tx_en = false; ++ break; ++ case HWTSTAMP_TX_ON: ++ priv->ts_tx_en = true; ++ break; ++ default: ++ return -ERANGE; ++ } ++ ++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { ++ priv->ts_rx_en = false; ++ } else { ++ priv->ts_rx_en = true; ++ /* TS is set for all frame types, not only those requested */ ++ config.rx_filter = HWTSTAMP_FILTER_ALL; ++ } ++ ++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
++ -EFAULT : 0; ++} ++ ++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ if (cmd == SIOCSHWTSTAMP) ++ return dpaa2_eth_ts_ioctl(dev, rq, cmd); ++ ++ return -EINVAL; ++} ++ ++static const struct net_device_ops dpaa2_eth_ops = { ++ .ndo_open = dpaa2_eth_open, ++ .ndo_start_xmit = dpaa2_eth_tx, ++ .ndo_stop = dpaa2_eth_stop, ++ .ndo_init = dpaa2_eth_init, ++ .ndo_set_mac_address = dpaa2_eth_set_addr, ++ .ndo_get_stats64 = dpaa2_eth_get_stats, ++ .ndo_change_mtu = dpaa2_eth_change_mtu, ++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, ++ .ndo_set_features = dpaa2_eth_set_features, ++ .ndo_do_ioctl = dpaa2_eth_ioctl, ++}; ++ ++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_eth_channel *ch; ++ ++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx); ++ ++ /* Update NAPI statistics */ ++ ch->stats.cdan++; ++ ++ napi_schedule_irqoff(&ch->napi); ++} ++ ++/* Verify that the FLIB API version of various MC objects is supported ++ * by our driver ++ */ ++static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) ++{ ++ char *name = ls_dev->obj_desc.type; ++ struct device *dev = &ls_dev->dev; ++ u16 supported_version, flib_version; ++ ++ if (strcmp(name, "dpni") == 0) { ++ flib_version = DPNI_VER_MAJOR; ++ supported_version = DPAA2_SUPPORTED_DPNI_VERSION; ++ } else if (strcmp(name, "dpbp") == 0) { ++ flib_version = DPBP_VER_MAJOR; ++ supported_version = DPAA2_SUPPORTED_DPBP_VERSION; ++ } else if (strcmp(name, "dpcon") == 0) { ++ flib_version = DPCON_VER_MAJOR; ++ supported_version = DPAA2_SUPPORTED_DPCON_VERSION; ++ } else { ++ dev_err(dev, "invalid object type (%s)\n", name); ++ return -EINVAL; ++ } ++ ++ /* Check that the FLIB-defined version matches the one reported by MC */ ++ if (mc_version != flib_version) { ++ dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", ++ name, mc_version, flib_version); ++ return -EINVAL; ++ } ++ ++ /* ... 
and that we actually support it */ ++ if (mc_version < supported_version) { ++ dev_err(dev, "Unsupported %s FLIB version (%d)\n", ++ name, mc_version); ++ return -EINVAL; ++ } ++ dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); ++ ++ return 0; ++} ++ ++/* Allocate and configure a DPCON object */ ++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) ++{ ++ struct fsl_mc_device *dpcon; ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpcon_attr attrs; ++ int err; ++ ++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), ++ FSL_MC_POOL_DPCON, &dpcon); ++ if (err) { ++ dev_info(dev, "Not enough DPCONs, will go on as-is\n"); ++ return NULL; ++ } ++ ++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); ++ if (err) { ++ dev_err(dev, "dpcon_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ err = check_obj_version(dpcon, attrs.version.major); ++ if (err) ++ goto err_dpcon_ver; ++ ++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_enable() failed\n"); ++ goto err_enable; ++ } ++ ++ return dpcon; ++ ++err_enable: ++err_dpcon_ver: ++err_get_attr: ++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); ++err_open: ++ fsl_mc_object_free(dpcon); ++ ++ return NULL; ++} ++ ++static void free_dpcon(struct dpaa2_eth_priv *priv, ++ struct fsl_mc_device *dpcon) ++{ ++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); ++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); ++ fsl_mc_object_free(dpcon); ++} ++ ++static struct dpaa2_eth_channel * ++alloc_channel(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *channel; ++ struct dpcon_attr attr; ++ struct device *dev = priv->net_dev->dev.parent; ++ int err; ++ ++ channel = kzalloc(sizeof(*channel), GFP_ATOMIC); ++ if (!channel) ++ return NULL; ++ ++ channel->dpcon = setup_dpcon(priv); ++ if (!channel->dpcon) ++ goto err_setup; ++ ++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, ++ &attr); ++ if (err) { ++ dev_err(dev, "dpcon_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ channel->dpcon_id = attr.id; ++ channel->ch_id = attr.qbman_ch_id; ++ channel->priv = priv; ++ ++ return channel; ++ ++err_get_attr: ++ free_dpcon(priv, channel->dpcon); ++err_setup: ++ kfree(channel); ++ return NULL; ++} ++ ++static void free_channel(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *channel) ++{ ++ free_dpcon(priv, channel->dpcon); ++ kfree(channel); ++} ++ ++/* DPIO setup: allocate and configure QBMan channels, setup core affinity ++ * and register data availability notifications ++ */ ++static int setup_dpio(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_io_notification_ctx *nctx; ++ struct dpaa2_eth_channel *channel; ++ struct dpcon_notification_cfg dpcon_notif_cfg; ++ struct device *dev = priv->net_dev->dev.parent; ++ int i, err; ++ ++ /* Don't allocate more channels than strictly necessary and assign ++ * them to cores starting from the first one available in ++ * cpu_online_mask. ++ * If the number of channels is lower than the number of cores, ++ * there will be no rx/tx conf processing on the last cores in the mask. 
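++ * For instance, with eight online cores but only four available DPCON/DPIO
++ * pairs, only the first four cores in the mask end up in dpio_cpumask and
++ * service frame queues; the rest never receive CDANs.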
++ */ ++ cpumask_clear(&priv->dpio_cpumask); ++ for_each_online_cpu(i) { ++ /* Try to allocate a channel */ ++ channel = alloc_channel(priv); ++ if (!channel) ++ goto err_alloc_ch; ++ ++ priv->channel[priv->num_channels] = channel; ++ ++ nctx = &channel->nctx; ++ nctx->is_cdan = 1; ++ nctx->cb = cdan_cb; ++ nctx->id = channel->ch_id; ++ nctx->desired_cpu = i; ++ ++ /* Register the new context */ ++ err = dpaa2_io_service_register(NULL, nctx); ++ if (err) { ++ dev_info(dev, "No affine DPIO for core %d\n", i); ++ /* This core doesn't have an affine DPIO, but there's ++ * a chance another one does, so keep trying ++ */ ++ free_channel(priv, channel); ++ continue; ++ } ++ ++ /* Register DPCON notification with MC */ ++ dpcon_notif_cfg.dpio_id = nctx->dpio_id; ++ dpcon_notif_cfg.priority = 0; ++ dpcon_notif_cfg.user_ctx = nctx->qman64; ++ err = dpcon_set_notification(priv->mc_io, 0, ++ channel->dpcon->mc_handle, ++ &dpcon_notif_cfg); ++ if (err) { ++ dev_err(dev, "dpcon_set_notification failed()\n"); ++ goto err_set_cdan; ++ } ++ ++ /* If we managed to allocate a channel and also found an affine ++ * DPIO for this core, add it to the final mask ++ */ ++ cpumask_set_cpu(i, &priv->dpio_cpumask); ++ priv->num_channels++; ++ ++ if (priv->num_channels == dpaa2_eth_max_channels(priv)) ++ break; ++ } ++ ++ /* Tx confirmation queues can only be serviced by cpus ++ * with an affine DPIO/channel ++ */ ++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); ++ ++ return 0; ++ ++err_set_cdan: ++ dpaa2_io_service_deregister(NULL, nctx); ++ free_channel(priv, channel); ++err_alloc_ch: ++ if (cpumask_empty(&priv->dpio_cpumask)) { ++ dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); ++ return -ENODEV; ++ } ++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); ++ ++ return 0; ++} ++ ++static void free_dpio(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ /* deregister CDAN notifications and free channels */ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ dpaa2_io_service_deregister(NULL, &ch->nctx); ++ free_channel(priv, ch); ++ } ++} ++ ++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, ++ int cpu) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) ++ if (priv->channel[i]->nctx.desired_cpu == cpu) ++ return priv->channel[i]; ++ ++ /* We should never get here. Issue a warning and return ++ * the first channel, because it's still better than nothing ++ */ ++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu); ++ ++ return priv->channel[0]; ++} ++ ++static void set_fq_affinity(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpaa2_eth_fq *fq; ++ int rx_cpu, txc_cpu; ++ int i; ++ ++ /* For each FQ, pick one channel/CPU to deliver frames to. ++ * This may well change at runtime, either through irqbalance or ++ * through direct user intervention. 
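++ * Assignment is round-robin: e.g. with dpio_cpumask = {0,1}, Rx FQs map to
++ * CPUs 0,1,0,1,... while TxConf FQs cycle independently over txconf_cpumask.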
++ */ ++ rx_cpu = cpumask_first(&priv->dpio_cpumask); ++ txc_cpu = cpumask_first(&priv->txconf_cpumask); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ switch (fq->type) { ++ case DPAA2_RX_FQ: ++ case DPAA2_RX_ERR_FQ: ++ fq->target_cpu = rx_cpu; ++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); ++ if (rx_cpu >= nr_cpu_ids) ++ rx_cpu = cpumask_first(&priv->dpio_cpumask); ++ break; ++ case DPAA2_TX_CONF_FQ: ++ fq->target_cpu = txc_cpu; ++ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); ++ if (txc_cpu >= nr_cpu_ids) ++ txc_cpu = cpumask_first(&priv->txconf_cpumask); ++ break; ++ default: ++ dev_err(dev, "Unknown FQ type: %d\n", fq->type); ++ } ++ fq->channel = get_affine_channel(priv, fq->target_cpu); ++ } ++} ++ ++static void setup_fqs(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ /* We have one TxConf FQ per Tx flow */ ++ for (i = 0; i < priv->dpni_attrs.max_senders; i++) { ++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; ++ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; ++ } ++ ++ /* The number of Rx queues (Rx distribution width) may be different from ++ * the number of cores. ++ * We only support one traffic class for now. ++ */ ++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { ++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; ++ priv->fq[priv->num_fqs++].flowid = (u16)i; ++ } ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ /* We have exactly one Rx error queue per DPNI */ ++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; ++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; ++#endif ++ ++ /* For each FQ, decide on which core to process incoming frames */ ++ set_fq_affinity(priv); ++} ++ ++/* Allocate and configure one buffer pool for each interface */ ++static int setup_dpbp(struct dpaa2_eth_priv *priv) ++{ ++ int err; ++ struct fsl_mc_device *dpbp_dev; ++ struct device *dev = priv->net_dev->dev.parent; ++ ++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, ++ &dpbp_dev); ++ if (err) { ++ dev_err(dev, "DPBP device allocation failed\n"); ++ return err; ++ } ++ ++ priv->dpbp_dev = dpbp_dev; ++ ++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, ++ &dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_enable() failed\n"); ++ goto err_enable; ++ } ++ ++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, ++ &priv->dpbp_attrs); ++ if (err) { ++ dev_err(dev, "dpbp_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); ++ if (err) ++ goto err_dpbp_ver; ++ ++ return 0; ++ ++err_dpbp_ver: ++err_get_attr: ++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); ++err_enable: ++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); ++err_open: ++ fsl_mc_object_free(dpbp_dev); ++ ++ return err; ++} ++ ++static void free_dpbp(struct dpaa2_eth_priv *priv) ++{ ++ drain_pool(priv); ++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); ++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); ++ fsl_mc_object_free(priv->dpbp_dev); ++} ++ ++/* Configure the DPNI object this interface is associated with */ ++static int setup_dpni(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev = &ls_dev->dev; ++ struct dpaa2_eth_priv *priv; ++ struct net_device *net_dev; ++ void *dma_mem; ++ int 
err; ++ ++ net_dev = dev_get_drvdata(dev); ++ priv = netdev_priv(net_dev); ++ ++ priv->dpni_id = ls_dev->obj_desc.id; ++ ++ /* get a handle for the DPNI object */ ++ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); ++ if (err) { ++ dev_err(dev, "dpni_open() failed\n"); ++ goto err_open; ++ } ++ ++ ls_dev->mc_io = priv->mc_io; ++ ls_dev->mc_handle = priv->mc_token; ++ ++ /* Map a memory region which will be used by MC to pass us an ++ * attribute structure ++ */ ++ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ goto err_alloc; ++ ++ priv->dpni_attrs.ext_cfg_iova = dma_map_single(dev, dma_mem, ++ DPAA2_EXT_CFG_SIZE, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, priv->dpni_attrs.ext_cfg_iova)) { ++ dev_err(dev, "dma mapping for dpni_ext_cfg failed\n"); ++ goto err_dma_map; ++ } ++ ++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, ++ &priv->dpni_attrs); ++ ++ /* We'll check the return code after unmapping, as we need to ++ * do this anyway ++ */ ++ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, ++ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); ++ ++ if (err) { ++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); ++ goto err_get_attr; ++ } ++ ++ err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); ++ if (err) ++ goto err_dpni_ver; ++ ++ memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); ++ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_extract_extended_cfg() failed\n"); ++ goto err_extract; ++ } ++ ++ /* Configure our buffers' layout */ ++ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | ++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | ++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | ++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; ++ priv->buf_layout.pass_parser_result = true; ++ priv->buf_layout.pass_frame_status = true; ++ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; ++ /* HW erratum mandates data alignment in multiples of 256 */ ++ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; ++ ++ /* rx buffer */ ++ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ &priv->buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_buffer_layout() failed"); ++ goto err_buf_layout; ++ } ++ /* tx buffer: remove Rx-only options */ ++ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | ++ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); ++ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ &priv->buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_buffer_layout() failed"); ++ goto err_buf_layout; ++ } ++ /* tx-confirm: same options as tx */ ++ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; ++ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP; ++ priv->buf_layout.pass_timestamp = 1; ++ err = dpni_set_tx_conf_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ &priv->buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_conf_buffer_layout() failed"); ++ goto err_buf_layout; ++ } ++ /* Now that we've set our tx buffer layout, retrieve the minimum ++ * required tx data offset. ++ */ ++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, ++ &priv->tx_data_offset); ++ if (err) { ++ dev_err(dev, "dpni_get_tx_data_offset() failed\n"); ++ goto err_data_offset; ++ } ++ ++ if ((priv->tx_data_offset % 64) != 0) ++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B", ++ priv->tx_data_offset); ++ ++ /* Accommodate SWA space. 
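++ * The 64B software annotation (struct dpaa2_eth_swa) lives in front of the
++ * frame data, so the minimum offset reported by dpni_get_tx_data_offset()
++ * is bumped by DPAA2_ETH_SWA_SIZE.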
*/ ++ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; ++ ++ /* allocate classification rule space */ ++ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * ++ DPAA2_CLASSIFIER_ENTRY_COUNT, GFP_KERNEL); ++ if (!priv->cls_rule) ++ goto err_cls_rule; ++ ++ kfree(dma_mem); ++ ++ return 0; ++ ++err_cls_rule: ++err_data_offset: ++err_buf_layout: ++err_extract: ++err_dpni_ver: ++err_get_attr: ++err_dma_map: ++ kfree(dma_mem); ++err_alloc: ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++err_open: ++ return err; ++} ++ ++static void free_dpni(struct dpaa2_eth_priv *priv) ++{ ++ int err; ++ ++ err = dpni_reset(priv->mc_io, 0, priv->mc_token); ++ if (err) ++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", ++ err); ++ ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++} ++ ++static int setup_rx_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_queue_attr rx_queue_attr; ++ struct dpni_queue_cfg queue_cfg; ++ int err; ++ ++ memset(&queue_cfg, 0, sizeof(queue_cfg)); ++ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | ++ DPNI_QUEUE_OPT_TAILDROP_THRESHOLD; ++ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; ++ queue_cfg.dest_cfg.priority = 1; ++ queue_cfg.user_ctx = (u64)fq; ++ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; ++ queue_cfg.tail_drop_threshold = DPAA2_ETH_TAILDROP_THRESH; ++ err = dpni_set_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, ++ &queue_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_flow() failed\n"); ++ return err; ++ } ++ ++ /* Get the actual FQID that was assigned by MC */ ++ err = dpni_get_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, ++ &rx_queue_attr); ++ if (err) { ++ dev_err(dev, "dpni_get_rx_flow() failed\n"); ++ return err; ++ } ++ fq->fqid = rx_queue_attr.fqid; ++ ++ return 0; ++} ++ ++static int setup_tx_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_tx_flow_cfg tx_flow_cfg; ++ struct dpni_tx_conf_cfg tx_conf_cfg; ++ struct dpni_tx_conf_attr tx_conf_attr; ++ int err; ++ ++ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); ++ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; ++ tx_flow_cfg.use_common_tx_conf_queue = 0; ++ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, ++ &fq->flowid, &tx_flow_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_flow() failed\n"); ++ return err; ++ } ++ ++ tx_conf_cfg.errors_only = 0; ++ tx_conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | ++ DPNI_QUEUE_OPT_DEST; ++ tx_conf_cfg.queue_cfg.user_ctx = (u64)fq; ++ tx_conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; ++ tx_conf_cfg.queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; ++ tx_conf_cfg.queue_cfg.dest_cfg.priority = 0; ++ ++ err = dpni_set_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, ++ &tx_conf_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_conf() failed\n"); ++ return err; ++ } ++ ++ err = dpni_get_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, ++ &tx_conf_attr); ++ if (err) { ++ dev_err(dev, "dpni_get_tx_conf() failed\n"); ++ return err; ++ } ++ ++ fq->fqid = tx_conf_attr.queue_attr.fqid; ++ ++ return 0; ++} ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct dpni_queue_attr queue_attr; ++ struct dpni_queue_cfg queue_cfg; ++ int err; ++ ++ /* Configure the Rx error queue to generate CDANs, ++ * just like the Rx queues ++ */ ++ queue_cfg.options = 
DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; ++ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; ++ queue_cfg.dest_cfg.priority = 1; ++ queue_cfg.user_ctx = (u64)fq; ++ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; ++ err = dpni_set_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_cfg); ++ if (err) { ++ netdev_err(priv->net_dev, "dpni_set_rx_err_queue() failed\n"); ++ return err; ++ } ++ ++ /* Get the FQID */ ++ err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, ++ &queue_attr); ++ if (err) { ++ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n"); ++ return err; ++ } ++ fq->fqid = queue_attr.fqid; ++ ++ return 0; ++} ++#endif ++ ++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, ++ * frame queues and channels ++ */ ++static int bind_dpni(struct dpaa2_eth_priv *priv) ++{ ++ struct net_device *net_dev = priv->net_dev; ++ struct device *dev = net_dev->dev.parent; ++ struct dpni_pools_cfg pools_params; ++ struct dpni_error_cfg err_cfg; ++ int err = 0; ++ int i; ++ ++ pools_params.num_dpbp = 1; ++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; ++ pools_params.pools[0].backup_pool = 0; ++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; ++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); ++ if (err) { ++ dev_err(dev, "dpni_set_pools() failed\n"); ++ return err; ++ } ++ ++ check_fs_support(net_dev); ++ ++ /* have the interface implicitly distribute traffic based on supported ++ * header fields ++ */ ++ if (dpaa2_eth_hash_enabled(priv)) { ++ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); ++ if (err) ++ return err; ++ } ++ ++ /* Configure handling of error frames */ ++ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK; ++ err_cfg.set_frame_annotation = 1; ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; ++#else ++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; ++#endif ++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, ++ &err_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_errors_behavior failed\n"); ++ return err; ++ } ++ ++ /* Configure Rx and Tx conf queues to generate CDANs */ ++ for (i = 0; i < priv->num_fqs; i++) { ++ switch (priv->fq[i].type) { ++ case DPAA2_RX_FQ: ++ err = setup_rx_flow(priv, &priv->fq[i]); ++ break; ++ case DPAA2_TX_CONF_FQ: ++ err = setup_tx_flow(priv, &priv->fq[i]); ++ break; ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ case DPAA2_RX_ERR_FQ: ++ err = setup_rx_err_flow(priv, &priv->fq[i]); ++ break; ++#endif ++ default: ++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); ++ return -EINVAL; ++ } ++ if (err) ++ return err; ++ } ++ ++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, &priv->tx_qdid); ++ if (err) { ++ dev_err(dev, "dpni_get_qdid() failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++/* Allocate rings for storing incoming frame descriptors */ ++static int alloc_rings(struct dpaa2_eth_priv *priv) ++{ ++ struct net_device *net_dev = priv->net_dev; ++ struct device *dev = net_dev->dev.parent; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ priv->channel[i]->store = ++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); ++ if (!priv->channel[i]->store) { ++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); ++ goto err_ring; ++ } ++ } ++ ++ return 0; ++ ++err_ring: ++ for (i = 0; i < priv->num_channels; i++) { ++ if (!priv->channel[i]->store) ++ break; ++ dpaa2_io_store_destroy(priv->channel[i]->store); ++ } ++ ++ return -ENOMEM; ++} ++ ++static void 
free_rings(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) ++ dpaa2_io_store_destroy(priv->channel[i]->store); ++} ++ ++static int netdev_init(struct net_device *net_dev) ++{ ++ int err; ++ struct device *dev = net_dev->dev.parent; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ u8 mac_addr[ETH_ALEN]; ++ u8 bcast_addr[ETH_ALEN]; ++ ++ net_dev->netdev_ops = &dpaa2_eth_ops; ++ ++ /* If the DPNI attributes contain an all-0 mac_addr, ++ * set a random hardware address ++ */ ++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ mac_addr); ++ if (err) { ++ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)", err); ++ return err; ++ } ++ if (is_zero_ether_addr(mac_addr)) { ++ /* Fills in net_dev->dev_addr, as required by ++ * register_netdevice() ++ */ ++ eth_hw_addr_random(net_dev); ++ /* Make the user aware, without cluttering the boot log */ ++ pr_info_once(KBUILD_MODNAME " device(s) have all-zero hwaddr, replaced with random"); ++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ net_dev->dev_addr); ++ if (err) { ++ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err); ++ return err; ++ } ++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all ++ * practical purposes, this will be our "permanent" mac address, ++ * at least until the next reboot. This move will also permit ++ * register_netdevice() to properly fill up net_dev->perm_addr. ++ */ ++ net_dev->addr_assign_type = NET_ADDR_PERM; ++ } else { ++ /* NET_ADDR_PERM is default, all we have to do is ++ * fill in the device addr. ++ */ ++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); ++ } ++ ++ /* Explicitly add the broadcast address to the MAC filtering table; ++ * the MC won't do that for us. ++ */ ++ eth_broadcast_addr(bcast_addr); ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); ++ if (err) { ++ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); ++ /* Won't return an error; at least, we'd have egress traffic */ ++ } ++ ++ /* Reserve enough space to align buffer as per hardware requirement; ++ * NOTE: priv->tx_data_offset MUST be initialized at this point. 
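++ * DPAA2_ETH_NEEDED_HEADROOM evaluates to tx_data_offset plus
++ * DPAA2_ETH_TX_BUF_ALIGN (64B) of alignment slack.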
++ */ ++ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); ++ ++ /* Our .ndo_init will be called herein */ ++ err = register_netdev(net_dev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev() = %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int poll_link_state(void *arg) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; ++ int err; ++ ++ while (!kthread_should_stop()) { ++ err = link_state_update(priv); ++ if (unlikely(err)) ++ return err; ++ ++ msleep(DPAA2_ETH_LINK_STATE_REFRESH); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) ++{ ++ u8 irq_index = DPNI_IRQ_INDEX; ++ u32 status, clear = 0; ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); ++ struct net_device *net_dev = dev_get_drvdata(dev); ++ int err; ++ ++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, ++ irq_index, &status); ++ if (unlikely(err)) { ++ netdev_err(net_dev, "Can't get irq status (err %d)", err); ++ clear = 0xffffffff; ++ goto out; ++ } ++ ++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { ++ clear |= DPNI_IRQ_EVENT_LINK_CHANGED; ++ link_state_update(netdev_priv(net_dev)); ++ } ++ ++out: ++ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, ++ irq_index, clear); ++ return IRQ_HANDLED; ++} ++ ++static int setup_irqs(struct fsl_mc_device *ls_dev) ++{ ++ int err = 0; ++ struct fsl_mc_device_irq *irq; ++ u8 irq_index = DPNI_IRQ_INDEX; ++ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED; ++ ++ err = fsl_mc_allocate_irqs(ls_dev); ++ if (err) { ++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); ++ return err; ++ } ++ ++ irq = ls_dev->irqs[0]; ++ err = devm_request_threaded_irq(&ls_dev->dev, irq->irq_number, ++ dpni_irq0_handler, ++ dpni_irq0_handler_thread, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(&ls_dev->dev), &ls_dev->dev); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); ++ goto free_mc_irq; ++ } ++ ++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ irq_index, mask); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); ++ goto free_irq; ++ } ++ ++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ irq_index, 1); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); ++ goto free_irq; ++ } ++ ++ return 0; ++ ++free_irq: ++ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); ++free_mc_irq: ++ fsl_mc_free_irqs(ls_dev); ++ ++ return err; ++} ++ ++static void add_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ ++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, ++ NAPI_POLL_WEIGHT); ++ } ++} ++ ++static void del_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ netif_napi_del(&ch->napi); ++ } ++} ++ ++/* SysFS support */ ++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ /* No MC API for getting the shaping config. We're stateful. 
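++ * i.e. we echo back whatever was last accepted by the store handler below,
++ * not a value read back from hardware.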
*/ ++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; ++ ++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); ++} ++ ++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ int err, items; ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ struct dpni_tx_shaping_cfg scfg; ++ ++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); ++ if (items != 2) { ++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); ++ return -EINVAL; ++ } ++ /* Size restriction as per MC API documentation */ ++ if (scfg.max_burst_size > 64000) { ++ pr_err("max_burst_size must be <= 64000, thanks.\n"); ++ return -EINVAL; ++ } ++ ++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_shaping() failed\n"); ++ return -EPERM; ++ } ++ /* If successful, save the current configuration for future inquiries */ ++ priv->shaping_cfg = scfg; ++ ++ return count; ++} ++ ++static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ ++ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); ++} ++ ++static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ struct dpaa2_eth_fq *fq; ++ bool running = netif_running(priv->net_dev); ++ int i, err; ++ ++ err = cpulist_parse(buf, &priv->txconf_cpumask); ++ if (err) ++ return err; ++ ++ /* Only accept CPUs that have an affine DPIO */ ++ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) { ++ netdev_info(priv->net_dev, ++ "cpumask must be a subset of 0x%lx\n", ++ *cpumask_bits(&priv->dpio_cpumask)); ++ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask, ++ &priv->txconf_cpumask); ++ } ++ ++ /* Rewiring the TxConf FQs requires interface shutdown. ++ */ ++ if (running) { ++ err = dpaa2_eth_stop(priv->net_dev); ++ if (err) ++ return -ENODEV; ++ } ++ ++ /* Set the new TxConf FQ affinities */ ++ set_fq_affinity(priv); ++ ++ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit ++ * link up notification is received. Give the polling thread enough time ++ * to detect the link state change, or else we'll end up with the ++ * transmission side forever shut down. 
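++ * Two refresh periods (2 x DPAA2_ETH_LINK_STATE_REFRESH ms) guarantee at
++ * least one full pass of poll_link_state() in between.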
++ */ ++ if (priv->do_link_poll) ++ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ if (fq->type != DPAA2_TX_CONF_FQ) ++ continue; ++ setup_tx_flow(priv, fq); ++ } ++ ++ if (running) { ++ err = dpaa2_eth_open(priv->net_dev); ++ if (err) ++ return -ENODEV; ++ } ++ ++ return count; ++} ++ ++static struct device_attribute dpaa2_eth_attrs[] = { ++ __ATTR(txconf_cpumask, ++ S_IRUSR | S_IWUSR, ++ dpaa2_eth_show_txconf_cpumask, ++ dpaa2_eth_write_txconf_cpumask), ++ ++ __ATTR(tx_shaping, ++ S_IRUSR | S_IWUSR, ++ dpaa2_eth_show_tx_shaping, ++ dpaa2_eth_write_tx_shaping), ++}; ++ ++void dpaa2_eth_sysfs_init(struct device *dev) ++{ ++ int i, err; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { ++ err = device_create_file(dev, &dpaa2_eth_attrs[i]); ++ if (err) { ++ dev_err(dev, "ERROR creating sysfs file\n"); ++ goto undo; ++ } ++ } ++ return; ++ ++undo: ++ while (i > 0) ++ device_remove_file(dev, &dpaa2_eth_attrs[--i]); ++} ++ ++void dpaa2_eth_sysfs_remove(struct device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) ++ device_remove_file(dev, &dpaa2_eth_attrs[i]); ++} ++ ++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) ++{ ++ struct device *dev; ++ struct net_device *net_dev = NULL; ++ struct dpaa2_eth_priv *priv = NULL; ++ int err = 0; ++ ++ dev = &dpni_dev->dev; ++ ++ /* Net device */ ++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); ++ if (!net_dev) { ++ dev_err(dev, "alloc_etherdev_mq() failed\n"); ++ return -ENOMEM; ++ } ++ ++ SET_NETDEV_DEV(net_dev, dev); ++ dev_set_drvdata(dev, net_dev); ++ ++ priv = netdev_priv(net_dev); ++ priv->net_dev = net_dev; ++ ++ /* Obtain a MC portal */ ++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, ++ &priv->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_portal_alloc; ++ } ++ ++ /* MC objects initialization and configuration */ ++ err = setup_dpni(dpni_dev); ++ if (err) ++ goto err_dpni_setup; ++ ++ err = setup_dpio(priv); ++ if (err) ++ goto err_dpio_setup; ++ ++ setup_fqs(priv); ++ ++ err = setup_dpbp(priv); ++ if (err) ++ goto err_dpbp_setup; ++ ++ err = bind_dpni(priv); ++ if (err) ++ goto err_bind; ++ ++ /* Add a NAPI context for each channel */ ++ add_ch_napi(priv); ++ ++ /* Percpu statistics */ ++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); ++ if (!priv->percpu_stats) { ++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); ++ err = -ENOMEM; ++ goto err_alloc_percpu_stats; ++ } ++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras); ++ if (!priv->percpu_extras) { ++ dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); ++ err = -ENOMEM; ++ goto err_alloc_percpu_extras; ++ } ++ ++ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); ++ if (!dev_valid_name(net_dev->name)) { ++ dev_warn(&net_dev->dev, ++ "netdevice name \"%s\" cannot be used, reverting to default..\n", ++ net_dev->name); ++ dev_alloc_name(net_dev, "eth%d"); ++ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); ++ } ++ ++ err = netdev_init(net_dev); ++ if (err) ++ goto err_netdev_init; ++ ++ /* Configure checksum offload based on current interface flags */ ++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); ++ if (err) ++ goto err_csum; ++ ++ err = set_tx_csum(priv, !!(net_dev->features & ++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); ++ if (err) ++ goto err_csum; ++ ++ err = alloc_rings(priv); ++ if (err) ++ goto err_alloc_rings; ++ ++ 
net_dev->ethtool_ops = &dpaa2_ethtool_ops; ++ ++ err = setup_irqs(dpni_dev); ++ if (err) { ++ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); ++ priv->poll_thread = kthread_run(poll_link_state, priv, ++ "%s_poll_link", net_dev->name); ++ if (IS_ERR(priv->poll_thread)) { ++ netdev_err(net_dev, "Error starting polling thread\n"); ++ goto err_poll_thread; ++ } ++ priv->do_link_poll = true; ++ } ++ ++ dpaa2_eth_sysfs_init(&net_dev->dev); ++ dpaa2_dbg_add(priv); ++ ++ dev_info(dev, "Probed interface %s\n", net_dev->name); ++ return 0; ++ ++err_poll_thread: ++ free_rings(priv); ++err_alloc_rings: ++err_csum: ++ unregister_netdev(net_dev); ++err_netdev_init: ++ free_percpu(priv->percpu_extras); ++err_alloc_percpu_extras: ++ free_percpu(priv->percpu_stats); ++err_alloc_percpu_stats: ++ del_ch_napi(priv); ++err_bind: ++ free_dpbp(priv); ++err_dpbp_setup: ++ free_dpio(priv); ++err_dpio_setup: ++ kfree(priv->cls_rule); ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++err_dpni_setup: ++ fsl_mc_portal_free(priv->mc_io); ++err_portal_alloc: ++ dev_set_drvdata(dev, NULL); ++ free_netdev(net_dev); ++ ++ return err; ++} ++ ++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct net_device *net_dev; ++ struct dpaa2_eth_priv *priv; ++ ++ dev = &ls_dev->dev; ++ net_dev = dev_get_drvdata(dev); ++ priv = netdev_priv(net_dev); ++ ++ dpaa2_dbg_remove(priv); ++ dpaa2_eth_sysfs_remove(&net_dev->dev); ++ ++ unregister_netdev(net_dev); ++ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); ++ ++ free_dpio(priv); ++ free_rings(priv); ++ del_ch_napi(priv); ++ free_dpbp(priv); ++ free_dpni(priv); ++ ++ fsl_mc_portal_free(priv->mc_io); ++ ++ free_percpu(priv->percpu_stats); ++ free_percpu(priv->percpu_extras); ++ ++ if (priv->do_link_poll) ++ kthread_stop(priv->poll_thread); ++ else ++ fsl_mc_free_irqs(ls_dev); ++ ++ kfree(priv->cls_rule); ++ ++ dev_set_drvdata(dev, NULL); ++ free_netdev(net_dev); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpni", ++ .ver_major = DPNI_VER_MAJOR, ++ .ver_minor = DPNI_VER_MINOR ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_eth_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_eth_probe, ++ .remove = dpaa2_eth_remove, ++ .match_id_table = dpaa2_eth_match_id_table ++}; ++ ++static int __init dpaa2_eth_driver_init(void) ++{ ++ int err; ++ ++ dpaa2_eth_dbg_init(); ++ ++ err = fsl_mc_driver_register(&dpaa2_eth_driver); ++ if (err) { ++ dpaa2_eth_dbg_exit(); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void __exit dpaa2_eth_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dpaa2_eth_driver); ++ dpaa2_eth_dbg_exit(); ++} ++ ++module_init(dpaa2_eth_driver_init); ++module_exit(dpaa2_eth_driver_exit); +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +new file mode 100644 +index 0000000..7274fbe +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +@@ -0,0 +1,377 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef __DPAA2_ETH_H ++#define __DPAA2_ETH_H ++ ++#include <linux/netdevice.h> ++#include <linux/if_vlan.h> ++#include "../../fsl-mc/include/fsl_dpaa2_io.h" ++#include "../../fsl-mc/include/fsl_dpaa2_fd.h" ++#include "../../fsl-mc/include/dpbp.h" ++#include "../../fsl-mc/include/dpbp-cmd.h" ++#include "../../fsl-mc/include/dpcon.h" ++#include "../../fsl-mc/include/dpcon-cmd.h" ++#include "../../fsl-mc/include/dpmng.h" ++#include "dpni.h" ++#include "dpni-cmd.h" ++ ++#include "dpaa2-eth-trace.h" ++#include "dpaa2-eth-debugfs.h" ++ ++#define DPAA2_ETH_STORE_SIZE 16 ++ ++/* Maximum number of scatter-gather entries in an ingress frame, ++ * considering the maximum receive frame size is 64K ++ */ ++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) ++ ++/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced ++ * Max Frame Length (currently 10k). ++ */ ++#define DPAA2_ETH_MFL (10 * 1024) ++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) ++/* Convert L3 MTU to L2 MFL */ ++#define DPAA2_ETH_L2_MAX_FRM(mtu) (mtu + VLAN_ETH_HLEN) ++ ++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo ++ * frames in the Rx queues (length of the current frame is not ++ * taken into account when making the taildrop decision) ++ */ ++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) ++ ++/* Buffer quota per queue. Must be large enough such that for minimum sized ++ * frames taildrop kicks in before the bpool gets depleted, so we compute ++ * how many 64B frames fit inside the taildrop threshold and add a margin ++ * to accommodate the buffer refill delay. ++ */ ++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) ++#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) ++#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE ++ ++/* Maximum number of buffers that can be acquired/released through a single ++ * QBMan command ++ */ ++#define DPAA2_ETH_BUFS_PER_CMD 7 ++ ++/* Hardware requires alignment for ingress/egress buffer addresses ++ * and ingress buffer lengths.
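++ * Rx buffer data must be aligned to 256B (hardware erratum, see
++ * setup_dpni()); Tx buffers only need 64B alignment.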
++ */ ++#define DPAA2_ETH_RX_BUF_SIZE 2048 ++#define DPAA2_ETH_TX_BUF_ALIGN 64 ++#define DPAA2_ETH_RX_BUF_ALIGN 256 ++#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ ++ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) ++ ++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress ++ * buffers large enough to allow building an skb around them and also account ++ * for alignment restrictions ++ */ ++#define DPAA2_ETH_BUF_RAW_SIZE \ ++ (DPAA2_ETH_RX_BUF_SIZE + \ ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ ++ DPAA2_ETH_RX_BUF_ALIGN) ++ ++/* PTP nominal frequency 1MHz */ ++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 ++ ++/* We are accommodating a skb backpointer and some S/G info ++ * in the frame's software annotation. The hardware ++ * options are either 0 or 64, so we choose the latter. ++ */ ++#define DPAA2_ETH_SWA_SIZE 64 ++ ++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ ++struct dpaa2_eth_swa { ++ struct sk_buff *skb; ++ struct scatterlist *scl; ++ int num_sg; ++ int num_dma_bufs; ++}; ++ ++/* Annotation valid bits in FD FRC */ ++#define DPAA2_FD_FRC_FASV 0x8000 ++#define DPAA2_FD_FRC_FAEADV 0x4000 ++#define DPAA2_FD_FRC_FAPRV 0x2000 ++#define DPAA2_FD_FRC_FAIADV 0x1000 ++#define DPAA2_FD_FRC_FASWOV 0x0800 ++#define DPAA2_FD_FRC_FAICFDV 0x0400 ++ ++/* Annotation bits in FD CTRL */ ++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ ++#define DPAA2_FD_CTRL_PTA 0x00800000 ++#define DPAA2_FD_CTRL_PTV1 0x00400000 ++ ++/* Frame annotation status */ ++struct dpaa2_fas { ++ u8 reserved; ++ u8 ppid; ++ __le16 ifpid; ++ __le32 status; ++} __packed; ++ ++/* Error and status bits in the frame annotation status word */ ++/* Debug frame, otherwise supposed to be discarded */ ++#define DPAA2_FAS_DISC 0x80000000 ++/* MACSEC frame */ ++#define DPAA2_FAS_MS 0x40000000 ++#define DPAA2_FAS_PTP 0x08000000 ++/* Ethernet multicast frame */ ++#define DPAA2_FAS_MC 0x04000000 ++/* Ethernet broadcast frame */ ++#define DPAA2_FAS_BC 0x02000000 ++#define DPAA2_FAS_KSE 0x00040000 ++#define DPAA2_FAS_EOFHE 0x00020000 ++#define DPAA2_FAS_MNLE 0x00010000 ++#define DPAA2_FAS_TIDE 0x00008000 ++#define DPAA2_FAS_PIEE 0x00004000 ++/* Frame length error */ ++#define DPAA2_FAS_FLE 0x00002000 ++/* Frame physical error */ ++#define DPAA2_FAS_FPE 0x00001000 ++#define DPAA2_FAS_PTE 0x00000080 ++#define DPAA2_FAS_ISP 0x00000040 ++#define DPAA2_FAS_PHE 0x00000020 ++#define DPAA2_FAS_BLE 0x00000010 ++/* L3 csum validation performed */ ++#define DPAA2_FAS_L3CV 0x00000008 ++/* L3 csum error */ ++#define DPAA2_FAS_L3CE 0x00000004 ++/* L4 csum validation performed */ ++#define DPAA2_FAS_L4CV 0x00000002 ++/* L4 csum error */ ++#define DPAA2_FAS_L4CE 0x00000001 ++/* Possible errors on the ingress path */ ++#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \ ++ DPAA2_FAS_EOFHE | \ ++ DPAA2_FAS_MNLE | \ ++ DPAA2_FAS_TIDE | \ ++ DPAA2_FAS_PIEE | \ ++ DPAA2_FAS_FLE | \ ++ DPAA2_FAS_FPE | \ ++ DPAA2_FAS_PTE | \ ++ DPAA2_FAS_ISP | \ ++ DPAA2_FAS_PHE | \ ++ DPAA2_FAS_BLE | \ ++ DPAA2_FAS_L3CE | \ ++ DPAA2_FAS_L4CE) ++/* Tx errors */ ++#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \ ++ DPAA2_FAS_EOFHE | \ ++ DPAA2_FAS_MNLE | \ ++ DPAA2_FAS_TIDE) ++ ++/* Time in milliseconds between link state updates */ ++#define DPAA2_ETH_LINK_STATE_REFRESH 1000 ++ ++/* Driver statistics, other than those in struct rtnl_link_stats64. ++ * These are usually collected per-CPU and aggregated by ethtool. 
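++ * (the per-CPU copies are summed in dpaa2_eth_get_ethtool_stats() in
++ * dpaa2-ethtool.c).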
++ */ ++struct dpaa2_eth_drv_stats { ++ __u64 tx_conf_frames; ++ __u64 tx_conf_bytes; ++ __u64 tx_sg_frames; ++ __u64 tx_sg_bytes; ++ __u64 rx_sg_frames; ++ __u64 rx_sg_bytes; ++ /* Enqueues retried due to portal busy */ ++ __u64 tx_portal_busy; ++}; ++ ++/* Per-FQ statistics */ ++struct dpaa2_eth_fq_stats { ++ /* Number of frames received on this queue */ ++ __u64 frames; ++}; ++ ++/* Per-channel statistics */ ++struct dpaa2_eth_ch_stats { ++ /* Volatile dequeues retried due to portal busy */ ++ __u64 dequeue_portal_busy; ++ /* Number of CDANs; useful to estimate avg NAPI len */ ++ __u64 cdan; ++ /* Number of frames received on queues from this channel */ ++ __u64 frames; ++ /* Pull errors */ ++ __u64 pull_err; ++}; ++ ++/* Maximum number of queues associated with a DPNI */ ++#define DPAA2_ETH_MAX_RX_QUEUES 16 ++#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS ++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 ++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ ++ DPAA2_ETH_MAX_TX_QUEUES + \ ++ DPAA2_ETH_MAX_RX_ERR_QUEUES) ++ ++#define DPAA2_ETH_MAX_DPCONS NR_CPUS ++ ++enum dpaa2_eth_fq_type { ++ DPAA2_RX_FQ = 0, ++ DPAA2_TX_CONF_FQ, ++ DPAA2_RX_ERR_FQ ++}; ++ ++struct dpaa2_eth_priv; ++ ++struct dpaa2_eth_fq { ++ u32 fqid; ++ u16 flowid; ++ int target_cpu; ++ struct dpaa2_eth_channel *channel; ++ enum dpaa2_eth_fq_type type; ++ ++ void (*consume)(struct dpaa2_eth_priv *, ++ struct dpaa2_eth_channel *, ++ const struct dpaa2_fd *, ++ struct napi_struct *); ++ struct dpaa2_eth_fq_stats stats; ++}; ++ ++struct dpaa2_eth_channel { ++ struct dpaa2_io_notification_ctx nctx; ++ struct fsl_mc_device *dpcon; ++ int dpcon_id; ++ int ch_id; ++ int dpio_id; ++ struct napi_struct napi; ++ struct dpaa2_io_store *store; ++ struct dpaa2_eth_priv *priv; ++ int buf_count; ++ struct dpaa2_eth_ch_stats stats; ++}; ++ ++struct dpaa2_eth_cls_rule { ++ struct ethtool_rx_flow_spec fs; ++ bool in_use; ++}; ++ ++/* Driver private data */ ++struct dpaa2_eth_priv { ++ struct net_device *net_dev; ++ ++ u8 num_fqs; ++ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; ++ ++ u8 num_channels; ++ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; ++ ++ int dpni_id; ++ struct dpni_attr dpni_attrs; ++ struct dpni_extended_cfg dpni_ext_cfg; ++ /* Insofar as the MC is concerned, we're using one layout on all 3 types ++ * of buffers (Rx, Tx, Tx-Conf). ++ */ ++ struct dpni_buffer_layout buf_layout; ++ u16 tx_data_offset; ++ ++ struct fsl_mc_device *dpbp_dev; ++ struct dpbp_attr dpbp_attrs; ++ ++ u16 tx_qdid; ++ struct fsl_mc_io *mc_io; ++ /* SysFS-controlled affinity mask for TxConf FQs */ ++ struct cpumask txconf_cpumask; ++ /* Cores which have an affine DPIO/DPCON. ++ * This is the cpu set on which Rx frames are processed; ++ * Tx confirmation frames are processed on a subset of this, ++ * depending on user settings. 
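++ * (that subset is txconf_cpumask above, adjustable at runtime through the
++ * txconf_cpumask sysfs attribute).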
++ */ ++ struct cpumask dpio_cpumask; ++ ++ /* Standard statistics */ ++ struct rtnl_link_stats64 __percpu *percpu_stats; ++ /* Extra stats, in addition to the ones known by the kernel */ ++ struct dpaa2_eth_drv_stats __percpu *percpu_extras; ++ ++ u16 mc_token; ++ ++ struct dpni_link_state link_state; ++ bool do_link_poll; ++ struct task_struct *poll_thread; ++ ++ /* enabled ethtool hashing bits */ ++ u64 rx_hash_fields; ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++ struct dpaa2_debugfs dbg; ++#endif ++ ++ /* array of classification rules */ ++ struct dpaa2_eth_cls_rule *cls_rule; ++ ++ struct dpni_tx_shaping_cfg shaping_cfg; ++ ++ bool ts_tx_en; /* Tx timestamping enabled */ ++ bool ts_rx_en; /* Rx timestamping enabled */ ++}; ++ ++/* default Rx hash options, set during probing */ ++#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ ++ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ ++ | RXH_L4_B_2_3) ++ ++#define dpaa2_eth_hash_enabled(priv) \ ++ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) ++ ++#define dpaa2_eth_fs_enabled(priv) \ ++ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) ++ ++#define DPAA2_CLASSIFIER_ENTRY_COUNT 16 ++ ++/* Required by struct dpni_attr::ext_cfg_iova */ ++#define DPAA2_EXT_CFG_SIZE 256 ++ ++extern const struct ethtool_ops dpaa2_ethtool_ops; ++ ++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); ++ ++static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) ++{ ++ if (!dpaa2_eth_hash_enabled(priv)) ++ return 1; ++ ++ return priv->dpni_ext_cfg.tc_cfg[0].max_dist; ++} ++ ++static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) ++{ ++ /* Ideally, we want a number of channels large enough ++ * to accommodate both the Rx distribution size ++ * and the max number of Tx confirmation queues ++ */ ++ return max_t(int, dpaa2_eth_queue_count(priv), ++ priv->dpni_attrs.max_senders); ++} ++ ++void check_fs_support(struct net_device *); ++ ++#endif /* __DPAA2_H */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +new file mode 100644 +index 0000000..fdab07f +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +@@ -0,0 +1,861 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "dpni.h" /* DPNI_LINK_OPT_* */ ++#include "dpaa2-eth.h" ++ ++/* size of DMA memory used to pass configuration to classifier, in bytes */ ++#define DPAA2_CLASSIFIER_DMA_SIZE 256 ++ ++/* To be kept in sync with 'enum dpni_counter' */ ++char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { ++ "rx frames", ++ "rx bytes", ++ /* rx frames filtered/policed */ ++ "rx filtered frames", ++ /* rx frames dropped with errors */ ++ "rx discarded frames", ++ "rx mcast frames", ++ "rx mcast bytes", ++ "rx bcast frames", ++ "rx bcast bytes", ++ "tx frames", ++ "tx bytes", ++ /* tx frames dropped with errors */ ++ "tx discarded frames", ++}; ++ ++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) ++ ++/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ ++char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { ++ /* per-cpu stats */ ++ ++ "tx conf frames", ++ "tx conf bytes", ++ "tx sg frames", ++ "tx sg bytes", ++ "rx sg frames", ++ "rx sg bytes", ++ /* how many times we had to retry the enqueue command */ ++ "enqueue portal busy", ++ ++ /* Channel stats */ ++ /* How many times we had to retry the volatile dequeue command */ ++ "dequeue portal busy", ++ "channel pull errors", ++ /* Number of notifications received */ ++ "cdan", ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ /* FQ stats */ ++ "rx pending frames", ++ "rx pending bytes", ++ "tx conf pending frames", ++ "tx conf pending bytes", ++ "buffer count" ++#endif ++}; ++ ++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) ++ ++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ struct mc_version mc_ver; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ char fw_version[ETHTOOL_FWVERS_LEN]; ++ char version[32]; ++ int err; ++ ++ err = mc_get_version(priv->mc_io, 0, &mc_ver); ++ if (err) { ++ strlcpy(drvinfo->fw_version, "Error retrieving MC version", ++ sizeof(drvinfo->fw_version)); ++ } else { ++ scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", ++ mc_ver.major, mc_ver.minor, mc_ver.revision); ++ strlcpy(drvinfo->fw_version, fw_version, ++ sizeof(drvinfo->fw_version)); ++ } ++ ++ scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, ++ DPNI_VER_MINOR); ++ strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); ++ ++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); ++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), ++ sizeof(drvinfo->bus_info)); ++} ++ ++static int dpaa2_eth_get_settings(struct net_device *net_dev, ++ struct ethtool_cmd *cmd) ++{ ++ struct dpni_link_state state = {0}; ++ int err = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ /* At the moment, we have no way of interrogating the DPMAC ++ * from the DPNI side - and for that matter there may exist ++ * no DPMAC at all. 
So for now we just don't report anything ++ * beyond the DPNI attributes. ++ */ ++ if (state.options & DPNI_LINK_OPT_AUTONEG) ++ cmd->autoneg = AUTONEG_ENABLE; ++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) ++ cmd->duplex = DUPLEX_FULL; ++ ethtool_cmd_speed_set(cmd, state.rate); ++ ++out: ++ return err; ++} ++ ++static int dpaa2_eth_set_settings(struct net_device *net_dev, ++ struct ethtool_cmd *cmd) ++{ ++ struct dpni_link_cfg cfg = {0}; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err = 0; ++ ++ netdev_dbg(net_dev, "Setting link parameters..."); ++ ++ /* Due to a temporary firmware limitation, the DPNI must be down ++ * in order to be able to change link settings. Taking steps to let ++ * the user know that. ++ */ ++ if (netif_running(net_dev)) { ++ netdev_info(net_dev, "Sorry, interface must be brought down first.\n"); ++ return -EACCES; ++ } ++ ++ cfg.rate = ethtool_cmd_speed(cmd); ++ if (cmd->autoneg == AUTONEG_ENABLE) ++ cfg.options |= DPNI_LINK_OPT_AUTONEG; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG; ++ if (cmd->duplex == DUPLEX_HALF) ++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; ++ ++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); ++ if (err) ++ /* ethtool will be loud enough if we return an error; no point ++ * in putting our own error message on the console by default ++ */ ++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); ++ ++ return err; ++} ++ ++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, ++ u8 *data) ++{ ++ u8 *p = data; ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { ++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { ++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ break; ++ } ++} ++ ++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ ++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++/** Fill in hardware counters, as returned by the MC firmware. 
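++ * The data array follows the order of the string tables above: DPNI counters
++ * first, then the per-cpu extras summed over all CPUs, then the channel stats.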
++ */ ++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ int i; /* Current index in the data array */ ++ int j, k, err; ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ u32 fcnt, bcnt; ++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0; ++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0; ++ u32 buf_cnt; ++#endif ++ u64 cdan = 0; ++ u64 portal_busy = 0, pull_err = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpaa2_eth_drv_stats *extras; ++ struct dpaa2_eth_ch_stats *ch_stats; ++ ++ memset(data, 0, ++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); ++ ++ /* Print standard counters, from DPNI statistics */ ++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { ++ err = dpni_get_counter(priv->mc_io, 0, priv->mc_token, i, ++ data + i); ++ if (err != 0) ++ netdev_warn(net_dev, "Err %d getting DPNI counter %d", ++ err, i); ++ } ++ ++ /* Print per-cpu extra stats */ ++ for_each_online_cpu(k) { ++ extras = per_cpu_ptr(priv->percpu_extras, k); ++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) ++ *((__u64 *)data + i + j) += *((__u64 *)extras + j); ++ } ++ i += j; ++ ++ /* We may be using fewer DPIOs than actual CPUs */ ++ for_each_cpu(j, &priv->dpio_cpumask) { ++ ch_stats = &priv->channel[j]->stats; ++ cdan += ch_stats->cdan; ++ portal_busy += ch_stats->dequeue_portal_busy; ++ pull_err += ch_stats->pull_err; ++ } ++ ++ *(data + i++) = portal_busy; ++ *(data + i++) = pull_err; ++ *(data + i++) = cdan; ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ for (j = 0; j < priv->num_fqs; j++) { ++ /* Print FQ instantaneous counts */ ++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, ++ &fcnt, &bcnt); ++ if (err) { ++ netdev_warn(net_dev, "FQ query error %d", err); ++ return; ++ } ++ ++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { ++ fcnt_tx_total += fcnt; ++ bcnt_tx_total += bcnt; ++ } else { ++ fcnt_rx_total += fcnt; ++ bcnt_rx_total += bcnt; ++ } ++ } ++ *(data + i++) = fcnt_rx_total; ++ *(data + i++) = bcnt_rx_total; ++ *(data + i++) = fcnt_tx_total; ++ *(data + i++) = bcnt_tx_total; ++ ++ err = dpaa2_io_query_bp_count(NULL, priv->dpbp_attrs.bpid, &buf_cnt); ++ if (err) { ++ netdev_warn(net_dev, "Buffer count query error %d\n", err); ++ return; ++ } ++ *(data + i++) = buf_cnt; ++#endif ++} ++ ++static const struct dpaa2_eth_hash_fields { ++ u64 rxnfc_field; ++ enum net_prot cls_prot; ++ int cls_field; ++ int size; ++} hash_fields[] = { ++ { ++ /* L2 header */ ++ .rxnfc_field = RXH_L2DA, ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_DA, ++ .size = 6, ++ }, { ++ /* VLAN header */ ++ .rxnfc_field = RXH_VLAN, ++ .cls_prot = NET_PROT_VLAN, ++ .cls_field = NH_FLD_VLAN_TCI, ++ .size = 2, ++ }, { ++ /* IP header */ ++ .rxnfc_field = RXH_IP_SRC, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_SRC, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_IP_DST, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_DST, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_L3_PROTO, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_PROTO, ++ .size = 1, ++ }, { ++ /* Using UDP ports, this is functionally equivalent to raw ++ * byte pairs from L4 header. 
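++ * (ethtool's RXH_L4_B_0_1 / RXH_L4_B_2_3 name L4 bytes 0-1 and 2-3, which
++ * for UDP and TCP are the source and destination ports).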
++ */ ++ .rxnfc_field = RXH_L4_B_0_1, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_SRC, ++ .size = 2, ++ }, { ++ .rxnfc_field = RXH_L4_B_2_3, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_DST, ++ .size = 2, ++ }, ++}; ++ ++static int cls_is_enabled(struct net_device *net_dev, u64 flag) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ return !!(priv->rx_hash_fields & flag); ++} ++ ++static int cls_key_off(struct net_device *net_dev, u64 flag) ++{ ++ int i, off = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { ++ if (hash_fields[i].rxnfc_field & flag) ++ return off; ++ if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) ++ off += hash_fields[i].size; ++ } ++ ++ return -1; ++} ++ ++static u8 cls_key_size(struct net_device *net_dev) ++{ ++ u8 i, size = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { ++ if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) ++ continue; ++ size += hash_fields[i].size; ++ } ++ ++ return size; ++} ++ ++static u8 cls_max_key_size(struct net_device *net_dev) ++{ ++ u8 i, size = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) ++ size += hash_fields[i].size; ++ ++ return size; ++} ++ ++void check_fs_support(struct net_device *net_dev) ++{ ++ u8 key_size = cls_max_key_size(net_dev); ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && ++ priv->dpni_attrs.max_dist_key_size < key_size) { ++ dev_err(&net_dev->dev, ++ "max_dist_key_size = %d, expected %d. Steering is disabled\n", ++ priv->dpni_attrs.max_dist_key_size, ++ key_size); ++ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS; ++ } ++} ++ ++/* Set RX hash options ++ * flags is a combination of RXH_ bits ++ */ ++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) ++{ ++ struct device *dev = net_dev->dev.parent; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpkg_profile_cfg cls_cfg; ++ struct dpni_rx_tc_dist_cfg dist_cfg; ++ u8 *dma_mem; ++ u64 enabled_flags = 0; ++ int i; ++ int err = 0; ++ ++ if (!dpaa2_eth_hash_enabled(priv)) { ++ dev_err(dev, "Hashing support is not enabled\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (flags & ~DPAA2_RXH_SUPPORTED) { ++ /* RXH_DISCARD is not supported */ ++ dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { ++ struct dpkg_extract *key = ++ &cls_cfg.extracts[cls_cfg.num_extracts]; ++ ++ if (!(flags & hash_fields[i].rxnfc_field)) ++ continue; ++ ++ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { ++ dev_err(dev, "error adding key extraction rule, too many rules?\n"); ++ return -E2BIG; ++ } ++ ++ key->type = DPKG_EXTRACT_FROM_HDR; ++ key->extract.from_hdr.prot = hash_fields[i].cls_prot; ++ key->extract.from_hdr.type = DPKG_FULL_FIELD; ++ key->extract.from_hdr.field = hash_fields[i].cls_field; ++ cls_cfg.num_extracts++; ++ ++ enabled_flags |= hash_fields[i].rxnfc_field; ++ } ++ ++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_prepare_key_cfg error %d", err); ++ return err; ++ } ++ ++ memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ ++ /* Prepare for setting the rx dist */ ++ dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem, ++ DPAA2_CLASSIFIER_DMA_SIZE, ++ DMA_TO_DEVICE); ++ if 
(dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ kfree(dma_mem); ++ return -ENOMEM; ++ } ++ ++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); ++ if (dpaa2_eth_fs_enabled(priv)) { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; ++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; ++ } else { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ } ++ ++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); ++ dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ kfree(dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); ++ return err; ++ } ++ ++ priv->rx_hash_fields = enabled_flags; ++ ++ return 0; ++} ++ ++static int prep_cls_rule(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs, ++ void *key) ++{ ++ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; ++ struct ethhdr *eth_h, *eth_m; ++ struct ethtool_flow_ext *ext_h, *ext_m; ++ const u8 key_size = cls_key_size(net_dev); ++ void *msk = key + key_size; ++ ++ memset(key, 0, key_size * 2); ++ ++ /* This code is a major mess, it has to be cleaned up after the ++ * classification mask issue is fixed and key format will be made static ++ */ ++ ++ switch (fs->flow_type & 0xff) { ++ case TCP_V4_FLOW: ++ l4ip4_h = &fs->h_u.tcp_ip4_spec; ++ l4ip4_m = &fs->m_u.tcp_ip4_spec; ++ /* TODO: ethertype to match IPv4 and protocol to match TCP */ ++ goto l4ip4; ++ ++ case UDP_V4_FLOW: ++ l4ip4_h = &fs->h_u.udp_ip4_spec; ++ l4ip4_m = &fs->m_u.udp_ip4_spec; ++ goto l4ip4; ++ ++ case SCTP_V4_FLOW: ++ l4ip4_h = &fs->h_u.sctp_ip4_spec; ++ l4ip4_m = &fs->m_u.sctp_ip4_spec; ++ ++l4ip4: ++ if (l4ip4_m->tos) { ++ netdev_err(net_dev, ++ "ToS is not supported for IPv4 L4\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { ++ netdev_err(net_dev, "IP SRC not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) { ++ netdev_err(net_dev, "IP DST not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { ++ netdev_err(net_dev, "PSRC not supported, ignored\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { ++ netdev_err(net_dev, "PDST not supported, ignored\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (cls_is_enabled(net_dev, RXH_IP_SRC)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC)) ++ = l4ip4_h->ip4src; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) ++ = l4ip4_m->ip4src; ++ } ++ if (cls_is_enabled(net_dev, RXH_IP_DST)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) ++ = l4ip4_h->ip4dst; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) ++ = l4ip4_m->ip4dst; ++ } ++ ++ if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) ++ = l4ip4_h->psrc; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) ++ = l4ip4_m->psrc; ++ } ++ ++ if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) ++ = l4ip4_h->pdst; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) ++ = l4ip4_m->pdst; ++ } ++ break; ++ ++ case ETHER_FLOW: ++ eth_h = &fs->h_u.ether_spec; ++ eth_m = &fs->m_u.ether_spec; ++ ++ if (eth_m->h_proto) { ++ netdev_err(net_dev, "Ethertype is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (!is_zero_ether_addr(eth_m->h_source)) { ++ netdev_err(net_dev, "ETH SRC is not supported!\n"); ++ return -EOPNOTSUPP; 
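[Editorial note] A worked example of how cls_key_off()/cls_key_size() lay out the classification key used by prep_cls_rule(): with every entry of hash_fields[] enabled, the key is packed as below; whenever a field is disabled it contributes no bytes, so everything after it shifts down.

    offset  0: Ethernet destination MAC  (6 bytes)  - RXH_L2DA
    offset  6: VLAN TCI                  (2 bytes)  - RXH_VLAN
    offset  8: IPv4 source address       (4 bytes)  - RXH_IP_SRC
    offset 12: IPv4 destination address  (4 bytes)  - RXH_IP_DST
    offset 16: IP protocol               (1 byte)   - RXH_L3_PROTO
    offset 17: L4 source port            (2 bytes)  - RXH_L4_B_0_1
    offset 19: L4 destination port       (2 bytes)  - RXH_L4_B_2_3

    cls_key_size() = 21 bytes in this case; prep_cls_rule() writes the mask
    into the 21 bytes that immediately follow the key in the same buffer
    (msk = key + key_size), which is why do_cls() allocates key_size * 2.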
++ } ++ ++ if (cls_is_enabled(net_dev, RXH_L2DA)) { ++ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), ++ eth_h->h_dest); ++ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), ++ eth_m->h_dest); ++ } else { ++ if (!is_zero_ether_addr(eth_m->h_dest)) { ++ netdev_err(net_dev, ++ "ETH DST is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ } ++ break; ++ ++ default: ++ /* TODO: IP user flow, AH, ESP */ ++ return -EOPNOTSUPP; ++ } ++ ++ if (fs->flow_type & FLOW_EXT) { ++ /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ ++ return -EOPNOTSUPP; ++ } ++ ++ if (fs->flow_type & FLOW_MAC_EXT) { ++ ext_h = &fs->h_ext; ++ ext_m = &fs->m_ext; ++ ++ if (cls_is_enabled(net_dev, RXH_L2DA)) { ++ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), ++ ext_h->h_dest); ++ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), ++ ext_m->h_dest); ++ } else { ++ if (!is_zero_ether_addr(ext_m->h_dest)) { ++ netdev_err(net_dev, ++ "ETH DST is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ } ++ } ++ return 0; ++} ++ ++static int do_cls(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs, ++ bool add) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; ++ struct dpni_rule_cfg rule_cfg; ++ void *dma_mem; ++ int err = 0; ++ ++ if (!dpaa2_eth_fs_enabled(priv)) { ++ netdev_err(net_dev, "dev does not support steering!\n"); ++ /* dev doesn't support steering */ ++ return -EOPNOTSUPP; ++ } ++ ++ if ((fs->ring_cookie != RX_CLS_FLOW_DISC && ++ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || ++ fs->location >= rule_cnt) ++ return -EINVAL; ++ ++ memset(&rule_cfg, 0, sizeof(rule_cfg)); ++ rule_cfg.key_size = cls_key_size(net_dev); ++ ++ /* allocate twice the key size, for the actual key and for mask */ ++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = prep_cls_rule(net_dev, fs, dma_mem); ++ if (err) ++ goto err_free_mem; ++ ++ rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, ++ rule_cfg.key_size * 2, ++ DMA_TO_DEVICE); ++ ++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; ++ ++ if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { ++ int i; ++ u8 *mask = dma_mem + rule_cfg.key_size; ++ ++ /* check that nothing is masked out, otherwise it won't work */ ++ for (i = 0; i < rule_cfg.key_size; i++) { ++ if (mask[i] == 0xff) ++ continue; ++ netdev_err(net_dev, "dev does not support masking!\n"); ++ err = -EOPNOTSUPP; ++ goto err_free_mem; ++ } ++ rule_cfg.mask_iova = 0; ++ } ++ ++ /* No way to control rule order in firmware */ ++ if (add) ++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, ++ &rule_cfg, (u16)fs->ring_cookie); ++ else ++ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, ++ &rule_cfg); ++ ++ dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, ++ rule_cfg.key_size * 2, DMA_TO_DEVICE); ++ if (err) { ++ netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); ++ goto err_free_mem; ++ } ++ ++ priv->cls_rule[fs->location].fs = *fs; ++ priv->cls_rule[fs->location].in_use = true; ++ ++err_free_mem: ++ kfree(dma_mem); ++ ++ return err; ++} ++ ++static int add_cls(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = do_cls(net_dev, fs, true); ++ if (err) ++ return err; ++ ++ priv->cls_rule[fs->location].in_use = true; ++ priv->cls_rule[fs->location].fs = *fs; ++ ++ return 0; ++} ++ ++static int del_cls(struct net_device 
*net_dev, int location) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false); ++ if (err) ++ return err; ++ ++ priv->cls_rule[location].in_use = false; ++ ++ return 0; ++} ++ ++static void clear_cls(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int i, err; ++ ++ for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) { ++ if (!priv->cls_rule[i].in_use) ++ continue; ++ ++ err = del_cls(net_dev, i); ++ if (err) ++ netdev_warn(net_dev, ++ "err trying to delete classification entry %d\n", ++ i); ++ } ++} ++ ++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, ++ struct ethtool_rxnfc *rxnfc) ++{ ++ int err = 0; ++ ++ switch (rxnfc->cmd) { ++ case ETHTOOL_SRXFH: ++ /* first off clear ALL classification rules, chaging key ++ * composition will break them anyway ++ */ ++ clear_cls(net_dev); ++ /* we purposely ignore cmd->flow_type for now, because the ++ * classifier only supports a single set of fields for all ++ * protocols ++ */ ++ err = dpaa2_eth_set_hash(net_dev, rxnfc->data); ++ break; ++ case ETHTOOL_SRXCLSRLINS: ++ err = add_cls(net_dev, &rxnfc->fs); ++ break; ++ ++ case ETHTOOL_SRXCLSRLDEL: ++ err = del_cls(net_dev, rxnfc->fs.location); ++ break; ++ ++ default: ++ err = -EOPNOTSUPP; ++ } ++ ++ return err; ++} ++ ++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, ++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; ++ int i, j; ++ ++ switch (rxnfc->cmd) { ++ case ETHTOOL_GRXFH: ++ /* we purposely ignore cmd->flow_type for now, because the ++ * classifier only supports a single set of fields for all ++ * protocols ++ */ ++ rxnfc->data = priv->rx_hash_fields; ++ break; ++ ++ case ETHTOOL_GRXRINGS: ++ rxnfc->data = dpaa2_eth_queue_count(priv); ++ break; ++ ++ case ETHTOOL_GRXCLSRLCNT: ++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) ++ if (priv->cls_rule[i].in_use) ++ rxnfc->rule_cnt++; ++ rxnfc->data = rule_cnt; ++ break; ++ ++ case ETHTOOL_GRXCLSRULE: ++ if (!priv->cls_rule[rxnfc->fs.location].in_use) ++ return -EINVAL; ++ ++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; ++ break; ++ ++ case ETHTOOL_GRXCLSRLALL: ++ for (i = 0, j = 0; i < rule_cnt; i++) { ++ if (!priv->cls_rule[i].in_use) ++ continue; ++ if (j == rxnfc->rule_cnt) ++ return -EMSGSIZE; ++ rule_locs[j++] = i; ++ } ++ rxnfc->rule_cnt = j; ++ rxnfc->data = rule_cnt; ++ break; ++ ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++const struct ethtool_ops dpaa2_ethtool_ops = { ++ .get_drvinfo = dpaa2_eth_get_drvinfo, ++ .get_link = ethtool_op_get_link, ++ .get_settings = dpaa2_eth_get_settings, ++ .set_settings = dpaa2_eth_set_settings, ++ .get_sset_count = dpaa2_eth_get_sset_count, ++ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, ++ .get_strings = dpaa2_eth_get_strings, ++ .get_rxnfc = dpaa2_eth_get_rxnfc, ++ .set_rxnfc = dpaa2_eth_set_rxnfc, ++}; +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h +new file mode 100644 +index 0000000..92ec12b +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h +@@ -0,0 +1,175 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
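[Editorial note] From user space these rxnfc handlers are reached through ethtool's NFC interface; interface name and queue/rule numbers below are hypothetical. For example, `ethtool -N eth1 rx-flow-hash udp4 sdfn` exercises ETHTOOL_SRXFH above, and note that it first clears every installed steering rule (clear_cls()), since changing the key composition invalidates them. `ethtool -N eth1 flow-type udp4 dst-port 319 action 2 loc 1` then inserts a steering rule through ETHTOOL_SRXCLSRLINS, directing matching frames to queue 2. On DPNIs created without DPNI_OPT_FS_MASK_SUPPORT, do_cls() requires every byte of the rule mask to be 0xff, so only exact matches on the currently enabled hash fields are accepted.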
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPKG_H_ ++#define __FSL_DPKG_H_ ++ ++#include ++#include "../../fsl-mc/include/net.h" ++ ++/* Data Path Key Generator API ++ * Contains initialization APIs and runtime APIs for the Key Generator ++ */ ++ ++/** Key Generator properties */ ++ ++/** ++ * Number of masks per key extraction ++ */ ++#define DPKG_NUM_OF_MASKS 4 ++/** ++ * Number of extractions per key profile ++ */ ++#define DPKG_MAX_NUM_OF_EXTRACTS 10 ++ ++/** ++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types ++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset ++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field ++ * @DPKG_FULL_FIELD: Extract a full field ++ */ ++enum dpkg_extract_from_hdr_type { ++ DPKG_FROM_HDR = 0, ++ DPKG_FROM_FIELD = 1, ++ DPKG_FULL_FIELD = 2 ++}; ++ ++/** ++ * enum dpkg_extract_type - Enumeration for selecting extraction type ++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header ++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header ++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; ++ * e.g. 
can be used to extract header existence; ++ * please refer to 'Parse Result definition' section in the parser BG ++ */ ++enum dpkg_extract_type { ++ DPKG_EXTRACT_FROM_HDR = 0, ++ DPKG_EXTRACT_FROM_DATA = 1, ++ DPKG_EXTRACT_FROM_PARSE = 3 ++}; ++ ++/** ++ * struct dpkg_mask - A structure for defining a single extraction mask ++ * @mask: Byte mask for the extracted content ++ * @offset: Offset within the extracted content ++ */ ++struct dpkg_mask { ++ uint8_t mask; ++ uint8_t offset; ++}; ++ ++/** ++ * struct dpkg_extract - A structure for defining a single extraction ++ * @type: Determines how the union below is interpreted: ++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; ++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; ++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' ++ * @extract: Selects extraction method ++ * @num_of_byte_masks: Defines the number of valid entries in the array below; ++ * This is also the number of bytes to be used as masks ++ * @masks: Masks parameters ++ */ ++struct dpkg_extract { ++ enum dpkg_extract_type type; ++ /** ++ * union extract - Selects extraction method ++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' ++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' ++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' ++ */ ++ union { ++ /** ++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' ++ * @prot: Any of the supported headers ++ * @type: Defines the type of header extraction: ++ * DPKG_FROM_HDR: use size & offset below; ++ * DPKG_FROM_FIELD: use field, size and offset below; ++ * DPKG_FULL_FIELD: use field below ++ * @field: One of the supported fields (NH_FLD_) ++ * ++ * @size: Size in bytes ++ * @offset: Byte offset ++ * @hdr_index: Clear for cases not listed below; ++ * Used for protocols that may have more than a single ++ * header, 0 indicates an outer header; ++ * Supported protocols (possible values): ++ * NET_PROT_VLAN (0, HDR_INDEX_LAST); ++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); ++ * NET_PROT_IP(0, HDR_INDEX_LAST); ++ * NET_PROT_IPv4(0, HDR_INDEX_LAST); ++ * NET_PROT_IPv6(0, HDR_INDEX_LAST); ++ */ ++ ++ struct { ++ enum net_prot prot; ++ enum dpkg_extract_from_hdr_type type; ++ uint32_t field; ++ uint8_t size; ++ uint8_t offset; ++ uint8_t hdr_index; ++ } from_hdr; ++ /** ++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' ++ * @size: Size in bytes ++ * @offset: Byte offset ++ */ ++ struct { ++ uint8_t size; ++ uint8_t offset; ++ } from_data; ++ ++ /** ++ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' ++ * @size: Size in bytes ++ * @offset: Byte offset ++ */ ++ struct { ++ uint8_t size; ++ uint8_t offset; ++ } from_parse; ++ } extract; ++ ++ uint8_t num_of_byte_masks; ++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; ++}; ++ ++/** ++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation ++ * profile (rule) ++ * @num_extracts: Defines the number of valid entries in the array below ++ * @extracts: Array of required extractions ++ */ ++struct dpkg_profile_cfg { ++ uint8_t num_extracts; ++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; ++}; ++ ++#endif /* __FSL_DPKG_H_ */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h +new file mode 100644 +index 0000000..c0f8af0 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h +@@ -0,0 +1,1058 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
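[Editorial note] To make the structures above concrete, here is a minimal sketch of a key-generation profile that extracts the IPv4 source and destination addresses; this is essentially what dpaa2_eth_set_hash() in dpaa2-ethtool.c builds for RXH_IP_SRC | RXH_IP_DST before serializing it.

    struct dpkg_profile_cfg cfg = { 0 };

    cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
    cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
    cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
    cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;

    cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
    cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IP;
    cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
    cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;

    cfg.num_extracts = 2;

    /* The profile is then flattened into a DMA-able buffer with
     * dpni_prepare_key_cfg(&cfg, buf) and its IOVA handed to the MC via
     * dpni_set_rx_tc_dist(), as dpaa2_eth_set_hash() does above. */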
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
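[Editorial note] The DPNI_CMD_*/DPNI_RSP_* macros that follow are all built from MC_CMD_OP()/MC_RSP_OP(), which come from the fsl-mc bus headers and are not part of this patch. Each entry packs (or, for RSP, unpacks) a field of `width` bits starting at bit `offset` within the 64-bit command parameter `param`. A rough plain-C equivalent of one packing entry, offered as an assumption about those helpers rather than code from this patch:

    #include <stdint.h>

    /* Pack 'arg' into bits [offset, offset + width) of params[param];
     * roughly what one MC_CMD_OP(cmd, param, offset, width, type, arg)
     * line contributes to the command. */
    static inline void mc_cmd_pack(uint64_t *params, int param,
                                   int offset, int width, uint64_t arg)
    {
            uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

            params[param] |= (arg & mask) << offset;
    }

    /* Example: DPNI_CMD_OPEN(cmd, dpni_id) places the 32-bit dpni_id in the
     * low half of parameter 0, i.e. mc_cmd_pack(params, 0, 0, 32, dpni_id). */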
++ */ ++#ifndef _FSL_DPNI_CMD_H ++#define _FSL_DPNI_CMD_H ++ ++/* DPNI Version */ ++#define DPNI_VER_MAJOR 6 ++#define DPNI_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPNI_CMDID_OPEN 0x801 ++#define DPNI_CMDID_CLOSE 0x800 ++#define DPNI_CMDID_CREATE 0x901 ++#define DPNI_CMDID_DESTROY 0x900 ++ ++#define DPNI_CMDID_ENABLE 0x002 ++#define DPNI_CMDID_DISABLE 0x003 ++#define DPNI_CMDID_GET_ATTR 0x004 ++#define DPNI_CMDID_RESET 0x005 ++#define DPNI_CMDID_IS_ENABLED 0x006 ++ ++#define DPNI_CMDID_SET_IRQ 0x010 ++#define DPNI_CMDID_GET_IRQ 0x011 ++#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPNI_CMDID_SET_IRQ_MASK 0x014 ++#define DPNI_CMDID_GET_IRQ_MASK 0x015 ++#define DPNI_CMDID_GET_IRQ_STATUS 0x016 ++#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPNI_CMDID_SET_POOLS 0x200 ++#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 ++#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 ++#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 ++#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 ++#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 ++#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 ++#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 ++#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 ++#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 ++#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A ++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B ++#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C ++ ++#define DPNI_CMDID_GET_QDID 0x210 ++#define DPNI_CMDID_GET_SP_INFO 0x211 ++#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 ++#define DPNI_CMDID_GET_COUNTER 0x213 ++#define DPNI_CMDID_SET_COUNTER 0x214 ++#define DPNI_CMDID_GET_LINK_STATE 0x215 ++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 ++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 ++#define DPNI_CMDID_SET_MTU 0x218 ++#define DPNI_CMDID_GET_MTU 0x219 ++#define DPNI_CMDID_SET_LINK_CFG 0x21A ++#define DPNI_CMDID_SET_TX_SHAPING 0x21B ++ ++#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 ++#define DPNI_CMDID_GET_MCAST_PROMISC 0x221 ++#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 ++#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 ++#define DPNI_CMDID_SET_PRIM_MAC 0x224 ++#define DPNI_CMDID_GET_PRIM_MAC 0x225 ++#define DPNI_CMDID_ADD_MAC_ADDR 0x226 ++#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 ++#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 ++ ++#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 ++#define DPNI_CMDID_ADD_VLAN_ID 0x231 ++#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 ++#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 ++ ++#define DPNI_CMDID_SET_RX_TC_DIST 0x235 ++#define DPNI_CMDID_SET_TX_FLOW 0x236 ++#define DPNI_CMDID_GET_TX_FLOW 0x237 ++#define DPNI_CMDID_SET_RX_FLOW 0x238 ++#define DPNI_CMDID_GET_RX_FLOW 0x239 ++#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A ++#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B ++ ++#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E ++#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F ++ ++#define DPNI_CMDID_SET_QOS_TBL 0x240 ++#define DPNI_CMDID_ADD_QOS_ENT 0x241 ++#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 ++#define DPNI_CMDID_CLR_QOS_TBL 0x243 ++#define DPNI_CMDID_ADD_FS_ENT 0x244 ++#define DPNI_CMDID_REMOVE_FS_ENT 0x245 ++#define DPNI_CMDID_CLR_FS_ENT 0x246 ++#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 ++#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 ++#define DPNI_CMDID_SET_IPR 0x249 ++#define DPNI_CMDID_SET_IPF 0x24A ++ ++#define DPNI_CMDID_SET_TX_SELECTION 0x250 ++#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 ++#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 ++#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 ++#define 
DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 ++#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 ++#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 ++#define DPNI_CMDID_SET_TX_CONF 0x257 ++#define DPNI_CMDID_GET_TX_CONF 0x258 ++#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 ++#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A ++#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B ++#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_OPEN(cmd, dpni_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) ++ ++#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ ++do { \ ++ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ ++ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ ++ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ ++ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ ++ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ ++ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ ++ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ ++ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ ++ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ ++ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ ++ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ ++ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ ++ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ ++ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ ++ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ ++ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ ++ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv4); \ ++ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv6); \ ++ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ ++ cfg->ipr_cfg.max_reass_frm_size); \ ++ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv4); \ ++ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv6); \ ++} while (0) ++ ++#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ ++do { \ ++ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ ++ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ ++ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ ++ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ ++ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ ++ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ ++ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ ++ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ ++ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ ++ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ ++ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ ++ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ ++ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ ++ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ ++ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ ++ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ ++ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv4); \ ++ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv6); \ 
++ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ ++ cfg->ipr_cfg.max_reass_frm_size); \ ++ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv4); \ ++ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv6); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CREATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ ++ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ ++ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ ++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ ++ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ ++ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ ++ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ ++ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ ++ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ ++ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ ++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_POOLS(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ ++ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ ++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ ++ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ ++ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ ++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ ++ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ ++ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ ++ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ ++ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ ++ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ 
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_ATTR(cmd, attr) \ ++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ ++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ ++ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ ++ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ ++ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ ++ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ ++ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ ++ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ ++ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ ++ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ ++ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \ ++ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ 
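[Editorial note] DPNI_RSP_GET_ATTR above is where the firmware's DPNI API version becomes visible to a caller, while DPNI_VER_MAJOR/DPNI_VER_MINOR near the top of this header record the version these command layouts were written against. A hypothetical compatibility check a caller might perform on the unpacked attributes (not code from this patch; `attr` and `dev` are assumed locals):

    if (attr.version.major != DPNI_VER_MAJOR ||
        attr.version.minor < DPNI_VER_MINOR)
            dev_warn(dev, "DPNI API version %u.%u differs from header %u.%u\n",
                     attr.version.major, attr.version.minor,
                     DPNI_VER_MAJOR, DPNI_VER_MINOR);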
++#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ ++ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ ++ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ ++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ ++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_CMD_OP(cmd, 0, 32, 32, 
uint32_t, layout->options); \ ++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_QDID(cmd, qdid) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_COUNTER(cmd, counter) \ ++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_COUNTER(cmd, value) \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ ++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_MTU(cmd, mtu) \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_MTU(cmd, mtu) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, 
param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ ++ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ ++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ ++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[0].mode); \ ++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[1].mode); \ ++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[2].mode); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 1, 
48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[3].mode); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[4].mode); \ ++ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[5].mode); \ ++ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[6].mode); \ ++ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[7].mode); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ ++ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \ ++ cfg->fs_cfg.miss_action); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ ++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ ++ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ ++ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ ++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ ++ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ ++ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ ++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ ++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ ++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ ++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ ++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.frame_data_size);\ ++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.flow_context_size);\ ++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ ++ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++} while (0) ++ ++/* cmd, param, offset, 
width, type, arg_name */ ++#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ ++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ ++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ ++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ ++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.frame_data_size);\ ++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.flow_context_size);\ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ ++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ ++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ ++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ ++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.frame_data_size);\ ++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.flow_context_size);\ ++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ ++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ ++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ ++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ ++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ ++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.frame_data_size);\ ++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.flow_context_size);\ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ ++ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ ++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, 
cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IPR(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IPF(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ ++ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ ++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ ++ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ ++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_PREP_EARLY_DROP(ext, cfg) \ ++do { \ ++ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ ++ MC_PREP_OP(ext, 0, 2, 2, \ ++ enum dpni_congestion_unit, cfg->units); \ ++ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ ++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ ++ MC_PREP_OP(ext, 2, 0, 64, 
uint64_t, cfg->green.max_threshold); \ ++ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ ++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ ++ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ ++ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ ++ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ ++ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ ++ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_EXT_EARLY_DROP(ext, cfg) \ ++do { \ ++ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ ++ MC_EXT_OP(ext, 0, 2, 2, \ ++ enum dpni_congestion_unit, cfg->units); \ ++ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ ++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ ++ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ ++ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ ++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ ++ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ ++ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ ++ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ ++ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ ++ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) ++ ++#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, 
cfg->dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) ++ ++#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ ++ cfg->queue_cfg.dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ ++ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ ++ cfg->queue_cfg.tail_drop_threshold); \ ++ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ ++ cfg->queue_cfg.flc_cfg.flc_type); \ ++ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ ++ cfg->queue_cfg.flc_cfg.frame_data_size); \ ++ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ ++ cfg->queue_cfg.flc_cfg.flow_context_size); \ ++ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ ++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ ++ cfg->queue_cfg.flc_cfg.flow_context); \ ++} while (0) ++ ++#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ ++ attr->queue_attr.dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ ++ attr->queue_attr.dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ ++ MC_RSP_OP(cmd, 0, 46, 1, int, \ ++ attr->queue_attr.order_preservation_en); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ ++ 
MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ ++ attr->queue_attr.tail_drop_threshold); \ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ ++ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ ++ attr->queue_attr.flc_cfg.flc_type); \ ++ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ ++ attr->queue_attr.flc_cfg.frame_data_size); \ ++ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ ++ attr->queue_attr.flc_cfg.flow_context_size); \ ++ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ ++ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ ++ attr->queue_attr.flc_cfg.flow_context); \ ++} while (0) ++ ++#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#endif /* _FSL_DPNI_CMD_H */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c +new file mode 100644 +index 0000000..c228ce5 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c +@@ -0,0 +1,1907 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpni.h" ++#include "dpni-cmd.h" ++ ++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, ++ uint8_t *key_cfg_buf) ++{ ++ int i, j; ++ int offset = 0; ++ int param = 1; ++ uint64_t *params = (uint64_t *)key_cfg_buf; ++ ++ if (!key_cfg_buf || !cfg) ++ return -EINVAL; ++ ++ params[0] |= mc_enc(0, 8, cfg->num_extracts); ++ params[0] = cpu_to_le64(params[0]); ++ ++ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) ++ return -EINVAL; ++ ++ for (i = 0; i < cfg->num_extracts; i++) { ++ switch (cfg->extracts[i].type) { ++ case DPKG_EXTRACT_FROM_HDR: ++ params[param] |= mc_enc(0, 8, ++ cfg->extracts[i].extract.from_hdr.prot); ++ params[param] |= mc_enc(8, 4, ++ cfg->extracts[i].extract.from_hdr.type); ++ params[param] |= mc_enc(16, 8, ++ cfg->extracts[i].extract.from_hdr.size); ++ params[param] |= mc_enc(24, 8, ++ cfg->extracts[i].extract. ++ from_hdr.offset); ++ params[param] |= mc_enc(32, 32, ++ cfg->extracts[i].extract. ++ from_hdr.field); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ params[param] |= mc_enc(0, 8, ++ cfg->extracts[i].extract. ++ from_hdr.hdr_index); ++ break; ++ case DPKG_EXTRACT_FROM_DATA: ++ params[param] |= mc_enc(16, 8, ++ cfg->extracts[i].extract. ++ from_data.size); ++ params[param] |= mc_enc(24, 8, ++ cfg->extracts[i].extract. ++ from_data.offset); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ break; ++ case DPKG_EXTRACT_FROM_PARSE: ++ params[param] |= mc_enc(16, 8, ++ cfg->extracts[i].extract. ++ from_parse.size); ++ params[param] |= mc_enc(24, 8, ++ cfg->extracts[i].extract. 
++ from_parse.offset); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ break; ++ default: ++ return -EINVAL; ++ } ++ params[param] |= mc_enc( ++ 24, 8, cfg->extracts[i].num_of_byte_masks); ++ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ for (offset = 0, j = 0; ++ j < DPKG_NUM_OF_MASKS; ++ offset += 16, j++) { ++ params[param] |= mc_enc( ++ (offset), 8, cfg->extracts[i].masks[j].mask); ++ params[param] |= mc_enc( ++ (offset + 8), 8, ++ cfg->extracts[i].masks[j].offset); ++ } ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ } ++ return 0; ++} ++ ++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, ++ uint8_t *ext_cfg_buf) ++{ ++ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; ++ ++ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); ++ ++ return 0; ++} ++ ++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, ++ const uint8_t *ext_cfg_buf) ++{ ++ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; ++ ++ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); ++ ++ return 0; ++} ++ ++int dpni_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpni_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPNI_CMD_OPEN(cmd, dpni_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpni_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpni_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPNI_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpni_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_pools(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_pools_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_POOLS(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare 
command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpni_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpni_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpni_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ 
DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpni_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpni_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_ATTR(cmd, attr); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_error_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); ++ ++ return 0; ++} ++ ++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ 
DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); ++ ++ return 0; ++} ++ ++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_qdid(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *qdid) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, ++ cmd_flags, ++ token); ++ ++ /* 
send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_QDID(cmd, *qdid); ++ ++ return 0; ++} ++ ++int dpni_get_sp_info(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_sp_info *sp_info) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_SP_INFO(cmd, sp_info); ++ ++ return 0; ++} ++ ++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *data_offset) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); ++ ++ return 0; ++} ++ ++int dpni_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t *value) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_COUNTER(cmd, counter); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_COUNTER(cmd, *value); ++ ++ return 0; ++} ++ ++int dpni_set_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t value) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_COUNTER(cmd, counter, value); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_LINK_CFG(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_link_state *state) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_LINK_STATE(cmd, state); ++ ++ return 0; ++} ++ ++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_tx_shaping_cfg *tx_shaper) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t 
max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); ++ ++ return 0; ++} ++ ++int dpni_set_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t mtu) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_MTU(cmd, mtu); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *mtu) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_MTU(cmd, *mtu); ++ ++ return 0; ++} ++ ++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]) ++{ ++ struct mc_command 
cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); ++ ++ return 0; ++} ++ ++int dpni_add_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int unicast, ++ int multicast) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, ++ cmd_flags, ++ token); ++ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_add_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_tx_selection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct 
dpni_tx_selection_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_dist_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *flow_id, ++ const struct dpni_tx_flow_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); ++ ++ return 0; ++} ++ ++int dpni_get_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_flow_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_TX_FLOW(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ const struct dpni_queue_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ struct dpni_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_RX_FLOW(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_queue_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, ++ cmd_flags, ++ token); ++ 
++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int revoke) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_qos_tbl_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_add_qos_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_rule_cfg *cfg, ++ uint8_t tc_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_rule_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_add_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg, ++ uint16_t flow_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare 
command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, ++ cmd_flags, token); ++ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, ++ cmd_flags, token); ++ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_ipr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IPR(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_ipf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IPF(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_policing_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_rx_tc_policing_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); ++ ++ return 0; ++} ++ ++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, ++ uint8_t *early_drop_buf) ++{ ++ uint64_t *ext_params = (uint64_t *)early_drop_buf; ++ ++ DPNI_PREP_EARLY_DROP(ext_params, cfg); ++} ++ ++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, ++ const uint8_t *early_drop_buf) ++{ ++ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; ++ ++ DPNI_EXT_EARLY_DROP(ext_params, cfg); ++} ++ ++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ 
++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); ++ ++ return 0; ++} ++ ++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_tx_conf_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); ++ ++ /* send command to mc*/ ++ return 
mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_conf_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_CONF(cmd, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_TX_CONF(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); ++ ++ return 0; ++} +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h +new file mode 100644 +index 0000000..fca426d +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h +@@ -0,0 +1,2581 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPNI_H ++#define __FSL_DPNI_H ++ ++#include "dpkg.h" ++ ++struct fsl_mc_io; ++ ++/** ++ * Data Path Network Interface API ++ * Contains initialization APIs and runtime control APIs for DPNI ++ */ ++ ++/** General DPNI macros */ ++ ++/** ++ * Maximum number of traffic classes ++ */ ++#define DPNI_MAX_TC 8 ++/** ++ * Maximum number of buffer pools per DPNI ++ */ ++#define DPNI_MAX_DPBP 8 ++/** ++ * Maximum number of storage-profiles per DPNI ++ */ ++#define DPNI_MAX_SP 2 ++ ++/** ++ * All traffic classes considered; see dpni_set_rx_flow() ++ */ ++#define DPNI_ALL_TCS (uint8_t)(-1) ++/** ++ * All flows within traffic class considered; see dpni_set_rx_flow() ++ */ ++#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) ++/** ++ * Generate new flow ID; see dpni_set_tx_flow() ++ */ ++#define DPNI_NEW_FLOW_ID (uint16_t)(-1) ++/* use for common tx-conf queue; see dpni_set_tx_conf_() */ ++#define DPNI_COMMON_TX_CONF (uint16_t)(-1) ++ ++/** ++ * dpni_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpni_id: DPNI unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpni_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpni_id, ++ uint16_t *token); ++ ++/** ++ * dpni_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/* DPNI configuration options */ ++ ++/** ++ * Allow different distribution key profiles for different traffic classes; ++ * if not set, a single key profile is assumed ++ */ ++#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 ++ ++/** ++ * Disable all non-error transmit confirmation; error frames are reported ++ * back to a common Tx error queue ++ */ ++#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 ++ ++/** ++ * Disable per-sender private Tx confirmation/error queue ++ */ ++#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 ++ ++/** ++ * Support distribution based on hashed key; ++ * allows statistical distribution over receive queues in a traffic class ++ */ ++#define DPNI_OPT_DIST_HASH 0x00000010 ++ ++/** ++ * DEPRECATED - if this flag is selected and and all new 'max_fs_entries' are ++ * '0' then backward compatibility is preserved; ++ * Support distribution based on flow steering; ++ * allows explicit control of distribution over receive queues in a traffic ++ * class ++ */ ++#define DPNI_OPT_DIST_FS 0x00000020 ++ ++/** ++ * Unicast filtering support ++ */ ++#define DPNI_OPT_UNICAST_FILTER 0x00000080 ++/** ++ * Multicast filtering support ++ */ ++#define DPNI_OPT_MULTICAST_FILTER 0x00000100 ++/** ++ * VLAN filtering support ++ */ ++#define DPNI_OPT_VLAN_FILTER 0x00000200 ++/** ++ * Support IP reassembly on received packets ++ */ ++#define DPNI_OPT_IPR 0x00000800 ++/** ++ * Support IP fragmentation on transmitted packets ++ */ ++#define DPNI_OPT_IPF 0x00001000 ++/** ++ * VLAN manipulation support ++ */ ++#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 ++/** ++ * Support masking of QoS lookup keys ++ */ ++#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 ++/** ++ * Support masking of Flow Steering lookup keys ++ */ ++#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 ++ ++/** ++ * struct dpni_extended_cfg - Structure representing extended DPNI configuration ++ * @tc_cfg: TCs configuration ++ * @ipr_cfg: IP reassembly configuration ++ */ ++struct dpni_extended_cfg { ++ /** ++ * struct tc_cfg - TC configuration ++ * @max_dist: Maximum distribution size for Rx traffic class; ++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, ++ * 112,128,192,224,256,384,448,512,768,896,1024; ++ * value '0' will be treated as '1'. ++ * other unsupported values will be round down to the nearest ++ * supported value. 
++ * @max_fs_entries: Maximum FS entries for Rx traffic class; ++ * '0' means no support for this TC; ++ */ ++ struct { ++ uint16_t max_dist; ++ uint16_t max_fs_entries; ++ } tc_cfg[DPNI_MAX_TC]; ++ /** ++ * struct ipr_cfg - Structure representing IP reassembly configuration ++ * @max_reass_frm_size: Maximum size of the reassembled frame ++ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments ++ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments ++ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly ++ * process ++ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly ++ * process ++ */ ++ struct { ++ uint16_t max_reass_frm_size; ++ uint16_t min_frag_size_ipv4; ++ uint16_t min_frag_size_ipv6; ++ uint16_t max_open_frames_ipv4; ++ uint16_t max_open_frames_ipv6; ++ } ipr_cfg; ++}; ++ ++/** ++ * dpni_prepare_extended_cfg() - function prepare extended parameters ++ * @cfg: extended structure ++ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA ++ * ++ * This function has to be called before dpni_create() ++ */ ++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, ++ uint8_t *ext_cfg_buf); ++ ++/** ++ * struct dpni_cfg - Structure representing DPNI configuration ++ * @mac_addr: Primary MAC address ++ * @adv: Advanced parameters; default is all zeros; ++ * use this structure to change default settings ++ */ ++struct dpni_cfg { ++ uint8_t mac_addr[6]; ++ /** ++ * struct adv - Advanced parameters ++ * @options: Mask of available options; use 'DPNI_OPT_' values ++ * @start_hdr: Selects the packet starting header for parsing; ++ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' ++ * @max_senders: Maximum number of different senders; used as the number ++ * of dedicated Tx flows; Non-power-of-2 values are rounded ++ * up to the next power-of-2 value as hardware demands it; ++ * '0' will be treated as '1' ++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); ++ * '0' will e treated as '1' ++ * @max_unicast_filters: Maximum number of unicast filters; ++ * '0' is treated as '16' ++ * @max_multicast_filters: Maximum number of multicast filters; ++ * '0' is treated as '64' ++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in ++ * the QoS table; '0' is treated as '64' ++ * @max_qos_key_size: Maximum key size for the QoS look-up; ++ * '0' is treated as '24' which is enough for IPv4 ++ * 5-tuple ++ * @max_dist_key_size: Maximum key size for the distribution; ++ * '0' is treated as '24' which is enough for IPv4 5-tuple ++ * @max_policers: Maximum number of policers; ++ * should be between '0' and max_tcs ++ * @max_congestion_ctrl: Maximum number of congestion control groups ++ * (CGs); covers early drop and congestion notification ++ * requirements; ++ * should be between '0' and ('max_tcs' + 'max_senders') ++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory ++ * filled with the extended configuration by calling ++ * dpni_prepare_extended_cfg() ++ */ ++ struct { ++ uint32_t options; ++ enum net_prot start_hdr; ++ uint8_t max_senders; ++ uint8_t max_tcs; ++ uint8_t max_unicast_filters; ++ uint8_t max_multicast_filters; ++ uint8_t max_vlan_filters; ++ uint8_t max_qos_entries; ++ uint8_t max_qos_key_size; ++ uint8_t max_dist_key_size; ++ uint8_t max_policers; ++ uint8_t max_congestion_ctrl; ++ uint64_t ext_cfg_iova; ++ } adv; ++}; ++ ++/** ++ * dpni_create() - Create the DPNI object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one 
or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPNI object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpni_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpni_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpni_destroy() - Destroy the DPNI object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpni_pools_cfg - Structure representing buffer pools configuration ++ * @num_dpbp: Number of DPBPs ++ * @pools: Array of buffer pools parameters; The number of valid entries ++ * must match 'num_dpbp' value ++ */ ++struct dpni_pools_cfg { ++ uint8_t num_dpbp; ++ /** ++ * struct pools - Buffer pools parameters ++ * @dpbp_id: DPBP object ID ++ * @buffer_size: Buffer size ++ * @backup_pool: Backup pool ++ */ ++ struct { ++ int dpbp_id; ++ uint16_t buffer_size; ++ int backup_pool; ++ } pools[DPNI_MAX_DPBP]; ++}; ++ ++/** ++ * dpni_set_pools() - Set buffer pools configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Buffer pools configuration ++ * ++ * mandatory for DPNI operation ++ * warning:Allowed only when DPNI is disabled ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_pools(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_pools_cfg *cfg); ++ ++/** ++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpni_is_enabled() - Check if the DPNI is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_reset() - Reset the DPNI, returns the object to initial state. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * DPNI IRQ Index and Events ++ */ ++ ++/** ++ * IRQ index ++ */ ++#define DPNI_IRQ_INDEX 0 ++/** ++ * IRQ event - indicates a change in link state ++ */ ++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 ++ ++/** ++ * struct dpni_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpni_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpni_irq_cfg *irq_cfg); ++ ++/** ++ * dpni_get_irq() - Get IRQ information from the DPNI. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpni_irq_cfg *irq_cfg); ++ ++/** ++ * dpni_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state: - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpni_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpni_set_irq_mask() - Set interrupt mask. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpni_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpni_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpni_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. 
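A minimal sketch of how the IRQ calls above might be combined for link-change notification (illustrative; 'token' is assumed to come from a prior dpni_open(), command flags are left at 0, and the helper names are made up):

static int example_enable_link_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
        int err;

        /* unmask only the link-changed cause, then enable the IRQ */
        err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
                                DPNI_IRQ_EVENT_LINK_CHANGED);
        if (err)
                return err;
        return dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
}

static int example_handle_link_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint32_t status = 0;
        int err;

        err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
        if (err)
                return err;
        if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
                /* ... re-read link state, then acknowledge the cause (W1C) ... */
                err = dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX,
                                            DPNI_IRQ_EVENT_LINK_CHANGED);
        }
        return err;
}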
++ */ ++int dpni_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpni_attr - Structure representing DPNI attributes ++ * @id: DPNI object ID ++ * @version: DPNI version ++ * @start_hdr: Indicates the packet starting header for parsing ++ * @options: Mask of available options; reflects the value as was given in ++ * object's creation ++ * @max_senders: Maximum number of different senders; used as the number ++ * of dedicated Tx flows; ++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) ++ * @max_unicast_filters: Maximum number of unicast filters ++ * @max_multicast_filters: Maximum number of multicast filters ++ * @max_vlan_filters: Maximum number of VLAN filters ++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table ++ * @max_qos_key_size: Maximum key size for the QoS look-up ++ * @max_dist_key_size: Maximum key size for the distribution look-up ++ * @max_policers: Maximum number of policers; ++ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); ++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; ++ * call dpni_extract_extended_cfg() to extract the extended configuration ++ */ ++struct dpni_attr { ++ int id; ++ /** ++ * struct version - DPNI version ++ * @major: DPNI major version ++ * @minor: DPNI minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ enum net_prot start_hdr; ++ uint32_t options; ++ uint8_t max_senders; ++ uint8_t max_tcs; ++ uint8_t max_unicast_filters; ++ uint8_t max_multicast_filters; ++ uint8_t max_vlan_filters; ++ uint8_t max_qos_entries; ++ uint8_t max_qos_key_size; ++ uint8_t max_dist_key_size; ++ uint8_t max_policers; ++ uint8_t max_congestion_ctrl; ++ uint64_t ext_cfg_iova; ++}; ++ ++/** ++ * dpni_get_attributes() - Retrieve DPNI attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @attr: Object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_attr *attr);
++
++/**
++ * dpni_extract_extended_cfg() - extract the extended parameters
++ * @cfg: extended structure
++ * @ext_cfg_buf: 256 bytes of DMA-able memory
++ *
++ * This function has to be called after dpni_get_attributes()
++ */
++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
++ const uint8_t *ext_cfg_buf);
++
++/**
++ * DPNI errors
++ */
++
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE 0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE 0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE 0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE 0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE 0x00000004
++/**
++ * Parser L4 checksum error
++ */
++#define DPNI_ERROR_L4CE 0x00000001
++
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++ DPNI_ERROR_ACTION_DISCARD = 0,
++ DPNI_ERROR_ACTION_CONTINUE = 1,
++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++};
++
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use 'DPNI_ERROR_' values
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ * status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++ uint32_t errors;
++ enum dpni_error_action error_action;
++ int set_frame_annotation;
++};
++
++/**
++ * dpni_set_errors_behavior() - Set errors behavior
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Errors configuration
++ *
++ * This function may be called numerous times with different
++ * error masks
++ *
++ * Return: '0' on Success; Error code otherwise.
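For illustration (a sketch assuming 'token' came from dpni_open(); the policy shown is an example, not a recommendation from this patch), checksum errors could be steered to the error queue while physically damaged frames are dropped:

static int example_set_error_policy(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_error_cfg err_cfg = { 0 };
        int err;

        /* checksum problems: send to the error queue and flag in the FAS */
        err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
        err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
        err_cfg.set_frame_annotation = 1;
        err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
        if (err)
                return err;

        /* physically damaged frames are simply dropped */
        err_cfg.errors = DPNI_ERROR_FPE | DPNI_ERROR_FLE;
        err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
        err_cfg.set_frame_annotation = 0;
        return dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
}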
++ */ ++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_error_cfg *cfg); ++ ++/** ++ * DPNI buffer layout modification options ++ */ ++ ++/** ++ * Select to modify the time-stamp setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 ++/** ++ * Select to modify the parser-result setting; not applicable for Tx ++ */ ++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 ++/** ++ * Select to modify the frame-status setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 ++/** ++ * Select to modify the private-data-size setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 ++/** ++ * Select to modify the data-alignment setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 ++/** ++ * Select to modify the data-head-room setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 ++/** ++ * Select to modify the data-tail-room setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 ++ ++/** ++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout ++ * @options: Flags representing the suggested modifications to the buffer ++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags ++ * @pass_timestamp: Pass timestamp value ++ * @pass_parser_result: Pass parser results ++ * @pass_frame_status: Pass frame status ++ * @private_data_size: Size kept for private data (in bytes) ++ * @data_align: Data alignment ++ * @data_head_room: Data head room ++ * @data_tail_room: Data tail room ++ */ ++struct dpni_buffer_layout { ++ uint32_t options; ++ int pass_timestamp; ++ int pass_parser_result; ++ int pass_frame_status; ++ uint16_t private_data_size; ++ uint16_t data_align; ++ uint16_t data_head_room; ++ uint16_t data_tail_room; ++}; ++ ++/** ++ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout ++ * attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout ++ * configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. 
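An illustrative sketch only (the alignment and headroom values are arbitrary examples): a driver might shape the Rx buffer layout and enable both checksum validations while the DPNI is still disabled.

static int example_rx_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_buffer_layout layout = { 0 };
        int err;

        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                         DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
                         DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
                         DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
        layout.pass_frame_status = 1;
        layout.pass_parser_result = 1;
        layout.data_align = 64;       /* example alignment */
        layout.data_head_room = 256;  /* example headroom */
        err = dpni_set_rx_buffer_layout(mc_io, 0, token, &layout);
        if (err)
                return err;

        err = dpni_set_l3_chksum_validation(mc_io, 0, token, 1);
        if (err)
                return err;
        return dpni_set_l4_chksum_validation(mc_io, 0, token, 1);
}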
++ */ ++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used ++ * for enqueue operations ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qdid: Returned virtual QDID value that should be used as an argument ++ * in all enqueue operations ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_qdid(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *qdid); ++ ++/** ++ * struct dpni_sp_info - Structure representing DPNI storage-profile information ++ * (relevant only for DPNI owned by AIOP) ++ * @spids: array of storage-profiles ++ */ ++struct dpni_sp_info { ++ uint16_t spids[DPNI_MAX_SP]; ++}; ++ ++/** ++ * dpni_get_spids() - Get the AIOP storage profile IDs associated with the DPNI ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @sp_info: Returned AIOP storage-profile information ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Only relevant for DPNI that belongs to AIOP container. ++ */ ++int dpni_get_sp_info(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_sp_info *sp_info); ++ ++/** ++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @data_offset: Tx data offset (from start of buffer) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *data_offset); ++ ++/** ++ * enum dpni_counter - DPNI counter types ++ * @DPNI_CNT_ING_FRAME: Counts ingress frames ++ * @DPNI_CNT_ING_BYTE: Counts ingress bytes ++ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit ++ * 'drop' setting ++ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors ++ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames ++ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes ++ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames ++ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes ++ * @DPNI_CNT_EGR_FRAME: Counts egress frames ++ * @DPNI_CNT_EGR_BYTE: Counts egress bytes ++ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors ++ */ ++enum dpni_counter { ++ DPNI_CNT_ING_FRAME = 0x0, ++ DPNI_CNT_ING_BYTE = 0x1, ++ DPNI_CNT_ING_FRAME_DROP = 0x2, ++ DPNI_CNT_ING_FRAME_DISCARD = 0x3, ++ DPNI_CNT_ING_MCAST_FRAME = 0x4, ++ DPNI_CNT_ING_MCAST_BYTE = 0x5, ++ DPNI_CNT_ING_BCAST_FRAME = 0x6, ++ DPNI_CNT_ING_BCAST_BYTES = 0x7, ++ DPNI_CNT_EGR_FRAME = 0x8, ++ DPNI_CNT_EGR_BYTE = 0x9, ++ DPNI_CNT_EGR_FRAME_DISCARD = 0xa ++}; ++ ++/** ++ * dpni_get_counter() - Read a specific DPNI counter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @counter: The requested counter ++ * @value: Returned counter's current value ++ * ++ * Return: '0' on Success; Error code otherwise. 
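A small sketch of reading a pair of counters for statistics reporting (hypothetical helper; only two of the counter types are shown):

static int example_read_basic_stats(struct fsl_mc_io *mc_io, uint16_t token,
                                    uint64_t *rx_frames, uint64_t *tx_frames)
{
        int err;

        err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, rx_frames);
        if (err)
                return err;
        return dpni_get_counter(mc_io, 0, token, DPNI_CNT_EGR_FRAME, tx_frames);
}

A counter can also be reset by writing zero back, e.g. dpni_set_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, 0).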
++ */ ++int dpni_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t *value); ++ ++/** ++ * dpni_set_counter() - Set (or clear) a specific DPNI counter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @counter: The requested counter ++ * @value: New counter value; typically pass '0' for resetting ++ * the counter. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t value); ++ ++/** ++ * Enable auto-negotiation ++ */ ++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/** ++ * Enable half-duplex mode ++ */ ++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/** ++ * Enable pause frames ++ */ ++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL ++/** ++ * Enable a-symmetric pause frames ++ */ ++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct - Structure representing DPNI link configuration ++ * @rate: Rate ++ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values ++ */ ++struct dpni_link_cfg { ++ uint32_t rate; ++ uint64_t options; ++}; ++ ++/** ++ * dpni_set_link_cfg() - set the link configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_link_cfg *cfg); ++ ++/** ++ * struct dpni_link_state - Structure representing DPNI link state ++ * @rate: Rate ++ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values ++ * @up: Link state; '0' for down, '1' for up ++ */ ++struct dpni_link_state { ++ uint32_t rate; ++ uint64_t options; ++ int up; ++}; ++ ++/** ++ * dpni_get_link_state() - Return the link state (either up or down) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @state: Returned link state; ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_link_state *state); ++ ++/** ++ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration ++ * @rate_limit: rate in Mbps ++ * @max_burst_size: burst size in bytes (up to 64KB) ++ */ ++struct dpni_tx_shaping_cfg { ++ uint32_t rate_limit; ++ uint16_t max_burst_size; ++}; ++ ++/** ++ * dpni_set_tx_shaping() - Set the transmit shaping ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tx_shaper: tx shaping configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_tx_shaping_cfg *tx_shaper); ++ ++/** ++ * dpni_set_max_frame_length() - Set the maximum received frame length. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @max_frame_length: Maximum received frame length (in ++ * bytes); frame is discarded if its ++ * length exceeds this value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t max_frame_length); ++ ++/** ++ * dpni_get_max_frame_length() - Get the maximum received frame length. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @max_frame_length: Maximum received frame length (in ++ * bytes); frame is discarded if its ++ * length exceeds this value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *max_frame_length); ++ ++/** ++ * dpni_set_mtu() - Set the MTU for the interface. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mtu: MTU length (in bytes) ++ * ++ * MTU determines the maximum fragment size for performing IP ++ * fragmentation on egress packets. ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t mtu); ++ ++/** ++ * dpni_get_mtu() - Get the MTU. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mtu: Returned MTU length (in bytes) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *mtu); ++ ++/** ++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. 
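Purely as a sketch (the rate and frame-length values are arbitrary examples, and the rate is assumed to be in Mbps), link parameters and the frame-size limit might be brought into a known state like this:

static int example_link_and_limits(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_link_cfg link_cfg = { 0 };
        struct dpni_link_state state = { 0 };
        int err;

        link_cfg.rate = 1000;        /* example: 1 Gbps */
        link_cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
        err = dpni_set_link_cfg(mc_io, 0, token, &link_cfg);
        if (err)
                return err;

        err = dpni_set_max_frame_length(mc_io, 0, token, 1518);
        if (err)
                return err;

        err = dpni_get_link_state(mc_io, 0, token, &state);
        if (err)
                return err;
        return state.up ? 0 : -1;    /* -1 here just flags "link still down" */
}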
++ */ ++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_set_primary_mac_addr() - Set the primary MAC address ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to set as primary address ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]); ++ ++/** ++ * dpni_get_primary_mac_addr() - Get the primary MAC address ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: Returned MAC address ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t mac_addr[6]); ++ ++/** ++ * dpni_add_mac_addr() - Add MAC address filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to add ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]); ++ ++/** ++ * dpni_remove_mac_addr() - Remove MAC address filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]); ++ ++/** ++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @unicast: Set to '1' to clear unicast addresses ++ * @multicast: Set to '1' to clear multicast addresses ++ * ++ * The primary MAC address is not cleared by this operation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int unicast, ++ int multicast); ++ ++/** ++ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. 
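A sketch of typical address-filter housekeeping (the MAC addresses are placeholders and the helper is hypothetical):

static int example_mac_filter_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
        const uint8_t primary[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        const uint8_t mcast[6]   = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        int err;

        /* start from a clean slate; the primary address itself is not cleared */
        err = dpni_clear_mac_filters(mc_io, 0, token, 1, 1);
        if (err)
                return err;
        err = dpni_set_primary_mac_addr(mc_io, 0, token, primary);
        if (err)
                return err;
        return dpni_add_mac_addr(mc_io, 0, token, mcast);
}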
++ */ ++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_add_vlan_id() - Add VLAN ID filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @vlan_id: VLAN ID to add ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id); ++ ++/** ++ * dpni_remove_vlan_id() - Remove VLAN ID filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @vlan_id: VLAN ID to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id); ++ ++/** ++ * dpni_clear_vlan_filters() - Clear all VLAN filters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode ++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority ++ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling ++ */ ++enum dpni_tx_schedule_mode { ++ DPNI_TX_SCHED_STRICT_PRIORITY, ++ DPNI_TX_SCHED_WEIGHTED, ++}; ++ ++/** ++ * struct dpni_tx_schedule_cfg - Structure representing Tx ++ * scheduling configuration ++ * @mode: scheduling mode ++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; ++ * not applicable for 'strict-priority' mode; ++ */ ++struct dpni_tx_schedule_cfg { ++ enum dpni_tx_schedule_mode mode; ++ uint16_t delta_bandwidth; ++}; ++ ++/** ++ * struct dpni_tx_selection_cfg - Structure representing transmission ++ * selection configuration ++ * @tc_sched: an array of traffic-classes ++ */ ++struct dpni_tx_selection_cfg { ++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; ++}; ++ ++/** ++ * dpni_set_tx_selection() - Set transmission selection configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: transmission selection configuration ++ * ++ * warning: Allowed only when DPNI is disabled ++ * ++ * Return: '0' on Success; Error code otherwise. 
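As a sketch of one possible scheduling policy (not taken from the patch): traffic class 0 strict priority and the remaining classes weighted equally; per the warning above this would be done while the DPNI is disabled.

static int example_tx_scheduling(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpni_tx_selection_cfg sel = { 0 };
        int i;

        sel.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
        for (i = 1; i < DPNI_MAX_TC; i++) {
                sel.tc_sched[i].mode = DPNI_TX_SCHED_WEIGHTED;
                sel.tc_sched[i].delta_bandwidth = 100;  /* equal weights */
        }
        return dpni_set_tx_selection(mc_io, 0, token, &sel);
}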
++ */ ++int dpni_set_tx_selection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_tx_selection_cfg *cfg); ++ ++/** ++ * enum dpni_dist_mode - DPNI distribution mode ++ * @DPNI_DIST_MODE_NONE: No distribution ++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if ++ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation ++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if ++ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation ++ */ ++enum dpni_dist_mode { ++ DPNI_DIST_MODE_NONE = 0, ++ DPNI_DIST_MODE_HASH = 1, ++ DPNI_DIST_MODE_FS = 2 ++}; ++ ++/** ++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action ++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame ++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id ++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash ++ */ ++enum dpni_fs_miss_action { ++ DPNI_FS_MISS_DROP = 0, ++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, ++ DPNI_FS_MISS_HASH = 2 ++}; ++ ++/** ++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration ++ * @miss_action: Miss action selection ++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' ++ */ ++struct dpni_fs_tbl_cfg { ++ enum dpni_fs_miss_action miss_action; ++ uint16_t default_flow_id; ++}; ++ ++/** ++ * dpni_prepare_key_cfg() - function prepare extract parameters ++ * @cfg: defining a full Key Generation profile (rule) ++ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA ++ * ++ * This function has to be called before the following functions: ++ * - dpni_set_rx_tc_dist() ++ * - dpni_set_qos_table() ++ */ ++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, ++ uint8_t *key_cfg_buf); ++ ++/** ++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration ++ * @dist_size: Set the distribution size; ++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, ++ * 112,128,192,224,256,384,448,512,768,896,1024 ++ * @dist_mode: Distribution mode ++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with ++ * the extractions to be used for the distribution key by calling ++ * dpni_prepare_key_cfg() relevant only when ++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' ++ * @fs_cfg: Flow Steering table configuration; only relevant if ++ * 'dist_mode = DPNI_DIST_MODE_FS' ++ */ ++struct dpni_rx_tc_dist_cfg { ++ uint16_t dist_size; ++ enum dpni_dist_mode dist_mode; ++ uint64_t key_cfg_iova; ++ struct dpni_fs_tbl_cfg fs_cfg; ++}; ++ ++/** ++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Traffic class distribution configuration ++ * ++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() ++ * first to prepare the key_cfg_iova parameter ++ * ++ * Return: '0' on Success; error code otherwise. 
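A sketch of the call ordering for hash distribution (illustrative; the dpkg_profile_cfg contents come from dpkg.h and are assumed to be filled in by the caller, and key_iova is assumed to be the DMA address of the 256-byte key_buf):

static int example_hash_dist(struct fsl_mc_io *mc_io, uint16_t token,
                             uint8_t tc_id,
                             const struct dpkg_profile_cfg *key_profile,
                             uint8_t *key_buf, uint64_t key_iova)
{
        struct dpni_rx_tc_dist_cfg dist = { 0 };
        int err;

        /* serialize the extraction profile into the DMA-able buffer first */
        err = dpni_prepare_key_cfg(key_profile, key_buf);
        if (err)
                return err;

        dist.dist_size = 8;                 /* example: 8 Rx queues in this TC */
        dist.dist_mode = DPNI_DIST_MODE_HASH;
        dist.key_cfg_iova = key_iova;
        return dpni_set_rx_tc_dist(mc_io, 0, token, tc_id, &dist);
}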
++ */ ++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_dist_cfg *cfg); ++ ++/** ++ * Set to select color aware mode (otherwise - color blind) ++ */ ++#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 ++/** ++ * Set to discard frame with RED color ++ */ ++#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 ++ ++/** ++ * enum dpni_policer_mode - selecting the policer mode ++ * @DPNI_POLICER_MODE_NONE: Policer is disabled ++ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through ++ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 ++ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 ++ */ ++enum dpni_policer_mode { ++ DPNI_POLICER_MODE_NONE = 0, ++ DPNI_POLICER_MODE_PASS_THROUGH, ++ DPNI_POLICER_MODE_RFC_2698, ++ DPNI_POLICER_MODE_RFC_4115 ++}; ++ ++/** ++ * enum dpni_policer_unit - DPNI policer units ++ * @DPNI_POLICER_UNIT_BYTES: bytes units ++ * @DPNI_POLICER_UNIT_FRAMES: frames units ++ */ ++enum dpni_policer_unit { ++ DPNI_POLICER_UNIT_BYTES = 0, ++ DPNI_POLICER_UNIT_FRAMES ++}; ++ ++/** ++ * enum dpni_policer_color - selecting the policer color ++ * @DPNI_POLICER_COLOR_GREEN: Green color ++ * @DPNI_POLICER_COLOR_YELLOW: Yellow color ++ * @DPNI_POLICER_COLOR_RED: Red color ++ */ ++enum dpni_policer_color { ++ DPNI_POLICER_COLOR_GREEN = 0, ++ DPNI_POLICER_COLOR_YELLOW, ++ DPNI_POLICER_COLOR_RED ++}; ++ ++/** ++ * struct dpni_rx_tc_policing_cfg - Policer configuration ++ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values ++ * @mode: policer mode ++ * @default_color: For pass-through mode the policer re-colors with this ++ * color any incoming packets. For Color aware non-pass-through mode: ++ * policer re-colors with this color all packets with FD[DROPP]>2. ++ * @units: Bytes or Packets ++ * @cir: Committed information rate (CIR) in Kbps or packets/second ++ * @cbs: Committed burst size (CBS) in bytes or packets ++ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second ++ * Excess information rate (EIR, rfc4115) in Kbps or packets/second ++ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets ++ * Excess burst size (EBS, rfc4115) in bytes or packets ++ */ ++struct dpni_rx_tc_policing_cfg { ++ uint32_t options; ++ enum dpni_policer_mode mode; ++ enum dpni_policer_unit units; ++ enum dpni_policer_color default_color; ++ uint32_t cir; ++ uint32_t cbs; ++ uint32_t eir; ++ uint32_t ebs; ++}; ++ ++/** ++ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Traffic class policing configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_policing_cfg *cfg); ++ ++/** ++ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Traffic class policing configuration ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */ ++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_rx_tc_policing_cfg *cfg); ++ ++/** ++ * enum dpni_congestion_unit - DPNI congestion units ++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units ++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units ++ */ ++enum dpni_congestion_unit { ++ DPNI_CONGESTION_UNIT_BYTES = 0, ++ DPNI_CONGESTION_UNIT_FRAMES ++}; ++ ++/** ++ * enum dpni_early_drop_mode - DPNI early drop mode ++ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled ++ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode ++ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode ++ */ ++enum dpni_early_drop_mode { ++ DPNI_EARLY_DROP_MODE_NONE = 0, ++ DPNI_EARLY_DROP_MODE_TAIL, ++ DPNI_EARLY_DROP_MODE_WRED ++}; ++ ++/** ++ * struct dpni_wred_cfg - WRED configuration ++ * @max_threshold: maximum threshold that packets may be discarded. Above this ++ * threshold all packets are discarded; must be less than 2^39; ++ * approximated to be expressed as (x+256)*2^(y-1) due to HW ++ * implementation. ++ * @min_threshold: minimum threshold that packets may be discarded at ++ * @drop_probability: probability that a packet will be discarded (1-100, ++ * associated with the max_threshold). ++ */ ++struct dpni_wred_cfg { ++ uint64_t max_threshold; ++ uint64_t min_threshold; ++ uint8_t drop_probability; ++}; ++ ++/** ++ * struct dpni_early_drop_cfg - early-drop configuration ++ * @mode: drop mode ++ * @units: units type ++ * @green: WRED - 'green' configuration ++ * @yellow: WRED - 'yellow' configuration ++ * @red: WRED - 'red' configuration ++ * @tail_drop_threshold: tail drop threshold ++ */ ++struct dpni_early_drop_cfg { ++ enum dpni_early_drop_mode mode; ++ enum dpni_congestion_unit units; ++ ++ struct dpni_wred_cfg green; ++ struct dpni_wred_cfg yellow; ++ struct dpni_wred_cfg red; ++ ++ uint32_t tail_drop_threshold; ++}; ++ ++/** ++ * dpni_prepare_early_drop() - prepare an early drop. ++ * @cfg: Early-drop configuration ++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA ++ * ++ * This function has to be called before dpni_set_rx_tc_early_drop or ++ * dpni_set_tx_tc_early_drop ++ * ++ */ ++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, ++ uint8_t *early_drop_buf); ++ ++/** ++ * dpni_extract_early_drop() - extract the early drop configuration. ++ * @cfg: Early-drop configuration ++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA ++ * ++ * This function has to be called after dpni_get_rx_tc_early_drop or ++ * dpni_get_tx_tc_early_drop ++ * ++ */ ++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, ++ const uint8_t *early_drop_buf); ++ ++/** ++ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled ++ * with the early-drop configuration by calling dpni_prepare_early_drop() ++ * ++ * warning: Before calling this function, call dpni_prepare_early_drop() to ++ * prepare the early_drop_iova parameter ++ * ++ * Return: '0' on Success; error code otherwise. 
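A sketch of the prepare/commit pattern used by the early-drop calls (threshold and probability numbers are arbitrary examples; ed_iova is assumed to be the DMA address of the 256-byte ed_buf):

static int example_rx_wred(struct fsl_mc_io *mc_io, uint16_t token,
                           uint8_t tc_id, uint8_t *ed_buf, uint64_t ed_iova)
{
        struct dpni_early_drop_cfg ed = { 0 };

        ed.mode = DPNI_EARLY_DROP_MODE_WRED;
        ed.units = DPNI_CONGESTION_UNIT_BYTES;
        ed.green.min_threshold = 64 * 1024;
        ed.green.max_threshold = 256 * 1024;
        ed.green.drop_probability = 50;
        /* yellow/red left at zero for brevity */

        dpni_prepare_early_drop(&ed, ed_buf);
        return dpni_set_rx_tc_early_drop(mc_io, 0, token, tc_id, ed_iova);
}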
++ */ ++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory ++ * ++ * warning: After calling this function, call dpni_extract_early_drop() to ++ * get the early drop configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled ++ * with the early-drop configuration by calling dpni_prepare_early_drop() ++ * ++ * warning: Before calling this function, call dpni_prepare_early_drop() to ++ * prepare the early_drop_iova parameter ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory ++ * ++ * warning: After calling this function, call dpni_extract_early_drop() to ++ * get the early drop configuration ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */ ++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * enum dpni_dest - DPNI destination types ++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and ++ * does not generate FQDAN notifications; user is expected to ++ * dequeue from the queue based on polling or other user-defined ++ * method ++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN ++ * notifications to the specified DPIO; user is expected to dequeue ++ * from the queue only after notification is received ++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate ++ * FQDAN notifications, but is connected to the specified DPCON ++ * object; user is expected to dequeue from the DPCON channel ++ */ ++enum dpni_dest { ++ DPNI_DEST_NONE = 0, ++ DPNI_DEST_DPIO = 1, ++ DPNI_DEST_DPCON = 2 ++}; ++ ++/** ++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters ++ * @dest_type: Destination type ++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type ++ * @priority: Priority selection within the DPIO or DPCON channel; valid values ++ * are 0-1 or 0-7, depending on the number of priorities in that ++ * channel; not relevant for 'DPNI_DEST_NONE' option ++ */ ++struct dpni_dest_cfg { ++ enum dpni_dest dest_type; ++ int dest_id; ++ uint8_t priority; ++}; ++ ++/* DPNI congestion options */ ++ ++/** ++ * CSCN message is written to message_iova once entering a ++ * congestion state (see 'threshold_entry') ++ */ ++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 ++/** ++ * CSCN message is written to message_iova once exiting a ++ * congestion state (see 'threshold_exit') ++ */ ++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 ++/** ++ * CSCN write will attempt to allocate into a cache (coherent write); ++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected ++ */ ++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 ++/** ++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to ++ * DPIO/DPCON's WQ channel once entering a congestion state ++ * (see 'threshold_entry') ++ */ ++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 ++/** ++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to ++ * DPIO/DPCON's WQ channel once exiting a congestion state ++ * (see 'threshold_exit') ++ */ ++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 ++/** ++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the ++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) ++ */ ++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 ++ ++/** ++ * struct dpni_congestion_notification_cfg - congestion notification ++ * configuration ++ * @units: units type ++ * @threshold_entry: above this threshold we enter a congestion state. ++ * set it to '0' to disable it ++ * @threshold_exit: below this threshold we exit the congestion state. 
++ * @message_ctx: The context that will be part of the CSCN message ++ * @message_iova: I/O virtual address (must be in DMA-able memory), ++ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is ++ * contained in 'options' ++ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel ++ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values ++ */ ++ ++struct dpni_congestion_notification_cfg { ++ enum dpni_congestion_unit units; ++ uint32_t threshold_entry; ++ uint32_t threshold_exit; ++ uint64_t message_ctx; ++ uint64_t message_iova; ++ struct dpni_dest_cfg dest_cfg; ++ uint16_t options; ++}; ++ ++/** ++ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on Success; error code otherwise. 
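A sketch of a memory-based congestion notification setup (threshold values are arbitrary; cscn_iova is assumed to point at 16-byte-aligned DMA-able memory for the CSCN message):

static int example_rx_congestion(struct fsl_mc_io *mc_io, uint16_t token,
                                 uint8_t tc_id, uint64_t cscn_iova)
{
        struct dpni_congestion_notification_cfg cn = { 0 };

        cn.units = DPNI_CONGESTION_UNIT_FRAMES;
        cn.threshold_entry = 1024;   /* enter congestion above this */
        cn.threshold_exit = 512;     /* leave congestion below this */
        cn.message_iova = cscn_iova;
        cn.message_ctx = 0;
        cn.dest_cfg.dest_type = DPNI_DEST_NONE;
        cn.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;
        return dpni_set_rx_tc_congestion_notification(mc_io, 0, token,
                                                      tc_id, &cn);
}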
++ */ ++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * enum dpni_flc_type - DPNI FLC types ++ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value ++ * @DPNI_FLC_STASH: select the FLC to be used for stash control ++ */ ++enum dpni_flc_type { ++ DPNI_FLC_USER_DEFINED = 0, ++ DPNI_FLC_STASH = 1, ++}; ++ ++/** ++ * enum dpni_stash_size - DPNI FLC stashing size ++ * @DPNI_STASH_SIZE_0B: no stash ++ * @DPNI_STASH_SIZE_64B: stashes 64 bytes ++ * @DPNI_STASH_SIZE_128B: stashes 128 bytes ++ * @DPNI_STASH_SIZE_192B: stashes 192 bytes ++ */ ++enum dpni_stash_size { ++ DPNI_STASH_SIZE_0B = 0, ++ DPNI_STASH_SIZE_64B = 1, ++ DPNI_STASH_SIZE_128B = 2, ++ DPNI_STASH_SIZE_192B = 3, ++}; ++ ++/* DPNI FLC stash options */ ++ ++/** ++ * stashes the whole annotation area (up to 192 bytes) ++ */ ++#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 ++ ++/** ++ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration ++ * @flc_type: FLC type ++ * @options: Mask of available options; ++ * use 'DPNI_FLC_STASH_' values ++ * @frame_data_size: Size of frame data to be stashed ++ * @flow_context_size: Size of flow context to be stashed ++ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': ++ * this value will be provided in the frame descriptor ++ * (FD[FLC]) ++ * 2. In case flc_type is 'DPNI_FLC_STASH': ++ * this value will be I/O virtual address of the ++ * flow-context; ++ * Must be cacheline-aligned and DMA-able memory ++ */ ++struct dpni_flc_cfg { ++ enum dpni_flc_type flc_type; ++ uint32_t options; ++ enum dpni_stash_size frame_data_size; ++ enum dpni_stash_size flow_context_size; ++ uint64_t flow_context; ++}; ++ ++/** ++ * DPNI queue modification options ++ */ ++ ++/** ++ * Select to modify the user's context associated with the queue ++ */ ++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 ++/** ++ * Select to modify the queue's destination ++ */ ++#define DPNI_QUEUE_OPT_DEST 0x00000002 ++/** Select to modify the flow-context parameters; ++ * not applicable for Tx-conf/Err queues as the FD comes from the user ++ */ ++#define DPNI_QUEUE_OPT_FLC 0x00000004 ++/** ++ * Select to modify the queue's order preservation ++ */ ++#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 ++/* Select to modify the queue's tail-drop threshold */ ++#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 ++ ++/** ++ * struct dpni_queue_cfg - Structure representing queue configuration ++ * @options: Flags representing the suggested modifications to the queue; ++ * Use any combination of 'DPNI_QUEUE_OPT_' flags ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' ++ * is contained in 'options' ++ * @dest_cfg: Queue destination parameters; ++ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' ++ * @flc_cfg: Flow context configuration; in case the TC's distribution ++ * is either NONE or HASH the FLC's settings of flow#0 are used. ++ * in the case of FS (flow-steering) the flow's FLC settings ++ * are used. 
++ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options' ++ * @order_preservation_en: enable/disable order preservation; ++ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained ++ * in 'options' ++ * @tail_drop_threshold: set the queue's tail drop threshold in bytes; ++ * '0' value disable the threshold; maximum value is 0xE000000; ++ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained ++ * in 'options' ++ */ ++struct dpni_queue_cfg { ++ uint32_t options; ++ uint64_t user_ctx; ++ struct dpni_dest_cfg dest_cfg; ++ struct dpni_flc_cfg flc_cfg; ++ int order_preservation_en; ++ uint32_t tail_drop_threshold; ++}; ++ ++/** ++ * struct dpni_queue_attr - Structure representing queue attributes ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame ++ * @dest_cfg: Queue destination configuration ++ * @flc_cfg: Flow context configuration ++ * @order_preservation_en: enable/disable order preservation ++ * @tail_drop_threshold: queue's tail drop threshold in bytes; ++ * @fqid: Virtual fqid value to be used for dequeue operations ++ */ ++struct dpni_queue_attr { ++ uint64_t user_ctx; ++ struct dpni_dest_cfg dest_cfg; ++ struct dpni_flc_cfg flc_cfg; ++ int order_preservation_en; ++ uint32_t tail_drop_threshold; ++ ++ uint32_t fqid; ++}; ++ ++/** ++ * DPNI Tx flow modification options ++ */ ++ ++/** ++ * Select to modify the settings for dedicate Tx confirmation/error ++ */ ++#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001 ++/** ++ * Select to modify the L3 checksum generation setting ++ */ ++#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010 ++/** ++ * Select to modify the L4 checksum generation setting ++ */ ++#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020 ++ ++/** ++ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration ++ * @options: Flags representing the suggested modifications to the Tx flow; ++ * Use any combination 'DPNI_TX_FLOW_OPT_' flags ++ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx ++ * confirmation and error queue; Set to '0' to use the private ++ * Tx confirmation and error queue; valid only if ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation ++ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options' ++ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable; ++ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options' ++ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable; ++ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options' ++ */ ++struct dpni_tx_flow_cfg { ++ uint32_t options; ++ int use_common_tx_conf_queue; ++ int l3_chksum_gen; ++ int l4_chksum_gen; ++}; ++ ++/** ++ * dpni_set_tx_flow() - Set Tx flow configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: Provides (or returns) the sender's flow ID; ++ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate ++ * a new flow_id; this ID should be used as the QDBIN argument ++ * in enqueue operations ++ * @cfg: Tx flow configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
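To show how a transmit path might use dpni_set_tx_flow() (declared just below), here is a minimal, hypothetical sketch that asks the MC for a new flow ID with L3/L4 checksum generation enabled; it assumes an open DPNI token and that DPNI_NEW_FLOW_ID is defined earlier in this header.

static int example_new_tx_flow(struct fsl_mc_io *mc_io, uint16_t token,
			       uint16_t *flow_id)
{
	struct dpni_tx_flow_cfg cfg = {
		.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
			   DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN,
		.l3_chksum_gen = 1,
		.l4_chksum_gen = 1,
	};

	*flow_id = DPNI_NEW_FLOW_ID;	/* ask the MC to allocate a flow ID */
	return dpni_set_tx_flow(mc_io, 0, token, flow_id, &cfg);
	/* on success, *flow_id is the QDBIN to use for enqueue operations */
}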
++ */ ++int dpni_set_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *flow_id, ++ const struct dpni_tx_flow_cfg *cfg); ++ ++/** ++ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes ++ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and ++ * error queue; '0' if using private Tx confirmation and error queue ++ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled ++ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled ++ */ ++struct dpni_tx_flow_attr { ++ int use_common_tx_conf_queue; ++ int l3_chksum_gen; ++ int l4_chksum_gen; ++}; ++ ++/** ++ * dpni_get_tx_flow() - Get Tx flow attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function ++ * @attr: Returned Tx flow attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_flow_attr *attr); ++ ++/** ++ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration ++ * @errors_only: Set to '1' to report back only error frames; ++ * Set to '0' to confirm transmission/error for all transmitted frames; ++ * @queue_cfg: Queue configuration ++ */ ++struct dpni_tx_conf_cfg { ++ int errors_only; ++ struct dpni_queue_cfg queue_cfg; ++}; ++ ++/** ++ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @cfg: Queue configuration ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported ++ * back - successfully transmitted frames are not confirmed. Otherwise, all ++ * transmitted frames are sent for confirmation. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_set_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_tx_conf_cfg *cfg); ++ ++/** ++ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes ++ * @errors_only: '1' if only error frames are reported back; '0' if all ++ * transmitted frames are confirmed ++ * @queue_attr: Queue attributes ++ */ ++struct dpni_tx_conf_attr { ++ int errors_only; ++ struct dpni_queue_attr queue_attr; ++}; ++ ++/** ++ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @attr: Returned tx-conf attributes ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_conf_attr *attr); ++ ++/** ++ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @cfg: congestion notification configuration ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @cfg: congestion notification ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */ ++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_set_tx_conf_revoke() - Tx confirmation revocation ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @revoke: revoke or not ++ * ++ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not ++ * selected at DPNI creation. ++ * Calling this function with 'revoke' set to '1' disables all transmit ++ * confirmation (including the private confirmation queues), regardless of ++ * previous settings; Note that in this case, Tx error frames are still ++ * enqueued to the general transmit errors queue. ++ * Calling this function with 'revoke' set to '0' restores the previous ++ * settings for both general and private transmit confirmation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int revoke); ++ ++/** ++ * dpni_set_rx_flow() - Set Rx flow configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7); ++ * use 'DPNI_ALL_TCS' to set all TCs and all flows ++ * @flow_id: Rx flow id within the traffic class; use ++ * 'DPNI_ALL_TC_FLOWS' to set all flows within ++ * this tc_id; ignored if tc_id is set to ++ * 'DPNI_ALL_TCS'; ++ * @cfg: Rx flow configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ const struct dpni_queue_cfg *cfg); ++ ++/** ++ * dpni_get_rx_flow() - Get Rx flow attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @flow_id: Rx flow id within the traffic class ++ * @attr: Returned Rx flow attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ struct dpni_queue_attr *attr); ++ ++/** ++ * dpni_set_rx_err_queue() - Set Rx error queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Queue configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_queue_cfg *cfg); ++ ++/** ++ * dpni_get_rx_err_queue() - Get Rx error queue attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @attr: Returned Queue attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
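For orientation, a minimal sketch of pointing an Rx flow at a DPIO with dpni_set_rx_flow() might look like the following; the DPNI token and the DPIO object ID are assumed to have been obtained elsewhere.

static int example_steer_rx_flow(struct fsl_mc_io *mc_io, uint16_t token,
				 int dpio_id, uint64_t user_ctx)
{
	struct dpni_queue_cfg cfg = {
		.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
		.user_ctx = user_ctx,		/* returned in each dequeued FD */
		.dest_cfg = {
			.dest_type = DPNI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	/* TC 0, flow 0: generate FQDANs towards the given DPIO */
	return dpni_set_rx_flow(mc_io, 0, token, 0, 0, &cfg);
}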
++ */ ++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_queue_attr *attr); ++ ++/** ++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration ++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with ++ * key extractions to be used as the QoS criteria by calling ++ * dpni_prepare_key_cfg() ++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); ++ * '0' to use the 'default_tc' in such cases ++ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 ++ */ ++struct dpni_qos_tbl_cfg { ++ uint64_t key_cfg_iova; ++ int discard_on_miss; ++ uint8_t default_tc; ++}; ++ ++/** ++ * dpni_set_qos_table() - Set QoS mapping table ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: QoS table configuration ++ * ++ * This function and all QoS-related functions require that ++ *'max_tcs > 1' was set at DPNI creation. ++ * ++ * warning: Before calling this function, call dpni_prepare_key_cfg() to ++ * prepare the key_cfg_iova parameter ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_qos_tbl_cfg *cfg); ++ ++/** ++ * struct dpni_rule_cfg - Rule configuration for table lookup ++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) ++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) ++ * @key_size: key and mask size (in bytes) ++ */ ++struct dpni_rule_cfg { ++ uint64_t key_iova; ++ uint64_t mask_iova; ++ uint8_t key_size; ++}; ++ ++/** ++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: QoS rule to add ++ * @tc_id: Traffic class selection (0-7) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_qos_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_rule_cfg *cfg, ++ uint8_t tc_id); ++ ++/** ++ * dpni_remove_qos_entry() - Remove QoS mapping entry ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: QoS rule to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_rule_cfg *cfg); ++ ++/** ++ * dpni_clear_qos_table() - Clear all QoS mapping entries ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Following this function call, all frames are directed to ++ * the default traffic class (0) ++ * ++ * Return: '0' on Success; Error code otherwise. 
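A hypothetical sketch of the QoS classification flow described above: program the mapping table, then add one rule that classifies matching frames to TC 1. The key/mask buffers and the key_cfg_iova extraction buffer (prepared beforehand with dpni_prepare_key_cfg(), declared elsewhere in this header) are assumed to already sit in DMA-able memory.

static int example_qos_map(struct fsl_mc_io *mc_io, uint16_t token,
			   uint64_t key_cfg_iova, uint64_t key_iova,
			   uint64_t mask_iova, uint8_t key_size)
{
	struct dpni_qos_tbl_cfg tbl = {
		.key_cfg_iova = key_cfg_iova,
		.discard_on_miss = 0,
		.default_tc = 0,	/* no match -> traffic class 0 */
	};
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};
	int err;

	err = dpni_set_qos_table(mc_io, 0, token, &tbl);
	if (err)
		return err;

	/* frames matching the rule are classified to traffic class 1 */
	return dpni_add_qos_entry(mc_io, 0, token, &rule, 1);
}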
++ */ ++int dpni_clear_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class ++ * (to select a flow ID) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Flow steering rule to add ++ * @flow_id: Flow id selection (must be smaller than the ++ * distribution size of the traffic class) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg, ++ uint16_t flow_id); ++ ++/** ++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific ++ * traffic class ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Flow steering rule to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg); ++ ++/** ++ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific ++ * traffic class ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id); ++ ++/** ++ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set ++ * at DPNI creation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set ++ * at DPNI creation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. ++ * ++ * Return: '0' on Success; Error code otherwise. 
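Flow steering works the same way one level down: a dpni_rule_cfg selects a flow within a traffic class rather than the class itself. A minimal, hypothetical sketch, with the same assumptions about the DMA-able key/mask buffers as above:

static int example_fs_pin(struct fsl_mc_io *mc_io, uint16_t token,
			  uint64_t key_iova, uint64_t mask_iova,
			  uint8_t key_size)
{
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,
		.mask_iova = mask_iova,
		.key_size = key_size,
	};

	/* within TC 0, steer matching frames to flow 3 */
	return dpni_add_fs_entry(mc_io, 0, token, 0, &rule, 3);
}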
++ */ ++int dpni_set_ipr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI ++ * creation. Fragmentation is performed according to MTU value ++ * set by dpni_set_mtu() function ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_ipf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++#endif /* __FSL_DPNI_H */ +diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig +new file mode 100644 +index 0000000..174a9cd +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/Kconfig +@@ -0,0 +1,24 @@ ++config FSL_DPAA2_MAC ++ tristate "DPAA2 MAC / PHY interface" ++ depends on FSL_MC_BUS && FSL_DPAA2 ++ select MDIO_BUS_MUX_MMIOREG ++ select FSL_XGMAC_MDIO ++ select FIXED_PHY ++ ---help--- ++ Prototype driver for DPAA2 MAC / PHY interface object. ++ This driver works as a proxy between phylib including phy drivers and ++ the MC firmware. It receives updates on link state changes from PHY ++ lib and forwards them to MC and receives interrupt from MC whenever ++ a request is made to change the link state. ++ ++ ++config FSL_DPAA2_MAC_NETDEVS ++ bool "Expose net interfaces for PHYs" ++ default n ++ depends on FSL_DPAA2_MAC ++ ---help--- ++ Exposes macX net interfaces which allow direct control over MACs and ++ PHYs. ++ . ++ Leave disabled if unsure. ++ +diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile +new file mode 100644 +index 0000000..bda9410 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/Makefile +@@ -0,0 +1,10 @@ ++ ++obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o ++ ++dpaa2-mac-objs := mac.o dpmac.o ++ ++all: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules ++ ++clean: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean +diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h +new file mode 100644 +index 0000000..dc00590 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h +@@ -0,0 +1,195 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPMAC_CMD_H ++#define _FSL_DPMAC_CMD_H ++ ++/* DPMAC Version */ ++#define DPMAC_VER_MAJOR 3 ++#define DPMAC_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPMAC_CMDID_CLOSE 0x800 ++#define DPMAC_CMDID_OPEN 0x80c ++#define DPMAC_CMDID_CREATE 0x90c ++#define DPMAC_CMDID_DESTROY 0x900 ++ ++#define DPMAC_CMDID_GET_ATTR 0x004 ++#define DPMAC_CMDID_RESET 0x005 ++ ++#define DPMAC_CMDID_SET_IRQ 0x010 ++#define DPMAC_CMDID_GET_IRQ 0x011 ++#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPMAC_CMDID_SET_IRQ_MASK 0x014 ++#define DPMAC_CMDID_GET_IRQ_MASK 0x015 ++#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 ++#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPMAC_CMDID_MDIO_READ 0x0c0 ++#define DPMAC_CMDID_MDIO_WRITE 0x0c1 ++#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 ++#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 ++#define DPMAC_CMDID_GET_COUNTER 0x0c4 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CREATE(cmd, cfg) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, 
irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ ++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ ++ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_MDIO_READ(cmd, data) \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_COUNTER(cmd, type) \ ++ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) ++ ++#endif /* _FSL_DPMAC_CMD_H */ +diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c +new file mode 100644 +index 0000000..fc23b40 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c +@@ -0,0 +1,422 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpmac.h" ++#include "dpmac-cmd.h" ++ ++int dpmac_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmac_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPMAC_CMD_OPEN(cmd, dpmac_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return err; ++} ++ ++int dpmac_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmac_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPMAC_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpmac_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmac_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, ++ 
cmd_flags, ++ token); ++ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmac_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to 
mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); ++ ++ return 0; ++} ++ ++int dpmac_mdio_read(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, ++ cmd_flags, ++ token); ++ DPMAC_CMD_MDIO_READ(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_MDIO_READ(cmd, cfg->data); ++ ++ return 0; ++} ++ ++int dpmac_mdio_write(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_MDIO_WRITE(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPMAC_RSP_GET_LINK_CFG(cmd, cfg); ++ ++ return 0; ++} ++ ++int dpmac_set_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_state *link_state) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpmac_counter type, ++ uint64_t *counter) ++{ ++ struct mc_command cmd = { 0 }; ++ int err = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_COUNTER(cmd, type); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPMAC_RSP_GET_COUNTER(cmd, *counter); ++ ++ return 0; ++} +diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h +new file mode 100644 +index 0000000..ad27772 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h +@@ -0,0 +1,593 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMAC_H ++#define __FSL_DPMAC_H ++ ++/* Data Path MAC API ++ * Contains initialization APIs and runtime control APIs for DPMAC ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpmac_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpmac_id: DPMAC unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmac_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmac_id, ++ uint16_t *token); ++ ++/** ++ * dpmac_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpmac_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * enum dpmac_link_type - DPMAC link type ++ * @DPMAC_LINK_TYPE_NONE: No link ++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type ++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID ++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type ++ */ ++enum dpmac_link_type { ++ DPMAC_LINK_TYPE_NONE, ++ DPMAC_LINK_TYPE_FIXED, ++ DPMAC_LINK_TYPE_PHY, ++ DPMAC_LINK_TYPE_BACKPLANE ++}; ++ ++/** ++ * enum dpmac_eth_if - DPMAC Ethrnet interface ++ * @DPMAC_ETH_IF_MII: MII interface ++ * @DPMAC_ETH_IF_RMII: RMII interface ++ * @DPMAC_ETH_IF_SMII: SMII interface ++ * @DPMAC_ETH_IF_GMII: GMII interface ++ * @DPMAC_ETH_IF_RGMII: RGMII interface ++ * @DPMAC_ETH_IF_SGMII: SGMII interface ++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface ++ * @DPMAC_ETH_IF_XAUI: XAUI interface ++ * @DPMAC_ETH_IF_XFI: XFI interface ++ */ ++enum dpmac_eth_if { ++ DPMAC_ETH_IF_MII, ++ DPMAC_ETH_IF_RMII, ++ DPMAC_ETH_IF_SMII, ++ DPMAC_ETH_IF_GMII, ++ DPMAC_ETH_IF_RGMII, ++ DPMAC_ETH_IF_SGMII, ++ DPMAC_ETH_IF_QSGMII, ++ DPMAC_ETH_IF_XAUI, ++ DPMAC_ETH_IF_XFI ++}; ++ ++/** ++ * struct dpmac_cfg - Structure representing DPMAC configuration ++ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, ++ * the MAC IDs are continuous. ++ * For example: 2 WRIOPs, 16 MACs in each: ++ * MAC IDs for the 1st WRIOP: 1-16, ++ * MAC IDs for the 2nd WRIOP: 17-32. ++ */ ++struct dpmac_cfg { ++ int mac_id; ++}; ++ ++/** ++ * dpmac_create() - Create the DPMAC object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPMAC object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpmac_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmac_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * ++ * Return: '0' on Success; error code otherwise. 
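The open/close pairing for DPMAC follows the same token model as the other MC objects; a minimal caller sketch, assuming 'mc_io' was obtained from the fsl-mc bus, might look like:

static int example_with_dpmac(struct fsl_mc_io *mc_io, int dpmac_id)
{
	uint16_t token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	/* ... issue DPMAC commands against 'token' here ... */

	return dpmac_close(mc_io, 0, token);
}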
++ */ ++int dpmac_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * DPMAC IRQ Index and Events ++ */ ++ ++/** ++ * IRQ index ++ */ ++#define DPMAC_IRQ_INDEX 0 ++/** ++ * IRQ event - indicates a change in link state ++ */ ++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 ++/** ++ * IRQ event - Indicates that the link state changed ++ */ ++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 ++ ++/** ++ * struct dpmac_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpmac_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmac_irq_cfg *irq_cfg); ++ ++/** ++ * dpmac_get_irq() - Get IRQ information from the DPMAC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmac_irq_cfg *irq_cfg); ++ ++/** ++ * dpmac_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpmac_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpmac_set_irq_mask() - Set interrupt mask. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpmac_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpmac_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpmac_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpmac_attr - Structure representing DPMAC attributes ++ * @id: DPMAC object ID ++ * @phy_id: PHY ID ++ * @link_type: link type ++ * @eth_if: Ethernet interface ++ * @max_rate: Maximum supported rate - in Mbps ++ * @version: DPMAC version ++ */ ++struct dpmac_attr { ++ int id; ++ int phy_id; ++ enum dpmac_link_type link_type; ++ enum dpmac_eth_if eth_if; ++ uint32_t max_rate; ++ /** ++ * struct version - Structure representing DPMAC version ++ * @major: DPMAC major version ++ * @minor: DPMAC minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dpmac_get_attributes - Retrieve DPMAC attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
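Putting the IRQ helpers together, a driver would typically unmask the link events, enable the interrupt, and then read-and-acknowledge the status from its handler. A hypothetical sketch, assuming an open DPMAC token:

static int example_enable_link_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
	int err;

	/* consider both link events for asserting the IRQ */
	err = dpmac_set_irq_mask(mc_io, 0, token, DPMAC_IRQ_INDEX,
				 DPMAC_IRQ_EVENT_LINK_CFG_REQ |
				 DPMAC_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	return dpmac_set_irq_enable(mc_io, 0, token, DPMAC_IRQ_INDEX, 1);
}

static int example_handle_link_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;
	int err;

	err = dpmac_get_irq_status(mc_io, 0, token, DPMAC_IRQ_INDEX, &status);
	if (err)
		return err;

	/* ... react to the DPMAC_IRQ_EVENT_* bits set in 'status' ... */

	return dpmac_clear_irq_status(mc_io, 0, token, DPMAC_IRQ_INDEX, status);
}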
++ */ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_attr *attr); ++ ++/** ++ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters ++ * @phy_addr: MDIO device address ++ * @reg: Address of the register within the Clause 45 PHY device from which data ++ * is to be read ++ * @data: Data read/write from/to MDIO ++ */ ++struct dpmac_mdio_cfg { ++ uint8_t phy_addr; ++ uint8_t reg; ++ uint16_t data; ++}; ++ ++/** ++ * dpmac_mdio_read() - Perform MDIO read transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_mdio_read(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++/** ++ * dpmac_mdio_write() - Perform MDIO write transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_mdio_write(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++/** ++ * DPMAC link configuration/state options ++ */ ++ ++/** ++ * Enable auto-negotiation ++ */ ++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/** ++ * Enable half-duplex mode ++ */ ++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/** ++ * Enable pause frames ++ */ ++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL ++/** ++ * Enable a-symmetric pause frames ++ */ ++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration ++ * @rate: Link's rate - in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ */ ++struct dpmac_link_cfg { ++ uint32_t rate; ++ uint64_t options; ++}; ++ ++/** ++ * dpmac_get_link_cfg() - Get Ethernet link configuration ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @cfg: Returned structure with the link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_cfg *cfg); ++ ++/** ++ * struct dpmac_link_state - DPMAC link configuration request ++ * @rate: Rate in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ * @up: Link state ++ */ ++struct dpmac_link_state { ++ uint32_t rate; ++ uint64_t options; ++ int up; ++}; ++ ++/** ++ * dpmac_set_link_state() - Set the Ethernet link status ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @link_state: Link state configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_state *link_state); ++ ++/** ++ * enum dpmac_counter - DPMAC counter types ++ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. 
++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
++ *	(up to max frame length specified),
++ *	good or bad.
++ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
++ *	with a wrong CRC.
++ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
++ *	specified, with a bad frame check sequence.
++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
++ *	Occurs when a receive FIFO overflows.
++ *	Also includes frames truncated as a result of
++ *	the receive FIFO overflow.
++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
++ *	(optionally used for wrong SFD).
++ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64
++ *	bytes long with a good CRC.
++ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
++ *	specified, with a good frame check sequence.
++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC).
++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
++ *	(regular and PFC).
++ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
++ *	frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
++ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
++ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
++ *	(except for undersized/fragment frames).
++ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
++ *	frames and valid pause frames transmitted.
++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
++ *	pause frames.
++ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
++ *	pause frames.
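For orientation, the enum below pairs with the dpmac_get_counter() accessor declared right after it; a minimal sketch of reading a single counter follows (the wrapper name is hypothetical, and an open DPMAC token is assumed).

/* Sketch only: read the ingress byte counter of an open DPMAC. */
static int dpmac_read_rx_bytes(struct fsl_mc_io *mc_io, uint16_t token,
			       uint64_t *rx_bytes)
{
	return dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, rx_bytes);
}

The mac.c driver below aggregates several of these counters the same way when filling struct rtnl_link_stats64.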
++ */ ++enum dpmac_counter { ++ DPMAC_CNT_ING_FRAME_64, ++ DPMAC_CNT_ING_FRAME_127, ++ DPMAC_CNT_ING_FRAME_255, ++ DPMAC_CNT_ING_FRAME_511, ++ DPMAC_CNT_ING_FRAME_1023, ++ DPMAC_CNT_ING_FRAME_1518, ++ DPMAC_CNT_ING_FRAME_1519_MAX, ++ DPMAC_CNT_ING_FRAG, ++ DPMAC_CNT_ING_JABBER, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ DPMAC_CNT_ING_ALIGN_ERR, ++ DPMAC_CNT_EGR_UNDERSIZED, ++ DPMAC_CNT_ING_OVERSIZED, ++ DPMAC_CNT_ING_VALID_PAUSE_FRAME, ++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, ++ DPMAC_CNT_ING_BYTE, ++ DPMAC_CNT_ING_MCAST_FRAME, ++ DPMAC_CNT_ING_BCAST_FRAME, ++ DPMAC_CNT_ING_ALL_FRAME, ++ DPMAC_CNT_ING_UCAST_FRAME, ++ DPMAC_CNT_ING_ERR_FRAME, ++ DPMAC_CNT_EGR_BYTE, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ DPMAC_CNT_EGR_BCAST_FRAME, ++ DPMAC_CNT_EGR_UCAST_FRAME, ++ DPMAC_CNT_EGR_ERR_FRAME, ++ DPMAC_CNT_ING_GOOD_FRAME, ++ DPMAC_CNT_ENG_GOOD_FRAME ++}; ++ ++/** ++ * dpmac_get_counter() - Read a specific DPMAC counter ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @type: The requested counter ++ * @counter: Returned counter value ++ * ++ * Return: The requested counter; '0' otherwise. ++ */ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpmac_counter type, ++ uint64_t *counter); ++ ++#endif /* __FSL_DPMAC_H */ +diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c +new file mode 100644 +index 0000000..366ad4c +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/mac.c +@@ -0,0 +1,694 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../fsl-mc/include/mc.h" ++#include "../../fsl-mc/include/mc-sys.h" ++ ++#include "dpmac.h" ++#include "dpmac-cmd.h" ++ ++#define DPAA2_SUPPORTED_DPMAC_VERSION 3 ++ ++struct dpaa2_mac_priv { ++ struct net_device *netdev; ++ struct fsl_mc_device *mc_dev; ++ struct dpmac_attr attr; ++ struct dpmac_link_state old_state; ++}; ++ ++/* TODO: fix the 10G modes, mapping can't be right: ++ * XGMII is paralel ++ * XAUI is serial, using 8b/10b encoding ++ * XFI is also serial but using 64b/66b encoding ++ * they can't all map to XGMII... ++ * ++ * This must be kept in sync with enum dpmac_eth_if. ++ */ ++static phy_interface_t dpaa2_mac_iface_mode[] = { ++ /* DPMAC_ETH_IF_MII */ ++ PHY_INTERFACE_MODE_MII, ++ /* DPMAC_ETH_IF_RMII */ ++ PHY_INTERFACE_MODE_RMII, ++ /* DPMAC_ETH_IF_SMII */ ++ PHY_INTERFACE_MODE_SMII, ++ /* DPMAC_ETH_IF_GMII */ ++ PHY_INTERFACE_MODE_GMII, ++ /* DPMAC_ETH_IF_RGMII */ ++ PHY_INTERFACE_MODE_RGMII, ++ /* DPMAC_ETH_IF_SGMII */ ++ PHY_INTERFACE_MODE_SGMII, ++ /* DPMAC_ETH_IF_QSGMII */ ++ PHY_INTERFACE_MODE_QSGMII, ++ /* DPMAC_ETH_IF_XAUI */ ++ PHY_INTERFACE_MODE_XGMII, ++ /* DPMAC_ETH_IF_XFI */ ++ PHY_INTERFACE_MODE_XGMII, ++}; ++ ++static void dpaa2_mac_link_changed(struct net_device *netdev) ++{ ++ struct phy_device *phydev; ++ struct dpmac_link_state state = { 0 }; ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ int err; ++ ++ /* the PHY just notified us of link state change */ ++ phydev = netdev->phydev; ++ ++ state.up = !!phydev->link; ++ if (phydev->link) { ++ state.rate = phydev->speed; ++ ++ if (!phydev->duplex) ++ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; ++ if (phydev->autoneg) ++ state.options |= DPMAC_LINK_OPT_AUTONEG; ++ ++ netif_carrier_on(netdev); ++ } else { ++ netif_carrier_off(netdev); ++ } ++ ++ if (priv->old_state.up != state.up || ++ priv->old_state.rate != state.rate || ++ priv->old_state.options != state.options) { ++ priv->old_state = state; ++ phy_print_status(phydev); ++ } ++ ++ /* We must call into the MC firmware at all times, because we don't know ++ * when and whether a potential DPNI may have read the link state. 
++ */ ++ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, ++ priv->mc_dev->mc_handle, &state); ++ if (unlikely(err)) ++ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); ++} ++ ++/* IRQ bits that we handle */ ++static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, ++ struct net_device *dev) ++{ ++ /* we don't support I/O for now, drop the frame */ ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++} ++ ++static int dpaa2_mac_open(struct net_device *netdev) ++{ ++ /* start PHY state machine */ ++ phy_start(netdev->phydev); ++ ++ return 0; ++} ++ ++static int dpaa2_mac_stop(struct net_device *netdev) ++{ ++ if (!netdev->phydev) ++ goto done; ++ ++ /* stop PHY state machine */ ++ phy_stop(netdev->phydev); ++ ++ /* signal link down to firmware */ ++ netdev->phydev->link = 0; ++ dpaa2_mac_link_changed(netdev); ++ ++done: ++ return 0; ++} ++ ++static int dpaa2_mac_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ return phy_ethtool_gset(netdev->phydev, cmd); ++} ++ ++static int dpaa2_mac_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ return phy_ethtool_sset(netdev->phydev, cmd); ++} ++ ++static struct rtnl_link_stats64 ++*dpaa2_mac_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *storage) ++{ ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ u64 tmp; ++ int err; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ &storage->tx_packets); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_BCAST_FRAME, &tmp); ++ if (err) ++ goto error; ++ storage->tx_packets += tmp; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_UCAST_FRAME, &tmp); ++ if (err) ++ goto error; ++ storage->tx_packets += tmp; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); ++ if (err) ++ goto error; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ &storage->rx_dropped); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_OVERSIZED, &tmp); ++ if (err) ++ goto error; ++ storage->rx_errors += tmp; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_BYTE, &storage->rx_bytes); ++ if (err) ++ goto error; ++ ++ return storage; ++ ++error: ++ netdev_err(netdev, "dpmac_get_counter err %d\n", err); ++ return storage; ++} ++ ++static struct { ++ enum dpmac_counter id; ++ 
char name[ETH_GSTRING_LEN]; ++} dpaa2_mac_counters[] = { ++ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, ++ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, ++ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, ++ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, ++ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, ++ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, ++ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, ++ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, ++ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, ++ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, ++ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, ++ {DPMAC_CNT_ING_FRAG, "rx frags"}, ++ {DPMAC_CNT_ING_JABBER, "rx jabber"}, ++ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, ++ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, ++ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, ++ {DPMAC_CNT_ING_BYTE, "rx bytes"}, ++ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, ++ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, ++ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, ++ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, ++ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, ++ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, ++ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, ++ {DPMAC_CNT_EGR_BYTE, "tx bytes"}, ++ ++}; ++ ++static void dpaa2_mac_get_strings(struct net_device *netdev, ++ u32 stringset, u8 *data) ++{ ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) ++ memcpy(data + i * ETH_GSTRING_LEN, ++ dpaa2_mac_counters[i].name, ++ ETH_GSTRING_LEN); ++ break; ++ } ++} ++ ++static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ int i; ++ int err; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { ++ err = dpmac_get_counter(priv->mc_dev->mc_io, ++ 0, ++ priv->mc_dev->mc_handle, ++ dpaa2_mac_counters[i].id, &data[i]); ++ if (err) ++ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", ++ dpaa2_mac_counters[i].name, err); ++ } ++} ++ ++static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(dpaa2_mac_counters); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static const struct net_device_ops dpaa2_mac_ndo_ops = { ++ .ndo_start_xmit = &dpaa2_mac_drop_frame, ++ .ndo_open = &dpaa2_mac_open, ++ .ndo_stop = &dpaa2_mac_stop, ++ .ndo_get_stats64 = &dpaa2_mac_get_stats, ++}; ++ ++static const struct ethtool_ops dpaa2_mac_ethtool_ops = { ++ .get_settings = &dpaa2_mac_get_settings, ++ .set_settings = &dpaa2_mac_set_settings, ++ .get_strings = &dpaa2_mac_get_strings, ++ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats, ++ .get_sset_count = &dpaa2_mac_get_sset_count, ++}; ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ ++static void configure_link(struct dpaa2_mac_priv *priv, ++ struct dpmac_link_cfg *cfg) ++{ ++ struct phy_device *phydev = priv->netdev->phydev; ++ ++ if (unlikely(!phydev)) ++ return; ++ ++ phydev->speed = cfg->rate; ++ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); ++ ++ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) { ++ phydev->autoneg = 1; ++ phydev->advertising |= ADVERTISED_Autoneg; ++ } else { ++ phydev->autoneg = 0; ++ phydev->advertising &= ~ADVERTISED_Autoneg; ++ } ++ ++ phy_start_aneg(phydev); ++} ++ ++static irqreturn_t dpaa2_mac_irq_handler(int 
irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ struct dpmac_link_cfg link_cfg; ++ u32 status; ++ int err; ++ ++ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, &status); ++ if (unlikely(err || !status)) ++ return IRQ_NONE; ++ ++ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ ++ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { ++ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ &link_cfg); ++ if (unlikely(err)) ++ goto out; ++ ++ configure_link(priv, &link_cfg); ++ } ++ ++out: ++ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, status); ++ ++ return IRQ_HANDLED; ++} ++ ++static int setup_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int err; ++ ++ err = fsl_mc_allocate_irqs(mc_dev); ++ if (err) { ++ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err); ++ return err; ++ } ++ ++ err = devm_request_threaded_irq(&mc_dev->dev, ++ mc_dev->irqs[0]->irq_number, ++ NULL, &dpaa2_mac_irq_handler, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(&mc_dev->dev), &mc_dev->dev); ++ if (err) { ++ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n", ++ err); ++ goto free_irq; ++ } ++ ++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, dpmac_irq_mask); ++ if (err) { ++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); ++ goto free_irq; ++ } ++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, 1); ++ if (err) { ++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); ++ goto free_irq; ++ } ++ ++ return 0; ++ ++free_irq: ++ fsl_mc_free_irqs(mc_dev); ++ ++ return err; ++} ++ ++static void teardown_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int err; ++ ++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, dpmac_irq_mask); ++ if (err) ++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); ++ ++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, 0); ++ if (err) ++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); ++ ++ fsl_mc_free_irqs(mc_dev); ++} ++ ++static struct device_node *lookup_node(struct device *dev, int dpmac_id) ++{ ++ struct device_node *dpmacs, *dpmac = NULL; ++ struct device_node *mc_node = dev->of_node; ++ u32 id; ++ int err; ++ ++ dpmacs = of_find_node_by_name(mc_node, "dpmacs"); ++ if (!dpmacs) { ++ dev_err(dev, "No dpmacs subnode in device-tree\n"); ++ return NULL; ++ } ++ ++ while ((dpmac = of_get_next_child(dpmacs, dpmac))) { ++ err = of_property_read_u32(dpmac, "reg", &id); ++ if (err) ++ continue; ++ if (id == dpmac_id) ++ return dpmac; ++ } ++ ++ return NULL; ++} ++ ++static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev; ++ struct dpaa2_mac_priv *priv = NULL; ++ struct device_node *phy_node, *dpmac_node; ++ struct net_device *netdev; ++ phy_interface_t if_mode; ++ int err = 0; ++ ++ dev = &mc_dev->dev; ++ ++ /* prepare a net_dev structure to make the phy lib API happy */ ++ netdev = alloc_etherdev(sizeof(*priv)); ++ if (!netdev) { ++ dev_err(dev, "alloc_etherdev error\n"); ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ priv = netdev_priv(netdev); ++ priv->mc_dev = mc_dev; ++ priv->netdev = netdev; ++ ++ SET_NETDEV_DEV(netdev, dev); ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); ++#endif ++ ++ 
dev_set_drvdata(dev, priv); ++ ++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); ++ if (err || !mc_dev->mc_io) { ++ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err); ++ err = -ENODEV; ++ goto err_free_netdev; ++ } ++ ++ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (err || !mc_dev->mc_handle) { ++ dev_err(dev, "dpmac_open error: %d\n", err); ++ err = -ENODEV; ++ goto err_free_mcp; ++ } ++ ++ err = dpmac_get_attributes(mc_dev->mc_io, 0, ++ mc_dev->mc_handle, &priv->attr); ++ if (err) { ++ dev_err(dev, "dpmac_get_attributes err %d\n", err); ++ err = -EINVAL; ++ goto err_close; ++ } ++ ++ dev_info_once(dev, "Using DPMAC API %d.%d\n", ++ priv->attr.version.major, priv->attr.version.minor); ++ ++ /* Look up the DPMAC node in the device-tree. */ ++ dpmac_node = lookup_node(dev, priv->attr.id); ++ if (!dpmac_node) { ++ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id); ++ err = -ENODEV; ++ goto err_close; ++ } ++ ++ err = setup_irqs(mc_dev); ++ if (err) { ++ err = -EFAULT; ++ goto err_close; ++ } ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ /* OPTIONAL, register netdev just to make it visible to the user */ ++ netdev->netdev_ops = &dpaa2_mac_ndo_ops; ++ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops; ++ ++ /* phy starts up enabled so netdev should be up too */ ++ netdev->flags |= IFF_UP; ++ ++ err = register_netdev(priv->netdev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev error %d\n", err); ++ err = -ENODEV; ++ goto err_free_irq; ++ } ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ ++ /* probe the PHY as a fixed-link if the link type declared in DPC ++ * explicitly mandates this ++ */ ++ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED) ++ goto probe_fixed_link; ++ ++ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) { ++ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if]; ++ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n", ++ phy_modes(if_mode), priv->attr.eth_if); ++ } else { ++ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n", ++ priv->attr.eth_if); ++ goto probe_fixed_link; ++ } ++ ++ /* try to connect to the PHY */ ++ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); ++ if (!phy_node) { ++ if (!phy_node) { ++ dev_err(dev, "dpmac node has no phy-handle property\n"); ++ err = -ENODEV; ++ goto err_no_phy; ++ } ++ } ++ netdev->phydev = of_phy_connect(netdev, phy_node, ++ &dpaa2_mac_link_changed, 0, if_mode); ++ if (!netdev->phydev) { ++ /* No need for dev_err(); the kernel's loud enough as it is. */ ++ dev_dbg(dev, "Can't of_phy_connect() now.\n"); ++ /* We might be waiting for the MDIO MUX to probe, so defer ++ * our own probing. 
++ */ ++ err = -EPROBE_DEFER; ++ goto err_defer; ++ } ++ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); ++ ++probe_fixed_link: ++ if (!netdev->phydev) { ++ struct fixed_phy_status status = { ++ .link = 1, ++ /* fixed-phys don't support 10Gbps speed for now */ ++ .speed = 1000, ++ .duplex = 1, ++ }; ++ ++ /* try to register a fixed link phy */ ++ netdev->phydev = fixed_phy_register(PHY_POLL, &status, NULL); ++ if (!netdev->phydev || IS_ERR(netdev->phydev)) { ++ dev_err(dev, "error trying to register fixed PHY\n"); ++ /* So we don't crash unregister_netdev() later on */ ++ netdev->phydev = NULL; ++ err = -EFAULT; ++ goto err_no_phy; ++ } ++ dev_info(dev, "Registered fixed PHY.\n"); ++ } ++ ++ /* start PHY state machine */ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ dpaa2_mac_open(netdev); ++#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ phy_start(netdev->phydev); ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ return 0; ++ ++err_defer: ++err_no_phy: ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ unregister_netdev(netdev); ++err_free_irq: ++#endif ++ teardown_irqs(mc_dev); ++err_close: ++ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++err_free_mcp: ++ fsl_mc_portal_free(mc_dev->mc_io); ++err_free_netdev: ++ free_netdev(netdev); ++err_exit: ++ return err; ++} ++ ++static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev = &mc_dev->dev; ++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ unregister_netdev(priv->netdev); ++#endif ++ teardown_irqs(priv->mc_dev); ++ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); ++ fsl_mc_portal_free(priv->mc_dev->mc_io); ++ free_netdev(priv->netdev); ++ ++ dev_set_drvdata(dev, NULL); ++ kfree(priv); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpmac", ++ .ver_major = DPMAC_VER_MAJOR, ++ .ver_minor = DPMAC_VER_MINOR, ++ }, ++ {} ++}; ++ ++static struct fsl_mc_driver dpaa2_mac_drv = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_mac_probe, ++ .remove = dpaa2_mac_remove, ++ .match_id_table = dpaa2_mac_match_id_table, ++}; ++ ++module_fsl_mc_driver(dpaa2_mac_drv); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); +diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig +new file mode 100644 +index 0000000..32df07b +--- /dev/null ++++ b/drivers/staging/fsl-mc/Kconfig +@@ -0,0 +1 @@ ++source "drivers/staging/fsl-mc/bus/Kconfig" +diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile +new file mode 100644 +index 0000000..9c6a001 +--- /dev/null ++++ b/drivers/staging/fsl-mc/Makefile +@@ -0,0 +1,2 @@ ++# Freescale Management Complex (MC) bus drivers ++obj-$(CONFIG_FSL_MC_BUS) += bus/ +diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO +new file mode 100644 +index 0000000..d78288b +--- /dev/null ++++ b/drivers/staging/fsl-mc/TODO +@@ -0,0 +1,13 @@ ++* Add README file (with ASCII art) describing relationships between ++ DPAA2 objects and how combine them to make a NIC, an LS2 switch, etc. ++ Also, define all acronyms used. ++ ++* Decide if multiple root fsl-mc buses will be supported per Linux instance, ++ and if so add support for this. ++ ++* Add at least one device driver for a DPAA2 object (child device of the ++ fsl-mc bus). 
++ ++Please send any patches to Greg Kroah-Hartman , ++german.rivera@freescale.com, devel@driverdev.osuosl.org, ++linux-kernel@vger.kernel.org +diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig +new file mode 100644 +index 0000000..8bef5b8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/Kconfig +@@ -0,0 +1,45 @@ ++# ++# Freescale Management Complex (MC) bus drivers ++# ++# Copyright (C) 2014 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++ ++config FSL_MC_BUS ++ tristate "Freescale Management Complex (MC) bus driver" ++ depends on OF && ARM64 ++ help ++ Driver to enable the bus infrastructure for the Freescale ++ QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware ++ module of the QorIQ LS2 SoCs, that does resource management ++ for hardware building-blocks in the SoC that can be used ++ to dynamically create networking hardware objects such as ++ network interfaces (NICs), crypto accelerator instances, ++ or L2 switches. ++ ++ Only enable this option when building the kernel for ++ Freescale QorQIQ LS2xxxx SoCs. ++ ++config FSL_MC_RESTOOL ++ tristate "Freescale Management Complex (MC) restool driver" ++ depends on FSL_MC_BUS ++ help ++ Driver that provides kernel support for the Freescale Management ++ Complex resource manager user-space tool. ++ ++config FSL_MC_DPIO ++ tristate "Freescale Data Path I/O (DPIO) driver" ++ depends on FSL_MC_BUS ++ help ++ Driver for Freescale Data Path I/O (DPIO) devices. ++ A DPIO device provides queue and buffer management facilities ++ for software to interact with other Data Path devices. This ++ driver does not expose the DPIO device individually, but ++ groups them under a service layer API. ++ ++config FSL_QBMAN_DEBUG ++ tristate "Freescale QBMAN Debug APIs" ++ depends on FSL_MC_DPIO ++ help ++ QBMan debug assistant APIs. +diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile +new file mode 100644 +index 0000000..f29399c +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/Makefile +@@ -0,0 +1,24 @@ ++# ++# Freescale Management Complex (MC) bus drivers ++# ++# Copyright (C) 2014 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o ++ ++mc-bus-driver-objs := mc-bus.o \ ++ mc-sys.o \ ++ dprc.o \ ++ dpmng.o \ ++ dprc-driver.o \ ++ mc-allocator.o \ ++ dpmcp.o \ ++ dpbp.o \ ++ dpcon.o ++ ++# MC restool kernel support ++obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o ++ ++# MC DPIO driver ++obj-$(CONFIG_FSL_MC_DPIO) += dpio/ +diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c +new file mode 100644 +index 0000000..f183121 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpbp.c +@@ -0,0 +1,459 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions are met: ++* * Redistributions of source code must retain the above copyright ++* notice, this list of conditions and the following disclaimer. ++* * Redistributions in binary form must reproduce the above copyright ++* notice, this list of conditions and the following disclaimer in the ++* documentation and/or other materials provided with the distribution. 
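As a usage note for the Kconfig above: a kernel configuration fragment enabling the whole stack would look roughly like the sketch below (only the option names introduced above are assumed; FSL_MC_BUS additionally depends on OF && ARM64).

CONFIG_FSL_MC_BUS=y
CONFIG_FSL_MC_RESTOOL=m
CONFIG_FSL_MC_DPIO=y
CONFIG_FSL_QBMAN_DEBUG=m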
++* * Neither the name of the above-listed copyright holders nor the ++* names of any contributors may be used to endorse or promote products ++* derived from this software without specific prior written permission. ++* ++* ++* ALTERNATIVELY, this software may be distributed under the terms of the ++* GNU General Public License ("GPL") as published by the Free Software ++* Foundation, either version 2 of that License or (at your option) any ++* later version. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++* POSSIBILITY OF SUCH DAMAGE. ++*/ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpbp.h" ++#include "../include/dpbp-cmd.h" ++ ++int dpbp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpbp_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ ++ cmd.params[0] |= mc_enc(0, 32, dpbp_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpbp_open); ++ ++int dpbp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpbp_close); ++ ++int dpbp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpbp_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ (void)(cfg); /* unused */ ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpbp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpbp_enable); ++ ++int dpbp_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = 
{ 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpbp_disable); ++ ++int dpbp_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = (int)mc_dec(cmd.params[0], 0, 1); ++ ++ return 0; ++} ++ ++int dpbp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpbp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 8, irq_index); ++ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpbp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->addr = (uint64_t)mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); ++ *type = (int)mc_dec(cmd.params[2], 32, 32); ++ ++ return 0; ++} ++ ++int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 8, en); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); ++ return 0; ++} ++ ++int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, mask); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpbp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, *status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ attr->bpid = (uint16_t)mc_dec(cmd.params[0], 16, 16); ++ attr->id = (int)mc_dec(cmd.params[0], 32, 32); ++ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); ++ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); ++ return 0; ++} ++EXPORT_SYMBOL(dpbp_get_attributes); ++ ++int dpbp_set_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry); ++ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit); ++ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry); ++ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit); ++ cmd.params[2] |= mc_enc(0, 16, cfg->options); ++ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx); ++ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ cfg->depletion_entry = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ cfg->depletion_exit = (uint32_t)mc_dec(cmd.params[0], 32, 32); ++ cfg->surplus_entry = (uint32_t)mc_dec(cmd.params[1], 0, 32); ++ cfg->surplus_exit = (uint32_t)mc_dec(cmd.params[1], 32, 32); ++ cfg->options = (uint16_t)mc_dec(cmd.params[2], 0, 16); ++ cfg->message_ctx = (uint64_t)mc_dec(cmd.params[3], 0, 64); ++ cfg->message_iova = (uint64_t)mc_dec(cmd.params[4], 0, 64); ++ ++ return 0; ++} +diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c +new file mode 100644 +index 0000000..7965284 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpcon.c +@@ -0,0 +1,407 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
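The dpbp command marshalling above leans on mc_enc()/mc_dec() to place a field at a given bit offset and width inside the 64-bit cmd.params words; for example, dpbp_open() packs dpbp_id into the low 32 bits of params[0] with mc_enc(0, 32, dpbp_id). A rough sketch of the semantics these helpers are assumed to have (the real definitions live in the mc command headers, not in this patch):

/* Sketch only: pack 'val' into bits [off, off + width) of a 64-bit word. */
static inline uint64_t mc_enc_sketch(int off, int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (val & mask) << off;
}

/* Sketch only: extract bits [off, off + width) from a 64-bit word. */
static inline uint64_t mc_dec_sketch(uint64_t word, int off, int width)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (word >> off) & mask;
}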
++ */ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpcon.h" ++#include "../include/dpcon-cmd.h" ++ ++int dpcon_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpcon_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPCON_CMD_OPEN(cmd, dpcon_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_open); ++ ++int dpcon_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_close); ++ ++int dpcon_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpcon_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPCON_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpcon_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_enable); ++ ++int dpcon_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_disable); ++ ++int dpcon_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpcon_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET, ++ cmd_flags, token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpcon_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, ++ 
cmd_flags, ++ token); ++ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpcon_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpcon_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to 
mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_get_attributes); ++ ++int dpcon_set_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_set_notification); ++ +diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile +new file mode 100644 +index 0000000..c20356b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile +@@ -0,0 +1,9 @@ ++# ++# Freescale DPIO driver ++# ++ ++obj-$(CONFIG_FSL_MC_BUS) += fsl-dpio-drv.o ++ ++fsl-dpio-drv-objs := dpio-drv.o dpio_service.o dpio.o qbman_portal.o ++ ++obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c +new file mode 100644 +index 0000000..80add27 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c +@@ -0,0 +1,401 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../include/mc.h" ++#include "../../include/fsl_dpaa2_io.h" ++ ++#include "fsl_qbman_portal.h" ++#include "fsl_dpio.h" ++#include "fsl_dpio_cmd.h" ++ ++#include "dpio-drv.h" ++ ++#define DPIO_DESCRIPTION "DPIO Driver" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION(DPIO_DESCRIPTION); ++ ++#define MAX_DPIO_IRQ_NAME 16 /* Big enough for "FSL DPIO %d" */ ++ ++struct dpio_priv { ++ struct dpaa2_io *io; ++ char irq_name[MAX_DPIO_IRQ_NAME]; ++ struct task_struct *thread; ++}; ++ ++static int dpio_thread(void *data) ++{ ++ struct dpaa2_io *io = data; ++ ++ while (!kthread_should_stop()) { ++ int err = dpaa2_io_poll(io); ++ ++ if (err) { ++ pr_err("dpaa2_io_poll() failed\n"); ++ return err; ++ } ++ msleep(50); ++ } ++ return 0; ++} ++ ++static irqreturn_t dpio_irq_handler(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct dpio_priv *priv = dev_get_drvdata(dev); ++ ++ return dpaa2_io_irq(priv->io); ++} ++ ++static void unregister_dpio_irq_handlers(struct fsl_mc_device *ls_dev) ++{ ++ int i; ++ struct fsl_mc_device_irq *irq; ++ int irq_count = ls_dev->obj_desc.irq_count; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = ls_dev->irqs[i]; ++ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); ++ } ++} ++ ++static int register_dpio_irq_handlers(struct fsl_mc_device *ls_dev, int cpu) ++{ ++ struct dpio_priv *priv; ++ unsigned int i; ++ int error; ++ struct fsl_mc_device_irq *irq; ++ unsigned int num_irq_handlers_registered = 0; ++ int irq_count = ls_dev->obj_desc.irq_count; ++ cpumask_t mask; ++ ++ priv = dev_get_drvdata(&ls_dev->dev); ++ ++ if (WARN_ON(irq_count != 1)) ++ return -EINVAL; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = ls_dev->irqs[i]; ++ error = devm_request_irq(&ls_dev->dev, ++ irq->irq_number, ++ dpio_irq_handler, ++ 0, ++ priv->irq_name, ++ &ls_dev->dev); ++ if (error < 0) { ++ dev_err(&ls_dev->dev, ++ "devm_request_irq() failed: %d\n", ++ error); ++ goto error_unregister_irq_handlers; ++ } ++ ++ /* Set the IRQ affinity */ ++ cpumask_clear(&mask); ++ cpumask_set_cpu(cpu, &mask); ++ if (irq_set_affinity(irq->irq_number, &mask)) ++ pr_err("irq_set_affinity failed irq %d cpu %d\n", ++ irq->irq_number, cpu); ++ ++ num_irq_handlers_registered++; ++ } ++ ++ return 0; ++ ++error_unregister_irq_handlers: ++ for (i = 0; i < num_irq_handlers_registered; i++) { ++ irq = ls_dev->irqs[i]; ++ devm_free_irq(&ls_dev->dev, irq->irq_number, ++ &ls_dev->dev); ++ } ++ ++ return error; ++} ++ ++static int __cold ++dpaa2_dpio_probe(struct fsl_mc_device *ls_dev) ++{ ++ struct dpio_attr dpio_attrs; ++ struct dpaa2_io_desc desc; ++ struct dpio_priv *priv; ++ int err = -ENOMEM; ++ struct device *dev = &ls_dev->dev; ++ struct dpaa2_io *defservice; ++ bool irq_allocated = false; ++ static int next_cpu; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ goto err_priv_alloc; ++ ++ dev_set_drvdata(dev, priv); ++ ++ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ err = -EPROBE_DEFER; ++ goto err_mcportal; ++ } ++ ++ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, ++ &ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpio_get_attributes(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ &dpio_attrs); ++ if (err) { ++ dev_err(dev, 
"dpio_get_attributes() failed %d\n", err); ++ goto err_get_attr; ++ } ++ err = dpio_enable(ls_dev->mc_io, 0, ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_enable() failed %d\n", err); ++ goto err_get_attr; ++ } ++ pr_info("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n", ++ ls_dev->regions[0].start, ++ ls_dev->regions[1].start, ++ dpio_attrs.qbman_portal_id, ++ dpio_attrs.num_priorities); ++ ++ pr_info("ce_size=0x%llx, ci_size=0x%llx\n", ++ resource_size(&ls_dev->regions[0]), ++ resource_size(&ls_dev->regions[1])); ++ ++ desc.qman_version = dpio_attrs.qbman_version; ++ /* Build DPIO driver object out of raw MC object */ ++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; ++ desc.has_irq = 1; ++ desc.will_poll = 1; ++ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0; ++ desc.cpu = next_cpu; ++ desc.stash_affinity = next_cpu; ++ next_cpu = (next_cpu + 1) % num_active_cpus(); ++ desc.dpio_id = ls_dev->obj_desc.id; ++ desc.regs_cena = ioremap_cache_ns(ls_dev->regions[0].start, ++ resource_size(&ls_dev->regions[0])); ++ desc.regs_cinh = ioremap(ls_dev->regions[1].start, ++ resource_size(&ls_dev->regions[1])); ++ ++ err = fsl_mc_allocate_irqs(ls_dev); ++ if (err) { ++ dev_err(dev, "DPIO fsl_mc_allocate_irqs failed\n"); ++ desc.has_irq = 0; ++ } else { ++ irq_allocated = true; ++ ++ snprintf(priv->irq_name, MAX_DPIO_IRQ_NAME, "FSL DPIO %d", ++ desc.dpio_id); ++ ++ err = register_dpio_irq_handlers(ls_dev, desc.cpu); ++ if (err) ++ desc.has_irq = 0; ++ } ++ ++ priv->io = dpaa2_io_create(&desc); ++ if (!priv->io) { ++ dev_err(dev, "DPIO setup failed\n"); ++ goto err_dpaa2_io_create; ++ } ++ ++ /* If no irq then go to poll mode */ ++ if (desc.has_irq == 0) { ++ dev_info(dev, "Using polling mode for DPIO %d\n", ++ desc.dpio_id); ++ /* goto err_register_dpio_irq; */ ++ /* TEMP: Start polling if IRQ could not ++ be registered. 
This will go away once ++ KVM support for MSI is present */ ++ if (irq_allocated == true) ++ fsl_mc_free_irqs(ls_dev); ++ ++ if (desc.stash_affinity) ++ priv->thread = kthread_create_on_cpu(dpio_thread, ++ priv->io, ++ desc.cpu, ++ "dpio_aff%u"); ++ else ++ priv->thread = ++ kthread_create(dpio_thread, ++ priv->io, ++ "dpio_non%u", ++ dpio_attrs.qbman_portal_id); ++ if (IS_ERR(priv->thread)) { ++ dev_err(dev, "DPIO thread failure\n"); ++ err = PTR_ERR(priv->thread); ++ goto err_dpaa_thread; ++ } ++ wake_up_process(priv->thread); ++ } ++ ++ defservice = dpaa2_io_default_service(); ++ err = dpaa2_io_service_add(defservice, priv->io); ++ dpaa2_io_down(defservice); ++ if (err) { ++ dev_err(dev, "DPIO add-to-service failed\n"); ++ goto err_dpaa2_io_add; ++ } ++ ++ dev_info(dev, "dpio: probed object %d\n", ls_dev->obj_desc.id); ++ dev_info(dev, " receives_notifications = %d\n", ++ desc.receives_notifications); ++ dev_info(dev, " has_irq = %d\n", desc.has_irq); ++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); ++ fsl_mc_portal_free(ls_dev->mc_io); ++ return 0; ++ ++err_dpaa2_io_add: ++ unregister_dpio_irq_handlers(ls_dev); ++/* TEMP: To be restored once polling is removed ++ err_register_dpio_irq: ++ fsl_mc_free_irqs(ls_dev); ++*/ ++err_dpaa_thread: ++err_dpaa2_io_create: ++ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); ++err_get_attr: ++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); ++err_open: ++ fsl_mc_portal_free(ls_dev->mc_io); ++err_mcportal: ++ dev_set_drvdata(dev, NULL); ++ devm_kfree(dev, priv); ++err_priv_alloc: ++ return err; ++} ++ ++/* ++ * Tear down interrupts for a given DPIO object ++ */ ++static void dpio_teardown_irqs(struct fsl_mc_device *ls_dev) ++{ ++ /* (void)disable_dpio_irqs(ls_dev); */ ++ unregister_dpio_irq_handlers(ls_dev); ++ fsl_mc_free_irqs(ls_dev); ++} ++ ++static int __cold ++dpaa2_dpio_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct dpio_priv *priv; ++ int err; ++ ++ dev = &ls_dev->dev; ++ priv = dev_get_drvdata(dev); ++ ++ /* there is no implementation yet for pulling a DPIO object out of a ++ * running service (and they're currently always running). 
++ */ ++ dev_crit(dev, "DPIO unplugging is broken, the service holds onto it\n"); ++ ++ if (priv->thread) ++ kthread_stop(priv->thread); ++ else ++ dpio_teardown_irqs(ls_dev); ++ ++ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_mcportal; ++ } ++ ++ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, ++ &ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_open() failed\n"); ++ goto err_open; ++ } ++ ++ dev_set_drvdata(dev, NULL); ++ dpaa2_io_down(priv->io); ++ ++ err = 0; ++ ++ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); ++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); ++err_open: ++ fsl_mc_portal_free(ls_dev->mc_io); ++err_mcportal: ++ return err; ++} ++ ++static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpio", ++ .ver_major = DPIO_VER_MAJOR, ++ .ver_minor = DPIO_VER_MINOR ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_dpio_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_dpio_probe, ++ .remove = dpaa2_dpio_remove, ++ .match_id_table = dpaa2_dpio_match_id_table ++}; ++ ++static int dpio_driver_init(void) ++{ ++ int err; ++ ++ err = dpaa2_io_service_driver_init(); ++ if (!err) { ++ err = fsl_mc_driver_register(&dpaa2_dpio_driver); ++ if (err) ++ dpaa2_io_service_driver_exit(); ++ } ++ return err; ++} ++static void dpio_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dpaa2_dpio_driver); ++ dpaa2_io_service_driver_exit(); ++} ++module_init(dpio_driver_init); ++module_exit(dpio_driver_exit); +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h +new file mode 100644 +index 0000000..fe8d40b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h +@@ -0,0 +1,33 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++int dpaa2_io_service_driver_init(void); ++void dpaa2_io_service_driver_exit(void); +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c +new file mode 100644 +index 0000000..b63edd6 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c +@@ -0,0 +1,468 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include "../../include/mc-sys.h" ++#include "../../include/mc-cmd.h" ++#include "fsl_dpio.h" ++#include "fsl_dpio_cmd.h" ++ ++int dpio_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpio_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPIO_CMD_OPEN(cmd, dpio_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpio_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpio_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPIO_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpio_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpio_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpio_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int 
dpio_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpio_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpio_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpio_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpio_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpio_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpio_attr *attr) ++{ ++ 
struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++ ++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t sdest) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t *sdest) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); ++ ++ return 0; ++} ++ ++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int dpcon_id, ++ uint8_t *channel_index) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, ++ cmd_flags, ++ token); ++ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); ++ ++ return 0; ++} ++ ++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int dpcon_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, ++ cmd_flags, ++ token); ++ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio_service.c b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c +new file mode 100644 +index 0000000..ebcfd59 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c +@@ -0,0 +1,801 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include "fsl_qbman_portal.h" ++#include "../../include/mc.h" ++#include "../../include/fsl_dpaa2_io.h" ++#include "fsl_dpio.h" ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dpio-drv.h" ++#include "qbman_debug.h" ++ ++#define UNIMPLEMENTED() pr_err("FOO: %s unimplemented!\n", __func__) ++ ++#define MAGIC_SERVICE 0xabcd9876 ++#define MAGIC_OBJECT 0x1234fedc ++ ++struct dpaa2_io { ++ /* If MAGIC_SERVICE, this is a group of objects, use the 'service' part ++ * of the union. If MAGIC_OBJECT, use the 'object' part of the union. If ++ * it's neither, something got corrupted. This is mainly to satisfy ++ * dpaa2_io_from_registration(), which dereferences a caller- ++ * instantiated struct and so warrants a bug-checking step - hence the ++ * magic rather than a boolean. ++ */ ++ unsigned int magic; ++ atomic_t refs; ++ union { ++ struct dpaa2_io_service { ++ spinlock_t lock; ++ struct list_head list; ++ /* for targeted dpaa2_io selection */ ++ struct dpaa2_io *objects_by_cpu[NR_CPUS]; ++ cpumask_t cpus_notifications; ++ cpumask_t cpus_stashing; ++ int has_nonaffine; ++ /* slight hack. record the special case of the ++ * "default service", because that's the case where we ++ * need to avoid a kfree() ... */ ++ int is_defservice; ++ } service; ++ struct dpaa2_io_object { ++ struct dpaa2_io_desc dpio_desc; ++ struct qbman_swp_desc swp_desc; ++ struct qbman_swp *swp; ++ /* If the object is part of a service, this is it (and ++ * 'node' is linked into the service's list) */ ++ struct dpaa2_io *service; ++ struct list_head node; ++ /* Interrupt mask, as used with ++ * qbman_swp_interrupt_[gs]et_vanish(). This isn't ++ * locked, because the higher layer is driving all ++ * "ingress" processing. */ ++ uint32_t irq_mask; ++ /* As part of simplifying assumptions, we provide an ++ * irq-safe lock for each type of DPIO operation that ++ * isn't innately lockless. The selection algorithms ++ * (which are simplified) require this, whereas ++ * eventually adherence to cpu-affinity will presumably ++ * relax the locking requirements. 
*/ ++ spinlock_t lock_mgmt_cmd; ++ spinlock_t lock_notifications; ++ struct list_head notifications; ++ } object; ++ }; ++}; ++ ++struct dpaa2_io_store { ++ unsigned int max; ++ dma_addr_t paddr; ++ struct dpaa2_dq *vaddr; ++ void *alloced_addr; /* the actual return from kmalloc as it may ++ be adjusted for alignment purposes */ ++ unsigned int idx; /* position of the next-to-be-returned entry */ ++ struct qbman_swp *swp; /* portal used to issue VDQCR */ ++ struct device *dev; /* device used for DMA mapping */ ++}; ++ ++static struct dpaa2_io def_serv; ++ ++/**********************/ ++/* Internal functions */ ++/**********************/ ++ ++static void service_init(struct dpaa2_io *d, int is_defservice) ++{ ++ struct dpaa2_io_service *s = &d->service; ++ ++ d->magic = MAGIC_SERVICE; ++ atomic_set(&d->refs, 1); ++ spin_lock_init(&s->lock); ++ INIT_LIST_HEAD(&s->list); ++ cpumask_clear(&s->cpus_notifications); ++ cpumask_clear(&s->cpus_stashing); ++ s->has_nonaffine = 0; ++ s->is_defservice = is_defservice; ++} ++ ++/* Selection algorithms, stupid ones at that. These are to handle the case where ++ * the given dpaa2_io is a service, by choosing the non-service dpaa2_io within ++ * it to use. ++ */ ++static struct dpaa2_io *_service_select_by_cpu_slow(struct dpaa2_io_service *ss, ++ int cpu) ++{ ++ struct dpaa2_io *o; ++ unsigned long irqflags; ++ ++ spin_lock_irqsave(&ss->lock, irqflags); ++ /* TODO: this is about the dumbest and slowest selection algorithm you ++ * could imagine. (We're looking for something working first, and ++ * something efficient second...) ++ */ ++ list_for_each_entry(o, &ss->list, object.node) ++ if (o->object.dpio_desc.cpu == cpu) ++ goto found; ++ ++ /* No joy. Try the first nonaffine portal (bleurgh) */ ++ if (ss->has_nonaffine) ++ list_for_each_entry(o, &ss->list, object.node) ++ if (!o->object.dpio_desc.stash_affinity) ++ goto found; ++ ++ /* No joy. Try the first object. Told you it was horrible. */ ++ if (!list_empty(&ss->list)) ++ o = list_entry(ss->list.next, struct dpaa2_io, object.node); ++ else ++ o = NULL; ++ ++found: ++ spin_unlock_irqrestore(&ss->lock, irqflags); ++ return o; ++} ++ ++static struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, int cpu) ++{ ++ struct dpaa2_io_service *ss; ++ unsigned long irqflags; ++ ++ if (!d) ++ d = &def_serv; ++ else if (d->magic == MAGIC_OBJECT) ++ return d; ++ BUG_ON(d->magic != MAGIC_SERVICE); ++ ++ ss = &d->service; ++ ++ /* If cpu==-1, choose the current cpu, with no guarantees about ++ * potentially being migrated away. ++ */ ++ if (unlikely(cpu < 0)) { ++ spin_lock_irqsave(&ss->lock, irqflags); ++ cpu = smp_processor_id(); ++ spin_unlock_irqrestore(&ss->lock, irqflags); ++ ++ return _service_select_by_cpu_slow(ss, cpu); ++ } ++ ++ /* If a specific cpu was requested, pick it up immediately */ ++ return ss->objects_by_cpu[cpu]; ++} ++ ++static inline struct dpaa2_io *service_select_any(struct dpaa2_io *d) ++{ ++ struct dpaa2_io_service *ss; ++ struct dpaa2_io *o; ++ unsigned long irqflags; ++ ++ if (!d) ++ d = &def_serv; ++ else if (d->magic == MAGIC_OBJECT) ++ return d; ++ BUG_ON(d->magic != MAGIC_SERVICE); ++ ++ /* ++ * Lock the service, looking for the first DPIO object in the list, ++ * ignore everything else about that DPIO, and choose it to do the ++ * operation! As a post-selection step, move the DPIO to the end of ++ * the list. 
It should improve load-balancing a little, although it ++ * might also incur a performance hit, given that the lock is *global* ++ * and this may be called on the fast-path... ++ */ ++ ss = &d->service; ++ spin_lock_irqsave(&ss->lock, irqflags); ++ if (!list_empty(&ss->list)) { ++ o = list_entry(ss->list.next, struct dpaa2_io, object.node); ++ list_del(&o->object.node); ++ list_add_tail(&o->object.node, &ss->list); ++ } else ++ o = NULL; ++ spin_unlock_irqrestore(&ss->lock, irqflags); ++ return o; ++} ++ ++/* If the context is not preemptible, select the service affine to the ++ * current cpu. Otherwise, "select any". ++ */ ++static inline struct dpaa2_io *_service_select(struct dpaa2_io *d) ++{ ++ struct dpaa2_io *temp = d; ++ ++ if (likely(!preemptible())) { ++ d = service_select_by_cpu(d, smp_processor_id()); ++ if (likely(d)) ++ return d; ++ } ++ return service_select_any(temp); ++} ++ ++/**********************/ ++/* Exported functions */ ++/**********************/ ++ ++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) ++{ ++ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ struct dpaa2_io_object *o = &ret->object; ++ ++ if (!ret) ++ return NULL; ++ ret->magic = MAGIC_OBJECT; ++ atomic_set(&ret->refs, 1); ++ o->dpio_desc = *desc; ++ o->swp_desc.cena_bar = o->dpio_desc.regs_cena; ++ o->swp_desc.cinh_bar = o->dpio_desc.regs_cinh; ++ o->swp_desc.qman_version = o->dpio_desc.qman_version; ++ o->swp = qbman_swp_init(&o->swp_desc); ++ o->service = NULL; ++ if (!o->swp) { ++ kfree(ret); ++ return NULL; ++ } ++ INIT_LIST_HEAD(&o->node); ++ spin_lock_init(&o->lock_mgmt_cmd); ++ spin_lock_init(&o->lock_notifications); ++ INIT_LIST_HEAD(&o->notifications); ++ if (!o->dpio_desc.has_irq) ++ qbman_swp_interrupt_set_vanish(o->swp, 0xffffffff); ++ else { ++ /* For now only enable DQRR interrupts */ ++ qbman_swp_interrupt_set_trigger(o->swp, ++ QBMAN_SWP_INTERRUPT_DQRI); ++ } ++ qbman_swp_interrupt_clear_status(o->swp, 0xffffffff); ++ if (o->dpio_desc.receives_notifications) ++ qbman_swp_push_set(o->swp, 0, 1); ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_create); ++ ++struct dpaa2_io *dpaa2_io_create_service(void) ++{ ++ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ ++ if (ret) ++ service_init(ret, 0); ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_create_service); ++ ++struct dpaa2_io *dpaa2_io_default_service(void) ++{ ++ atomic_inc(&def_serv.refs); ++ return &def_serv; ++} ++EXPORT_SYMBOL(dpaa2_io_default_service); ++ ++void dpaa2_io_down(struct dpaa2_io *d) ++{ ++ if (!atomic_dec_and_test(&d->refs)) ++ return; ++ if (d->magic == MAGIC_SERVICE) { ++ BUG_ON(!list_empty(&d->service.list)); ++ if (d->service.is_defservice) ++ /* avoid the kfree()! 
*/ ++ return; ++ } else { ++ BUG_ON(d->magic != MAGIC_OBJECT); ++ BUG_ON(d->object.service); ++ BUG_ON(!list_empty(&d->object.notifications)); ++ } ++ kfree(d); ++} ++EXPORT_SYMBOL(dpaa2_io_down); ++ ++int dpaa2_io_service_add(struct dpaa2_io *s, struct dpaa2_io *o) ++{ ++ struct dpaa2_io_service *ss = &s->service; ++ struct dpaa2_io_object *oo = &o->object; ++ int res = -EINVAL; ++ ++ if ((s->magic != MAGIC_SERVICE) || (o->magic != MAGIC_OBJECT)) ++ return res; ++ atomic_inc(&o->refs); ++ atomic_inc(&s->refs); ++ spin_lock(&ss->lock); ++ /* 'obj' must not already be associated with a service */ ++ if (!oo->service) { ++ oo->service = s; ++ list_add(&oo->node, &ss->list); ++ if (oo->dpio_desc.receives_notifications) { ++ cpumask_set_cpu(oo->dpio_desc.cpu, ++ &ss->cpus_notifications); ++ /* Update the fast-access array */ ++ ss->objects_by_cpu[oo->dpio_desc.cpu] = ++ container_of(oo, struct dpaa2_io, object); ++ } ++ if (oo->dpio_desc.stash_affinity) ++ cpumask_set_cpu(oo->dpio_desc.cpu, ++ &ss->cpus_stashing); ++ if (!oo->dpio_desc.stash_affinity) ++ ss->has_nonaffine = 1; ++ /* success */ ++ res = 0; ++ } ++ spin_unlock(&ss->lock); ++ if (res) { ++ dpaa2_io_down(s); ++ dpaa2_io_down(o); ++ } ++ return res; ++} ++EXPORT_SYMBOL(dpaa2_io_service_add); ++ ++int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc) ++{ ++ if (obj->magic == MAGIC_SERVICE) ++ return -EINVAL; ++ BUG_ON(obj->magic != MAGIC_OBJECT); ++ *desc = obj->object.dpio_desc; ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_get_descriptor); ++ ++#define DPAA_POLL_MAX 32 ++ ++int dpaa2_io_poll(struct dpaa2_io *obj) ++{ ++ const struct dpaa2_dq *dq; ++ struct qbman_swp *swp; ++ int max = 0; ++ ++ if (obj->magic != MAGIC_OBJECT) ++ return -EINVAL; ++ swp = obj->object.swp; ++ dq = qbman_swp_dqrr_next(swp); ++ while (dq) { ++ if (qbman_result_is_SCN(dq)) { ++ struct dpaa2_io_notification_ctx *ctx; ++ uint64_t q64; ++ ++ q64 = qbman_result_SCN_ctx(dq); ++ ctx = (void *)q64; ++ ctx->cb(ctx); ++ } else ++ pr_crit("Unrecognised/ignored DQRR entry\n"); ++ qbman_swp_dqrr_consume(swp, dq); ++ ++max; ++ if (max > DPAA_POLL_MAX) ++ return 0; ++ dq = qbman_swp_dqrr_next(swp); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_poll); ++ ++int dpaa2_io_irq(struct dpaa2_io *obj) ++{ ++ struct qbman_swp *swp; ++ uint32_t status; ++ ++ if (obj->magic != MAGIC_OBJECT) ++ return -EINVAL; ++ swp = obj->object.swp; ++ status = qbman_swp_interrupt_read_status(swp); ++ if (!status) ++ return IRQ_NONE; ++ dpaa2_io_poll(obj); ++ qbman_swp_interrupt_clear_status(swp, status); ++ qbman_swp_interrupt_set_inhibit(swp, 0); ++ return IRQ_HANDLED; ++} ++EXPORT_SYMBOL(dpaa2_io_irq); ++ ++int dpaa2_io_pause_poll(struct dpaa2_io *obj) ++{ ++ UNIMPLEMENTED(); ++ return -EINVAL; ++} ++EXPORT_SYMBOL(dpaa2_io_pause_poll); ++ ++int dpaa2_io_resume_poll(struct dpaa2_io *obj) ++{ ++ UNIMPLEMENTED(); ++ return -EINVAL; ++} ++EXPORT_SYMBOL(dpaa2_io_resume_poll); ++ ++void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask) ++{ ++ struct dpaa2_io_service *ss = &s->service; ++ ++ BUG_ON(s->magic != MAGIC_SERVICE); ++ cpumask_copy(mask, &ss->cpus_notifications); ++} ++EXPORT_SYMBOL(dpaa2_io_service_notifications); ++ ++void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask) ++{ ++ struct dpaa2_io_service *ss = &s->service; ++ ++ BUG_ON(s->magic != MAGIC_SERVICE); ++ cpumask_copy(mask, &ss->cpus_stashing); ++} ++EXPORT_SYMBOL(dpaa2_io_service_stashing); ++ ++int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s) ++{ ++ struct 
dpaa2_io_service *ss = &s->service; ++ ++ BUG_ON(s->magic != MAGIC_SERVICE); ++ return ss->has_nonaffine; ++} ++EXPORT_SYMBOL(dpaa2_io_service_has_nonaffine); ++ ++int dpaa2_io_service_register(struct dpaa2_io *d, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ unsigned long irqflags; ++ ++ d = service_select_by_cpu(d, ctx->desired_cpu); ++ if (!d) ++ return -ENODEV; ++ ctx->dpio_id = d->object.dpio_desc.dpio_id; ++ ctx->qman64 = (uint64_t)ctx; ++ ctx->dpio_private = d; ++ spin_lock_irqsave(&d->object.lock_notifications, irqflags); ++ list_add(&ctx->node, &d->object.notifications); ++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); ++ if (ctx->is_cdan) ++ /* Enable the generation of CDAN notifications */ ++ qbman_swp_CDAN_set_context_enable(d->object.swp, ++ (uint16_t)ctx->id, ++ ctx->qman64); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_service_register); ++ ++int dpaa2_io_service_deregister(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_io *d = ctx->dpio_private; ++ unsigned long irqflags; ++ ++ if (!service) ++ service = &def_serv; ++ BUG_ON((service != d) && (service != d->object.service)); ++ if (ctx->is_cdan) ++ qbman_swp_CDAN_disable(d->object.swp, ++ (uint16_t)ctx->id); ++ spin_lock_irqsave(&d->object.lock_notifications, irqflags); ++ list_del(&ctx->node); ++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_service_deregister); ++ ++int dpaa2_io_service_rearm(struct dpaa2_io *d, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ unsigned long irqflags; ++ int err; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ if (ctx->is_cdan) ++ err = qbman_swp_CDAN_enable(d->object.swp, (uint16_t)ctx->id); ++ else ++ err = qbman_swp_fq_schedule(d->object.swp, ctx->id); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_rearm); ++ ++int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, ++ struct dpaa2_io **io) ++{ ++ struct dpaa2_io_notification_ctx *tmp; ++ struct dpaa2_io *d = ctx->dpio_private; ++ unsigned long irqflags; ++ int ret = 0; ++ ++ BUG_ON(d->magic != MAGIC_OBJECT); ++ /* Iterate the notifications associated with 'd' looking for a match. If ++ * not, we've been passed an unregistered ctx! 
*/ ++ spin_lock_irqsave(&d->object.lock_notifications, irqflags); ++ list_for_each_entry(tmp, &d->object.notifications, node) ++ if (tmp == ctx) ++ goto found; ++ ret = -EINVAL; ++found: ++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); ++ if (!ret) { ++ atomic_inc(&d->refs); ++ *io = d; ++ } ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_from_registration); ++ ++int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, ++ struct dpaa2_io **ret) ++{ ++ if (cpu == -1) ++ *ret = service_select_any(service); ++ else ++ *ret = service_select_by_cpu(service, cpu); ++ if (*ret) { ++ atomic_inc(&(*ret)->refs); ++ return 0; ++ } ++ return -ENODEV; ++} ++EXPORT_SYMBOL(dpaa2_io_service_get_persistent); ++ ++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, ++ struct dpaa2_io_store *s) ++{ ++ struct qbman_pull_desc pd; ++ int err; ++ ++ qbman_pull_desc_clear(&pd); ++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); ++ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); ++ qbman_pull_desc_set_fq(&pd, fqid); ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ s->swp = d->object.swp; ++ err = qbman_swp_pull(d->object.swp, &pd); ++ if (err) ++ s->swp = NULL; ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_pull_fq); ++ ++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, ++ struct dpaa2_io_store *s) ++{ ++ struct qbman_pull_desc pd; ++ int err; ++ ++ qbman_pull_desc_clear(&pd); ++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); ++ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); ++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio); ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ s->swp = d->object.swp; ++ err = qbman_swp_pull(d->object.swp, &pd); ++ if (err) ++ s->swp = NULL; ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_pull_channel); ++ ++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, ++ uint32_t fqid, ++ const struct dpaa2_fd *fd) ++{ ++ struct qbman_eq_desc ed; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ qbman_eq_desc_clear(&ed); ++ qbman_eq_desc_set_no_orp(&ed, 0); ++ qbman_eq_desc_set_fq(&ed, fqid); ++ return qbman_swp_enqueue(d->object.swp, &ed, ++ (const struct qbman_fd *)fd); ++} ++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); ++ ++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, ++ uint32_t qdid, uint8_t prio, uint16_t qdbin, ++ const struct dpaa2_fd *fd) ++{ ++ struct qbman_eq_desc ed; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ qbman_eq_desc_clear(&ed); ++ qbman_eq_desc_set_no_orp(&ed, 0); ++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio); ++ return qbman_swp_enqueue(d->object.swp, &ed, ++ (const struct qbman_fd *)fd); ++} ++EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd); ++ ++int dpaa2_io_service_release(struct dpaa2_io *d, ++ uint32_t bpid, ++ const uint64_t *buffers, ++ unsigned int num_buffers) ++{ ++ struct qbman_release_desc rd; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ qbman_release_desc_clear(&rd); ++ qbman_release_desc_set_bpid(&rd, bpid); ++ return qbman_swp_release(d->object.swp, &rd, buffers, num_buffers); ++} ++EXPORT_SYMBOL(dpaa2_io_service_release); ++ ++int dpaa2_io_service_acquire(struct dpaa2_io *d, ++ uint32_t bpid, ++ uint64_t *buffers, ++ unsigned int num_buffers) ++{ ++ unsigned long irqflags; ++ int err; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ err = qbman_swp_acquire(d->object.swp, bpid, buffers, 
num_buffers); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_acquire); ++ ++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, ++ struct device *dev) ++{ ++ struct dpaa2_io_store *ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ size_t size; ++ ++ BUG_ON(!max_frames || (max_frames > 16)); ++ if (!ret) ++ return NULL; ++ ret->max = max_frames; ++ size = max_frames * sizeof(struct dpaa2_dq) + 64; ++ ret->alloced_addr = kmalloc(size, GFP_KERNEL); ++ if (!ret->alloced_addr) { ++ kfree(ret); ++ return NULL; ++ } ++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); ++ ret->paddr = dma_map_single(dev, ret->vaddr, ++ sizeof(struct dpaa2_dq) * max_frames, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, ret->paddr)) { ++ kfree(ret->alloced_addr); ++ kfree(ret); ++ return NULL; ++ } ++ ret->idx = 0; ++ ret->dev = dev; ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_store_create); ++ ++void dpaa2_io_store_destroy(struct dpaa2_io_store *s) ++{ ++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max, ++ DMA_FROM_DEVICE); ++ kfree(s->alloced_addr); ++ kfree(s); ++} ++EXPORT_SYMBOL(dpaa2_io_store_destroy); ++ ++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) ++{ ++ int match; ++ struct dpaa2_dq *ret = &s->vaddr[s->idx]; ++ ++ match = qbman_result_has_new_result(s->swp, ret); ++ if (!match) { ++ *is_last = 0; ++ return NULL; ++ } ++ BUG_ON(!qbman_result_is_DQ(ret)); ++ s->idx++; ++ if (dpaa2_dq_is_pull_complete(ret)) { ++ *is_last = 1; ++ s->idx = 0; ++ /* If we get an empty dequeue result to terminate a zero-results ++ * vdqcr, return NULL to the caller rather than expecting him to ++ * check non-NULL results every time. */ ++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) ++ ret = NULL; ++ } else ++ *is_last = 0; ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_store_next); ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, ++ uint32_t *fcnt, uint32_t *bcnt) ++{ ++ struct qbman_attr state; ++ struct qbman_swp *swp; ++ unsigned long irqflags; ++ int ret; ++ ++ d = service_select_any(d); ++ if (!d) ++ return -ENODEV; ++ ++ swp = d->object.swp; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ ret = qbman_fq_query_state(swp, fqid, &state); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ if (ret) ++ return ret; ++ *fcnt = qbman_fq_state_frame_count(&state); ++ *bcnt = qbman_fq_state_byte_count(&state); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_query_fq_count); ++ ++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, ++ uint32_t *num) ++{ ++ struct qbman_attr state; ++ struct qbman_swp *swp; ++ unsigned long irqflags; ++ int ret; ++ ++ d = service_select_any(d); ++ if (!d) ++ return -ENODEV; ++ ++ swp = d->object.swp; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ ret = qbman_bp_query(swp, bpid, &state); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ if (ret) ++ return ret; ++ *num = qbman_bp_info_num_free_bufs(&state); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_query_bp_count); ++ ++#endif ++ ++/* module init/exit hooks called from dpio-drv.c. These are declared in ++ * dpio-drv.h. 
++ */ ++int dpaa2_io_service_driver_init(void) ++{ ++ service_init(&def_serv, 1); ++ return 0; ++} ++ ++void dpaa2_io_service_driver_exit(void) ++{ ++ if (atomic_read(&def_serv.refs) != 1) ++ pr_err("default DPIO service leaves dangling DPIO objects!\n"); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h +new file mode 100644 +index 0000000..88a492f +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h +@@ -0,0 +1,460 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPIO_H ++#define __FSL_DPIO_H ++ ++/* Data Path I/O Portal API ++ * Contains initialization APIs and runtime control APIs for DPIO ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpio_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpio_id: DPIO unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpio_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpio_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpio_id, ++ uint16_t *token); ++ ++/** ++ * dpio_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * enum dpio_channel_mode - DPIO notification channel mode ++ * @DPIO_NO_CHANNEL: No support for notification channel ++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a ++ * dedicated channel in the DPIO; user should point the queue's ++ * destination in the relevant interface to this DPIO ++ */ ++enum dpio_channel_mode { ++ DPIO_NO_CHANNEL = 0, ++ DPIO_LOCAL_CHANNEL = 1, ++}; ++ ++/** ++ * struct dpio_cfg - Structure representing DPIO configuration ++ * @channel_mode: Notification channel mode ++ * @num_priorities: Number of priorities for the notification channel (1-8); ++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' ++ */ ++struct dpio_cfg { ++ enum dpio_channel_mode channel_mode; ++ uint8_t num_priorities; ++}; ++ ++/** ++ * dpio_create() - Create the DPIO object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPIO object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpio_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpio_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpio_destroy() - Destroy the DPIO object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_enable() - Enable the DPIO, allow I/O portal operations. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_is_enabled() - Check if the DPIO is enabled. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpio_reset() - Reset the DPIO, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_set_stashing_destination() - Set the stashing destination. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @sdest: stashing destination value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t sdest); ++ ++/** ++ * dpio_get_stashing_destination() - Get the stashing destination.. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @sdest: Returns the stashing destination value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t *sdest); ++ ++/** ++ * dpio_add_static_dequeue_channel() - Add a static dequeue channel. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @dpcon_id: DPCON object ID ++ * @channel_index: Returned channel index to be used in qbman API ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int dpcon_id, ++ uint8_t *channel_index); ++ ++/** ++ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @dpcon_id: DPCON object ID ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int dpcon_id); ++ ++/** ++ * DPIO IRQ Index and Events ++ */ ++ ++/** ++ * Irq software-portal index ++ */ ++#define DPIO_IRQ_SWP_INDEX 0 ++ ++/** ++ * struct dpio_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpio_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpio_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpio_irq_cfg *irq_cfg); ++ ++/** ++ * dpio_get_irq() - Get IRQ information from the DPIO. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpio_irq_cfg *irq_cfg); ++ ++/** ++ * dpio_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpio_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpio_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpio_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpio_get_irq_status() - Get the current status of any pending interrupts. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpio_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpio_attr - Structure representing DPIO attributes ++ * @id: DPIO object ID ++ * @version: DPIO version ++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area ++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area ++ * @qbman_portal_id: Software portal ID ++ * @channel_mode: Notification channel mode ++ * @num_priorities: Number of priorities for the notification channel (1-8); ++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' ++ * @qbman_version: QBMAN version ++ */ ++struct dpio_attr { ++ int id; ++ /** ++ * struct version - DPIO version ++ * @major: DPIO major version ++ * @minor: DPIO minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint64_t qbman_portal_ce_offset; ++ uint64_t qbman_portal_ci_offset; ++ uint16_t qbman_portal_id; ++ enum dpio_channel_mode channel_mode; ++ uint8_t num_priorities; ++ uint32_t qbman_version; ++}; ++ ++/** ++ * dpio_get_attributes() - Retrieve DPIO attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpio_attr *attr); ++#endif /* __FSL_DPIO_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h +new file mode 100644 +index 0000000..f339cd6 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h +@@ -0,0 +1,184 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPIO_CMD_H ++#define _FSL_DPIO_CMD_H ++ ++/* DPIO Version */ ++#define DPIO_VER_MAJOR 3 ++#define DPIO_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPIO_CMDID_CLOSE 0x800 ++#define DPIO_CMDID_OPEN 0x803 ++#define DPIO_CMDID_CREATE 0x903 ++#define DPIO_CMDID_DESTROY 0x900 ++ ++#define DPIO_CMDID_ENABLE 0x002 ++#define DPIO_CMDID_DISABLE 0x003 ++#define DPIO_CMDID_GET_ATTR 0x004 ++#define DPIO_CMDID_RESET 0x005 ++#define DPIO_CMDID_IS_ENABLED 0x006 ++ ++#define DPIO_CMDID_SET_IRQ 0x010 ++#define DPIO_CMDID_GET_IRQ 0x011 ++#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPIO_CMDID_SET_IRQ_MASK 0x014 ++#define DPIO_CMDID_GET_IRQ_MASK 0x015 ++#define DPIO_CMDID_GET_IRQ_STATUS 0x016 ++#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPIO_CMDID_SET_STASHING_DEST 0x120 ++#define DPIO_CMDID_GET_STASHING_DEST 0x121 ++#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 ++#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_OPEN(cmd, dpio_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_CREATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ ++ cfg->channel_mode);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, 
uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ ++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ ++ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ ++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ ++ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) ++#endif /* _FSL_DPIO_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h +new file mode 100644 +index 0000000..2874ff8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h +@@ -0,0 +1,123 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_QBMAN_BASE_H ++#define _FSL_QBMAN_BASE_H ++ ++/** ++ * struct qbman_block_desc - qbman block descriptor structure ++ * ++ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not ++ * control this QBMan instance, these values may simply be place-holders. The ++ * idea is simply that we be able to distinguish between them, eg. so that SWP ++ * descriptors can identify which QBMan instance they belong to. ++ */ ++struct qbman_block_desc { ++ void *ccsr_reg_bar; /* CCSR register map */ ++ int irq_rerr; /* Recoverable error interrupt line */ ++ int irq_nrerr; /* Non-recoverable error interrupt line */ ++}; ++ ++/** ++ * struct qbman_swp_desc - qbman software portal descriptor structure ++ * ++ * Descriptor for a QBMan software portal, expressed in terms that make sense to ++ * the user context. Ie. on MC, this information is likely to be true-physical, ++ * and instantiated statically at compile-time. On GPP, this information is ++ * likely to be obtained via "discovery" over a partition's "layerscape bus" ++ * (ie. in response to a MC portal command), and would take into account any ++ * virtualisation of the GPP user's address space and/or interrupt numbering. ++ */ ++struct qbman_swp_desc { ++ const struct qbman_block_desc *block; /* The QBMan instance */ ++ void *cena_bar; /* Cache-enabled portal register map */ ++ void *cinh_bar; /* Cache-inhibited portal register map */ ++ uint32_t qman_version; ++}; ++ ++/* Driver object for managing a QBMan portal */ ++struct qbman_swp; ++ ++/** ++ * struct qbman_fd - basci structure for qbman frame descriptor ++ * ++ * Place-holder for FDs, we represent it via the simplest form that we need for ++ * now. Different overlays may be needed to support different options, etc. 
(It ++ * is impractical to define One True Struct, because the resulting encoding ++ * routines (lots of read-modify-writes) would be worst-case performance whether ++ * or not circumstances required them.) ++ * ++ * Note, as with all data-structures exchanged between software and hardware (be ++ * they located in the portal register map or DMA'd to and from main-memory), ++ * the driver ensures that the caller of the driver API sees the data-structures ++ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words ++ * contained within this structure are represented in host-endianness, even if ++ * hardware always treats them as little-endian. As such, if any of these fields ++ * are interpreted in a binary (rather than numerical) fashion by hardware ++ * blocks (eg. accelerators), then the user should be careful. We illustrate ++ * with an example; ++ * ++ * Suppose the desired behaviour of an accelerator is controlled by the "frc" ++ * field of the FDs that are sent to it. Suppose also that the behaviour desired ++ * by the user corresponds to an "frc" value which is expressed as the literal ++ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit ++ * value in which 0xfe is the first byte and 0xba is the last byte, and as ++ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If ++ * the software is little-endian also, this can simply be achieved by setting ++ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set ++ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is ++ * to treat the 32-bit words as numerical values, in which the offset of a field ++ * from the beginning of the first byte (as required or generated by hardware) ++ * is numerically encoded by a left-shift (ie. by raising the field to a ++ * corresponding power of 2). Ie. in the current example, software could set ++ * "frc" in the following way, and it would work correctly on both little-endian ++ * and big-endian operation; ++ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24); ++ */ ++struct qbman_fd { ++ union { ++ uint32_t words[8]; ++ struct qbman_fd_simple { ++ uint32_t addr_lo; ++ uint32_t addr_hi; ++ uint32_t len; ++ /* offset in the MS 16 bits, BPID in the LS 16 bits */ ++ uint32_t bpid_offset; ++ uint32_t frc; /* frame context */ ++ /* "err", "va", "cbmt", "asal", [...] */ ++ uint32_t ctrl; ++ /* flow context */ ++ uint32_t flc_lo; ++ uint32_t flc_hi; ++ } simple; ++ }; ++}; ++ ++#endif /* !_FSL_QBMAN_BASE_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h +new file mode 100644 +index 0000000..c9e543e +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h +@@ -0,0 +1,753 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. 
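/*
 * Editor's aside (not part of the patch): a sketch of populating the
 * "simple" overlay of struct qbman_fd defined above using plain numerical
 * stores, so the result is host-endianness-safe in the way the header
 * comment recommends. The helper name is invented; splitting a dma_addr_t
 * with lower_32_bits()/upper_32_bits() is the usual kernel idiom.
 */
static void example_fill_fd(struct qbman_fd *fd, dma_addr_t buf, uint32_t len,
                            uint16_t bpid, uint16_t offset)
{
        memset(fd, 0, sizeof(*fd));
        fd->simple.addr_lo = lower_32_bits(buf);
        fd->simple.addr_hi = upper_32_bits(buf);
        fd->simple.len = len;
        /* offset in the MS 16 bits, BPID in the LS 16 bits */
        fd->simple.bpid_offset = ((uint32_t)offset << 16) | bpid;
}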
++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_QBMAN_PORTAL_H ++#define _FSL_QBMAN_PORTAL_H ++ ++#include "fsl_qbman_base.h" ++ ++/** ++ * qbman_swp_init() - Create a functional object representing the given ++ * QBMan portal descriptor. ++ * @d: the given qbman swp descriptor ++ * ++ * Return qbman_swp portal object for success, NULL if the object cannot ++ * be created. ++ */ ++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); ++/** ++ * qbman_swp_finish() - Create and destroy a functional object representing ++ * the given QBMan portal descriptor. ++ * @p: the qbman_swp object to be destroyed. ++ * ++ */ ++void qbman_swp_finish(struct qbman_swp *p); ++ ++/** ++ * qbman_swp_get_desc() - Get the descriptor of the given portal object. ++ * @p: the given portal object. ++ * ++ * Return the descriptor for this portal. ++ */ ++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p); ++ ++ /**************/ ++ /* Interrupts */ ++ /**************/ ++ ++/* See the QBMan driver API documentation for details on the interrupt ++ * mechanisms. */ ++#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001) ++#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002) ++#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004) ++#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008) ++#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010) ++#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020) ++ ++/** ++ * qbman_swp_interrupt_get_vanish() ++ * qbman_swp_interrupt_set_vanish() - Get/Set the data in software portal ++ * interrupt status disable register. ++ * @p: the given software portal object. ++ * @mask: The mask to set in SWP_IDSR register. ++ * ++ * Return the settings in SWP_ISDR register for Get function. ++ */ ++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p); ++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask); ++ ++/** ++ * qbman_swp_interrupt_read_status() ++ * qbman_swp_interrupt_clear_status() - Get/Set the data in software portal ++ * interrupt status register. ++ * @p: the given software portal object. ++ * @mask: The mask to set in SWP_ISR register. ++ * ++ * Return the settings in SWP_ISR register for Get function. 
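/*
 * Editor's aside (not part of the patch): the usual read/handle/clear
 * pattern for the SWP interrupt sources listed above, using the status
 * accessors documented here (their prototypes follow just below). The
 * handler name is invented and the DQRR draining step is only hinted at.
 */
static void example_swp_irq_handler(struct qbman_swp *p)
{
        uint32_t status = qbman_swp_interrupt_read_status(p);

        if (status & QBMAN_SWP_INTERRUPT_DQRI) {
                /* ... drain the DQRR with qbman_swp_dqrr_next()/consume() ... */
        }

        /* Acknowledge the causes we observed */
        qbman_swp_interrupt_clear_status(p, status);
}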
++ * ++ */ ++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p); ++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask); ++ ++/** ++ * qbman_swp_interrupt_get_trigger() ++ * qbman_swp_interrupt_set_trigger() - Get/Set the data in software portal ++ * interrupt enable register. ++ * @p: the given software portal object. ++ * @mask: The mask to set in SWP_IER register. ++ * ++ * Return the settings in SWP_IER register for Get function. ++ */ ++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p); ++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask); ++ ++/** ++ * qbman_swp_interrupt_get_inhibit() ++ * qbman_swp_interrupt_set_inhibit() - Set/Set the data in software portal ++ * interrupt inhibit register. ++ * @p: the given software portal object. ++ * @mask: The mask to set in SWP_IIR register. ++ * ++ * Return the settings in SWP_IIR register for Get function. ++ */ ++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); ++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); ++ ++ /************/ ++ /* Dequeues */ ++ /************/ ++ ++/* See the QBMan driver API documentation for details on the enqueue ++ * mechanisms. NB: the use of a 'dpaa2_' prefix for this type is because it is ++ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan ++ * driver. The structure is defined in the DPIO interface, but to avoid circular ++ * dependencies we just pre/re-declare it here opaquely. */ ++struct dpaa2_dq; ++ ++/* ------------------- */ ++/* Push-mode dequeuing */ ++/* ------------------- */ ++ ++/** ++ * qbman_swp_push_get() - Get the push dequeue setup. ++ * @p: the software portal object. ++ * @channel_idx: the channel index to query. ++ * @enabled: returned boolean to show whether the push dequeue is enabled for ++ * the given channel. ++ */ ++void qbman_swp_push_get(struct qbman_swp *, uint8_t channel_idx, int *enabled); ++/** ++ * qbman_swp_push_set() - Enable or disable push dequeue. ++ * @p: the software portal object. ++ * @channel_idx: the channel index.. ++ * @enable: enable or disable push dequeue. ++ * ++ * The user of a portal can enable and disable push-mode dequeuing of up to 16 ++ * channels independently. It does not specify this toggling by channel IDs, but ++ * rather by specifying the index (from 0 to 15) that has been mapped to the ++ * desired channel. ++ */ ++void qbman_swp_push_set(struct qbman_swp *, uint8_t channel_idx, int enable); ++ ++/* ------------------- */ ++/* Pull-mode dequeuing */ ++/* ------------------- */ ++ ++/** ++ * struct qbman_pull_desc - the structure for pull dequeue descriptor ++ */ ++struct qbman_pull_desc { ++ uint32_t dont_manipulate_directly[6]; ++}; ++ ++enum qbman_pull_type_e { ++ /* dequeue with priority precedence, respect intra-class scheduling */ ++ qbman_pull_type_prio = 1, ++ /* dequeue with active FQ precedence, respect ICS */ ++ qbman_pull_type_active, ++ /* dequeue with active FQ precedence, no ICS */ ++ qbman_pull_type_active_noics ++}; ++ ++/** ++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to ++ * default/starting state. ++ * @d: the pull dequeue descriptor to be cleared. ++ */ ++void qbman_pull_desc_clear(struct qbman_pull_desc *d); ++ ++/** ++ * qbman_pull_desc_set_storage()- Set the pull dequeue storage ++ * @d: the pull dequeue descriptor to be set. ++ * @storage: the pointer of the memory to store the dequeue result. ++ * @storage_phys: the physical address of the storage memory. 
++ * @stash: to indicate whether write allocate is enabled. ++ * ++ * If not called, or if called with 'storage' as NULL, the result pull dequeues ++ * will produce results to DQRR. If 'storage' is non-NULL, then results are ++ * produced to the given memory location (using the physical/DMA address which ++ * the caller provides in 'storage_phys'), and 'stash' controls whether or not ++ * those writes to main-memory express a cache-warming attribute. ++ */ ++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, ++ struct dpaa2_dq *storage, ++ dma_addr_t storage_phys, ++ int stash); ++/** ++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued. ++ * @d: the pull dequeue descriptor to be set. ++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive. ++ */ ++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes); ++ ++/** ++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues. ++ * @fqid: the frame queue index of the given FQ. ++ * ++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues. ++ * @wqid: composed of channel id and wqid within the channel. ++ * @dct: the dequeue command type. ++ * ++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command ++ * dequeues. ++ * @chid: the channel id to be dequeued. ++ * @dct: the dequeue command type. ++ * ++ * Exactly one of the following descriptor "actions" should be set. (Calling any ++ * one of these will replace the effect of any prior call to one of these.) ++ * - pull dequeue from the given frame queue (FQ) ++ * - pull dequeue from any FQ in the given work queue (WQ) ++ * - pull dequeue from any FQ in any WQ in the given channel ++ */ ++void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid); ++void qbman_pull_desc_set_wq(struct qbman_pull_desc *, uint32_t wqid, ++ enum qbman_pull_type_e dct); ++void qbman_pull_desc_set_channel(struct qbman_pull_desc *, uint32_t chid, ++ enum qbman_pull_type_e dct); ++ ++/** ++ * qbman_swp_pull() - Issue the pull dequeue command ++ * @s: the software portal object. ++ * @d: the software portal descriptor which has been configured with ++ * the set of qbman_pull_desc_set_*() calls. ++ * ++ * Return 0 for success, and -EBUSY if the software portal is not ready ++ * to do pull dequeue. ++ */ ++int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *d); ++ ++/* -------------------------------- */ ++/* Polling DQRR for dequeue results */ ++/* -------------------------------- */ ++ ++/** ++ * qbman_swp_dqrr_next() - Get an valid DQRR entry. ++ * @s: the software portal object. ++ * ++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry ++ * only once, so repeated calls can return a sequence of DQRR entries, without ++ * requiring they be consumed immediately or in any particular order. ++ */ ++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); ++ ++/** ++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from ++ * qbman_swp_dqrr_next(). ++ * @s: the software portal object. ++ * @dq: the DQRR entry to be consumed. 
++ */ ++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); ++ ++/* ------------------------------------------------- */ ++/* Polling user-provided storage for dequeue results */ ++/* ------------------------------------------------- */ ++/** ++ * qbman_result_has_new_result() - Check and get the dequeue response from the ++ * dq storage memory set in pull dequeue command ++ * @s: the software portal object. ++ * @dq: the dequeue result read from the memory. ++ * ++ * Only used for user-provided storage of dequeue results, not DQRR. For ++ * efficiency purposes, the driver will perform any required endianness ++ * conversion to ensure that the user's dequeue result storage is in host-endian ++ * format (whether or not that is the same as the little-endian format that ++ * hardware DMA'd to the user's storage). As such, once the user has called ++ * qbman_result_has_new_result() and been returned a valid dequeue result, ++ * they should not call it again on the same memory location (except of course ++ * if another dequeue command has been executed to produce a new result to that ++ * location). ++ * ++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid ++ * dequeue result. ++ */ ++int qbman_result_has_new_result(struct qbman_swp *, ++ const struct dpaa2_dq *); ++ ++/* -------------------------------------------------------- */ ++/* Parsing dequeue entries (DQRR and user-provided storage) */ ++/* -------------------------------------------------------- */ ++ ++/** ++ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not ++ * @dq: the dequeue result to be checked. ++ * ++ * DQRR entries may contain non-dequeue results, ie. notifications ++ */ ++int qbman_result_is_DQ(const struct dpaa2_dq *); ++ ++/** ++ * qbman_result_is_SCN() - Check the dequeue result is notification or not ++ * @dq: the dequeue result to be checked. ++ * ++ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change ++ * notifications" of one type or another. Some APIs apply to all of them, of the ++ * form qbman_result_SCN_***(). ++ */ ++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq) ++{ ++ return !qbman_result_is_DQ(dq); ++} ++ ++/** ++ * Recognise different notification types, only required if the user allows for ++ * these to occur, and cares about them when they do. ++ */ ++int qbman_result_is_FQDAN(const struct dpaa2_dq *); ++ /* FQ Data Availability */ ++int qbman_result_is_CDAN(const struct dpaa2_dq *); ++ /* Channel Data Availability */ ++int qbman_result_is_CSCN(const struct dpaa2_dq *); ++ /* Congestion State Change */ ++int qbman_result_is_BPSCN(const struct dpaa2_dq *); ++ /* Buffer Pool State Change */ ++int qbman_result_is_CGCU(const struct dpaa2_dq *); ++ /* Congestion Group Count Update */ ++/* Frame queue state change notifications; (FQDAN in theory counts too as it ++ * leaves a FQ parked, but it is primarily a data availability notification) */ ++int qbman_result_is_FQRN(const struct dpaa2_dq *); /* Retirement */ ++int qbman_result_is_FQRNI(const struct dpaa2_dq *); ++ /* Retirement Immediate */ ++int qbman_result_is_FQPN(const struct dpaa2_dq *); /* Park */ ++ ++/* NB: for parsing dequeue results (when "is_DQ" is TRUE), use the higher-layer ++ * dpaa2_dq_*() functions. */ ++ ++/* State-change notifications (FQDAN/CDAN/CSCN/...). 
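/*
 * Editor's aside (not part of the patch): a minimal pull dequeue of up to
 * four frames from one FQ into the portal's DQRR, composed from the
 * qbman_*() calls declared above. Without qbman_pull_desc_set_storage()
 * the results go to the DQRR, per the comment above. Real code would poll
 * until the command's results have all arrived; here we simply drain
 * whatever is available. The function name is invented.
 */
static int example_pull_from_fq(struct qbman_swp *swp, uint32_t fqid)
{
        struct qbman_pull_desc pd;
        const struct dpaa2_dq *dq;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_numframes(&pd, 4);
        qbman_pull_desc_set_fq(&pd, fqid);

        err = qbman_swp_pull(swp, &pd);
        if (err)
                return err;     /* -EBUSY: portal not ready for pull dequeue */

        while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
                if (qbman_result_is_DQ(dq)) {
                        /* ... hand the dequeued frame to the caller ... */
                }
                qbman_swp_dqrr_consume(swp, dq);
        }
        return 0;
}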
*/ ++/** ++ * qbman_result_SCN_state() - Get the state field in State-change notification ++ */ ++uint8_t qbman_result_SCN_state(const struct dpaa2_dq *); ++/** ++ * qbman_result_SCN_rid() - Get the resource id in State-change notification ++ */ ++uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *); ++/** ++ * qbman_result_SCN_ctx() - Get the context data in State-change notification ++ */ ++uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *); ++/** ++ * qbman_result_SCN_state_in_mem() - Get the state field in State-change ++ * notification which is written to memory instead of DQRR. ++ */ ++uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *); ++/** ++ * qbman_result_SCN_rid_in_mem() - Get the resource id in State-change ++ * notification which is written to memory instead of DQRR. ++ */ ++uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *); ++ ++/* Type-specific "resource IDs". Mainly for illustration purposes, though it ++ * also gives the appropriate type widths. */ ++#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq) ++#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq) ++#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq) ++#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq) ++#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) ++#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) ++ ++/** ++ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN ++ * ++ * Return the buffer pool id. ++ */ ++uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *); ++/** ++ * qbman_result_bpscn_has_free_bufs() - Check whether there are free ++ * buffers in the pool from BPSCN. ++ * ++ * Return the number of free buffers. ++ */ ++int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *); ++/** ++ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the ++ * buffer pool is depleted. ++ * ++ * Return the status of buffer pool depletion. ++ */ ++int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *); ++/** ++ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer ++ * pool is surplus or not. ++ * ++ * Return the status of buffer pool surplus. ++ */ ++int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *); ++/** ++ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message ++ * ++ * Return the BPSCN context. ++ */ ++uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *); ++ ++/* Parsing CGCU */ ++/** ++ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid ++ * ++ * Return the CGCU resource id. ++ */ ++uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *); ++/** ++ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU ++ * ++ * Return instantaneous count in the CGCU notification. ++ */ ++uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *); ++ ++ /************/ ++ /* Enqueues */ ++ /************/ ++/** ++ * struct qbman_eq_desc - structure of enqueue descriptor ++ */ ++struct qbman_eq_desc { ++ uint32_t dont_manipulate_directly[8]; ++}; ++ ++/** ++ * struct qbman_eq_response - structure of enqueue response ++ */ ++struct qbman_eq_response { ++ uint32_t dont_manipulate_directly[16]; ++}; ++ ++/** ++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to ++ * default/starting state. ++ */ ++void qbman_eq_desc_clear(struct qbman_eq_desc *); ++ ++/* Exactly one of the following descriptor "actions" should be set. (Calling ++ * any one of these will replace the effect of any prior call to one of these.) 
++ * - enqueue without order-restoration ++ * - enqueue with order-restoration ++ * - fill a hole in the order-restoration sequence, without any enqueue ++ * - advance NESN (Next Expected Sequence Number), without any enqueue ++ * 'respond_success' indicates whether an enqueue response should be DMA'd ++ * after success (otherwise a response is DMA'd only after failure). ++ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to ++ * be enqueued. ++ */ ++/** ++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp ++ * @d: the enqueue descriptor. ++ * @response_success: 1 = enqueue with response always; 0 = enqueue with ++ * rejections returned on a FQ. ++ */ ++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success); ++ ++/** ++ * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor ++ * @d: the enqueue descriptor. ++ * @response_success: 1 = enqueue with response always; 0 = enqueue with ++ * rejections returned on a FQ. ++ * @opr_id: the order point record id. ++ * @seqnum: the order restoration sequence number. ++ * @incomplete: indiates whether this is the last fragments using the same ++ * sequeue number. ++ */ ++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, ++ uint32_t opr_id, uint32_t seqnum, int incomplete); ++ ++/** ++ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence ++ * without any enqueue ++ * @d: the enqueue descriptor. ++ * @opr_id: the order point record id. ++ * @seqnum: the order restoration sequence number. ++ */ ++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, ++ uint32_t seqnum); ++ ++/** ++ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number) ++ * without any enqueue ++ * @d: the enqueue descriptor. ++ * @opr_id: the order point record id. ++ * @seqnum: the order restoration sequence number. ++ */ ++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, ++ uint32_t seqnum); ++ ++/** ++ * qbman_eq_desc_set_response() - Set the enqueue response info. ++ * @d: the enqueue descriptor ++ * @storage_phys: the physical address of the enqueue response in memory. ++ * @stash: indicate that the write allocation enabled or not. ++ * ++ * In the case where an enqueue response is DMA'd, this determines where that ++ * response should go. (The physical/DMA address is given for hardware's ++ * benefit, but software should interpret it as a "struct qbman_eq_response" ++ * data structure.) 'stash' controls whether or not the write to main-memory ++ * expresses a cache-warming attribute. ++ */ ++void qbman_eq_desc_set_response(struct qbman_eq_desc *d, ++ dma_addr_t storage_phys, ++ int stash); ++/** ++ * qbman_eq_desc_set_token() - Set token for the enqueue command ++ * @d: the enqueue descriptor ++ * @token: the token to be set. ++ * ++ * token is the value that shows up in an enqueue response that can be used to ++ * detect when the results have been published. The easiest technique is to zero ++ * result "storage" before issuing an enqueue, and use any non-zero 'token' ++ * value. ++ */ ++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token); ++ ++/** ++ * qbman_eq_desc_set_fq() ++ * qbman_eq_desc_set_qd() - Set eithe FQ or Queuing Destination for the enqueue ++ * command. ++ * @d: the enqueue descriptor ++ * @fqid: the id of the frame queue to be enqueued. ++ * @qdid: the id of the queuing destination to be enqueued. 
++ * @qd_bin: the queuing destination bin ++ * @qd_prio: the queuing destination priority. ++ * ++ * Exactly one of the following descriptor "targets" should be set. (Calling any ++ * one of these will replace the effect of any prior call to one of these.) ++ * - enqueue to a frame queue ++ * - enqueue to a queuing destination ++ * Note, that none of these will have any affect if the "action" type has been ++ * set to "orp_hole" or "orp_nesn". ++ */ ++void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid); ++void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid, ++ uint32_t qd_bin, uint32_t qd_prio); ++ ++/** ++ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt ++ * @d: the enqueue descriptor ++ * @enable: boolean to enable/disable EQDI ++ * ++ * Determines whether or not the portal's EQDI interrupt source should be ++ * asserted after the enqueue command is completed. ++ */ ++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *, int enable); ++ ++/** ++ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command. ++ * @d: the enqueue descriptor. ++ * @enable: enabled/disable DCA mode. ++ * @dqrr_idx: DCAP_CI, the DCAP consumer index. ++ * @park: determine the whether park the FQ or not ++ * ++ * Determines whether or not a portal DQRR entry should be consumed once the ++ * enqueue command is completed. (And if so, and the DQRR entry corresponds ++ * to a held-active (order-preserving) FQ, whether the FQ should be parked ++ * instead of being rescheduled.) ++ */ ++void qbman_eq_desc_set_dca(struct qbman_eq_desc *, int enable, ++ uint32_t dqrr_idx, int park); ++ ++/** ++ * qbman_swp_enqueue() - Issue an enqueue command. ++ * @s: the software portal used for enqueue. ++ * @d: the enqueue descriptor. ++ * @fd: the frame descriptor to be enqueued. ++ * ++ * Please note that 'fd' should only be NULL if the "action" of the ++ * descriptor is "orp_hole" or "orp_nesn". ++ * ++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. ++ */ ++int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *, ++ const struct qbman_fd *fd); ++ ++/** ++ * qbman_swp_enqueue_thresh() - Set the threshold for EQRI interrupt. ++ * ++ * An EQRI interrupt can be generated when the fill-level of EQCR falls below ++ * the 'thresh' value set here. Setting thresh==0 (the default) disables. ++ */ ++int qbman_swp_enqueue_thresh(struct qbman_swp *, unsigned int thresh); ++ ++ /*******************/ ++ /* Buffer releases */ ++ /*******************/ ++/** ++ * struct qbman_release_desc - The structure for buffer release descriptor ++ */ ++struct qbman_release_desc { ++ uint32_t dont_manipulate_directly[1]; ++}; ++ ++/** ++ * qbman_release_desc_clear() - Clear the contents of a descriptor to ++ * default/starting state. ++ */ ++void qbman_release_desc_clear(struct qbman_release_desc *); ++ ++/** ++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to ++ */ ++void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid); ++ ++/** ++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI ++ * interrupt source should be asserted after the release command is completed. ++ */ ++void qbman_release_desc_set_rcdi(struct qbman_release_desc *, int enable); ++ ++/** ++ * qbman_swp_release() - Issue a buffer release command. ++ * @s: the software portal object. ++ * @d: the release descriptor. ++ * @buffers: a pointer pointing to the buffer address to be released. 
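/*
 * Editor's aside (not part of the patch): the simplest enqueue built from
 * the descriptor setters above - no order restoration, a response DMA'd
 * only on rejection, and a frame queue as the target. The helper name is
 * invented; retry policy on -EBUSY is left to the caller.
 */
static int example_enqueue_fd(struct qbman_swp *swp, uint32_t fqid,
                              const struct qbman_fd *fd)
{
        struct qbman_eq_desc ed;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);       /* respond only on rejection */
        qbman_eq_desc_set_fq(&ed, fqid);

        /* Returns -EBUSY if the EQCR has no room; the caller may retry */
        return qbman_swp_enqueue(swp, &ed, fd);
}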
++ * @num_buffers: number of buffers to be released, must be less than 8. ++ * ++ * Return 0 for success, -EBUSY if the release command ring is not ready. ++ */ ++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, ++ const uint64_t *buffers, unsigned int num_buffers); ++ ++ /*******************/ ++ /* Buffer acquires */ ++ /*******************/ ++ ++/** ++ * qbman_swp_acquire() - Issue a buffer acquire command. ++ * @s: the software portal object. ++ * @bpid: the buffer pool index. ++ * @buffers: a pointer pointing to the acquired buffer address|es. ++ * @num_buffers: number of buffers to be acquired, must be less than 8. ++ * ++ * Return 0 for success, or negative error code if the acquire command ++ * fails. ++ */ ++int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers, ++ unsigned int num_buffers); ++ ++ /*****************/ ++ /* FQ management */ ++ /*****************/ ++ ++/** ++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state. ++ * @s: the software portal object. ++ * @fqid: the index of frame queue to be scheduled. ++ * ++ * There are a couple of different ways that a FQ can end up parked state, ++ * This schedules it. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid); ++ ++/** ++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state. ++ * @s: the software portal object. ++ * @fqid: the index of frame queue to be forced. ++ * ++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled ++ * and thus be available for selection by any channel-dequeuing behaviour (push ++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still ++ * empty at the time this happens, the resulting dq_entry will have no FD. ++ * (qbman_result_DQ_fd() will return NULL.) ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid); ++ ++/** ++ * qbman_swp_fq_xon() ++ * qbman_swp_fq_xoff() - XON/XOFF the frame queue. ++ * @s: the software portal object. ++ * @fqid: the index of frame queue. ++ * ++ * These functions change the FQ flow-control stuff between XON/XOFF. (The ++ * default is XON.) This setting doesn't affect enqueues to the FQ, just ++ * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when ++ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is ++ * changed to XOFF after it had already become truly-scheduled to a channel, and ++ * a pull dequeue of that channel occurs that selects that FQ for dequeuing, ++ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will ++ * return NULL.) ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid); ++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid); ++ ++ /**********************/ ++ /* Channel management */ ++ /**********************/ ++ ++/* If the user has been allocated a channel object that is going to generate ++ * CDANs to another channel, then these functions will be necessary. ++ * CDAN-enabled channels only generate a single CDAN notification, after which ++ * it they need to be reenabled before they'll generate another. (The idea is ++ * that pull dequeuing will occur in reaction to the CDAN, followed by a ++ * reenable step.) 
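/*
 * Editor's aside (not part of the patch): seeding a buffer pool with
 * caller-supplied buffer addresses via the release descriptor documented
 * above. The helper name is invented; per the API comment, 'num' must stay
 * below 8 per command.
 */
static int example_seed_pool(struct qbman_swp *s, uint32_t bpid,
                             const uint64_t *bufs, unsigned int num)
{
        struct qbman_release_desc rd;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        /* -EBUSY means the release command ring is full; the caller may retry */
        return qbman_swp_release(s, &rd, bufs, num);
}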
Each function generates a distinct command to hardware, so a ++ * combination function is provided if the user wishes to modify the "context" ++ * (which shows up in each CDAN message) each time they reenable, as a single ++ * command to hardware. */ ++/** ++ * qbman_swp_CDAN_set_context() - Set CDAN context ++ * @s: the software portal object. ++ * @channelid: the channel index. ++ * @ctx: the context to be set in CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_set_context(struct qbman_swp *, uint16_t channelid, ++ uint64_t ctx); ++ ++/** ++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel. ++ * @s: the software portal object. ++ * @channelid: the index of the channel to generate CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_enable(struct qbman_swp *, uint16_t channelid); ++ ++/** ++ * qbman_swp_CDAN_disable() - disable CDAN for the channel. ++ * @s: the software portal object. ++ * @channelid: the index of the channel to generate CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_disable(struct qbman_swp *, uint16_t channelid); ++ ++/** ++ * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN ++ * @s: the software portal object. ++ * @channelid: the index of the channel to generate CDAN. ++ * @ctx: the context set in CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *, uint16_t channelid, ++ uint64_t ctx); ++ ++#endif /* !_FSL_QBMAN_PORTAL_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c +new file mode 100644 +index 0000000..12e33d3 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c +@@ -0,0 +1,846 @@ ++/* Copyright (C) 2015 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
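/*
 * Editor's aside (not part of the patch): the single-shot CDAN pattern
 * described in the channel-management section above - once a CDAN has been
 * handled, the channel must be re-armed before it will notify again. This
 * sketch re-arms and refreshes the context in one command; the helper name
 * and the pull-dequeue placeholder are invented.
 */
static int example_rearm_channel(struct qbman_swp *s, uint16_t channelid,
                                 uint64_t new_ctx)
{
        /* ... pull-dequeue the channel in response to the CDAN ... */

        /* Re-enable CDAN and update the context carried in the next message */
        return qbman_swp_CDAN_set_context_enable(s, channelid, new_ctx);
}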
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qbman_portal.h" ++#include "qbman_debug.h" ++#include "fsl_qbman_portal.h" ++ ++/* QBMan portal management command code */ ++#define QBMAN_BP_QUERY 0x32 ++#define QBMAN_FQ_QUERY 0x44 ++#define QBMAN_FQ_QUERY_NP 0x45 ++#define QBMAN_CGR_QUERY 0x51 ++#define QBMAN_WRED_QUERY 0x54 ++#define QBMAN_CGR_STAT_QUERY 0x55 ++#define QBMAN_CGR_STAT_QUERY_CLR 0x56 ++ ++enum qbman_attr_usage_e { ++ qbman_attr_usage_fq, ++ qbman_attr_usage_bpool, ++ qbman_attr_usage_cgr, ++}; ++ ++struct int_qbman_attr { ++ uint32_t words[32]; ++ enum qbman_attr_usage_e usage; ++}; ++ ++#define attr_type_set(a, e) \ ++{ \ ++ struct qbman_attr *__attr = a; \ ++ enum qbman_attr_usage_e __usage = e; \ ++ ((struct int_qbman_attr *)__attr)->usage = __usage; \ ++} ++ ++#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) ++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) ++ ++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); ++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); ++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); ++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); ++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); ++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); ++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); ++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); ++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); ++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); ++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); ++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); ++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); ++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); ++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); ++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); ++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); ++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); ++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32); ++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); ++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); ++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8); ++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8); ++ ++void qbman_bp_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_bpool); ++} ++ ++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, ++ struct qbman_attr *a) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *attr = ATTR32(a); ++ ++ qbman_bp_attr_clear(a); ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_bp_bpid, p, bpid); ++ ++ /* Complete the management command */ ++ p = 
qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_BP_QUERY); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); ++ return -EIO; ++ } ++ ++ /* For the query, word[0] of the result contains only the ++ * verb/rslt fields, so skip word[0]. ++ */ ++ word_copy(&attr[1], &p[1], 15); ++ return 0; ++} ++ ++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); ++ *va = !!qb_attr_code_decode(&code_bp_va, p); ++ *wae = !!qb_attr_code_decode(&code_bp_wae, p); ++} ++ ++static uint32_t qbman_bp_thresh_to_value(uint32_t val) ++{ ++ return (val & 0xff) << ((val & 0xf00) >> 8); ++} ++ ++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, ++ p)); ++} ++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, ++ p)); ++} ++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, ++ p)); ++} ++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, ++ p)); ++} ++ ++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); ++} ++ ++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *icid = qb_attr_code_decode(&code_bp_icid, p); ++ *pl = !!qb_attr_code_decode(&code_bp_pl, p); ++} ++ ++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi, ++ p) << 32) | ++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo, ++ p); ++} ++ ++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) ++ << 32) | ++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, ++ p); ++} ++ ++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); ++} ++ ++int qbman_bp_info_has_free_bufs(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); ++} ++ ++int qbman_bp_info_is_depleted(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); ++} ++ ++int qbman_bp_info_is_surplus(struct 
qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); ++} ++ ++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_fill, p); ++} ++ ++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_hdptr, p); ++} ++ ++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_sdcnt, p); ++} ++ ++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_hdcnt, p); ++} ++ ++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_sscnt, p); ++} ++ ++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); ++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); ++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); ++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); ++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); ++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); ++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); ++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); ++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); ++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); ++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); ++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); ++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); ++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); ++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); ++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); ++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); ++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); ++ ++void qbman_fq_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_fq); ++} ++ ++/* FQ query function for programmable fields */ ++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *d = ATTR32(desc); ++ ++ qbman_fq_attr_clear(desc); ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ qb_attr_code_encode(&code_fq_fqid, p, fqid); ++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_FQ_QUERY); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n", ++ fqid, rslt); ++ return -EIO; ++ } ++ /* For the configure, word[0] of the command contains only the WE-mask. ++ * For the query, word[0] of the result contains only the verb/rslt ++ * fields. Skip word[0] in the latter case. 
*/ ++ word_copy(&d[1], &p[1], 15); ++ return 0; ++} ++ ++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p); ++} ++ ++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p); ++} ++ ++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *destwq = qb_attr_code_decode(&code_fq_destwq, p); ++} ++ ++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *icscred = qb_attr_code_decode(&code_fq_icscred, p); ++} ++ ++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5); ++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8); ++static uint32_t qbman_thresh_to_value(uint32_t val) ++{ ++ uint32_t m, e; ++ ++ m = qb_attr_code_decode(&code_tdthresh_mant, &val); ++ e = qb_attr_code_decode(&code_tdthresh_exp, &val); ++ return m << e; ++} ++ ++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh, ++ p)); ++} ++ ++void qbman_fq_attr_get_oa(struct qbman_attr *d, ++ int *oa_ics, int *oa_cgr, int32_t *oa_len) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p); ++ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p); ++ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len, ++ qb_attr_code_decode(&code_fq_oa_len, p)); ++} ++ ++void qbman_fq_attr_get_mctl(struct qbman_attr *d, ++ int *bdi, int *ff, int *va, int *ps) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p); ++ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p); ++ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p); ++ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p); ++} ++ ++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p); ++ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p); ++} ++ ++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *icid = qb_attr_code_decode(&code_fq_icid, p); ++ *pl = !!qb_attr_code_decode(&code_fq_pl, p); ++} ++ ++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p); ++} ++ ++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p); ++} ++ ++/* Query FQ Non-Programmalbe Fields */ ++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3); ++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1); ++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1); ++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1); ++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1); ++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24); ++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32); ++ ++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, ++ struct qbman_attr *state) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *d = ATTR32(state); ++ ++ qbman_fq_attr_clear(state); ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ 
qb_attr_code_encode(&code_fq_fqid, p, fqid); ++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_FQ_QUERY_NP); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", ++ fqid, rslt); ++ return -EIO; ++ } ++ word_copy(&d[0], &p[0], 16); ++ return 0; ++} ++ ++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_state, p); ++} ++ ++int qbman_fq_state_force_eligible(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_fe, p); ++} ++ ++int qbman_fq_state_xoff(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_x, p); ++} ++ ++int qbman_fq_state_retirement_pending(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_r, p); ++} ++ ++int qbman_fq_state_overflow_error(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_oe, p); ++} ++ ++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); ++} ++ ++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); ++} ++ ++/* Query CGR */ ++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); ++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); ++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); ++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); ++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); ++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1); ++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); ++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); ++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); ++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); ++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); ++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); ++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); ++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); ++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); ++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); ++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); ++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); ++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); ++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); ++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); ++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); ++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); ++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); ++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 
0, 32); ++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); ++ ++void qbman_cgr_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_cgr); ++} ++ ++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *d[2]; ++ int i; ++ uint32_t query_verb; ++ ++ d[0] = ATTR32(attr); ++ d[1] = ATTR32_1(attr); ++ ++ qbman_cgr_attr_clear(attr); ++ ++ for (i = 0; i < 2; i++) { ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY; ++ ++ qb_attr_code_encode(&code_cgr_cgid, p, cgid); ++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != query_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query CGID 0x%x failed,", cgid); ++ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt); ++ return -EIO; ++ } ++ /* For the configure, word[0] of the command contains only the ++ * verb/cgid. For the query, word[0] of the result contains ++ * only the verb/rslt fields. Skip word[0] in the latter case. ++ */ ++ word_copy(&d[i][1], &p[1], 15); ++ } ++ return 0; ++} ++ ++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, ++ int *cscn_wq_en_exit, int *cscn_wq_icd) ++ { ++ uint32_t *p = ATTR32(d); ++ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter, ++ p); ++ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p); ++ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p); ++} ++ ++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, ++ int *rej_cnt_mode, int *cscn_bdi) ++{ ++ uint32_t *p = ATTR32(d); ++ *mode = qb_attr_code_decode(&code_cgr_mode, p); ++ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p); ++ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p); ++} ++ ++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, ++ int *cscn_wr_en_exit, int *cg_wr_ae, ++ int *cscn_dcp_en, int *cg_wr_va) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter, ++ p); ++ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p); ++ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p); ++ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p); ++ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p); ++} ++ ++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, ++ uint32_t *i_cnt_wr_bnd) ++{ ++ uint32_t *p = ATTR32(d); ++ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p); ++ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p); ++} ++ ++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en) ++{ ++ uint32_t *p = ATTR32(d); ++ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p); ++} ++ ++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres) ++{ ++ uint32_t *p = ATTR32(d); ++ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode( ++ &code_cgr_cs_thres, p)); ++} ++ ++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, ++ uint32_t *cs_thres_x) ++{ ++ uint32_t *p = ATTR32(d); ++ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode( ++ &code_cgr_cs_thres_x, p)); ++} ++ ++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres) ++{ ++ uint32_t *p = 
ATTR32(d); ++ *td_thres = qbman_thresh_to_value(qb_attr_code_decode( ++ &code_cgr_td_thres, p)); ++} ++ ++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p); ++} ++ ++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); ++} ++ ++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, ++ uint32_t *cscn_vcgid) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); ++} ++ ++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, ++ int *pl) ++{ ++ uint32_t *p = ATTR32(d); ++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); ++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); ++} ++ ++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, ++ uint64_t *cg_wr_addr) ++{ ++ uint32_t *p = ATTR32(d); ++ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, ++ p) << 32) | ++ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, ++ p); ++} ++ ++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) ++ << 32) | ++ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); ++} ++ ++#define WRED_EDP_WORD(n) (18 + n/4) ++#define WRED_EDP_OFFSET(n) (8 * (n % 4)) ++#define WRED_PARM_DP_WORD(n) (n + 20) ++#define WRED_WE_EDP(n) (16 + n * 2) ++#define WRED_WE_PARM_DP(n) (17 + n * 2) ++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, ++ int *edp) ++{ ++ uint32_t *p = ATTR32(d); ++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), ++ WRED_EDP_OFFSET(idx), 8); ++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); ++} ++ ++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, ++ uint64_t *maxth, uint8_t *maxp) ++{ ++ uint8_t ma, mn, step_i, step_s, pn; ++ ++ ma = (uint8_t)(dp >> 24); ++ mn = (uint8_t)(dp >> 19) & 0x1f; ++ step_i = (uint8_t)(dp >> 11); ++ step_s = (uint8_t)(dp >> 6) & 0x1f; ++ pn = (uint8_t)dp & 0x3f; ++ ++ *maxp = ((pn<<2) * 100)/256; ++ ++ if (mn == 0) ++ *maxth = ma; ++ else ++ *maxth = ((ma+256) * (1<<(mn-1))); ++ ++ if (step_s == 0) ++ *minth = *maxth - step_i; ++ else ++ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1)); ++} ++ ++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, ++ uint32_t *dp) ++{ ++ uint32_t *p = ATTR32(d); ++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), ++ 0, 8); ++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); ++} ++ ++/* Query CGR/CCGR/CQ statistics */ ++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); ++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); ++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); ++static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid, ++ int clear, uint32_t command_type, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t query_verb; ++ uint32_t hi, lo; ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ qb_attr_code_encode(&code_cgr_cgid, p, cgid); ++ if (command_type < 2) ++ qb_attr_code_encode(&code_cgr_stat_ct, p, 
command_type); ++ query_verb = clear ? ++ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY; ++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != query_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query statistics of CGID 0x%x failed,", cgid); ++ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt); ++ return -EIO; ++ } ++ ++ if (*frame_cnt) { ++ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p); ++ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p); ++ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; ++ } ++ if (*byte_cnt) { ++ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p); ++ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p); ++ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; ++ } ++ ++ return 0; ++} ++ ++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ return qbman_cgr_statistics_query(s, cgid, clear, 0xff, ++ frame_cnt, byte_cnt); ++} ++ ++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ return qbman_cgr_statistics_query(s, cgid, clear, 1, ++ frame_cnt, byte_cnt); ++} ++ ++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ return qbman_cgr_statistics_query(s, cgid, clear, 0, ++ frame_cnt, byte_cnt); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h +new file mode 100644 +index 0000000..1e6b002 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h +@@ -0,0 +1,136 @@ ++/* Copyright (C) 2015 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++struct qbman_attr { ++ uint32_t dont_manipulate_directly[40]; ++}; ++ ++/* Buffer pool query commands */ ++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, ++ struct qbman_attr *a); ++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae); ++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet); ++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt); ++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet); ++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt); ++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset); ++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt); ++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid); ++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl); ++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr); ++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx); ++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ); ++int qbman_bp_info_has_free_bufs(struct qbman_attr *a); ++int qbman_bp_info_is_depleted(struct qbman_attr *a); ++int qbman_bp_info_is_surplus(struct qbman_attr *a); ++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a); ++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a); ++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a); ++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a); ++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a); ++ ++/* FQ query function for programmable fields */ ++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, ++ struct qbman_attr *desc); ++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl); ++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid); ++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq); ++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred); ++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh); ++void qbman_fq_attr_get_oa(struct qbman_attr *d, ++ int *oa_ics, int *oa_cgr, int32_t *oa_len); ++void qbman_fq_attr_get_mctl(struct qbman_attr *d, ++ int *bdi, int *ff, int *va, int *ps); ++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo); ++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl); ++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid); ++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid); ++ ++/* FQ query command for non-programmable fields*/ ++enum qbman_fq_schedstate_e { ++ qbman_fq_schedstate_oos = 0, ++ qbman_fq_schedstate_retired, ++ qbman_fq_schedstate_tentatively_scheduled, ++ qbman_fq_schedstate_truly_scheduled, ++ qbman_fq_schedstate_parked, ++ qbman_fq_schedstate_held_active, ++}; ++ ++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, ++ struct qbman_attr *state); ++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state); ++int 
qbman_fq_state_force_eligible(const struct qbman_attr *state); ++int qbman_fq_state_xoff(const struct qbman_attr *state); ++int qbman_fq_state_retirement_pending(const struct qbman_attr *state); ++int qbman_fq_state_overflow_error(const struct qbman_attr *state); ++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state); ++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state); ++ ++/* CGR query */ ++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, ++ struct qbman_attr *attr); ++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, ++ int *cscn_wq_en_exit, int *cscn_wq_icd); ++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, ++ int *rej_cnt_mode, int *cscn_bdi); ++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, ++ int *cscn_wr_en_exit, int *cg_wr_ae, ++ int *cscn_dcp_en, int *cg_wr_va); ++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, ++ uint32_t *i_cnt_wr_bnd); ++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); ++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres); ++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, ++ uint32_t *cs_thres_x); ++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres); ++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp); ++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid); ++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, ++ uint32_t *cscn_vcgid); ++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, ++ int *pl); ++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, ++ uint64_t *cg_wr_addr); ++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx); ++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, ++ int *edp); ++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, ++ uint64_t *maxth, uint8_t *maxp); ++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, ++ uint32_t *dp); ++ ++/* CGR/CCGR/CQ statistics query */ ++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt); ++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt); ++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt); +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c +new file mode 100644 +index 0000000..6c5638b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c +@@ -0,0 +1,1212 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qbman_portal.h" ++ ++/* QBMan portal management command codes */ ++#define QBMAN_MC_ACQUIRE 0x30 ++#define QBMAN_WQCHAN_CONFIGURE 0x46 ++ ++/* CINH register offsets */ ++#define QBMAN_CINH_SWP_EQAR 0x8c0 ++#define QBMAN_CINH_SWP_DQPI 0xa00 ++#define QBMAN_CINH_SWP_DCAP 0xac0 ++#define QBMAN_CINH_SWP_SDQCR 0xb00 ++#define QBMAN_CINH_SWP_RAR 0xcc0 ++#define QBMAN_CINH_SWP_ISR 0xe00 ++#define QBMAN_CINH_SWP_IER 0xe40 ++#define QBMAN_CINH_SWP_ISDR 0xe80 ++#define QBMAN_CINH_SWP_IIR 0xec0 ++ ++/* CENA register offsets */ ++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) ++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) ++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6)) ++#define QBMAN_CENA_SWP_CR 0x600 ++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) ++#define QBMAN_CENA_SWP_VDQCR 0x780 ++ ++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ ++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6) ++ ++/* QBMan FQ management command codes */ ++#define QBMAN_FQ_SCHEDULE 0x48 ++#define QBMAN_FQ_FORCE 0x49 ++#define QBMAN_FQ_XON 0x4d ++#define QBMAN_FQ_XOFF 0x4e ++ ++/*******************************/ ++/* Pre-defined attribute codes */ ++/*******************************/ ++ ++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7); ++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8); ++ ++/*************************/ ++/* SDQCR attribute codes */ ++/*************************/ ++ ++/* we put these here because at least some of them are required by ++ * qbman_swp_init() */ ++struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2); ++struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1); ++struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8); ++#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1) ++enum qbman_sdqcr_dct { ++ qbman_sdqcr_dct_null = 0, ++ qbman_sdqcr_dct_prio_ics, ++ qbman_sdqcr_dct_active_ics, ++ qbman_sdqcr_dct_active ++}; ++enum qbman_sdqcr_fc { ++ qbman_sdqcr_fc_one = 0, ++ qbman_sdqcr_fc_up_to_3 = 1 ++}; ++struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16); ++ ++/*********************************/ ++/* Portal constructor/destructor */ ++/*********************************/ ++ ++/* Software portals should always be in the power-on state when we initialise, ++ * due to the CCSR-based portal reset functionality that MC has. ++ * ++ * Erk! 
Turns out that QMan versions prior to 4.1 do not correctly reset DQRR ++ * valid-bits, so we need to support a workaround where we don't trust ++ * valid-bits when detecting new entries until any stale ring entries have been ++ * overwritten at least once. The idea is that we read PI for the first few ++ * entries, then switch to valid-bit after that. The trick is to clear the ++ * bug-work-around boolean once the PI wraps around the ring for the first time. ++ * ++ * Note: this still carries a slight additional cost once the decrementer hits ++ * zero, so ideally the workaround should only be compiled in if the compiled ++ * image needs to support affected chips. We use WORKAROUND_DQRR_RESET_BUG for ++ * this. ++ */ ++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) ++{ ++ int ret; ++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); ++ ++ if (!p) ++ return NULL; ++ p->desc = d; ++#ifdef QBMAN_CHECKING ++ p->mc.check = swp_mc_can_start; ++#endif ++ p->mc.valid_bit = QB_VALID_BIT; ++ p->sdq = 0; ++ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); ++ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); ++ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); ++ atomic_set(&p->vdq.busy, 1); ++ p->vdq.valid_bit = QB_VALID_BIT; ++ p->dqrr.next_idx = 0; ++ p->dqrr.valid_bit = QB_VALID_BIT; ++ /* TODO: should also read PI/CI type registers and check that they're on ++ * PoR values. If we're asked to initialise portals that aren't in reset ++ * state, bad things will follow. */ ++#ifdef WORKAROUND_DQRR_RESET_BUG ++ p->dqrr.reset_bug = 1; ++#endif ++ if ((p->desc->qman_version & 0xFFFF0000) < QMAN_REV_4100) ++ p->dqrr.dqrr_size = 4; ++ else ++ p->dqrr.dqrr_size = 8; ++ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); ++ if (ret) { ++ kfree(p); ++ pr_err("qbman_swp_sys_init() failed %d\n", ret); ++ return NULL; ++ } ++ /* SDQCR needs to be initialized to 0 when no channels are ++ being dequeued from or else the QMan HW will indicate an ++ error. 
The values that were calculated above will be ++ applied when dequeues from a specific channel are enabled */ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); ++ return p; ++} ++ ++void qbman_swp_finish(struct qbman_swp *p) ++{ ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_start); ++#endif ++ qbman_swp_sys_finish(&p->sys); ++ kfree(p); ++} ++ ++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) ++{ ++ return p->desc; ++} ++ ++/**************/ ++/* Interrupts */ ++/**************/ ++ ++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); ++} ++ ++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); ++} ++ ++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); ++} ++ ++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); ++} ++ ++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); ++} ++ ++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); ++} ++ ++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); ++} ++ ++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0); ++} ++ ++/***********************/ ++/* Management commands */ ++/***********************/ ++ ++/* ++ * Internal code common to all types of management commands. ++ */ ++ ++void *qbman_swp_mc_start(struct qbman_swp *p) ++{ ++ void *ret; ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_start); ++#endif ++ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); ++#ifdef QBMAN_CHECKING ++ if (!ret) ++ p->mc.check = swp_mc_can_submit; ++#endif ++ return ret; ++} ++ ++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) ++{ ++ uint32_t *v = cmd; ++#ifdef QBMAN_CHECKING ++ BUG_ON(!p->mc.check != swp_mc_can_submit); ++#endif ++ /* TBD: "|=" is going to hurt performance. Need to move as many fields ++ * out of word zero, and for those that remain, the "OR" needs to occur ++ * at the caller side. This debug check helps to catch cases where the ++ * caller wants to OR but has forgotten to do so. 
*/ ++ BUG_ON((*v & cmd_verb) != *v); ++ *v = cmd_verb | p->mc.valid_bit; ++ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); ++#ifdef QBMAN_CHECKING ++ p->mc.check = swp_mc_can_poll; ++#endif ++} ++ ++void *qbman_swp_mc_result(struct qbman_swp *p) ++{ ++ uint32_t *ret, verb; ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_poll); ++#endif ++ qbman_cena_invalidate_prefetch(&p->sys, ++ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); ++ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); ++ /* Remove the valid-bit - command completed iff the rest is non-zero */ ++ verb = ret[0] & ~QB_VALID_BIT; ++ if (!verb) ++ return NULL; ++#ifdef QBMAN_CHECKING ++ p->mc.check = swp_mc_can_start; ++#endif ++ p->mc.valid_bit ^= QB_VALID_BIT; ++ return ret; ++} ++ ++/***********/ ++/* Enqueue */ ++/***********/ ++ ++/* These should be const, eventually */ ++static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); ++static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); ++static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); ++static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); ++static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2); ++static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1); ++static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); ++static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); ++static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); ++static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); ++static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); ++/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ ++static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); ++static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); ++static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); ++static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); ++static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); ++static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); ++ ++enum qbman_eq_cmd_e { ++ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ ++ qbman_eq_cmd_empty, ++ /* DMA an enqueue response once complete */ ++ qbman_eq_cmd_respond, ++ /* DMA an enqueue response only if the enqueue fails */ ++ qbman_eq_cmd_respond_reject ++}; ++ ++void qbman_eq_desc_clear(struct qbman_eq_desc *d) ++{ ++ memset(d, 0, sizeof(*d)); ++} ++ ++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 0); ++ qb_attr_code_encode(&code_eq_cmd, cl, ++ respond_success ? qbman_eq_cmd_respond : ++ qbman_eq_cmd_respond_reject); ++} ++ ++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, ++ uint32_t opr_id, uint32_t seqnum, int incomplete) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 1); ++ qb_attr_code_encode(&code_eq_cmd, cl, ++ respond_success ? 
qbman_eq_cmd_respond : ++ qbman_eq_cmd_respond_reject); ++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); ++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); ++ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); ++} ++ ++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, ++ uint32_t seqnum) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 1); ++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); ++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); ++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); ++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); ++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); ++} ++ ++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, ++ uint32_t seqnum) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 1); ++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); ++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); ++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); ++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); ++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); ++} ++ ++void qbman_eq_desc_set_response(struct qbman_eq_desc *d, ++ dma_addr_t storage_phys, ++ int stash) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); ++ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); ++} ++ ++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); ++} ++ ++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_qd_en, cl, 0); ++ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); ++} ++ ++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, ++ uint32_t qd_bin, uint32_t qd_prio) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_qd_en, cl, 1); ++ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); ++ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); ++ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); ++} ++ ++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); ++} ++ ++void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, ++ uint32_t dqrr_idx, int park) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); ++ if (enable) { ++ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); ++ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); ++ } ++} ++ ++#define EQAR_IDX(eqar) ((eqar) & 0x7) ++#define EQAR_VB(eqar) ((eqar) & 0x80) ++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) ++ ++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, ++ const struct qbman_fd *fd) ++{ ++ uint32_t *p; ++ const uint32_t *cl = qb_cl(d); ++ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); ++ ++ pr_debug("EQAR=%08x\n", eqar); ++ if (!EQAR_SUCCESS(eqar)) ++ return -EBUSY; ++ p = qbman_cena_write_start(&s->sys, ++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); ++ word_copy(&p[1], &cl[1], 7); ++ word_copy(&p[8], fd, sizeof(*fd) >> 2); ++ /* Set the verb byte, have to substitute in the valid-bit */ ++ p[0] = cl[0] | EQAR_VB(eqar); ++ qbman_cena_write_complete(&s->sys, ++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)), ++ p); ++ return 0; ++} ++ ++/*************************/ ++/* Static (push) dequeue */ ++/*************************/ ++ ++void 
qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) ++{ ++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); ++ ++ BUG_ON(channel_idx > 15); ++ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); ++} ++ ++void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) ++{ ++ uint16_t dqsrc; ++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); ++ ++ BUG_ON(channel_idx > 15); ++ qb_attr_code_encode(&code, &s->sdq, !!enable); ++ /* Read make the complete src map. If no channels are enabled ++ the SDQCR must be 0 or else QMan will assert errors */ ++ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); ++ if (dqsrc != 0) ++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); ++ else ++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); ++} ++ ++/***************************/ ++/* Volatile (pull) dequeue */ ++/***************************/ ++ ++/* These should be const, eventually */ ++static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); ++static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); ++static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); ++static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); ++static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); ++static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); ++static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); ++ ++enum qb_pull_dt_e { ++ qb_pull_dt_channel, ++ qb_pull_dt_workqueue, ++ qb_pull_dt_framequeue ++}; ++ ++void qbman_pull_desc_clear(struct qbman_pull_desc *d) ++{ ++ memset(d, 0, sizeof(*d)); ++} ++ ++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, ++ struct dpaa2_dq *storage, ++ dma_addr_t storage_phys, ++ int stash) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ /* Squiggle the pointer 'storage' into the extra 2 words of the ++ * descriptor (which aren't copied to the hw command) */ ++ *(void **)&cl[4] = storage; ++ if (!storage) { ++ qb_attr_code_encode(&code_pull_rls, cl, 0); ++ return; ++ } ++ qb_attr_code_encode(&code_pull_rls, cl, 1); ++ qb_attr_code_encode(&code_pull_stash, cl, !!stash); ++ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); ++} ++ ++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ BUG_ON(!numframes || (numframes > 16)); ++ qb_attr_code_encode(&code_pull_numframes, cl, ++ (uint32_t)(numframes - 1)); ++} ++ ++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_token, cl, token); ++} ++ ++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_dct, cl, 1); ++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); ++ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); ++} ++ ++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, ++ enum qbman_pull_type_e dct) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_dct, cl, dct); ++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue); ++ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); ++} ++ ++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, ++ enum qbman_pull_type_e dct) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_dct, cl, dct); ++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); ++ 
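++	/* dqsource carries the channel ID for a channel-type pull */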
qb_attr_code_encode(&code_pull_dqsource, cl, chid); ++} ++ ++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) ++{ ++ uint32_t *p; ++ uint32_t *cl = qb_cl(d); ++ ++ if (!atomic_dec_and_test(&s->vdq.busy)) { ++ atomic_inc(&s->vdq.busy); ++ return -EBUSY; ++ } ++ s->vdq.storage = *(void **)&cl[4]; ++ qb_attr_code_encode(&code_pull_token, cl, 1); ++ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR); ++ word_copy(&p[1], &cl[1], 3); ++ /* Set the verb byte, have to substitute in the valid-bit */ ++ p[0] = cl[0] | s->vdq.valid_bit; ++ s->vdq.valid_bit ^= QB_VALID_BIT; ++ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p); ++ return 0; ++} ++ ++/****************/ ++/* Polling DQRR */ ++/****************/ ++ ++static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); ++static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7); ++static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); ++static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); ++static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); ++/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ ++static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); ++static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); ++static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); ++ ++#define QBMAN_RESULT_DQ 0x60 ++#define QBMAN_RESULT_FQRN 0x21 ++#define QBMAN_RESULT_FQRNI 0x22 ++#define QBMAN_RESULT_FQPN 0x24 ++#define QBMAN_RESULT_FQDAN 0x25 ++#define QBMAN_RESULT_CDAN 0x26 ++#define QBMAN_RESULT_CSCN_MEM 0x27 ++#define QBMAN_RESULT_CGCU 0x28 ++#define QBMAN_RESULT_BPSCN 0x29 ++#define QBMAN_RESULT_CSCN_WQ 0x2a ++ ++static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); ++ ++/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry ++ * only once, so repeated calls can return a sequence of DQRR entries, without ++ * requiring they be consumed immediately or in any particular order. */ ++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) ++{ ++ uint32_t verb; ++ uint32_t response_verb; ++ uint32_t flags; ++ const struct dpaa2_dq *dq; ++ const uint32_t *p; ++ ++ /* Before using valid-bit to detect if something is there, we have to ++ * handle the case of the DQRR reset bug... */ ++#ifdef WORKAROUND_DQRR_RESET_BUG ++ if (unlikely(s->dqrr.reset_bug)) { ++ /* We pick up new entries by cache-inhibited producer index, ++ * which means that a non-coherent mapping would require us to ++ * invalidate and read *only* once that PI has indicated that ++ * there's an entry here. The first trip around the DQRR ring ++ * will be much less efficient than all subsequent trips around ++ * it... ++ */ ++ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); ++ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); ++ /* there are new entries iff pi != next_idx */ ++ if (pi == s->dqrr.next_idx) ++ return NULL; ++ /* if next_idx is/was the last ring index, and 'pi' is ++ * different, we can disable the workaround as all the ring ++ * entries have now been DMA'd to so valid-bit checking is ++ * repaired. Note: this logic needs to be based on next_idx ++ * (which increments one at a time), rather than on pi (which ++ * can burst and wrap-around between our snapshots of it). 
++ */ ++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { ++ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", ++ s->dqrr.next_idx, pi); ++ s->dqrr.reset_bug = 0; ++ } ++ qbman_cena_invalidate_prefetch(&s->sys, ++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ } ++#endif ++ ++ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ p = qb_cl(dq); ++ verb = qb_attr_code_decode(&code_dqrr_verb, p); ++ ++ /* If the valid-bit isn't of the expected polarity, nothing there. Note, ++ * in the DQRR reset bug workaround, we shouldn't need to skip these ++ * check, because we've already determined that a new entry is available ++ * and we've invalidated the cacheline before reading it, so the ++ * valid-bit behaviour is repaired and should tell us what we already ++ * knew from reading PI. ++ */ ++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { ++ qbman_cena_invalidate_prefetch(&s->sys, ++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ return NULL; ++ } ++ /* There's something there. Move "next_idx" attention to the next ring ++ * entry (and prefetch it) before returning what we found. */ ++ s->dqrr.next_idx++; ++ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ ++ /* TODO: it's possible to do all this without conditionals, optimise it ++ * later. */ ++ if (!s->dqrr.next_idx) ++ s->dqrr.valid_bit ^= QB_VALID_BIT; ++ ++ /* If this is the final response to a volatile dequeue command ++ indicate that the vdq is no longer busy */ ++ flags = dpaa2_dq_flags(dq); ++ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); ++ if ((response_verb == QBMAN_RESULT_DQ) && ++ (flags & DPAA2_DQ_STAT_VOLATILE) && ++ (flags & DPAA2_DQ_STAT_EXPIRED)) ++ atomic_inc(&s->vdq.busy); ++ ++ qbman_cena_invalidate_prefetch(&s->sys, ++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ return dq; ++} ++ ++/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ ++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq) ++{ ++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); ++} ++ ++/*********************************/ ++/* Polling user-provided storage */ ++/*********************************/ ++ ++int qbman_result_has_new_result(struct qbman_swp *s, ++ const struct dpaa2_dq *dq) ++{ ++ /* To avoid converting the little-endian DQ entry to host-endian prior ++ * to us knowing whether there is a valid entry or not (and run the ++ * risk of corrupting the incoming hardware LE write), we detect in ++ * hardware endianness rather than host. This means we need a different ++ * "code" depending on whether we are BE or LE in software, which is ++ * where DQRR_TOK_OFFSET comes in... */ ++ static struct qb_attr_code code_dqrr_tok_detect = ++ QB_CODE(0, DQRR_TOK_OFFSET, 8); ++ /* The user trying to poll for a result treats "dq" as const. It is ++ * however the same address that was provided to us non-const in the ++ * first place, for directing hardware DMA to. So we can cast away the ++ * const because it is mutable from our perspective. */ ++ uint32_t *p = qb_cl((struct dpaa2_dq *)dq); ++ uint32_t token; ++ ++ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); ++ if (token != 1) ++ return 0; ++ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); ++ ++ /* Only now do we convert from hardware to host endianness. Also, as we ++ * are returning success, the user has promised not to call us again, so ++ * there's no risk of us converting the endianness twice... 
*/ ++ make_le32_n(p, 16); ++ ++ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the ++ * fact "VDQCR" shows busy doesn't mean that the result we're looking at ++ * is from the same command. Eg. we may be looking at our 10th dequeue ++ * result from our first VDQCR command, yet the second dequeue command ++ * could have been kicked off already, after seeing the 1st result. Ie. ++ * the result we're looking at is not necessarily proof that we can ++ * reset "busy". We instead base the decision on whether the current ++ * result is sitting at the first 'storage' location of the busy ++ * command. */ ++ if (s->vdq.storage == dq) { ++ s->vdq.storage = NULL; ++ atomic_inc(&s->vdq.busy); ++ } ++ return 1; ++} ++ ++/********************************/ ++/* Categorising qbman_result */ ++/********************************/ ++ ++static struct qb_attr_code code_result_in_mem = ++ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); ++ ++static inline int __qbman_result_is_x(const struct dpaa2_dq *dq, uint32_t x) ++{ ++ const uint32_t *p = qb_cl(dq); ++ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); ++ ++ return response_verb == x; ++} ++ ++static inline int __qbman_result_is_x_in_mem(const struct dpaa2_dq *dq, ++ uint32_t x) ++{ ++ const uint32_t *p = qb_cl(dq); ++ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); ++ ++ return (response_verb == x); ++} ++ ++int qbman_result_is_DQ(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); ++} ++ ++int qbman_result_is_FQDAN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); ++} ++ ++int qbman_result_is_CDAN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); ++} ++ ++int qbman_result_is_CSCN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || ++ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); ++} ++ ++int qbman_result_is_BPSCN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); ++} ++ ++int qbman_result_is_CGCU(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); ++} ++ ++int qbman_result_is_FQRN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); ++} ++ ++int qbman_result_is_FQRNI(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); ++} ++ ++int qbman_result_is_FQPN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); ++} ++ ++/*********************************/ ++/* Parsing frame dequeue results */ ++/*********************************/ ++ ++/* These APIs assume qbman_result_is_DQ() is TRUE */ ++ ++uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_stat, p); ++} ++ ++uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); ++} ++ ++uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); ++} ++ ++uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_fqid, p); ++} ++ ++uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_byte_count, p); ++} 
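For reference only, the dequeue-response accessors above are normally driven from a small DQRR polling loop. The sketch below is not part of the patch; it uses only calls introduced in this file, assumes a portal already obtained from qbman_swp_init() with push dequeue enabled via qbman_swp_push_set(), and the drain_dqrr_once() name is made up for illustration:

#include "qbman_portal.h"

/* Sketch: poll the DQRR once and consume whatever entries are present. */
static void drain_dqrr_once(struct qbman_swp *swp)
{
	const struct dpaa2_dq *dq;

	/* qbman_swp_dqrr_next() hands back each unconsumed entry exactly
	 * once and returns NULL when the ring has nothing new. */
	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		if (qbman_result_is_DQ(dq)) {
			/* Frame dequeue result: the FQID and byte-count
			 * accessors are meaningful for this verb. */
			pr_debug("FQID 0x%x delivered %u bytes\n",
				 dpaa2_dq_fqid(dq),
				 dpaa2_dq_byte_count(dq));
		}
		/* Hand the DQRR slot back to hardware via DCAP. */
		qbman_swp_dqrr_consume(swp, dq);
	}
}

When a volatile dequeue directs its responses to user-provided storage instead of the DQRR, qbman_result_has_new_result() above plays the analogous role of detecting a newly written entry.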
++ ++uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_frame_count, p); ++} ++ ++uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq) ++{ ++ const uint64_t *p = (uint64_t *)qb_cl(dq); ++ ++ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); ++} ++EXPORT_SYMBOL(dpaa2_dq_fqd_ctx); ++ ++const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return (const struct dpaa2_fd *)&p[8]; ++} ++EXPORT_SYMBOL(dpaa2_dq_fd); ++ ++/**************************************/ ++/* Parsing state-change notifications */ ++/**************************************/ ++ ++static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); ++static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_scn_state_in_mem = ++ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); ++static struct qb_attr_code code_scn_rid_in_mem = ++ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); ++static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); ++ ++uint8_t qbman_result_SCN_state(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ ++ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); ++} ++ ++uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ ++ return qb_attr_code_decode(&code_scn_rid, p); ++} ++ ++uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *scn) ++{ ++ const uint64_t *p = (uint64_t *)qb_cl(scn); ++ ++ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); ++} ++ ++uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ ++ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); ++} ++ ++uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ uint32_t result_rid; ++ ++ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); ++ return make_le24(result_rid); ++} ++ ++/*****************/ ++/* Parsing BPSCN */ ++/*****************/ ++uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *scn) ++{ ++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; ++} ++ ++int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *scn) ++{ ++ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); ++} ++ ++int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *scn) ++{ ++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); ++} ++ ++int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *scn) ++{ ++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); ++} ++ ++uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *scn) ++{ ++ return qbman_result_SCN_ctx(scn); ++} ++ ++/*****************/ ++/* Parsing CGCU */ ++/*****************/ ++uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *scn) ++{ ++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; ++} ++ ++uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *scn) ++{ ++ return qbman_result_SCN_ctx(scn) & 0xFFFFFFFFFF; ++} ++ ++/******************/ ++/* Buffer release */ ++/******************/ ++ ++/* These should be const, eventually */ ++/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ ++static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); ++static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); ++static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16); ++ ++void qbman_release_desc_clear(struct qbman_release_desc *d) ++{ ++ uint32_t *cl; ++ ++ memset(d, 0, sizeof(*d)); 
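++	/* re-encode the SET_ME bit after the descriptor has been zeroed */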
++ cl = qb_cl(d); ++ qb_attr_code_encode(&code_release_set_me, cl, 1); ++} ++ ++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_release_bpid, cl, bpid); ++} ++ ++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); ++} ++ ++#define RAR_IDX(rar) ((rar) & 0x7) ++#define RAR_VB(rar) ((rar) & 0x80) ++#define RAR_SUCCESS(rar) ((rar) & 0x100) ++ ++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, ++ const uint64_t *buffers, unsigned int num_buffers) ++{ ++ uint32_t *p; ++ const uint32_t *cl = qb_cl(d); ++ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); ++ ++ pr_debug("RAR=%08x\n", rar); ++ if (!RAR_SUCCESS(rar)) ++ return -EBUSY; ++ BUG_ON(!num_buffers || (num_buffers > 7)); ++ /* Start the release command */ ++ p = qbman_cena_write_start(&s->sys, ++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); ++ /* Copy the caller's buffer pointers to the command */ ++ u64_to_le32_copy(&p[2], buffers, num_buffers); ++ /* Set the verb byte, have to substitute in the valid-bit and the number ++ * of buffers. */ ++ p[0] = cl[0] | RAR_VB(rar) | num_buffers; ++ qbman_cena_write_complete(&s->sys, ++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)), ++ p); ++ return 0; ++} ++ ++/*******************/ ++/* Buffer acquires */ ++/*******************/ ++ ++/* These should be const, eventually */ ++static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); ++static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); ++ ++int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, ++ unsigned int num_buffers) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt, num; ++ ++ BUG_ON(!num_buffers || (num_buffers > 7)); ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_acquire_bpid, p, bpid); ++ qb_attr_code_encode(&code_acquire_num, p, num_buffers); ++ ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ num = qb_attr_code_decode(&code_acquire_r_num, p); ++ BUG_ON(verb != QBMAN_MC_ACQUIRE); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", ++ bpid, rslt); ++ return -EIO; ++ } ++ BUG_ON(num > num_buffers); ++ /* Copy the acquired buffers to the caller's array */ ++ u64_from_le32_copy(buffers, &p[2], num); ++ return (int)num; ++} ++ ++/*****************/ ++/* FQ management */ ++/*****************/ ++ ++static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); ++ ++static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, ++ uint8_t alt_fq_verb) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != 
alt_fq_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", ++ fqid, alt_fq_verb, rslt); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); ++} ++ ++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); ++} ++ ++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); ++} ++ ++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); ++} ++ ++/**********************/ ++/* Channel management */ ++/**********************/ ++ ++static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); ++static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); ++static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); ++static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); ++ ++/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it ++ * would be irresponsible to expose it. */ ++#define CODE_CDAN_WE_EN 0x1 ++#define CODE_CDAN_WE_CTX 0x4 ++ ++static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, ++ uint8_t we_mask, uint8_t cdan_en, ++ uint64_t ctx) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_cdan_cid, p, channelid); ++ qb_attr_code_encode(&code_cdan_we, p, we_mask); ++ qb_attr_code_encode(&code_cdan_en, p, cdan_en); ++ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_WQCHAN_CONFIGURE); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("CDAN cQID %d failed: code = 0x%02x\n", ++ channelid, rslt); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, ++ uint64_t ctx) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_CTX, ++ 0, ctx); ++} ++ ++int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_EN, ++ 1, 0); ++} ++int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_EN, ++ 0, 0); ++} ++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, ++ uint64_t ctx) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, ++ 1, ctx); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h +new file mode 100644 +index 0000000..65ebf3f +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h +@@ -0,0 +1,261 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qbman_private.h" ++#include "fsl_qbman_portal.h" ++#include "../../include/fsl_dpaa2_fd.h" ++ ++/* All QBMan command and result structures use this "valid bit" encoding */ ++#define QB_VALID_BIT ((uint32_t)0x80) ++ ++/* Management command result codes */ ++#define QBMAN_MC_RSLT_OK 0xf0 ++ ++/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */ ++#define QBMAN_DQRR_SIZE 4 ++ ++/* DQRR valid-bit reset bug. See qbman_portal.c::qbman_swp_init(). */ ++#define WORKAROUND_DQRR_RESET_BUG ++ ++/* --------------------- */ ++/* portal data structure */ ++/* --------------------- */ ++ ++struct qbman_swp { ++ const struct qbman_swp_desc *desc; ++ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it ++ * needs in here. */ ++ struct qbman_swp_sys sys; ++ /* Management commands */ ++ struct { ++#ifdef QBMAN_CHECKING ++ enum swp_mc_check { ++ swp_mc_can_start, /* call __qbman_swp_mc_start() */ ++ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ ++ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ ++ } check; ++#endif ++ uint32_t valid_bit; /* 0x00 or 0x80 */ ++ } mc; ++ /* Push dequeues */ ++ uint32_t sdq; ++ /* Volatile dequeues */ ++ struct { ++ /* VDQCR supports a "1 deep pipeline", meaning that if you know ++ * the last-submitted command is already executing in the ++ * hardware (as evidenced by at least 1 valid dequeue result), ++ * you can write another dequeue command to the register, the ++ * hardware will start executing it as soon as the ++ * already-executing command terminates. (This minimises latency ++ * and stalls.) With that in mind, this "busy" variable refers ++ * to whether or not a command can be submitted, not whether or ++ * not a previously-submitted command is still executing. 
In ++ * other words, once proof is seen that the previously-submitted ++ * command is executing, "vdq" is no longer "busy". ++ */ ++ atomic_t busy; ++ uint32_t valid_bit; /* 0x00 or 0x80 */ ++ /* We need to determine when vdq is no longer busy. This depends ++ * on whether the "busy" (last-submitted) dequeue command is ++ * targeting DQRR or main-memory, and detected is based on the ++ * presence of the dequeue command's "token" showing up in ++ * dequeue entries in DQRR or main-memory (respectively). */ ++ struct dpaa2_dq *storage; /* NULL if DQRR */ ++ } vdq; ++ /* DQRR */ ++ struct { ++ uint32_t next_idx; ++ uint32_t valid_bit; ++ uint8_t dqrr_size; ++#ifdef WORKAROUND_DQRR_RESET_BUG ++ int reset_bug; ++#endif ++ } dqrr; ++}; ++ ++/* -------------------------- */ ++/* portal management commands */ ++/* -------------------------- */ ++ ++/* Different management commands all use this common base layer of code to issue ++ * commands and poll for results. The first function returns a pointer to where ++ * the caller should fill in their MC command (though they should ignore the ++ * verb byte), the second function commits merges in the caller-supplied command ++ * verb (which should not include the valid-bit) and submits the command to ++ * hardware, and the third function checks for a completed response (returns ++ * non-NULL if only if the response is complete). */ ++void *qbman_swp_mc_start(struct qbman_swp *p); ++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); ++void *qbman_swp_mc_result(struct qbman_swp *p); ++ ++/* Wraps up submit + poll-for-result */ ++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, ++ uint32_t cmd_verb) ++{ ++ int loopvar; ++ ++ qbman_swp_mc_submit(swp, cmd, cmd_verb); ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ cmd = qbman_swp_mc_result(swp); ++ } while (!cmd); ++ return cmd; ++} ++ ++/* ------------ */ ++/* qb_attr_code */ ++/* ------------ */ ++ ++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which ++ * is either serving as a configuration command or a query result. The ++ * representation is inherently little-endian, as the indexing of the words is ++ * itself little-endian in nature and layerscape is little endian for anything ++ * that crosses a word boundary too (64-bit fields are the obvious examples). ++ */ ++struct qb_attr_code { ++ unsigned int word; /* which uint32_t[] array member encodes the field */ ++ unsigned int lsoffset; /* encoding offset from ls-bit */ ++ unsigned int width; /* encoding width. (bool must be 1.) */ ++}; ++ ++/* Some pre-defined codes */ ++extern struct qb_attr_code code_generic_verb; ++extern struct qb_attr_code code_generic_rslt; ++ ++/* Macros to define codes */ ++#define QB_CODE(a, b, c) { a, b, c} ++#define QB_CODE_NULL \ ++ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) ++ ++/* Rotate a code "ms", meaning that it moves from less-significant bytes to ++ * more-significant, from less-significant words to more-significant, etc. The ++ * "ls" version does the inverse, from more-significant towards ++ * less-significant. ++ */ ++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, ++ unsigned int bits) ++{ ++ code->lsoffset += bits; ++ while (code->lsoffset > 31) { ++ code->word++; ++ code->lsoffset -= 32; ++ } ++} ++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, ++ unsigned int bits) ++{ ++ /* Don't be fooled, this trick should work because the types are ++ * unsigned. 
So the case that interests the while loop (the rotate has ++ * gone too far and the word count needs to compensate for it), is ++ * manifested when lsoffset is negative. But that equates to a really ++ * large unsigned value, starting with lots of "F"s. As such, we can ++ * continue adding 32 back to it until it wraps back round above zero, ++ * to a value of 31 or less... ++ */ ++ code->lsoffset -= bits; ++ while (code->lsoffset > 31) { ++ code->word--; ++ code->lsoffset += 32; ++ } ++} ++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */ ++#define qb_attr_code_for_ms(code, bits, expr) \ ++ for (; expr; qb_attr_code_rotate_ms(code, bits)) ++#define qb_attr_code_for_ls(code, bits, expr) \ ++ for (; expr; qb_attr_code_rotate_ls(code, bits)) ++ ++/* decode a field from a cacheline */ ++static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, ++ const uint32_t *cacheline) ++{ ++ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); ++} ++static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, ++ const uint64_t *cacheline) ++{ ++ uint64_t res; ++ u64_from_le32_copy(&res, &cacheline[code->word/2], 1); ++ return res; ++} ++ ++/* encode a field to a cacheline */ ++static inline void qb_attr_code_encode(const struct qb_attr_code *code, ++ uint32_t *cacheline, uint32_t val) ++{ ++ cacheline[code->word] = ++ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) ++ | e32_uint32_t(code->lsoffset, code->width, val); ++} ++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code, ++ uint64_t *cacheline, uint64_t val) ++{ ++ u64_to_le32_copy(&cacheline[code->word/2], &val, 1); ++} ++ ++/* Small-width signed values (two's-complement) will decode into medium-width ++ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to ++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value ++ * 249. Likewise -120 would decode as 136.) This function allows the caller to ++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit ++ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). ++ */ ++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, ++ uint32_t val) ++{ ++ BUG_ON(val >= (1 << code->width)); ++ /* If the high bit was set, it was encoding a negative */ ++ if (val >= (1 << (code->width - 1))) ++ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - ++ val); ++ /* Otherwise, it was encoding a positive */ ++ return (int32_t)val; ++} ++ ++/* ---------------------- */ ++/* Descriptors/cachelines */ ++/* ---------------------- */ ++ ++/* To avoid needless dynamic allocation, the driver API often gives the caller ++ * a "descriptor" type that the caller can instantiate however they like. ++ * Ultimately though, it is just a cacheline of binary storage (or something ++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for ++ * holding pre-formatted pieces of hardware commands. The performance-critical ++ * code can then copy these descriptors directly into hardware command ++ * registers more efficiently than trying to construct/format commands ++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in ++ * order for the compiler to know its size, but the internal details are not ++ * exposed. The following macro is used within the driver for converting *any* ++ * descriptor pointer to a usable array pointer. 
The use of a macro (instead of ++ * an inline) is necessary to work with different descriptor types and to work ++ * correctly with const and non-const inputs (and similarly-qualified outputs). ++ */ ++#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_private.h b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h +new file mode 100644 +index 0000000..e376b80 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h +@@ -0,0 +1,173 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++*/ ++ ++/* Perform extra checking */ ++#define QBMAN_CHECKING ++ ++/* To maximise the amount of logic that is common between the Linux driver and ++ * other targets (such as the embedded MC firmware), we pivot here between the ++ * inclusion of two platform-specific headers. ++ * ++ * The first, qbman_sys_decl.h, includes any and all required system headers as ++ * well as providing any definitions for the purposes of compatibility. The ++ * second, qbman_sys.h, is where platform-specific routines go. ++ * ++ * The point of the split is that the platform-independent code (including this ++ * header) may depend on platform-specific declarations, yet other ++ * platform-specific routines may depend on platform-independent definitions. ++ */ ++ ++#include "qbman_sys_decl.h" ++ ++#define QMAN_REV_4000 0x04000000 ++#define QMAN_REV_4100 0x04010000 ++#define QMAN_REV_4101 0x04010001 ++ ++/* When things go wrong, it is a convenient trick to insert a few FOO() ++ * statements in the code to trace progress. TODO: remove this once we are ++ * hacking the code less actively. 
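++ * As an illustrative sketch only: a bare FOO(); dropped at the top of a
++ * suspect function prints a file:line breadcrumb in the log, so the last
++ * breadcrumb seen shows how far execution got before things went wrong.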
++ */ ++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) ++ ++/* Any time there is a register interface which we poll on, this provides a ++ * "break after x iterations" scheme for it. It's handy for debugging, eg. ++ * where you don't want millions of lines of log output from a polling loop ++ * that won't, because such things tend to drown out the earlier log output ++ * that might explain what caused the problem. (NB: put ";" after each macro!) ++ * TODO: we should probably remove this once we're done sanitising the ++ * simulator... ++ */ ++#define DBG_POLL_START(loopvar) (loopvar = 10) ++#define DBG_POLL_CHECK(loopvar) \ ++ do {if (!(loopvar--)) BUG_ON(1); } while (0) ++ ++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets ++ * and widths, these macro-generated encode/decode/isolate/remove inlines can ++ * be used. ++ * ++ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), ++ * where the field is located 3 bits "up" from the least-significant bit of the ++ * register (ie. the field location within the 32-bit register corresponds to a ++ * mask of 0x0001fff8), you would do; ++ * uint16_t field = d32_uint16_t(3, 14, reg_value); ++ * ++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, ++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" ++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the ++ * LS bit), do; ++ * reg_value |= e32_int(19, 1, !!field); ++ * ++ * If you wish to read-modify-write a register, such that you leave the 14-bit ++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit ++ * value using; ++ * reg_value = i32_uint16_t(3, 14, reg_value); ++ * ++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to ++ * zero) but leaving all other fields as-is; ++ * reg_val = r32_int(19, 1, reg_value); ++ * ++ */ ++#define MAKE_MASK32(width) (width == 32 ? 
0xffffffff : \ ++ (uint32_t)((1 << width) - 1)) ++#define DECLARE_CODEC32(t) \ ++static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ ++} \ ++static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ ++} \ ++static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ ++ uint32_t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ ++} \ ++static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ ++ uint32_t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return ~(MAKE_MASK32(width) << lsoffset) & val; \ ++} ++DECLARE_CODEC32(uint32_t) ++DECLARE_CODEC32(uint16_t) ++DECLARE_CODEC32(uint8_t) ++DECLARE_CODEC32(int) ++ ++ /*********************/ ++ /* Debugging assists */ ++ /*********************/ ++ ++static inline void __hexdump(unsigned long start, unsigned long end, ++ unsigned long p, size_t sz, const unsigned char *c) ++{ ++ while (start < end) { ++ unsigned int pos = 0; ++ char buf[64]; ++ int nl = 0; ++ ++ pos += sprintf(buf + pos, "%08lx: ", start); ++ do { ++ if ((start < p) || (start >= (p + sz))) ++ pos += sprintf(buf + pos, ".."); ++ else ++ pos += sprintf(buf + pos, "%02x", *(c++)); ++ if (!(++start & 15)) { ++ buf[pos++] = '\n'; ++ nl = 1; ++ } else { ++ nl = 0; ++ if (!(start & 1)) ++ buf[pos++] = ' '; ++ if (!(start & 3)) ++ buf[pos++] = ' '; ++ } ++ } while (start & 15); ++ if (!nl) ++ buf[pos++] = '\n'; ++ buf[pos] = '\0'; ++ pr_info("%s", buf); ++ } ++} ++static inline void hexdump(const void *ptr, size_t sz) ++{ ++ unsigned long p = (unsigned long)ptr; ++ unsigned long start = p & ~(unsigned long)15; ++ unsigned long end = (p + sz + 15) & ~(unsigned long)15; ++ const unsigned char *c = ptr; ++ ++ __hexdump(start, end, p, sz, c); ++} ++ ++#include "qbman_sys.h" +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h +new file mode 100644 +index 0000000..4849212 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h +@@ -0,0 +1,307 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the ++ * driver. They are only included via qbman_private.h, which is itself a ++ * platform-independent file and is included by all the other driver source. ++ * ++ * qbman_sys_decl.h is included prior to all other declarations and logic, and ++ * it exists to provide compatibility with any linux interfaces our ++ * single-source driver code is dependent on (eg. kmalloc). Ie. this file ++ * provides linux compatibility. ++ * ++ * This qbman_sys.h header, on the other hand, is included *after* any common ++ * and platform-neutral declarations and logic in qbman_private.h, and exists to ++ * implement any platform-specific logic of the qbman driver itself. Ie. it is ++ * *not* to provide linux compatibility. ++ */ ++ ++/* Trace the 3 different classes of read/write access to QBMan. #undef as ++ * required. */ ++#undef QBMAN_CCSR_TRACE ++#undef QBMAN_CINH_TRACE ++#undef QBMAN_CENA_TRACE ++ ++static inline void word_copy(void *d, const void *s, unsigned int cnt) ++{ ++ uint32_t *dd = d; ++ const uint32_t *ss = s; ++ ++ while (cnt--) ++ *(dd++) = *(ss++); ++} ++ ++/* Currently, the CENA support code expects each 32-bit word to be written in ++ * host order, and these are converted to hardware (little-endian) order on ++ * command submission. However, 64-bit quantities are must be written (and read) ++ * as two 32-bit words with the least-significant word first, irrespective of ++ * host endianness. */ ++static inline void u64_to_le32_copy(void *d, const uint64_t *s, ++ unsigned int cnt) ++{ ++ uint32_t *dd = d; ++ const uint32_t *ss = (const uint32_t *)s; ++ ++ while (cnt--) { ++ /* TBD: the toolchain was choking on the use of 64-bit types up ++ * until recently so this works entirely with 32-bit variables. ++ * When 64-bit types become usable again, investigate better ++ * ways of doing this. 
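++ * As an illustration of the intended word ordering (no behavioural
++ * difference implied): the 64-bit value 0x0123456789abcdef must land in
++ * the command as the 32-bit word 0x89abcdef followed by 0x01234567,
++ * i.e. least-significant word first. Little-endian hosts already hold
++ * the two words in that order; big-endian hosts swap them below.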
*/ ++#if defined(__BIG_ENDIAN) ++ *(dd++) = ss[1]; ++ *(dd++) = ss[0]; ++ ss += 2; ++#else ++ *(dd++) = *(ss++); ++ *(dd++) = *(ss++); ++#endif ++ } ++} ++static inline void u64_from_le32_copy(uint64_t *d, const void *s, ++ unsigned int cnt) ++{ ++ const uint32_t *ss = s; ++ uint32_t *dd = (uint32_t *)d; ++ ++ while (cnt--) { ++#if defined(__BIG_ENDIAN) ++ dd[1] = *(ss++); ++ dd[0] = *(ss++); ++ dd += 2; ++#else ++ *(dd++) = *(ss++); ++ *(dd++) = *(ss++); ++#endif ++ } ++} ++ ++/* Convert a host-native 32bit value into little endian */ ++#if defined(__BIG_ENDIAN) ++static inline uint32_t make_le32(uint32_t val) ++{ ++ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | ++ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); ++} ++static inline uint32_t make_le24(uint32_t val) ++{ ++ return (((val & 0xff) << 16) | (val & 0xff00) | ++ ((val & 0xff0000) >> 16)); ++} ++#else ++#define make_le32(val) (val) ++#define make_le24(val) (val) ++#endif ++static inline void make_le32_n(uint32_t *val, unsigned int num) ++{ ++ while (num--) { ++ *val = make_le32(*val); ++ val++; ++ } ++} ++ ++ /******************/ ++ /* Portal access */ ++ /******************/ ++struct qbman_swp_sys { ++ /* On GPP, the sys support for qbman_swp is here. The CENA region isi ++ * not an mmap() of the real portal registers, but an allocated ++ * place-holder, because the actual writes/reads to/from the portal are ++ * marshalled from these allocated areas using QBMan's "MC access ++ * registers". CINH accesses are atomic so there's no need for a ++ * place-holder. */ ++ void *cena; ++ void __iomem *addr_cena; ++ void __iomem *addr_cinh; ++}; ++ ++/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal ++ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH) ++ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index ++ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) ++ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) ++ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) ++ */ ++ ++static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, ++ uint32_t val) ++{ ++ ++ writel_relaxed(val, s->addr_cinh + offset); ++#ifdef QBMAN_CINH_TRACE ++ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n", ++ s->addr_cinh, offset, val); ++#endif ++} ++ ++static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) ++{ ++ uint32_t reg = readl_relaxed(s->addr_cinh + offset); ++ ++#ifdef QBMAN_CINH_TRACE ++ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n", ++ s->addr_cinh, offset, reg); ++#endif ++ return reg; ++} ++ ++static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, ++ uint32_t offset) ++{ ++ void *shadow = s->cena + offset; ++ ++#ifdef QBMAN_CENA_TRACE ++ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n", ++ s->addr_cena, offset, shadow); ++#endif ++ BUG_ON(offset & 63); ++ dcbz(shadow); ++ return shadow; ++} ++ ++static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, ++ uint32_t offset, void *cmd) ++{ ++ const uint32_t *shadow = cmd; ++ int loop; ++ ++#ifdef QBMAN_CENA_TRACE ++ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n", ++ s->addr_cena, offset, shadow); ++ hexdump(cmd, 64); ++#endif ++ for (loop = 15; loop >= 1; loop--) ++ writel_relaxed(shadow[loop], s->addr_cena + ++ offset + loop * 4); ++ lwsync(); ++ writel_relaxed(shadow[0], s->addr_cena + offset); ++ dcbf(s->addr_cena + offset); ++} ++ ++static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) ++{ ++ uint32_t *shadow = s->cena + 
offset; ++ unsigned int loop; ++ ++#ifdef QBMAN_CENA_TRACE ++ pr_info("qbman_cena_read(%p:0x%03x) %p\n", ++ s->addr_cena, offset, shadow); ++#endif ++ ++ for (loop = 0; loop < 16; loop++) ++ shadow[loop] = readl_relaxed(s->addr_cena + offset ++ + loop * 4); ++#ifdef QBMAN_CENA_TRACE ++ hexdump(shadow, 64); ++#endif ++ return shadow; ++} ++ ++static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, ++ uint32_t offset) ++{ ++ dcivac(s->addr_cena + offset); ++ prefetch_for_load(s->addr_cena + offset); ++} ++ ++ /******************/ ++ /* Portal support */ ++ /******************/ ++ ++/* The SWP_CFG portal register is special, in that it is used by the ++ * platform-specific code rather than the platform-independent code in ++ * qbman_portal.c. So use of it is declared locally here. */ ++#define QBMAN_CINH_SWP_CFG 0xd00 ++ ++/* For MC portal use, we always configure with ++ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) ++ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0) ++ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) ++ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) ++ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3) ++ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE) ++ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) ++ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0) ++ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) ++ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0) ++ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE) ++ */ ++static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, ++ uint8_t est, uint8_t rpm, uint8_t dcm, ++ uint8_t epm, int sd, int sp, int se, ++ int dp, int de, int ep) ++{ ++ uint32_t reg; ++ ++ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | ++ e32_uint8_t(16, 3, est) | e32_uint8_t(12, 2, rpm) | ++ e32_uint8_t(10, 2, dcm) | e32_uint8_t(8, 2, epm) | ++ e32_int(5, 1, sd) | e32_int(4, 1, sp) | e32_int(3, 1, se) | ++ e32_int(2, 1, dp) | e32_int(1, 1, de) | e32_int(0, 1, ep) | ++ e32_uint8_t(14, 1, wn); ++ return reg; ++} ++ ++static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, ++ const struct qbman_swp_desc *d, ++ uint8_t dqrr_size) ++{ ++ uint32_t reg; ++ ++ s->addr_cena = d->cena_bar; ++ s->addr_cinh = d->cinh_bar; ++ s->cena = (void *)get_zeroed_page(GFP_KERNEL); ++ if (!s->cena) { ++ pr_err("Could not allocate page for cena shadow\n"); ++ return -1; ++ } ++ ++#ifdef QBMAN_CHECKING ++ /* We should never be asked to initialise for a portal that isn't in ++ * the power-on state. (Ie. don't forget to reset portals when they are ++ * decommissioned!) ++ */ ++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); ++ BUG_ON(reg); ++#endif ++ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0); ++ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); ++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); ++ if (!reg) { ++ pr_err("The portal is not enabled!\n"); ++ kfree(s->cena); ++ return -1; ++ } ++ return 0; ++} ++ ++static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) ++{ ++ free_page((unsigned long)s->cena); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h +new file mode 100644 +index 0000000..5b3a224 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h +@@ -0,0 +1,86 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "fsl_qbman_base.h" ++ ++/* The platform-independent code shouldn't need endianness, except for ++ * weird/fast-path cases like qbman_result_has_token(), which needs to ++ * perform a passive and endianness-specific test on a read-only data structure ++ * very quickly. It's an exception, and this symbol is used for that case. 
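++ * As an illustrative sketch only (the actual check lives in the portal
++ * code): with the token stored in the highest-addressed byte of its
++ * 32-bit word in the dequeue entry, a shift-and-mask of the form
++ *   ((word >> DQRR_TOK_OFFSET) & 0xff) == expected_token
++ * reads that same byte on a little-endian host (offset 24 below) and on
++ * a big-endian host (offset 0 below).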
*/ ++#if defined(__BIG_ENDIAN) ++#define DQRR_TOK_OFFSET 0 ++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 ++#define SCN_STATE_OFFSET_IN_MEM 8 ++#define SCN_RID_OFFSET_IN_MEM 8 ++#else ++#define DQRR_TOK_OFFSET 24 ++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 ++#define SCN_STATE_OFFSET_IN_MEM 16 ++#define SCN_RID_OFFSET_IN_MEM 0 ++#endif ++ ++/* Similarly-named functions */ ++#define upper32(a) upper_32_bits(a) ++#define lower32(a) lower_32_bits(a) ++ ++ /****************/ ++ /* arch assists */ ++ /****************/ ++ ++#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } ++#define lwsync() { asm volatile("dmb st" : : : "memory"); } ++#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } ++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } ++static inline void prefetch_for_load(void *p) ++{ ++ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); ++} ++static inline void prefetch_for_store(void *p) ++{ ++ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_test.c b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c +new file mode 100644 +index 0000000..28396e7 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c +@@ -0,0 +1,664 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include ++ ++#include "qbman_private.h" ++#include "fsl_qbman_portal.h" ++#include "qbman_debug.h" ++#include "../../include/fsl_dpaa2_fd.h" ++ ++#define QBMAN_SWP_CENA_BASE 0x818000000 ++#define QBMAN_SWP_CINH_BASE 0x81c000000 ++ ++#define QBMAN_PORTAL_IDX 2 ++#define QBMAN_TEST_FQID 19 ++#define QBMAN_TEST_BPID 23 ++#define QBMAN_USE_QD ++#ifdef QBMAN_USE_QD ++#define QBMAN_TEST_QDID 1 ++#endif ++#define QBMAN_TEST_LFQID 0xf00010 ++ ++#define NUM_EQ_FRAME 10 ++#define NUM_DQ_FRAME 10 ++#define NUM_DQ_IN_DQRR 5 ++#define NUM_DQ_IN_MEM (NUM_DQ_FRAME - NUM_DQ_IN_DQRR) ++ ++static struct qbman_swp *swp; ++static struct qbman_eq_desc eqdesc; ++static struct qbman_pull_desc pulldesc; ++static struct qbman_release_desc releasedesc; ++static struct qbman_eq_response eq_storage[1]; ++static struct dpaa2_dq dq_storage[NUM_DQ_IN_MEM] __aligned(64); ++static dma_addr_t eq_storage_phys; ++static dma_addr_t dq_storage_phys; ++ ++/* FQ ctx attribute values for the test code. */ ++#define FQCTX_HI 0xabbaf00d ++#define FQCTX_LO 0x98765432 ++#define FQ_VFQID 0x123456 ++ ++/* Sample frame descriptor */ ++static struct qbman_fd_simple fd = { ++ .addr_lo = 0xbabaf33d, ++ .addr_hi = 0x01234567, ++ .len = 0x7777, ++ .frc = 0xdeadbeef, ++ .flc_lo = 0xcafecafe, ++ .flc_hi = 0xbeadabba ++}; ++ ++static void fd_inc(struct qbman_fd_simple *_fd) ++{ ++ _fd->addr_lo += _fd->len; ++ _fd->flc_lo += 0x100; ++ _fd->frc += 0x10; ++} ++ ++static int fd_cmp(struct qbman_fd *fda, struct qbman_fd *fdb) ++{ ++ int i; ++ ++ for (i = 0; i < 8; i++) ++ if (fda->words[i] - fdb->words[i]) ++ return 1; ++ return 0; ++} ++ ++struct qbman_fd fd_eq[NUM_EQ_FRAME]; ++struct qbman_fd fd_dq[NUM_DQ_FRAME]; ++ ++/* "Buffers" to be released (and storage for buffers to be acquired) */ ++static uint64_t rbufs[320]; ++static uint64_t abufs[320]; ++ ++static void do_enqueue(struct qbman_swp *swp) ++{ ++ int i, j, ret; ++ ++#ifdef QBMAN_USE_QD ++ pr_info("*****QBMan_test: Enqueue %d frames to QD %d\n", ++ NUM_EQ_FRAME, QBMAN_TEST_QDID); ++#else ++ pr_info("*****QBMan_test: Enqueue %d frames to FQ %d\n", ++ NUM_EQ_FRAME, QBMAN_TEST_FQID); ++#endif ++ for (i = 0; i < NUM_EQ_FRAME; i++) { ++ /*********************************/ ++ /* Prepare a enqueue descriptor */ ++ /*********************************/ ++ memset(eq_storage, 0, sizeof(eq_storage)); ++ eq_storage_phys = virt_to_phys(eq_storage); ++ qbman_eq_desc_clear(&eqdesc); ++ qbman_eq_desc_set_no_orp(&eqdesc, 0); ++ qbman_eq_desc_set_response(&eqdesc, eq_storage_phys, 0); ++ qbman_eq_desc_set_token(&eqdesc, 0x99); ++#ifdef QBMAN_USE_QD ++ /**********************************/ ++ /* Prepare a Queueing Destination */ ++ /**********************************/ ++ qbman_eq_desc_set_qd(&eqdesc, QBMAN_TEST_QDID, 0, 3); ++#else ++ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_FQID); ++#endif ++ ++ /******************/ ++ /* Try an enqueue */ ++ /******************/ ++ ret = qbman_swp_enqueue(swp, &eqdesc, ++ (const struct qbman_fd *)&fd); ++ BUG_ON(ret); ++ for (j = 0; j < 8; j++) ++ fd_eq[i].words[j] = *((uint32_t *)&fd + j); ++ fd_inc(&fd); ++ } ++} ++ ++static void do_push_dequeue(struct qbman_swp *swp) ++{ ++ int i, j; ++ const struct dpaa2_dq *dq_storage1; ++ const struct qbman_fd *__fd; ++ int loopvar; ++ ++ pr_info("*****QBMan_test: Start push dequeue\n"); ++ for (i = 0; i < NUM_DQ_FRAME; i++) { ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ dq_storage1 = qbman_swp_dqrr_next(swp); ++ } while (!dq_storage1); ++ if (dq_storage1) { ++ __fd = (const struct 
qbman_fd *) ++ dpaa2_dq_fd(dq_storage1); ++ for (j = 0; j < 8; j++) ++ fd_dq[i].words[j] = __fd->words[j]; ++ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { ++ pr_info("enqueue FD is\n"); ++ hexdump(&fd_eq[i], 32); ++ pr_info("dequeue FD is\n"); ++ hexdump(&fd_dq[i], 32); ++ } ++ qbman_swp_dqrr_consume(swp, dq_storage1); ++ } else { ++ pr_info("The push dequeue fails\n"); ++ } ++ } ++} ++ ++static void do_pull_dequeue(struct qbman_swp *swp) ++{ ++ int i, j, ret; ++ const struct dpaa2_dq *dq_storage1; ++ const struct qbman_fd *__fd; ++ int loopvar; ++ ++ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in DQRR\n", ++ NUM_DQ_IN_DQRR); ++ for (i = 0; i < NUM_DQ_IN_DQRR; i++) { ++ qbman_pull_desc_clear(&pulldesc); ++ qbman_pull_desc_set_storage(&pulldesc, NULL, 0, 0); ++ qbman_pull_desc_set_numframes(&pulldesc, 1); ++ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); ++ ++ ret = qbman_swp_pull(swp, &pulldesc); ++ BUG_ON(ret); ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ dq_storage1 = qbman_swp_dqrr_next(swp); ++ } while (!dq_storage1); ++ ++ if (dq_storage1) { ++ __fd = (const struct qbman_fd *) ++ dpaa2_dq_fd(dq_storage1); ++ for (j = 0; j < 8; j++) ++ fd_dq[i].words[j] = __fd->words[j]; ++ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { ++ pr_info("enqueue FD is\n"); ++ hexdump(&fd_eq[i], 32); ++ pr_info("dequeue FD is\n"); ++ hexdump(&fd_dq[i], 32); ++ } ++ qbman_swp_dqrr_consume(swp, dq_storage1); ++ } else { ++ pr_info("Dequeue with dq entry in DQRR fails\n"); ++ } ++ } ++ ++ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in memory\n", ++ NUM_DQ_IN_MEM); ++ for (i = 0; i < NUM_DQ_IN_MEM; i++) { ++ dq_storage_phys = virt_to_phys(&dq_storage[i]); ++ qbman_pull_desc_clear(&pulldesc); ++ qbman_pull_desc_set_storage(&pulldesc, &dq_storage[i], ++ dq_storage_phys, 1); ++ qbman_pull_desc_set_numframes(&pulldesc, 1); ++ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); ++ ret = qbman_swp_pull(swp, &pulldesc); ++ BUG_ON(ret); ++ ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ ret = qbman_result_has_new_result(swp, ++ &dq_storage[i]); ++ } while (!ret); ++ ++ if (ret) { ++ for (j = 0; j < 8; j++) ++ fd_dq[i + NUM_DQ_IN_DQRR].words[j] = ++ dq_storage[i].dont_manipulate_directly[j + 8]; ++ j = i + NUM_DQ_IN_DQRR; ++ if (fd_cmp(&fd_eq[j], &fd_dq[j])) { ++ pr_info("enqueue FD is\n"); ++ hexdump(&fd_eq[i + NUM_DQ_IN_DQRR], 32); ++ pr_info("dequeue FD is\n"); ++ hexdump(&fd_dq[i + NUM_DQ_IN_DQRR], 32); ++ hexdump(&dq_storage[i], 64); ++ } ++ } else { ++ pr_info("Dequeue with dq entry in memory fails\n"); ++ } ++ } ++} ++ ++static void release_buffer(struct qbman_swp *swp, unsigned int num) ++{ ++ int ret; ++ unsigned int i, j; ++ ++ qbman_release_desc_clear(&releasedesc); ++ qbman_release_desc_set_bpid(&releasedesc, QBMAN_TEST_BPID); ++ pr_info("*****QBMan_test: Release %d buffers to BP %d\n", ++ num, QBMAN_TEST_BPID); ++ for (i = 0; i < (num / 7 + 1); i++) { ++ j = ((num - i * 7) > 7) ? 7 : (num - i * 7); ++ ret = qbman_swp_release(swp, &releasedesc, &rbufs[i * 7], j); ++ BUG_ON(ret); ++ } ++} ++ ++static void acquire_buffer(struct qbman_swp *swp, unsigned int num) ++{ ++ int ret; ++ unsigned int i, j; ++ ++ pr_info("*****QBMan_test: Acquire %d buffers from BP %d\n", ++ num, QBMAN_TEST_BPID); ++ ++ for (i = 0; i < (num / 7 + 1); i++) { ++ j = ((num - i * 7) > 7) ? 
7 : (num - i * 7); ++ ret = qbman_swp_acquire(swp, QBMAN_TEST_BPID, &abufs[i * 7], j); ++ BUG_ON(ret != j); ++ } ++} ++ ++static void buffer_pool_test(struct qbman_swp *swp) ++{ ++ struct qbman_attr info; ++ struct dpaa2_dq *bpscn_message; ++ dma_addr_t bpscn_phys; ++ uint64_t bpscn_ctx; ++ uint64_t ctx = 0xbbccddaadeadbeefull; ++ int i, ret; ++ uint32_t hw_targ; ++ ++ pr_info("*****QBMan_test: test buffer pool management\n"); ++ ret = qbman_bp_query(swp, QBMAN_TEST_BPID, &info); ++ qbman_bp_attr_get_bpscn_addr(&info, &bpscn_phys); ++ pr_info("The bpscn is %llx, info_phys is %llx\n", bpscn_phys, ++ virt_to_phys(&info)); ++ bpscn_message = phys_to_virt(bpscn_phys); ++ ++ for (i = 0; i < 320; i++) ++ rbufs[i] = 0xf00dabba01234567ull + i * 0x40; ++ ++ release_buffer(swp, 320); ++ ++ pr_info("QBMan_test: query the buffer pool\n"); ++ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); ++ hexdump(&info, 64); ++ qbman_bp_attr_get_hw_targ(&info, &hw_targ); ++ pr_info("hw_targ is %d\n", hw_targ); ++ ++ /* Acquire buffers to trigger BPSCN */ ++ acquire_buffer(swp, 300); ++ /* BPSCN should be written to the memory */ ++ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); ++ hexdump(&info, 64); ++ hexdump(bpscn_message, 64); ++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); ++ /* There should be free buffers in the pool */ ++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); ++ /* Buffer pool is depleted */ ++ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); ++ /* The ctx should match */ ++ bpscn_ctx = qbman_result_bpscn_ctx(bpscn_message); ++ pr_info("BPSCN test: ctx %llx, bpscn_ctx %llx\n", ctx, bpscn_ctx); ++ BUG_ON(ctx != bpscn_ctx); ++ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); ++ ++ /* Re-seed the buffer pool to trigger BPSCN */ ++ release_buffer(swp, 240); ++ /* BPSCN should be written to the memory */ ++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); ++ /* There should be free buffers in the pool */ ++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); ++ /* Buffer pool is not depleted */ ++ BUG_ON(qbman_result_bpscn_is_depleted(bpscn_message)); ++ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); ++ ++ acquire_buffer(swp, 260); ++ /* BPSCN should be written to the memory */ ++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); ++ /* There should be free buffers in the pool while BPSCN generated */ ++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); ++ /* Buffer pool is depletion */ ++ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); ++} ++ ++static void ceetm_test(struct qbman_swp *swp) ++{ ++ int i, j, ret; ++ ++ qbman_eq_desc_clear(&eqdesc); ++ qbman_eq_desc_set_no_orp(&eqdesc, 0); ++ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_LFQID); ++ pr_info("*****QBMan_test: Enqueue to LFQID %x\n", ++ QBMAN_TEST_LFQID); ++ for (i = 0; i < NUM_EQ_FRAME; i++) { ++ ret = qbman_swp_enqueue(swp, &eqdesc, ++ (const struct qbman_fd *)&fd); ++ BUG_ON(ret); ++ for (j = 0; j < 8; j++) ++ fd_eq[i].words[j] = *((uint32_t *)&fd + j); ++ fd_inc(&fd); ++ } ++} ++ ++int qbman_test(void) ++{ ++ struct qbman_swp_desc pd; ++ uint32_t reg; ++ ++ pd.cena_bar = ioremap_cache_ns(QBMAN_SWP_CENA_BASE + ++ QBMAN_PORTAL_IDX * 0x10000, 0x10000); ++ pd.cinh_bar = ioremap(QBMAN_SWP_CINH_BASE + ++ QBMAN_PORTAL_IDX * 0x10000, 0x10000); ++ ++ /* Detect whether the mc image is the test image with GPP setup */ ++ reg = readl_relaxed(pd.cena_bar + 0x4); ++ if (reg != 0xdeadbeef) { ++ pr_err("The MC image doesn't have GPP test setup, stop!\n"); ++ iounmap(pd.cena_bar); ++ iounmap(pd.cinh_bar); ++ return 
-1; ++ } ++ ++ pr_info("*****QBMan_test: Init QBMan SWP %d\n", QBMAN_PORTAL_IDX); ++ swp = qbman_swp_init(&pd); ++ if (!swp) { ++ iounmap(pd.cena_bar); ++ iounmap(pd.cinh_bar); ++ return -1; ++ } ++ ++ /*******************/ ++ /* Enqueue frames */ ++ /*******************/ ++ do_enqueue(swp); ++ ++ /*******************/ ++ /* Do pull dequeue */ ++ /*******************/ ++ do_pull_dequeue(swp); ++ ++ /*******************/ ++ /* Enqueue frames */ ++ /*******************/ ++ qbman_swp_push_set(swp, 0, 1); ++ qbman_swp_fq_schedule(swp, QBMAN_TEST_FQID); ++ do_enqueue(swp); ++ ++ /*******************/ ++ /* Do push dequeue */ ++ /*******************/ ++ do_push_dequeue(swp); ++ ++ /**************************/ ++ /* Test buffer pool funcs */ ++ /**************************/ ++ buffer_pool_test(swp); ++ ++ /******************/ ++ /* CEETM test */ ++ /******************/ ++ ceetm_test(swp); ++ ++ qbman_swp_finish(swp); ++ pr_info("*****QBMan_test: Kernel test Passed\n"); ++ return 0; ++} ++ ++/* user-space test-case, definitions: ++ * ++ * 1 portal only, using portal index 3. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define QBMAN_TEST_US_SWP 3 /* portal index for user space */ ++ ++#define QBMAN_TEST_MAGIC 'q' ++struct qbman_test_swp_ioctl { ++ unsigned long portal1_cinh; ++ unsigned long portal1_cena; ++}; ++struct qbman_test_dma_ioctl { ++ unsigned long ptr; ++ uint64_t phys_addr; ++}; ++ ++struct qbman_test_priv { ++ int has_swp_map; ++ int has_dma_map; ++ unsigned long pgoff; ++}; ++ ++#define QBMAN_TEST_SWP_MAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x01, struct qbman_test_swp_ioctl) ++#define QBMAN_TEST_SWP_UNMAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x02, struct qbman_test_swp_ioctl) ++#define QBMAN_TEST_DMA_MAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x03, struct qbman_test_dma_ioctl) ++#define QBMAN_TEST_DMA_UNMAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x04, struct qbman_test_dma_ioctl) ++ ++#define TEST_PORTAL1_CENA_PGOFF ((QBMAN_SWP_CENA_BASE + QBMAN_TEST_US_SWP * \ ++ 0x10000) >> PAGE_SHIFT) ++#define TEST_PORTAL1_CINH_PGOFF ((QBMAN_SWP_CINH_BASE + QBMAN_TEST_US_SWP * \ ++ 0x10000) >> PAGE_SHIFT) ++ ++static int qbman_test_open(struct inode *inode, struct file *filp) ++{ ++ struct qbman_test_priv *priv; ++ ++ priv = kmalloc(sizeof(struct qbman_test_priv), GFP_KERNEL); ++ if (!priv) ++ return -EIO; ++ filp->private_data = priv; ++ priv->has_swp_map = 0; ++ priv->has_dma_map = 0; ++ priv->pgoff = 0; ++ return 0; ++} ++ ++static int qbman_test_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ int ret; ++ struct qbman_test_priv *priv = filp->private_data; ++ ++ BUG_ON(!priv); ++ ++ if (vma->vm_pgoff == TEST_PORTAL1_CINH_PGOFF) ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ else if (vma->vm_pgoff == TEST_PORTAL1_CENA_PGOFF) ++ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); ++ else if (vma->vm_pgoff == priv->pgoff) ++ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); ++ else { ++ pr_err("Damn, unrecognised pg_off!!\n"); ++ return -EINVAL; ++ } ++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot); ++ return ret; ++} ++ ++static long qbman_test_ioctl(struct file *fp, unsigned int cmd, ++ unsigned long arg) ++{ ++ void __user *a = (void __user *)arg; ++ unsigned long longret, populate; ++ int ret = 0; ++ struct qbman_test_priv *priv = fp->private_data; ++ ++ BUG_ON(!priv); ++ ++ switch (cmd) { ++ case QBMAN_TEST_SWP_MAP: ++ { ++ struct qbman_test_swp_ioctl params; ++ ++ if (priv->has_swp_map) 
++ return -EINVAL; ++ down_write(¤t->mm->mmap_sem); ++ /* Map portal1 CINH */ ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ TEST_PORTAL1_CINH_PGOFF, &populate); ++ if (longret & ~PAGE_MASK) { ++ ret = (int)longret; ++ goto out; ++ } ++ params.portal1_cinh = longret; ++ /* Map portal1 CENA */ ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ TEST_PORTAL1_CENA_PGOFF, &populate); ++ if (longret & ~PAGE_MASK) { ++ ret = (int)longret; ++ goto out; ++ } ++ params.portal1_cena = longret; ++ priv->has_swp_map = 1; ++out: ++ up_write(¤t->mm->mmap_sem); ++ if (!ret && copy_to_user(a, ¶ms, sizeof(params))) ++ return -EFAULT; ++ return ret; ++ } ++ case QBMAN_TEST_SWP_UNMAP: ++ { ++ struct qbman_test_swp_ioctl params; ++ ++ if (!priv->has_swp_map) ++ return -EINVAL; ++ ++ if (copy_from_user(¶ms, a, sizeof(params))) ++ return -EFAULT; ++ down_write(¤t->mm->mmap_sem); ++ do_munmap(current->mm, params.portal1_cena, 0x10000); ++ do_munmap(current->mm, params.portal1_cinh, 0x10000); ++ up_write(¤t->mm->mmap_sem); ++ priv->has_swp_map = 0; ++ return 0; ++ } ++ case QBMAN_TEST_DMA_MAP: ++ { ++ struct qbman_test_dma_ioctl params; ++ void *vaddr; ++ ++ if (priv->has_dma_map) ++ return -EINVAL; ++ vaddr = (void *)get_zeroed_page(GFP_KERNEL); ++ params.phys_addr = virt_to_phys(vaddr); ++ priv->pgoff = (unsigned long)params.phys_addr >> PAGE_SHIFT; ++ down_write(¤t->mm->mmap_sem); ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, PAGE_SIZE, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ priv->pgoff, &populate); ++ if (longret & ~PAGE_MASK) { ++ ret = (int)longret; ++ return ret; ++ } ++ params.ptr = longret; ++ priv->has_dma_map = 1; ++ up_write(¤t->mm->mmap_sem); ++ if (copy_to_user(a, ¶ms, sizeof(params))) ++ return -EFAULT; ++ return 0; ++ } ++ case QBMAN_TEST_DMA_UNMAP: ++ { ++ struct qbman_test_dma_ioctl params; ++ ++ if (!priv->has_dma_map) ++ return -EINVAL; ++ if (copy_from_user(¶ms, a, sizeof(params))) ++ return -EFAULT; ++ down_write(¤t->mm->mmap_sem); ++ do_munmap(current->mm, params.ptr, PAGE_SIZE); ++ up_write(¤t->mm->mmap_sem); ++ free_page((unsigned long)phys_to_virt(params.phys_addr)); ++ priv->has_dma_map = 0; ++ return 0; ++ } ++ default: ++ pr_err("Bad ioctl cmd!\n"); ++ } ++ return -EINVAL; ++} ++ ++static const struct file_operations qbman_fops = { ++ .open = qbman_test_open, ++ .mmap = qbman_test_mmap, ++ .unlocked_ioctl = qbman_test_ioctl ++}; ++ ++static struct miscdevice qbman_miscdev = { ++ .name = "qbman-test", ++ .fops = &qbman_fops, ++ .minor = MISC_DYNAMIC_MINOR, ++}; ++ ++static int qbman_miscdev_init; ++ ++static int test_init(void) ++{ ++ int ret = qbman_test(); ++ ++ if (!ret) { ++ /* MC image supports the test cases, so instantiate the ++ * character devic that the user-space test case will use to do ++ * its memory mappings. 
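++ * As an illustrative sketch only (the user-space tool itself is not
++ * part of this patch), a test program would drive the ioctls defined
++ * above along these lines:
++ *   int fd = open("/dev/qbman-test", O_RDWR);
++ *   struct qbman_test_swp_ioctl swp;
++ *   struct qbman_test_dma_ioctl dma;
++ *   ioctl(fd, QBMAN_TEST_SWP_MAP, &swp);  (portal CINH/CENA mappings)
++ *   ioctl(fd, QBMAN_TEST_DMA_MAP, &dma);  (one page of DMA-able memory)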
*/ ++ ret = misc_register(&qbman_miscdev); ++ if (ret) { ++ pr_err("qbman-test: failed to register misc device\n"); ++ return ret; ++ } ++ pr_info("qbman-test: misc device registered!\n"); ++ qbman_miscdev_init = 1; ++ } ++ return 0; ++} ++ ++static void test_exit(void) ++{ ++ if (qbman_miscdev_init) { ++ misc_deregister(&qbman_miscdev); ++ qbman_miscdev_init = 0; ++ } ++} ++ ++module_init(test_init); ++module_exit(test_exit); +diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h +new file mode 100644 +index 0000000..c9b52dd +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h +@@ -0,0 +1,56 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPMCP_CMD_H ++#define _FSL_DPMCP_CMD_H ++ ++/* Minimal supported DPMCP Version */ ++#define DPMCP_MIN_VER_MAJOR 3 ++#define DPMCP_MIN_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPMCP_CMDID_CLOSE 0x800 ++#define DPMCP_CMDID_OPEN 0x80b ++#define DPMCP_CMDID_CREATE 0x90b ++#define DPMCP_CMDID_DESTROY 0x900 ++ ++#define DPMCP_CMDID_GET_ATTR 0x004 ++#define DPMCP_CMDID_RESET 0x005 ++ ++#define DPMCP_CMDID_SET_IRQ 0x010 ++#define DPMCP_CMDID_GET_IRQ 0x011 ++#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPMCP_CMDID_SET_IRQ_MASK 0x014 ++#define DPMCP_CMDID_GET_IRQ_MASK 0x015 ++#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 ++ ++#endif /* _FSL_DPMCP_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c +new file mode 100644 +index 0000000..e23592a +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmcp.c +@@ -0,0 +1,318 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "dpmcp.h" ++#include "dpmcp-cmd.h" ++ ++int dpmcp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmcp_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd.params[0] |= mc_enc(0, 32, dpmcp_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return err; ++} ++ ++int dpmcp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmcp_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ cmd.params[0] |= mc_enc(0, 32, cfg->portal_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpmcp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmcp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 8, irq_index); ++ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmcp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); ++ *type = (int)mc_dec(cmd.params[2], 32, 32); ++ return 0; ++} ++ ++int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct 
mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 8, en); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); ++ return 0; ++} ++ ++int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, mask); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpmcp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmcp_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ attr->id = (int)mc_dec(cmd.params[0], 32, 32); ++ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); ++ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); ++ return 0; ++} +diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h +new file mode 100644 +index 0000000..e434a24 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmcp.h +@@ -0,0 +1,323 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
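For illustration only, not part of the patch: every wrapper above packs request fields into cmd.params[] with mc_enc(bit_offset, bit_width, value) and unpacks responses with mc_dec(). The helpers below are a hedged sketch of that calling convention only; the real mc_enc()/mc_dec() come from the fsl-mc headers and may differ, fixed-width types are assumed to come from <linux/types.h>, and widths of 64 bits are not handled here.

/*
 * Illustrative stand-ins for the packing helpers used by the wrappers
 * above; assumes width < 64.  Not the real implementation.
 */
static inline uint64_t example_mc_enc(int lsoffset, int width, uint64_t val)
{
        /* mask the value to 'width' bits, then shift it into place */
        return (val & ((1ULL << width) - 1)) << lsoffset;
}

static inline uint64_t example_mc_dec(uint64_t field, int lsoffset, int width)
{
        /* shift the field down and mask off the requested width */
        return (field >> lsoffset) & ((1ULL << width) - 1);
}

/* e.g. dpmcp_set_irq() above places irq_index in bits 32..39 of params[0]: */
/* cmd.params[0] |= example_mc_enc(32, 8, irq_index); */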
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMCP_H ++#define __FSL_DPMCP_H ++ ++/* Data Path Management Command Portal API ++ * Contains initialization APIs and runtime control APIs for DPMCP ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpmcp_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpmcp_id: DPMCP unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmcp_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmcp_id, ++ uint16_t *token); ++ ++/* Get portal ID from pool */ ++#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) ++ ++/** ++ * dpmcp_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
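For illustration only, not part of the patch: a minimal sketch of the open/use/close session pattern described above, assuming an already-initialized struct fsl_mc_io and an existing DPMCP object id (both hypothetical here).

/* Hedged sketch: open a control session, reset the object, close again. */
static int example_dpmcp_reset_by_id(struct fsl_mc_io *mc_io, int dpmcp_id)
{
        uint16_t token;
        int err;

        err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
        if (err)
                return err;

        err = dpmcp_reset(mc_io, 0, token);

        /* always release the session; keep the first error, if any */
        (void)dpmcp_close(mc_io, 0, token);
        return err;
}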
++ */ ++int dpmcp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpmcp_cfg - Structure representing DPMCP configuration ++ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID ++ * from pool ++ */ ++struct dpmcp_cfg { ++ int portal_id; ++}; ++ ++/** ++ * dpmcp_create() - Create the DPMCP object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPMCP object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpmcp_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmcp_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpmcp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/* IRQ */ ++/* IRQ Index */ ++#define DPMCP_IRQ_INDEX 0 ++/* irq event - Indicates that the link state changed */ ++#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 ++ ++/** ++ * struct dpmcp_irq_cfg - IRQ configuration ++ * @paddr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpmcp_irq_cfg { ++ uint64_t paddr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmcp_irq_cfg *irq_cfg); ++ ++/** ++ * dpmcp_get_irq() - Get IRQ information from the DPMCP. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
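For illustration only, not part of the patch: a hedged sketch of programming the message interrupt described by struct dpmcp_irq_cfg above. The msi_paddr, msi_data and linux_irq values are placeholders that a real caller would obtain from its MSI setup.

static int example_dpmcp_program_irq(struct fsl_mc_io *mc_io, uint16_t token,
                                     uint64_t msi_paddr, uint32_t msi_data,
                                     int linux_irq)
{
        struct dpmcp_irq_cfg irq_cfg = {
                .paddr = msi_paddr,     /* address the MC writes to signal the IRQ */
                .val = msi_data,        /* value written to that address */
                .irq_num = linux_irq,   /* user-defined number for this IRQ */
        };

        return dpmcp_set_irq(mc_io, 0, token, DPMCP_IRQ_INDEX, &irq_cfg);
}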
++ */ ++int dpmcp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmcp_irq_cfg *irq_cfg); ++ ++/** ++ * dpmcp_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpmcp_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpmcp_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpmcp_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpmcp_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. 
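For illustration only, not part of the patch: the mask and enable calls above are typically used together. The sketch below unmasks the CMD_DONE cause and then enables interrupt index 0; the mask-before-enable ordering mirrors enable_dprc_irqs() in dprc-driver.c later in this patch, and applying it to DPMCP here is only an assumption.

static int example_dpmcp_enable_cmd_done_irq(struct fsl_mc_io *mc_io,
                                             uint16_t token)
{
        int err;

        /* consider only the command-completion cause when asserting the IRQ */
        err = dpmcp_set_irq_mask(mc_io, 0, token, DPMCP_IRQ_INDEX,
                                 DPMCP_IRQ_EVENT_CMD_DONE);
        if (err)
                return err;

        /* turn on generation of the interrupt itself */
        return dpmcp_set_irq_enable(mc_io, 0, token, DPMCP_IRQ_INDEX, 1);
}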
++ */ ++int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * struct dpmcp_attr - Structure representing DPMCP attributes ++ * @id: DPMCP object ID ++ * @version: DPMCP version ++ */ ++struct dpmcp_attr { ++ int id; ++ /** ++ * struct version - Structure representing DPMCP version ++ * @major: DPMCP major version ++ * @minor: DPMCP minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dpmcp_get_attributes - Retrieve DPMCP attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmcp_attr *attr); ++ ++#endif /* __FSL_DPMCP_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h +new file mode 100644 +index 0000000..ba8cfa9 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h +@@ -0,0 +1,47 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
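For illustration only, not part of the patch: a small sketch that uses dpmcp_get_attributes() above to report an open object's id and API version through out-parameters.

static int example_dpmcp_query(struct fsl_mc_io *mc_io, uint16_t token,
                               int *id, uint16_t *major, uint16_t *minor)
{
        struct dpmcp_attr attr;
        int err;

        err = dpmcp_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        *id = attr.id;
        *major = attr.version.major;
        *minor = attr.version.minor;
        return 0;
}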
++ */ ++ ++/*************************************************************************//* ++ dpmng-cmd.h ++ ++ defines portal commands ++ ++ *//**************************************************************************/ ++ ++#ifndef __FSL_DPMNG_CMD_H ++#define __FSL_DPMNG_CMD_H ++ ++/* Command IDs */ ++#define DPMNG_CMDID_GET_CONT_ID 0x830 ++#define DPMNG_CMDID_GET_VERSION 0x831 ++ ++#endif /* __FSL_DPMNG_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c +new file mode 100644 +index 0000000..387390b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmng.c +@@ -0,0 +1,85 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions are met: ++* * Redistributions of source code must retain the above copyright ++* notice, this list of conditions and the following disclaimer. ++* * Redistributions in binary form must reproduce the above copyright ++* notice, this list of conditions and the following disclaimer in the ++* documentation and/or other materials provided with the distribution. ++* * Neither the name of the above-listed copyright holders nor the ++* names of any contributors may be used to endorse or promote products ++* derived from this software without specific prior written permission. ++* ++* ++* ALTERNATIVELY, this software may be distributed under the terms of the ++* GNU General Public License ("GPL") as published by the Free Software ++* Foundation, either version 2 of that License or (at your option) any ++* later version. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++* POSSIBILITY OF SUCH DAMAGE. 
++*/ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpmng.h" ++#include "dpmng-cmd.h" ++ ++int mc_get_version(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ struct mc_version *mc_ver_info) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION, ++ cmd_flags, ++ 0); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32); ++ mc_ver_info->major = mc_dec(cmd.params[0], 32, 32); ++ mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(mc_get_version); ++ ++int dpmng_get_container_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int *container_id) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, ++ cmd_flags, ++ 0); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *container_id = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++ +diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h +new file mode 100644 +index 0000000..9b854fa +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h +@@ -0,0 +1,87 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
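For illustration only, not part of the patch: mc_get_version() and dpmng_get_container_id() above are the two management commands exported by dpmng.c; a hedged sketch of calling them together follows. struct mc_version comes from the dpmng.h header included above, and the %u format assumes its fields are unsigned, which this hunk does not show.

static int example_query_mc(struct fsl_mc_io *mc_io)
{
        struct mc_version ver;
        int container_id;
        int err;

        err = mc_get_version(mc_io, 0, &ver);
        if (err)
                return err;

        err = dpmng_get_container_id(mc_io, 0, &container_id);
        if (err)
                return err;

        pr_info("MC firmware %u.%u.%u, root container %d\n",
                ver.major, ver.minor, ver.revision, container_id);
        return 0;
}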
++ */ ++ ++/*************************************************************************//* ++ dprc-cmd.h ++ ++ defines dprc portal commands ++ ++ *//**************************************************************************/ ++ ++#ifndef _FSL_DPRC_CMD_H ++#define _FSL_DPRC_CMD_H ++ ++/* Minimal supported DPRC Version */ ++#define DPRC_MIN_VER_MAJOR 5 ++#define DPRC_MIN_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPRC_CMDID_CLOSE 0x800 ++#define DPRC_CMDID_OPEN 0x805 ++#define DPRC_CMDID_CREATE 0x905 ++ ++#define DPRC_CMDID_GET_ATTR 0x004 ++#define DPRC_CMDID_RESET_CONT 0x005 ++ ++#define DPRC_CMDID_SET_IRQ 0x010 ++#define DPRC_CMDID_GET_IRQ 0x011 ++#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPRC_CMDID_SET_IRQ_MASK 0x014 ++#define DPRC_CMDID_GET_IRQ_MASK 0x015 ++#define DPRC_CMDID_GET_IRQ_STATUS 0x016 ++#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPRC_CMDID_CREATE_CONT 0x151 ++#define DPRC_CMDID_DESTROY_CONT 0x152 ++#define DPRC_CMDID_SET_RES_QUOTA 0x155 ++#define DPRC_CMDID_GET_RES_QUOTA 0x156 ++#define DPRC_CMDID_ASSIGN 0x157 ++#define DPRC_CMDID_UNASSIGN 0x158 ++#define DPRC_CMDID_GET_OBJ_COUNT 0x159 ++#define DPRC_CMDID_GET_OBJ 0x15A ++#define DPRC_CMDID_GET_RES_COUNT 0x15B ++#define DPRC_CMDID_GET_RES_IDS 0x15C ++#define DPRC_CMDID_GET_OBJ_REG 0x15E ++#define DPRC_CMDID_SET_OBJ_IRQ 0x15F ++#define DPRC_CMDID_GET_OBJ_IRQ 0x160 ++#define DPRC_CMDID_SET_OBJ_LABEL 0x161 ++#define DPRC_CMDID_GET_OBJ_DESC 0x162 ++ ++#define DPRC_CMDID_CONNECT 0x167 ++#define DPRC_CMDID_DISCONNECT 0x168 ++#define DPRC_CMDID_GET_POOL 0x169 ++#define DPRC_CMDID_GET_POOL_COUNT 0x16A ++ ++#define DPRC_CMDID_GET_CONNECTION 0x16C ++ ++#endif /* _FSL_DPRC_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c +new file mode 100644 +index 0000000..f8d8cbe +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c +@@ -0,0 +1,1084 @@ ++/* ++ * Freescale data path resource container (DPRC) driver ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include "../include/mc-sys.h" ++#include ++#include ++#include ++#include "dprc-cmd.h" ++#include "dpmcp.h" ++ ++struct dprc_child_objs { ++ int child_count; ++ struct dprc_obj_desc *child_array; ++}; ++ ++static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data) ++{ ++ int i; ++ struct dprc_child_objs *objs; ++ struct fsl_mc_device *mc_dev; ++ ++ WARN_ON(!dev); ++ WARN_ON(!data); ++ mc_dev = to_fsl_mc_device(dev); ++ objs = data; ++ ++ for (i = 0; i < objs->child_count; i++) { ++ struct dprc_obj_desc *obj_desc = &objs->child_array[i]; ++ ++ if (strlen(obj_desc->type) != 0 && ++ FSL_MC_DEVICE_MATCH(mc_dev, obj_desc)) ++ break; ++ } ++ ++ if (i == objs->child_count) ++ fsl_mc_device_remove(mc_dev); ++ ++ return 0; ++} ++ ++static int __fsl_mc_device_remove(struct device *dev, void *data) ++{ ++ WARN_ON(!dev); ++ WARN_ON(data); ++ fsl_mc_device_remove(to_fsl_mc_device(dev)); ++ return 0; ++} ++ ++/** ++ * dprc_remove_devices - Removes devices for objects removed from a DPRC ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @obj_desc_array: array of object descriptors for child objects currently ++ * present in the DPRC in the MC. 
++ * @num_child_objects_in_mc: number of entries in obj_desc_array ++ * ++ * Synchronizes the state of the Linux bus driver with the actual state of ++ * the MC by removing devices that represent MC objects that have ++ * been dynamically removed in the physical DPRC. ++ */ ++static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev, ++ struct dprc_obj_desc *obj_desc_array, ++ int num_child_objects_in_mc) ++{ ++ if (num_child_objects_in_mc != 0) { ++ /* ++ * Remove child objects that are in the DPRC in Linux, ++ * but not in the MC: ++ */ ++ struct dprc_child_objs objs; ++ ++ objs.child_count = num_child_objects_in_mc; ++ objs.child_array = obj_desc_array; ++ device_for_each_child(&mc_bus_dev->dev, &objs, ++ __fsl_mc_device_remove_if_not_in_mc); ++ } else { ++ /* ++ * There are no child objects for this DPRC in the MC. ++ * So, remove all the child devices from Linux: ++ */ ++ device_for_each_child(&mc_bus_dev->dev, NULL, ++ __fsl_mc_device_remove); ++ } ++} ++ ++static int __fsl_mc_device_match(struct device *dev, void *data) ++{ ++ struct dprc_obj_desc *obj_desc = data; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ ++ return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc); ++} ++ ++static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc ++ *obj_desc, ++ struct fsl_mc_device ++ *mc_bus_dev) ++{ ++ struct device *dev; ++ ++ dev = device_find_child(&mc_bus_dev->dev, obj_desc, ++ __fsl_mc_device_match); ++ ++ return dev ? to_fsl_mc_device(dev) : NULL; ++} ++ ++/** ++ * check_plugged_state_change - Check change in an MC object's plugged state ++ * ++ * @mc_dev: pointer to the fsl-mc device for a given MC object ++ * @obj_desc: pointer to the MC object's descriptor in the MC ++ * ++ * If the plugged state has changed from unplugged to plugged, the fsl-mc ++ * device is bound to the corresponding device driver. ++ * If the plugged state has changed from plugged to unplugged, the fsl-mc ++ * device is unbound from the corresponding device driver. ++ */ ++static void check_plugged_state_change(struct fsl_mc_device *mc_dev, ++ struct dprc_obj_desc *obj_desc) ++{ ++ int error; ++ uint32_t plugged_flag_at_mc = ++ (obj_desc->state & DPRC_OBJ_STATE_PLUGGED); ++ ++ if (plugged_flag_at_mc != ++ (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) { ++ if (plugged_flag_at_mc) { ++ mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED; ++ error = device_attach(&mc_dev->dev); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "device_attach() failed: %d\n", ++ error); ++ } ++ } else { ++ mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED; ++ device_release_driver(&mc_dev->dev); ++ } ++ } ++} ++ ++/** ++ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @driver_override: driver override to apply to new objects found in the DPRC, ++ * or NULL, if none. ++ * @obj_desc_array: array of device descriptors for child devices currently ++ * present in the physical DPRC. ++ * @num_child_objects_in_mc: number of entries in obj_desc_array ++ * ++ * Synchronizes the state of the Linux bus driver with the actual ++ * state of the MC by adding objects that have been newly discovered ++ * in the physical DPRC. 
++ */ ++static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, ++ struct dprc_obj_desc *obj_desc_array, ++ int num_child_objects_in_mc) ++{ ++ int error; ++ int i; ++ ++ for (i = 0; i < num_child_objects_in_mc; i++) { ++ struct fsl_mc_device *child_dev; ++ struct dprc_obj_desc *obj_desc = &obj_desc_array[i]; ++ ++ if (strlen(obj_desc->type) == 0) ++ continue; ++ ++ /* ++ * Check if device is already known to Linux: ++ */ ++ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev); ++ if (child_dev) { ++ check_plugged_state_change(child_dev, obj_desc); ++ continue; ++ } ++ ++ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev, ++ driver_override, &child_dev); ++ if (error < 0) ++ continue; ++ } ++} ++ ++void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) ++{ ++ int pool_type; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ ++ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[pool_type]; ++ ++ res_pool->type = pool_type; ++ res_pool->max_count = 0; ++ res_pool->free_count = 0; ++ res_pool->mc_bus = mc_bus; ++ INIT_LIST_HEAD(&res_pool->free_list); ++ mutex_init(&res_pool->mutex); ++ } ++} ++ ++static void dprc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev, ++ enum fsl_mc_pool_type pool_type) ++{ ++ struct fsl_mc_resource *resource; ++ struct fsl_mc_resource *next; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[pool_type]; ++ int free_count = 0; ++ ++ WARN_ON(res_pool->type != pool_type); ++ WARN_ON(res_pool->free_count != res_pool->max_count); ++ ++ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) { ++ free_count++; ++ WARN_ON(resource->type != res_pool->type); ++ WARN_ON(resource->parent_pool != res_pool); ++ devm_kfree(&mc_bus_dev->dev, resource); ++ } ++ ++ WARN_ON(free_count != res_pool->free_count); ++} ++ ++/* ++ * Clean up all resource pools other than the IRQ pool ++ */ ++void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) ++{ ++ int pool_type; ++ ++ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { ++ if (pool_type != FSL_MC_POOL_IRQ) ++ dprc_cleanup_resource_pool(mc_bus_dev, pool_type); ++ } ++} ++ ++/** ++ * dprc_scan_objects - Discover objects in a DPRC ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @driver_override: driver override to apply to new objects found in the DPRC, ++ * or NULL, if none. ++ * @total_irq_count: total number of IRQs needed by objects in the DPRC. ++ * ++ * Detects objects added and removed from a DPRC and synchronizes the ++ * state of the Linux bus driver, MC by adding and removing ++ * devices accordingly. ++ * Two types of devices can be found in a DPRC: allocatable objects (e.g., ++ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni). ++ * All allocatable devices needed to be probed before all non-allocatable ++ * devices, to ensure that device drivers for non-allocatable ++ * devices can allocate any type of allocatable devices. ++ * That is, we need to ensure that the corresponding resource pools are ++ * populated before they can get allocation requests from probe callbacks ++ * of the device drivers for the non-allocatable devices. 
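For illustration only, not part of the patch: the discovery described above boils down to dprc_get_obj_count() followed by dprc_get_obj() per index. The sketch below walks an already-open DPRC and counts the entries with a non-empty type, without any of the add/remove bookkeeping.

static int example_count_named_objects(struct fsl_mc_io *mc_io,
                                       uint16_t dprc_handle, int *named)
{
        struct dprc_obj_desc obj_desc;
        int obj_count;
        int i;
        int err;

        err = dprc_get_obj_count(mc_io, 0, dprc_handle, &obj_count);
        if (err < 0)
                return err;

        *named = 0;
        for (i = 0; i < obj_count; i++) {
                err = dprc_get_obj(mc_io, 0, dprc_handle, i, &obj_desc);
                if (err < 0)
                        return err; /* e.g. -ENXIO if the DPRC changed mid-scan */
                if (strlen(obj_desc.type) != 0)
                        (*named)++;
        }
        return 0;
}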
++ */ ++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, ++ unsigned int *total_irq_count) ++{ ++ int num_child_objects; ++ int dprc_get_obj_failures; ++ int error; ++ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count; ++ struct dprc_obj_desc *child_obj_desc_array = NULL; ++ ++ error = dprc_get_obj_count(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, ++ &num_child_objects); ++ if (error < 0) { ++ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ if (num_child_objects != 0) { ++ int i; ++ ++ child_obj_desc_array = ++ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects, ++ sizeof(*child_obj_desc_array), ++ GFP_KERNEL); ++ if (!child_obj_desc_array) ++ return -ENOMEM; ++ ++ /* ++ * Discover objects currently present in the physical DPRC: ++ */ ++ dprc_get_obj_failures = 0; ++ for (i = 0; i < num_child_objects; i++) { ++ struct dprc_obj_desc *obj_desc = ++ &child_obj_desc_array[i]; ++ ++ error = dprc_get_obj(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, ++ i, obj_desc); ++ ++ /* ++ * -ENXIO means object index was invalid. ++ * This is caused when the DPRC was changed at ++ * the MC during the scan. In this case, ++ * abort the current scan. ++ */ ++ if (error == -ENXIO) ++ return error; ++ ++ if (error < 0) { ++ dev_err(&mc_bus_dev->dev, ++ "dprc_get_obj(i=%d) failed: %d\n", ++ i, error); ++ /* ++ * Mark the obj entry as "invalid", by using the ++ * empty string as obj type: ++ */ ++ obj_desc->type[0] = '\0'; ++ obj_desc->id = error; ++ dprc_get_obj_failures++; ++ continue; ++ } ++ ++ /* ++ * for DPRC versions that do not support the ++ * shareability attribute, make simplifying assumption ++ * that only SEC is not shareable. ++ */ ++ if ((strcmp(obj_desc->type, "dpseci") == 0) && ++ (obj_desc->ver_major < 4)) ++ obj_desc->flags |= ++ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY; ++ ++ irq_count += obj_desc->irq_count; ++ dev_dbg(&mc_bus_dev->dev, ++ "Discovered object: type %s, id %d\n", ++ obj_desc->type, obj_desc->id); ++ } ++ ++ if (dprc_get_obj_failures != 0) { ++ dev_err(&mc_bus_dev->dev, ++ "%d out of %d devices could not be retrieved\n", ++ dprc_get_obj_failures, num_child_objects); ++ } ++ } ++ ++ *total_irq_count = irq_count; ++ dprc_remove_devices(mc_bus_dev, child_obj_desc_array, ++ num_child_objects); ++ ++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array, ++ num_child_objects); ++ ++ if (child_obj_desc_array) ++ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(dprc_scan_objects); ++ ++/** ++ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * ++ * Scans the physical DPRC and synchronizes the state of the Linux ++ * bus driver with the actual state of the MC by adding and removing ++ * devices as appropriate. 
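For illustration only, not part of the patch: dprc_scan_objects() is always called with the bus scan_mutex held, as dprc_scan_container() below and the IRQ handler thread do. A hedged rescan helper following that convention:

static int example_rescan_dprc(struct fsl_mc_device *mc_bus_dev)
{
        struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
        unsigned int irq_count;
        int err;

        /* serialize against other scans of the same DPRC */
        mutex_lock(&mc_bus->scan_mutex);
        err = dprc_scan_objects(mc_bus_dev, NULL, &irq_count);
        mutex_unlock(&mc_bus->scan_mutex);

        return err;
}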
++ */ ++static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) ++{ ++ int error; ++ unsigned int irq_count; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ ++ dprc_init_all_resource_pools(mc_bus_dev); ++ ++ /* ++ * Discover objects in the DPRC: ++ */ ++ mutex_lock(&mc_bus->scan_mutex); ++ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count); ++ mutex_unlock(&mc_bus->scan_mutex); ++ if (error < 0) ++ goto error; ++ ++ if (fsl_mc_interrupts_supported() && !mc_bus->irq_resources) { ++ irq_count += FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS; ++ error = fsl_mc_populate_irq_pool(mc_bus, irq_count); ++ if (error < 0) ++ goto error; ++ } ++ ++ return 0; ++error: ++ device_for_each_child(&mc_bus_dev->dev, NULL, __fsl_mc_device_remove); ++ dprc_cleanup_all_resource_pools(mc_bus_dev); ++ return error; ++} ++ ++/** ++ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0 ++ * ++ * @irq: IRQ number of the interrupt being handled ++ * @arg: Pointer to device structure ++ */ ++static irqreturn_t dprc_irq0_handler(int irq_num, void *arg) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++/** ++ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0 ++ * ++ * @irq: IRQ number of the interrupt being handled ++ * @arg: Pointer to device structure ++ */ ++static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) ++{ ++ int error; ++ uint32_t status; ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); ++ struct fsl_mc_io *mc_io = mc_dev->mc_io; ++ int irq_index = 0; ++ ++ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", ++ irq_num, smp_processor_id()); ++ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) ++ return IRQ_HANDLED; ++ ++ mutex_lock(&mc_bus->scan_mutex); ++ if (WARN_ON(mc_dev->irqs[irq_index]->irq_number != (uint32_t)irq_num)) ++ goto out; ++ ++ status = 0; ++ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, ++ &status); ++ if (error < 0) { ++ dev_err(dev, ++ "dprc_get_irq_status() failed: %d\n", error); ++ goto out; ++ } ++ ++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, ++ status); ++ if (error < 0) { ++ dev_err(dev, ++ "dprc_clear_irq_status() failed: %d\n", error); ++ goto out; ++ } ++ ++ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | ++ DPRC_IRQ_EVENT_OBJ_REMOVED | ++ DPRC_IRQ_EVENT_CONTAINER_DESTROYED | ++ DPRC_IRQ_EVENT_OBJ_DESTROYED | ++ DPRC_IRQ_EVENT_OBJ_CREATED)) { ++ unsigned int irq_count; ++ ++ error = dprc_scan_objects(mc_dev, NULL, &irq_count); ++ if (error < 0) { ++ if (error != -ENXIO) /* don't need to report aborted scan */ ++ dev_err(dev, "dprc_scan_objects() failed: %d\n", error); ++ goto out; ++ } ++ ++ WARN_ON((int16_t)irq_count < 0); ++ ++ if ((int16_t)irq_count > ++ mc_bus->resource_pools[FSL_MC_POOL_IRQ].max_count) { ++ dev_warn(dev, ++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n", ++ irq_count, ++ mc_bus->resource_pools[FSL_MC_POOL_IRQ]. 
++ max_count); ++ } ++ } ++ ++out: ++ mutex_unlock(&mc_bus->scan_mutex); ++ return IRQ_HANDLED; ++} ++ ++/* ++ * Disable and clear interrupts for a given DPRC object ++ */ ++static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int error; ++ struct fsl_mc_io *mc_io = mc_dev->mc_io; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ if (WARN_ON(irq_count == 0)) ++ return -EINVAL; ++ ++ for (i = 0; i < irq_count; i++) { ++ /* ++ * Disable generation of interrupt i, while we configure it: ++ */ ++ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, i, 0); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Disabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ ++ /* ++ * Disable all interrupt causes for interrupt i: ++ */ ++ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, i, 0x0); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Disabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ ++ /* ++ * Clear any leftover interrupt i: ++ */ ++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, i, ++ ~0x0U); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Disabling DPRC IRQ %d failed: dprc_clear_irq_status() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ } ++ ++ return 0; ++} ++ ++static void unregister_dprc_irq_handlers(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ struct fsl_mc_device_irq *irq; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = mc_dev->irqs[i]; ++ devm_free_irq(&mc_dev->dev, irq->irq_number, ++ &mc_dev->dev); ++ } ++} ++ ++static int register_dprc_irq_handlers(struct fsl_mc_device *mc_dev) ++{ ++ static const struct irq_handler { ++ irq_handler_t irq_handler; ++ irq_handler_t irq_handler_thread; ++ const char *irq_name; ++ } irq_handlers[] = { ++ [0] = { ++ .irq_handler = dprc_irq0_handler, ++ .irq_handler_thread = dprc_irq0_handler_thread, ++ .irq_name = "FSL MC DPRC irq0", ++ }, ++ }; ++ ++ unsigned int i; ++ int error; ++ struct fsl_mc_device_irq *irq; ++ unsigned int num_irq_handlers_registered = 0; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ if (WARN_ON(irq_count != ARRAY_SIZE(irq_handlers))) ++ return -EINVAL; ++ ++ for (i = 0; i < ARRAY_SIZE(irq_handlers); i++) { ++ irq = mc_dev->irqs[i]; ++ ++ /* ++ * NOTE: devm_request_threaded_irq() invokes the device-specific ++ * function that programs the MSI physically in the device ++ */ ++ error = devm_request_threaded_irq(&mc_dev->dev, ++ irq->irq_number, ++ irq_handlers[i].irq_handler, ++ irq_handlers[i]. 
++ irq_handler_thread, ++ IRQF_NO_SUSPEND | ++ IRQF_ONESHOT, ++ irq_handlers[i].irq_name, ++ &mc_dev->dev); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "devm_request_threaded_irq() failed: %d\n", ++ error); ++ goto error_unregister_irq_handlers; ++ } ++ ++ num_irq_handlers_registered++; ++ } ++ ++ return 0; ++ ++error_unregister_irq_handlers: ++ for (i = 0; i < num_irq_handlers_registered; i++) { ++ irq = mc_dev->irqs[i]; ++ devm_free_irq(&mc_dev->dev, irq->irq_number, ++ &mc_dev->dev); ++ } ++ ++ return error; ++} ++ ++static int enable_dprc_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int error; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ for (i = 0; i < irq_count; i++) { ++ /* ++ * Enable all interrupt causes for the interrupt: ++ */ ++ error = dprc_set_irq_mask(mc_dev->mc_io, ++ 0, ++ mc_dev->mc_handle, ++ i, ++ ~0x0u); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Enabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ ++ /* ++ * Enable generation of the interrupt: ++ */ ++ error = dprc_set_irq_enable(mc_dev->mc_io, ++ 0, ++ mc_dev->mc_handle, ++ i, 1); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Enabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * Setup interrupts for a given DPRC device ++ */ ++static int dprc_setup_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ ++ error = fsl_mc_allocate_irqs(mc_dev); ++ if (error < 0) ++ return error; ++ ++ error = disable_dprc_irqs(mc_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = register_dprc_irq_handlers(mc_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = enable_dprc_irqs(mc_dev); ++ if (error < 0) ++ goto error_unregister_irq_handlers; ++ ++ return 0; ++ ++error_unregister_irq_handlers: ++ unregister_dprc_irq_handlers(mc_dev); ++ ++error_free_irqs: ++ fsl_mc_free_irqs(mc_dev); ++ return error; ++} ++ ++/* ++ * Creates a DPMCP for a DPRC's built-in MC portal ++ */ ++static int dprc_create_dpmcp(struct fsl_mc_device *dprc_dev) ++{ ++ int error; ++ struct dpmcp_cfg dpmcp_cfg; ++ uint16_t dpmcp_handle; ++ struct dprc_res_req res_req; ++ struct dpmcp_attr dpmcp_attr; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); ++ ++ dpmcp_cfg.portal_id = mc_bus->dprc_attr.portal_id; ++ error = dpmcp_create(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ &dpmcp_cfg, ++ &dpmcp_handle); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_create() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ /* ++ * Set the state of the newly created DPMCP object to be "plugged": ++ */ ++ ++ error = dpmcp_get_attributes(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle, ++ &dpmcp_attr); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_get_attributes() failed: %d\n", ++ error); ++ goto error_destroy_dpmcp; ++ } ++ ++ if (WARN_ON(dpmcp_attr.id != mc_bus->dprc_attr.portal_id)) { ++ error = -EINVAL; ++ goto error_destroy_dpmcp; ++ } ++ ++ strcpy(res_req.type, "dpmcp"); ++ res_req.num = 1; ++ res_req.options = ++ (DPRC_RES_REQ_OPT_EXPLICIT | DPRC_RES_REQ_OPT_PLUGGED); ++ res_req.id_base_align = dpmcp_attr.id; ++ ++ error = dprc_assign(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dprc_dev->mc_handle, ++ dprc_dev->obj_desc.id, ++ &res_req); ++ ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dprc_assign() failed: %d\n", error); ++ goto error_destroy_dpmcp; ++ } ++ ++ (void)dpmcp_close(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle); ++ return 0; 
++ ++error_destroy_dpmcp: ++ (void)dpmcp_destroy(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle); ++ return error; ++} ++ ++/* ++ * Destroys the DPMCP for a DPRC's built-in MC portal ++ */ ++static void dprc_destroy_dpmcp(struct fsl_mc_device *dprc_dev) ++{ ++ int error; ++ uint16_t dpmcp_handle; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); ++ ++ if (WARN_ON(!dprc_dev->mc_io || dprc_dev->mc_io->dpmcp_dev)) ++ return; ++ ++ error = dpmcp_open(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ mc_bus->dprc_attr.portal_id, ++ &dpmcp_handle); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_open() failed: %d\n", ++ error); ++ return; ++ } ++ ++ error = dpmcp_destroy(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_destroy() failed: %d\n", ++ error); ++ return; ++ } ++} ++ ++/** ++ * dprc_probe - callback invoked when a DPRC is being bound to this driver ++ * ++ * @mc_dev: Pointer to fsl-mc device representing a DPRC ++ * ++ * It opens the physical DPRC in the MC. ++ * It scans the DPRC to discover the MC objects contained in it. ++ * It creates the interrupt pool for the MC bus associated with the DPRC. ++ * It configures the interrupts for the DPRC device itself. ++ */ ++static int dprc_probe(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ size_t region_size; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); ++ bool mc_io_created = false; ++ bool dev_root_set = false; ++ ++ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) ++ return -EINVAL; ++ ++ if (mc_dev->mc_io) { ++ /* ++ * This is the root DPRC ++ */ ++ if (WARN_ON(fsl_mc_bus_type.dev_root)) ++ return -EINVAL; ++ ++ fsl_mc_bus_type.dev_root = &mc_dev->dev; ++ dev_root_set = true; ++ } else { ++ /* ++ * This is a child DPRC ++ */ ++ if (WARN_ON(!fsl_mc_bus_type.dev_root)) ++ return -EINVAL; ++ ++ if (WARN_ON(mc_dev->obj_desc.region_count == 0)) ++ return -EINVAL; ++ ++ region_size = mc_dev->regions[0].end - ++ mc_dev->regions[0].start + 1; ++ ++ error = fsl_create_mc_io(&mc_dev->dev, ++ mc_dev->regions[0].start, ++ region_size, ++ NULL, 0, &mc_dev->mc_io); ++ if (error < 0) ++ return error; ++ ++ mc_io_created = true; ++ } ++ ++ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error); ++ goto error_cleanup_mc_io; ++ } ++ ++ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ &mc_bus->dprc_attr); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n", ++ error); ++ goto error_cleanup_open; ++ } ++ ++ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || ++ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && ++ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { ++ dev_err(&mc_dev->dev, ++ "ERROR: DPRC version %d.%d not supported\n", ++ mc_bus->dprc_attr.version.major, ++ mc_bus->dprc_attr.version.minor); ++ error = -ENOTSUPP; ++ goto error_cleanup_open; ++ } ++ ++ if (fsl_mc_interrupts_supported()) { ++ /* ++ * Create DPMCP for the DPRC's built-in portal: ++ */ ++ error = dprc_create_dpmcp(mc_dev); ++ if (error < 0) ++ goto error_cleanup_open; ++ } ++ ++ mutex_init(&mc_bus->scan_mutex); ++ ++ /* ++ * Discover MC objects in the DPRC object: ++ */ ++ error = dprc_scan_container(mc_dev); ++ if (error < 0) ++ goto error_destroy_dpmcp; ++ ++ if (fsl_mc_interrupts_supported()) { ++ /* ++ * The fsl_mc_device object associated with the DPMCP object ++ * created above was created as part 
of the ++ * dprc_scan_container() call above: ++ */ ++ if (WARN_ON(!mc_dev->mc_io->dpmcp_dev)) { ++ error = -EINVAL; ++ goto error_cleanup_dprc_scan; ++ } ++ ++ /* ++ * Allocate MC portal to be used in atomic context ++ * (e.g., to program MSIs from program_msi_at_mc()) ++ */ ++ error = fsl_mc_portal_allocate(NULL, ++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, ++ &mc_bus->atomic_mc_io); ++ if (error < 0) ++ goto error_cleanup_dprc_scan; ++ ++ pr_info("fsl-mc: Allocated dpmcp.%d to dprc.%d for atomic MC I/O\n", ++ mc_bus->atomic_mc_io->dpmcp_dev->obj_desc.id, ++ mc_dev->obj_desc.id); ++ ++ /* ++ * Open DPRC handle to be used with mc_bus->atomic_mc_io: ++ */ ++ error = dprc_open(mc_bus->atomic_mc_io, 0, mc_dev->obj_desc.id, ++ &mc_bus->atomic_dprc_handle); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", ++ error); ++ goto error_cleanup_atomic_mc_io; ++ } ++ ++ /* ++ * Configure interrupt for the DPMCP object associated with the ++ * DPRC object's built-in portal: ++ * ++ * NOTE: We have to do this after calling dprc_scan_container(), ++ * since dprc_scan_container() populates the IRQ pool for ++ * this DPRC. ++ */ ++ error = fsl_mc_io_setup_dpmcp_irq(mc_dev->mc_io); ++ if (error < 0) ++ goto error_cleanup_atomic_dprc_handle; ++ ++ /* ++ * Configure interrupts for the DPRC object associated with ++ * this MC bus: ++ */ ++ error = dprc_setup_irqs(mc_dev); ++ if (error < 0) ++ goto error_cleanup_atomic_dprc_handle; ++ } ++ ++ dev_info(&mc_dev->dev, "DPRC device bound to driver"); ++ return 0; ++ ++error_cleanup_atomic_dprc_handle: ++ (void)dprc_close(mc_bus->atomic_mc_io, 0, mc_bus->atomic_dprc_handle); ++ ++error_cleanup_atomic_mc_io: ++ fsl_mc_portal_free(mc_bus->atomic_mc_io); ++ ++error_cleanup_dprc_scan: ++ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); ++ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); ++ dprc_cleanup_all_resource_pools(mc_dev); ++ if (fsl_mc_interrupts_supported()) ++ fsl_mc_cleanup_irq_pool(mc_bus); ++ ++error_destroy_dpmcp: ++ dprc_destroy_dpmcp(mc_dev); ++ ++error_cleanup_open: ++ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++ ++error_cleanup_mc_io: ++ if (mc_io_created) { ++ fsl_destroy_mc_io(mc_dev->mc_io); ++ mc_dev->mc_io = NULL; ++ } ++ ++ if (dev_root_set) ++ fsl_mc_bus_type.dev_root = NULL; ++ ++ return error; ++} ++ ++/* ++ * Tear down interrupts for a given DPRC object ++ */ ++static void dprc_teardown_irqs(struct fsl_mc_device *mc_dev) ++{ ++ (void)disable_dprc_irqs(mc_dev); ++ unregister_dprc_irq_handlers(mc_dev); ++ fsl_mc_free_irqs(mc_dev); ++} ++ ++/** ++ * dprc_remove - callback invoked when a DPRC is being unbound from this driver ++ * ++ * @mc_dev: Pointer to fsl-mc device representing the DPRC ++ * ++ * It removes the DPRC's child objects from Linux (not from the MC) and ++ * closes the DPRC device in the MC. ++ * It tears down the interrupts that were configured for the DPRC device. ++ * It destroys the interrupt pool associated with this MC bus. 
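For illustration only, not part of the patch: the probe/remove pair above is hooked to the bus through a struct fsl_mc_driver, as the dprc_driver definition just below shows. A hedged sketch of the same registration pattern for a hypothetical "dpexample" object driver (name and callback bodies are invented):

static int dpexample_probe(struct fsl_mc_device *mc_dev)
{
        dev_info(&mc_dev->dev, "dpexample bound\n");
        return 0;
}

static int dpexample_remove(struct fsl_mc_device *mc_dev)
{
        dev_info(&mc_dev->dev, "dpexample unbound\n");
        return 0;
}

static const struct fsl_mc_device_match_id dpexample_match_id_table[] = {
        { .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpexample" },
        { .vendor = 0x0 },
};

static struct fsl_mc_driver dpexample_driver = {
        .driver = {
                .name = "dpexample",
                .owner = THIS_MODULE,
        },
        .match_id_table = dpexample_match_id_table,
        .probe = dpexample_probe,
        .remove = dpexample_remove,
};

/* registered/unregistered from module init/exit, like dprc_driver_init(): */
/* fsl_mc_driver_register(&dpexample_driver); */
/* fsl_mc_driver_unregister(&dpexample_driver); */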
++ */ ++static int dprc_remove(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); ++ ++ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) ++ return -EINVAL; ++ if (WARN_ON(!mc_dev->mc_io)) ++ return -EINVAL; ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ return -EINVAL; ++ ++ if (fsl_mc_interrupts_supported()) { ++ dprc_teardown_irqs(mc_dev); ++ error = dprc_close(mc_bus->atomic_mc_io, 0, ++ mc_bus->atomic_dprc_handle); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", ++ error); ++ } ++ ++ fsl_mc_portal_free(mc_bus->atomic_mc_io); ++ } ++ ++ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); ++ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); ++ dprc_cleanup_all_resource_pools(mc_dev); ++ dprc_destroy_dpmcp(mc_dev); ++ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++ if (error < 0) ++ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); ++ ++ if (fsl_mc_interrupts_supported()) ++ fsl_mc_cleanup_irq_pool(mc_bus); ++ ++ fsl_destroy_mc_io(mc_dev->mc_io); ++ mc_dev->mc_io = NULL; ++ ++ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) ++ fsl_mc_bus_type.dev_root = NULL; ++ ++ dev_info(&mc_dev->dev, "DPRC device unbound from driver"); ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dprc"}, ++ {.vendor = 0x0}, ++}; ++ ++static struct fsl_mc_driver dprc_driver = { ++ .driver = { ++ .name = FSL_MC_DPRC_DRIVER_NAME, ++ .owner = THIS_MODULE, ++ .pm = NULL, ++ }, ++ .match_id_table = match_id_table, ++ .probe = dprc_probe, ++ .remove = dprc_remove, ++}; ++ ++int __init dprc_driver_init(void) ++{ ++ return fsl_mc_driver_register(&dprc_driver); ++} ++ ++void __exit dprc_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dprc_driver); ++} +diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c +new file mode 100644 +index 0000000..4d86438 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dprc.c +@@ -0,0 +1,1218 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions are met: ++* * Redistributions of source code must retain the above copyright ++* notice, this list of conditions and the following disclaimer. ++* * Redistributions in binary form must reproduce the above copyright ++* notice, this list of conditions and the following disclaimer in the ++* documentation and/or other materials provided with the distribution. ++* * Neither the name of the above-listed copyright holders nor the ++* names of any contributors may be used to endorse or promote products ++* derived from this software without specific prior written permission. ++* ++* ++* ALTERNATIVELY, this software may be distributed under the terms of the ++* GNU General Public License ("GPL") as published by the Free Software ++* Foundation, either version 2 of that License or (at your option) any ++* later version. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++* POSSIBILITY OF SUCH DAMAGE. ++*/ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dprc.h" ++#include "dprc-cmd.h" ++ ++int dprc_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int container_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags, ++ 0); ++ cmd.params[0] |= mc_enc(0, 32, container_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_open); ++ ++int dprc_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_close); ++ ++int dprc_create_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_cfg *cfg, ++ int *child_container_id, ++ uint64_t *child_portal_offset) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.params[0] |= mc_enc(32, 16, cfg->icid); ++ cmd.params[0] |= mc_enc(0, 32, cfg->options); ++ cmd.params[1] |= mc_enc(32, 32, cfg->portal_id); ++ cmd.params[2] |= mc_enc(0, 8, cfg->label[0]); ++ cmd.params[2] |= mc_enc(8, 8, cfg->label[1]); ++ cmd.params[2] |= mc_enc(16, 8, cfg->label[2]); ++ cmd.params[2] |= mc_enc(24, 8, cfg->label[3]); ++ cmd.params[2] |= mc_enc(32, 8, cfg->label[4]); ++ cmd.params[2] |= mc_enc(40, 8, cfg->label[5]); ++ cmd.params[2] |= mc_enc(48, 8, cfg->label[6]); ++ cmd.params[2] |= mc_enc(56, 8, cfg->label[7]); ++ cmd.params[3] |= mc_enc(0, 8, cfg->label[8]); ++ cmd.params[3] |= mc_enc(8, 8, cfg->label[9]); ++ cmd.params[3] |= mc_enc(16, 8, cfg->label[10]); ++ cmd.params[3] |= mc_enc(24, 8, cfg->label[11]); ++ cmd.params[3] |= mc_enc(32, 8, cfg->label[12]); ++ cmd.params[3] |= mc_enc(40, 8, cfg->label[13]); ++ cmd.params[3] |= mc_enc(48, 8, cfg->label[14]); ++ cmd.params[3] |= mc_enc(56, 8, cfg->label[15]); ++ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *child_container_id = mc_dec(cmd.params[1], 0, 32); ++ *child_portal_offset = mc_dec(cmd.params[2], 0, 64); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_create_container); ++ ++int dprc_destroy_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, 
&cmd); ++} ++EXPORT_SYMBOL(dprc_destroy_container); ++ ++int dprc_reset_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_reset_container); ++ ++int dprc_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32); ++ *type = mc_dec(cmd.params[2], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq); ++ ++int dprc_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_irq); ++ ++int dprc_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = mc_dec(cmd.params[0], 0, 8); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq_enable); ++ ++int dprc_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 8, en); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_irq_enable); ++ ++int dprc_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *mask = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq_mask); ++ ++int dprc_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, 
++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, mask); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_irq_mask); ++ ++int dprc_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, *status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *status = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq_status); ++ ++int dprc_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_clear_irq_status); ++ ++int dprc_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_attributes *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ attr->container_id = mc_dec(cmd.params[0], 0, 32); ++ attr->icid = mc_dec(cmd.params[0], 32, 16); ++ attr->options = mc_dec(cmd.params[1], 0, 32); ++ attr->portal_id = mc_dec(cmd.params[1], 32, 32); ++ attr->version.major = mc_dec(cmd.params[2], 0, 16); ++ attr->version.minor = mc_dec(cmd.params[2], 16, 16); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_attributes); ++ ++int dprc_set_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t quota) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ cmd.params[0] |= mc_enc(32, 16, quota); ++ cmd.params[1] |= mc_enc(0, 8, type[0]); ++ cmd.params[1] |= mc_enc(8, 8, type[1]); ++ cmd.params[1] |= mc_enc(16, 8, type[2]); ++ cmd.params[1] |= mc_enc(24, 8, type[3]); ++ cmd.params[1] |= mc_enc(32, 8, type[4]); ++ cmd.params[1] |= mc_enc(40, 8, type[5]); ++ cmd.params[1] |= mc_enc(48, 8, type[6]); ++ cmd.params[1] |= mc_enc(56, 8, type[7]); ++ cmd.params[2] |= mc_enc(0, 8, type[8]); ++ cmd.params[2] |= mc_enc(8, 8, type[9]); ++ cmd.params[2] |= mc_enc(16, 8, type[10]); ++ cmd.params[2] |= mc_enc(24, 8, type[11]); ++ cmd.params[2] |= mc_enc(32, 8, type[12]); ++ cmd.params[2] |= mc_enc(40, 8, type[13]); ++ cmd.params[2] |= mc_enc(48, 8, type[14]); ++ cmd.params[2] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} 
++EXPORT_SYMBOL(dprc_set_res_quota); ++ ++int dprc_get_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t *quota) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ cmd.params[1] |= mc_enc(0, 8, type[0]); ++ cmd.params[1] |= mc_enc(8, 8, type[1]); ++ cmd.params[1] |= mc_enc(16, 8, type[2]); ++ cmd.params[1] |= mc_enc(24, 8, type[3]); ++ cmd.params[1] |= mc_enc(32, 8, type[4]); ++ cmd.params[1] |= mc_enc(40, 8, type[5]); ++ cmd.params[1] |= mc_enc(48, 8, type[6]); ++ cmd.params[1] |= mc_enc(56, 8, type[7]); ++ cmd.params[2] |= mc_enc(0, 8, type[8]); ++ cmd.params[2] |= mc_enc(8, 8, type[9]); ++ cmd.params[2] |= mc_enc(16, 8, type[10]); ++ cmd.params[2] |= mc_enc(24, 8, type[11]); ++ cmd.params[2] |= mc_enc(32, 8, type[12]); ++ cmd.params[2] |= mc_enc(40, 8, type[13]); ++ cmd.params[2] |= mc_enc(48, 8, type[14]); ++ cmd.params[2] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *quota = mc_dec(cmd.params[0], 32, 16); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_res_quota); ++ ++int dprc_assign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int container_id, ++ struct dprc_res_req *res_req) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, container_id); ++ cmd.params[0] |= mc_enc(32, 32, res_req->options); ++ cmd.params[1] |= mc_enc(0, 32, res_req->num); ++ cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align); ++ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); ++ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); ++ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); ++ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); ++ cmd.params[2] |= mc_enc(32, 8, res_req->type[4]); ++ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); ++ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); ++ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); ++ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); ++ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); ++ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); ++ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); ++ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); ++ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); ++ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); ++ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_assign); ++ ++int dprc_unassign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ struct dprc_res_req *res_req) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ cmd.params[0] |= mc_enc(32, 32, res_req->options); ++ cmd.params[1] |= mc_enc(0, 32, res_req->num); ++ cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align); ++ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); ++ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); ++ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); ++ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); ++ cmd.params[2] 
|= mc_enc(32, 8, res_req->type[4]); ++ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); ++ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); ++ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); ++ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); ++ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); ++ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); ++ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); ++ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); ++ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); ++ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); ++ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_unassign); ++ ++int dprc_get_pool_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *pool_count) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *pool_count = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_pool_count); ++ ++int dprc_get_pool(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int pool_index, ++ char *type) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, pool_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ type[0] = mc_dec(cmd.params[1], 0, 8); ++ type[1] = mc_dec(cmd.params[1], 8, 8); ++ type[2] = mc_dec(cmd.params[1], 16, 8); ++ type[3] = mc_dec(cmd.params[1], 24, 8); ++ type[4] = mc_dec(cmd.params[1], 32, 8); ++ type[5] = mc_dec(cmd.params[1], 40, 8); ++ type[6] = mc_dec(cmd.params[1], 48, 8); ++ type[7] = mc_dec(cmd.params[1], 56, 8); ++ type[8] = mc_dec(cmd.params[2], 0, 8); ++ type[9] = mc_dec(cmd.params[2], 8, 8); ++ type[10] = mc_dec(cmd.params[2], 16, 8); ++ type[11] = mc_dec(cmd.params[2], 24, 8); ++ type[12] = mc_dec(cmd.params[2], 32, 8); ++ type[13] = mc_dec(cmd.params[2], 40, 8); ++ type[14] = mc_dec(cmd.params[2], 48, 8); ++ type[15] = '\0'; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_pool); ++ ++int dprc_get_obj_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *obj_count) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *obj_count = mc_dec(cmd.params[0], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_count); ++ ++int dprc_get_obj(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int obj_index, ++ struct dprc_obj_desc *obj_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ obj_desc->id = mc_dec(cmd.params[0], 32, 32); ++ obj_desc->vendor = mc_dec(cmd.params[1], 0, 16); ++ 
obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8); ++ obj_desc->region_count = mc_dec(cmd.params[1], 24, 8); ++ obj_desc->state = mc_dec(cmd.params[1], 32, 32); ++ obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16); ++ obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16); ++ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); ++ obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8); ++ obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8); ++ obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8); ++ obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8); ++ obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8); ++ obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8); ++ obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8); ++ obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8); ++ obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8); ++ obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8); ++ obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8); ++ obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8); ++ obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8); ++ obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8); ++ obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8); ++ obj_desc->type[15] = '\0'; ++ obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8); ++ obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8); ++ obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8); ++ obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8); ++ obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8); ++ obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8); ++ obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8); ++ obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8); ++ obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8); ++ obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8); ++ obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8); ++ obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8); ++ obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8); ++ obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8); ++ obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8); ++ obj_desc->label[15] = '\0'; ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj); ++ ++int dprc_get_obj_desc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ struct dprc_obj_desc *obj_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32); ++ obj_desc->vendor = (uint16_t)mc_dec(cmd.params[1], 0, 16); ++ obj_desc->irq_count = (uint8_t)mc_dec(cmd.params[1], 16, 8); ++ obj_desc->region_count = (uint8_t)mc_dec(cmd.params[1],
24, 8); ++ obj_desc->state = (uint32_t)mc_dec(cmd.params[1], 32, 32); ++ obj_desc->ver_major = (uint16_t)mc_dec(cmd.params[2], 0, 16); ++ obj_desc->ver_minor = (uint16_t)mc_dec(cmd.params[2], 16, 16); ++ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); ++ obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8); ++ obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8); ++ obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8); ++ obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8); ++ obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8); ++ obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8); ++ obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8); ++ obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8); ++ obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8); ++ obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8); ++ obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8); ++ obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8); ++ obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8); ++ obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8); ++ obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8); ++ obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8); ++ obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8); ++ obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8); ++ obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8); ++ obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8); ++ obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8); ++ obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8); ++ obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8); ++ obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8); ++ obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8); ++ obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8); ++ obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8); ++ obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8); ++ obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8); ++ obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8); ++ obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8); ++ obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_desc); ++ ++int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ cmd.params[2] |= mc_enc(32, 32, obj_id); ++ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[4] |= mc_enc(56, 8, 
obj_type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_obj_irq); ++ ++int dprc_get_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); ++ *type = (int)mc_dec(cmd.params[2], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_irq); ++ ++int dprc_get_res_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ int *res_count) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ *res_count = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT, ++ cmd_flags, ++ token); ++ cmd.params[1] |= mc_enc(0, 8, type[0]); ++ cmd.params[1] |= mc_enc(8, 8, type[1]); ++ cmd.params[1] |= mc_enc(16, 8, type[2]); ++ cmd.params[1] |= mc_enc(24, 8, type[3]); ++ cmd.params[1] |= mc_enc(32, 8, type[4]); ++ cmd.params[1] |= mc_enc(40, 8, type[5]); ++ cmd.params[1] |= mc_enc(48, 8, type[6]); ++ cmd.params[1] |= mc_enc(56, 8, type[7]); ++ cmd.params[2] |= mc_enc(0, 8, type[8]); ++ cmd.params[2] |= mc_enc(8, 8, type[9]); ++ cmd.params[2] |= mc_enc(16, 8, type[10]); ++ cmd.params[2] |= mc_enc(24, 8, type[11]); ++ cmd.params[2] |= mc_enc(32, 8, type[12]); ++ cmd.params[2] |= mc_enc(40, 8, type[13]); ++ cmd.params[2] |= mc_enc(48, 8, type[14]); ++ cmd.params[2] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *res_count = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_res_count); ++ ++int dprc_get_res_ids(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ struct dprc_res_ids_range_desc *range_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status); ++ cmd.params[1] |= mc_enc(0, 32, range_desc->base_id); ++ cmd.params[1] |= mc_enc(32, 32, range_desc->last_id); ++ cmd.params[2] |= mc_enc(0, 8, type[0]); ++ 
cmd.params[2] |= mc_enc(8, 8, type[1]); ++ cmd.params[2] |= mc_enc(16, 8, type[2]); ++ cmd.params[2] |= mc_enc(24, 8, type[3]); ++ cmd.params[2] |= mc_enc(32, 8, type[4]); ++ cmd.params[2] |= mc_enc(40, 8, type[5]); ++ cmd.params[2] |= mc_enc(48, 8, type[6]); ++ cmd.params[2] |= mc_enc(56, 8, type[7]); ++ cmd.params[3] |= mc_enc(0, 8, type[8]); ++ cmd.params[3] |= mc_enc(8, 8, type[9]); ++ cmd.params[3] |= mc_enc(16, 8, type[10]); ++ cmd.params[3] |= mc_enc(24, 8, type[11]); ++ cmd.params[3] |= mc_enc(32, 8, type[12]); ++ cmd.params[3] |= mc_enc(40, 8, type[13]); ++ cmd.params[3] |= mc_enc(48, 8, type[14]); ++ cmd.params[3] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ range_desc->iter_status = mc_dec(cmd.params[0], 42, 7); ++ range_desc->base_id = mc_dec(cmd.params[1], 0, 32); ++ range_desc->last_id = mc_dec(cmd.params[1], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_res_ids); ++ ++int dprc_get_obj_region(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t region_index, ++ struct dprc_region_desc *region_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[0] |= mc_enc(48, 8, region_index); ++ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[4] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ region_desc->base_offset = mc_dec(cmd.params[1], 0, 64); ++ region_desc->size = mc_dec(cmd.params[2], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_region); ++ ++int dprc_set_obj_label(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ char *label) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[1] |= mc_enc(0, 8, label[0]); ++ cmd.params[1] |= mc_enc(8, 8, label[1]); ++ cmd.params[1] |= mc_enc(16, 8, label[2]); ++ cmd.params[1] |= mc_enc(24, 8, label[3]); ++ cmd.params[1] |= mc_enc(32, 8, label[4]); ++ cmd.params[1] |= mc_enc(40, 8, label[5]); ++ cmd.params[1] |= mc_enc(48, 8, label[6]); ++ cmd.params[1] |= mc_enc(56, 8, label[7]); ++ cmd.params[2] |= mc_enc(0, 8, label[8]); ++ cmd.params[2] |= mc_enc(8, 8, label[9]); ++ cmd.params[2] |= mc_enc(16, 8, label[10]); ++ cmd.params[2] |= mc_enc(24, 8, label[11]); ++ cmd.params[2] |= mc_enc(32, 8, label[12]); ++ cmd.params[2] |= mc_enc(40, 8, label[13]); ++ cmd.params[2] |= mc_enc(48, 8, 
label[14]); ++ cmd.params[2] |= mc_enc(56, 8, label[15]); ++ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[4] |= mc_enc(56, 8, obj_type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_obj_label); ++ ++int dprc_connect(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ const struct dprc_endpoint *endpoint2, ++ const struct dprc_connection_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); ++ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); ++ cmd.params[1] |= mc_enc(0, 32, endpoint2->id); ++ cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id); ++ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]); ++ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]); ++ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]); ++ cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]); ++ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]); ++ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]); ++ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]); ++ cmd.params[2] |= mc_enc(56, 8, endpoint1->type[7]); ++ cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]); ++ cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]); ++ cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]); ++ cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]); ++ cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]); ++ cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]); ++ cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]); ++ cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]); ++ cmd.params[4] |= mc_enc(0, 32, cfg->max_rate); ++ cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate); ++ cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]); ++ cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]); ++ cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]); ++ cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]); ++ cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]); ++ cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]); ++ cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]); ++ cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]); ++ cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]); ++ cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]); ++ cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]); ++ cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]); ++ cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]); ++ cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]); ++ cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]); ++ cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_connect); ++ ++int dprc_disconnect(struct fsl_mc_io *mc_io, ++ uint32_t 
cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, endpoint->id); ++ cmd.params[0] |= mc_enc(32, 32, endpoint->if_id); ++ cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]); ++ cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]); ++ cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]); ++ cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]); ++ cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]); ++ cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]); ++ cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]); ++ cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]); ++ cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]); ++ cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]); ++ cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]); ++ cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]); ++ cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]); ++ cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]); ++ cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]); ++ cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_disconnect); ++ ++int dprc_get_connection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ struct dprc_endpoint *endpoint2, ++ int *state) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); ++ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); ++ cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]); ++ cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]); ++ cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]); ++ cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]); ++ cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]); ++ cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]); ++ cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]); ++ cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]); ++ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]); ++ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]); ++ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]); ++ cmd.params[2] |= mc_enc(24, 8, endpoint1->type[11]); ++ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]); ++ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]); ++ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]); ++ cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ endpoint2->id = mc_dec(cmd.params[3], 0, 32); ++ endpoint2->if_id = mc_dec(cmd.params[3], 32, 32); ++ endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8); ++ endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8); ++ endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8); ++ endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8); ++ endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8); ++ endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8); ++ endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8); ++ endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8); ++ endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8); ++ endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8); ++ endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8); ++ endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8); ++ endpoint2->type[12] = 
mc_dec(cmd.params[5], 32, 8); ++ endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8); ++ endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8); ++ endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8); ++ *state = mc_dec(cmd.params[6], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_connection); +diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c +new file mode 100644 +index 0000000..a3940a0 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c +@@ -0,0 +1,716 @@ ++/* ++ * Freescale MC object device allocator driver ++ * ++ * Copyright (C) 2013 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include "../include/mc-sys.h" ++#include ++#include "../include/dpbp-cmd.h" ++#include "../include/dpcon-cmd.h" ++#include "dpmcp-cmd.h" ++#include "dpmcp.h" ++ ++/** ++ * fsl_mc_resource_pool_add_device - add allocatable device to a resource ++ * pool of a given MC bus ++ * ++ * @mc_bus: pointer to the MC bus ++ * @pool_type: MC bus pool type ++ * @mc_dev: Pointer to allocatable MC object device ++ * ++ * It adds an allocatable MC object device to a container's resource pool of ++ * the given resource type ++ */ ++static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus ++ *mc_bus, ++ enum fsl_mc_pool_type ++ pool_type, ++ struct fsl_mc_device ++ *mc_dev) ++{ ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc_resource *resource; ++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; ++ int error = -EINVAL; ++ bool mutex_locked = false; ++ ++ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) ++ goto out; ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ goto out; ++ if (WARN_ON(mc_dev->resource)) ++ goto out; ++ ++ res_pool = &mc_bus->resource_pools[pool_type]; ++ if (WARN_ON(res_pool->type != pool_type)) ++ goto out; ++ if (WARN_ON(res_pool->mc_bus != mc_bus)) ++ goto out; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ ++ if (WARN_ON(res_pool->max_count < 0)) ++ goto out; ++ if (WARN_ON(res_pool->free_count < 0 || ++ res_pool->free_count > res_pool->max_count)) ++ goto out; ++ ++ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource), ++ GFP_KERNEL); ++ if (!resource) { ++ error = -ENOMEM; ++ dev_err(&mc_bus_dev->dev, ++ "Failed to allocate memory for fsl_mc_resource\n"); ++ goto out; ++ } ++ ++ resource->type = pool_type; ++ resource->id = mc_dev->obj_desc.id; ++ resource->data = mc_dev; ++ resource->parent_pool = res_pool; ++ INIT_LIST_HEAD(&resource->node); ++ list_add_tail(&resource->node, &res_pool->free_list); ++ mc_dev->resource = resource; ++ res_pool->free_count++; ++ res_pool->max_count++; ++ error = 0; ++out: ++ if (mutex_locked) ++ mutex_unlock(&res_pool->mutex); ++ ++ return error; ++} ++ ++/** ++ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a ++ * resource pool ++ * ++ * @mc_dev: Pointer to allocatable MC object device ++ * ++ * It permanently removes an allocatable MC object device from the resource ++ * pool, the device is currently in, as long as it is in the pool's free list. 
++ */ ++static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device ++ *mc_dev) ++{ ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc_resource *resource; ++ int error = -EINVAL; ++ bool mutex_locked = false; ++ ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ goto out; ++ ++ resource = mc_dev->resource; ++ if (WARN_ON(!resource || resource->data != mc_dev)) ++ goto out; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ res_pool = resource->parent_pool; ++ if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type])) ++ goto out; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ ++ if (WARN_ON(res_pool->max_count <= 0)) ++ goto out; ++ if (WARN_ON(res_pool->free_count <= 0 || ++ res_pool->free_count > res_pool->max_count)) ++ goto out; ++ ++ /* ++ * If the device is currently allocated, its resource is not ++ * in the free list and thus, the device cannot be removed. ++ */ ++ if (list_empty(&resource->node)) { ++ error = -EBUSY; ++ dev_err(&mc_bus_dev->dev, ++ "Device %s cannot be removed from resource pool\n", ++ dev_name(&mc_dev->dev)); ++ goto out; ++ } ++ ++ list_del(&resource->node); ++ INIT_LIST_HEAD(&resource->node); ++ res_pool->free_count--; ++ res_pool->max_count--; ++ ++ devm_kfree(&mc_bus_dev->dev, resource); ++ mc_dev->resource = NULL; ++ error = 0; ++out: ++ if (mutex_locked) ++ mutex_unlock(&res_pool->mutex); ++ ++ return error; ++} ++ ++static const char *const fsl_mc_pool_type_strings[] = { ++ [FSL_MC_POOL_DPMCP] = "dpmcp", ++ [FSL_MC_POOL_DPBP] = "dpbp", ++ [FSL_MC_POOL_DPCON] = "dpcon", ++ [FSL_MC_POOL_IRQ] = "irq", ++}; ++ ++static int __must_check object_type_to_pool_type(const char *object_type, ++ enum fsl_mc_pool_type ++ *pool_type) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) { ++ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) { ++ *pool_type = i; ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_resource **new_resource) ++{ ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc_resource *resource; ++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; ++ int error = -EINVAL; ++ bool mutex_locked = false; ++ ++ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) != ++ FSL_MC_NUM_POOL_TYPES); ++ ++ *new_resource = NULL; ++ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) ++ goto error; ++ ++ res_pool = &mc_bus->resource_pools[pool_type]; ++ if (WARN_ON(res_pool->mc_bus != mc_bus)) ++ goto error; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ resource = list_first_entry_or_null(&res_pool->free_list, ++ struct fsl_mc_resource, node); ++ ++ if (!resource) { ++ WARN_ON(res_pool->free_count != 0); ++ error = -ENXIO; ++ dev_err(&mc_bus_dev->dev, ++ "No more resources of type %s left\n", ++ fsl_mc_pool_type_strings[pool_type]); ++ goto error; ++ } ++ ++ if (WARN_ON(resource->type != pool_type)) ++ goto error; ++ if (WARN_ON(resource->parent_pool != res_pool)) ++ goto error; ++ if (WARN_ON(res_pool->free_count <= 0 || ++ res_pool->free_count > res_pool->max_count)) ++ goto error; ++ ++ list_del(&resource->node); ++ INIT_LIST_HEAD(&resource->node); ++ ++ res_pool->free_count--; ++ mutex_unlock(&res_pool->mutex); ++ *new_resource = resource; ++ return 0; ++error: ++ if (mutex_locked) ++ 
mutex_unlock(&res_pool->mutex); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate); ++ ++void fsl_mc_resource_free(struct fsl_mc_resource *resource) ++{ ++ struct fsl_mc_resource_pool *res_pool; ++ bool mutex_locked = false; ++ ++ res_pool = resource->parent_pool; ++ if (WARN_ON(resource->type != res_pool->type)) ++ goto out; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ if (WARN_ON(res_pool->free_count < 0 || ++ res_pool->free_count >= res_pool->max_count)) ++ goto out; ++ ++ if (WARN_ON(!list_empty(&resource->node))) ++ goto out; ++ ++ list_add_tail(&resource->node, &res_pool->free_list); ++ res_pool->free_count++; ++out: ++ if (mutex_locked) ++ mutex_unlock(&res_pool->mutex); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_resource_free); ++ ++/** ++ * fsl_mc_portal_allocate - Allocates an MC portal ++ * ++ * @mc_dev: MC device for which the MC portal is to be allocated ++ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated ++ * MC portal. ++ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object ++ * that wraps the allocated MC portal is to be returned ++ * ++ * This function allocates an MC portal from the device's parent DPRC, ++ * from the corresponding MC bus' pool of MC portals and wraps ++ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the ++ * portal is allocated from its own MC bus. ++ */ ++int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, ++ uint16_t mc_io_flags, ++ struct fsl_mc_io **new_mc_io) ++{ ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ phys_addr_t mc_portal_phys_addr; ++ size_t mc_portal_size; ++ struct fsl_mc_device *dpmcp_dev; ++ int error = -EINVAL; ++ struct fsl_mc_resource *resource = NULL; ++ struct fsl_mc_io *mc_io = NULL; ++ ++ if (!mc_dev) { ++ if (WARN_ON(!fsl_mc_bus_type.dev_root)) ++ return error; ++ ++ mc_bus_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ } else if (mc_dev->flags & FSL_MC_IS_DPRC) { ++ mc_bus_dev = mc_dev; ++ } else { ++ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) ++ return error; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ } ++ ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ *new_mc_io = NULL; ++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource); ++ if (error < 0) ++ return error; ++ ++ error = -EINVAL; ++ dpmcp_dev = resource->data; ++ if (WARN_ON(!dpmcp_dev || ++ strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) ++ goto error_cleanup_resource; ++ ++ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR || ++ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR && ++ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) { ++ dev_err(&dpmcp_dev->dev, ++ "ERROR: Version %d.%d of DPMCP not supported.\n", ++ dpmcp_dev->obj_desc.ver_major, ++ dpmcp_dev->obj_desc.ver_minor); ++ error = -ENOTSUPP; ++ goto error_cleanup_resource; ++ } ++ ++ if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) ++ goto error_cleanup_resource; ++ ++ mc_portal_phys_addr = dpmcp_dev->regions[0].start; ++ mc_portal_size = dpmcp_dev->regions[0].end - ++ dpmcp_dev->regions[0].start + 1; ++ ++ if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size)) ++ goto error_cleanup_resource; ++ ++ error = fsl_create_mc_io(&mc_bus_dev->dev, ++ mc_portal_phys_addr, ++ mc_portal_size, dpmcp_dev, ++ mc_io_flags, &mc_io); ++ if (error < 0) ++ goto error_cleanup_resource; ++ ++ *new_mc_io = mc_io; ++ return 0; ++ ++error_cleanup_resource: ++ fsl_mc_resource_free(resource); ++ return error; ++} 
++EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate); ++ ++/** ++ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals ++ * of a given MC bus ++ * ++ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free ++ */ ++void fsl_mc_portal_free(struct fsl_mc_io *mc_io) ++{ ++ struct fsl_mc_device *dpmcp_dev; ++ struct fsl_mc_resource *resource; ++ ++ /* ++ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed ++ * to have a DPMCP object associated with. ++ */ ++ dpmcp_dev = mc_io->dpmcp_dev; ++ if (WARN_ON(!dpmcp_dev)) ++ return; ++ if (WARN_ON(strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) ++ return; ++ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) ++ return; ++ ++ resource = dpmcp_dev->resource; ++ if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP)) ++ return; ++ ++ if (WARN_ON(resource->data != dpmcp_dev)) ++ return; ++ ++ fsl_destroy_mc_io(mc_io); ++ fsl_mc_resource_free(resource); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_portal_free); ++ ++/** ++ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object ++ * ++ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free ++ */ ++int fsl_mc_portal_reset(struct fsl_mc_io *mc_io) ++{ ++ int error; ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return -EINVAL; ++ ++ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error); ++ return error; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_portal_reset); ++ ++/** ++ * fsl_mc_object_allocate - Allocates a MC object device of the given ++ * pool type from a given MC bus ++ * ++ * @mc_dev: MC device for which the MC object device is to be allocated ++ * @pool_type: MC bus resource pool type ++ * @new_mc_dev: Pointer to area where the pointer to the allocated ++ * MC object device is to be returned ++ * ++ * This function allocates a MC object device from the device's parent DPRC, ++ * from the corresponding MC bus' pool of allocatable MC object devices of ++ * the given resource type. mc_dev cannot be a DPRC itself. ++ * ++ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC ++ * portals are allocated using fsl_mc_portal_allocate(), instead of ++ * this function. ++ */ ++int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_device **new_mc_adev) ++{ ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_device *mc_adev; ++ int error = -EINVAL; ++ struct fsl_mc_resource *resource = NULL; ++ ++ *new_mc_adev = NULL; ++ if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC)) ++ goto error; ++ ++ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) ++ goto error; ++ ++ if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP)) ++ goto error; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource); ++ if (error < 0) ++ goto error; ++ ++ mc_adev = resource->data; ++ if (WARN_ON(!mc_adev)) ++ goto error; ++ ++ *new_mc_adev = mc_adev; ++ return 0; ++error: ++ if (resource) ++ fsl_mc_resource_free(resource); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); ++ ++/** ++ * fsl_mc_object_free - Returns an allocatable MC object device to the ++ * corresponding resource pool of a given MC bus. 
++ * ++ * @mc_adev: Pointer to the MC object device ++ */ ++void fsl_mc_object_free(struct fsl_mc_device *mc_adev) ++{ ++ struct fsl_mc_resource *resource; ++ ++ resource = mc_adev->resource; ++ if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP)) ++ return; ++ if (WARN_ON(resource->data != mc_adev)) ++ return; ++ ++ fsl_mc_resource_free(resource); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_object_free); ++ ++/** ++ * It allocates the IRQs required by a given MC object device. The ++ * IRQs are allocated from the interrupt pool associated with the ++ * MC bus that contains the device, if the device is not a DPRC device. ++ * Otherwise, the IRQs are allocated from the interrupt pool associated ++ * with the MC bus that represents the DPRC device itself. ++ */ ++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int irq_count; ++ int res_allocated_count = 0; ++ int error = -EINVAL; ++ struct fsl_mc_device_irq **irqs = NULL; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ ++ if (!mc->gic_supported) ++ return -ENOTSUPP; ++ ++ if (WARN_ON(mc_dev->irqs)) ++ goto error; ++ ++ irq_count = mc_dev->obj_desc.irq_count; ++ if (WARN_ON(irq_count == 0)) ++ goto error; ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ else ++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ goto error; ++ ++ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; ++ if (res_pool->free_count < irq_count) { ++ dev_err(&mc_dev->dev, ++ "Not able to allocate %u irqs for device\n", irq_count); ++ error = -ENOSPC; ++ goto error; ++ } ++ ++ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), ++ GFP_KERNEL); ++ if (!irqs) { ++ error = -ENOMEM; ++ dev_err(&mc_dev->dev, "No memory to allocate irqs[]\n"); ++ goto error; ++ } ++ ++ for (i = 0; i < irq_count; i++) { ++ struct fsl_mc_resource *resource; ++ ++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ, ++ &resource); ++ if (error < 0) ++ goto error; ++ ++ irqs[i] = to_fsl_mc_irq(resource); ++ res_allocated_count++; ++ ++ WARN_ON(irqs[i]->mc_dev); ++ irqs[i]->mc_dev = mc_dev; ++ irqs[i]->dev_irq_index = i; ++ } ++ ++ mc_dev->irqs = irqs; ++ return 0; ++error: ++ for (i = 0; i < res_allocated_count; i++) { ++ irqs[i]->mc_dev = NULL; ++ fsl_mc_resource_free(&irqs[i]->resource); ++ } ++ ++ if (irqs) ++ devm_kfree(&mc_dev->dev, irqs); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); ++ ++/* ++ * It frees the IRQs that were allocated for a MC object device, by ++ * returning them to the corresponding interrupt pool. 
++ */ ++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int irq_count; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_device_irq **irqs = mc_dev->irqs; ++ ++ if (WARN_ON(!irqs)) ++ return; ++ ++ irq_count = mc_dev->obj_desc.irq_count; ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ else ++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ return; ++ ++ for (i = 0; i < irq_count; i++) { ++ WARN_ON(!irqs[i]->mc_dev); ++ irqs[i]->mc_dev = NULL; ++ fsl_mc_resource_free(&irqs[i]->resource); ++ } ++ ++ devm_kfree(&mc_dev->dev, mc_dev->irqs); ++ mc_dev->irqs = NULL; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_free_irqs); ++ ++/** ++ * fsl_mc_allocator_probe - callback invoked when an allocatable device is ++ * being added to the system ++ */ ++static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev) ++{ ++ enum fsl_mc_pool_type pool_type; ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ int error = -EINVAL; ++ ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ goto error; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ if (WARN_ON(mc_bus_dev->dev.bus != &fsl_mc_bus_type)) ++ goto error; ++ ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ ++ /* ++ * If mc_dev is the DPMCP object for the parent DPRC's built-in ++ * portal, we don't add this DPMCP to the DPMCP object pool, ++ * but instead allocate it directly to the parent DPRC (mc_bus_dev): ++ */ ++ if (strcmp(mc_dev->obj_desc.type, "dpmcp") == 0 && ++ mc_dev->obj_desc.id == mc_bus->dprc_attr.portal_id) { ++ error = fsl_mc_io_set_dpmcp(mc_bus_dev->mc_io, mc_dev); ++ if (error < 0) ++ goto error; ++ } else { ++ error = object_type_to_pool_type(mc_dev->obj_desc.type, ++ &pool_type); ++ if (error < 0) ++ goto error; ++ ++ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, ++ mc_dev); ++ if (error < 0) ++ goto error; ++ } ++ ++ dev_dbg(&mc_dev->dev, ++ "Allocatable MC object device bound to fsl_mc_allocator driver"); ++ return 0; ++error: ++ ++ return error; ++} ++ ++/** ++ * fsl_mc_allocator_remove - callback invoked when an allocatable device is ++ * being removed from the system ++ */ ++static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ return -EINVAL; ++ ++ if (mc_dev->resource) { ++ error = fsl_mc_resource_pool_remove_device(mc_dev); ++ if (error < 0) ++ return error; ++ } ++ ++ dev_dbg(&mc_dev->dev, ++ "Allocatable MC object device unbound from fsl_mc_allocator driver"); ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpbp", ++ }, ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpmcp", ++ }, ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpcon", ++ }, ++ {.vendor = 0x0}, ++}; ++ ++static struct fsl_mc_driver fsl_mc_allocator_driver = { ++ .driver = { ++ .name = "fsl_mc_allocator", ++ .owner = THIS_MODULE, ++ .pm = NULL, ++ }, ++ .match_id_table = match_id_table, ++ .probe = fsl_mc_allocator_probe, ++ .remove = fsl_mc_allocator_remove, ++}; ++ ++int __init fsl_mc_allocator_driver_init(void) ++{ ++ return fsl_mc_driver_register(&fsl_mc_allocator_driver); ++} ++ ++void __exit fsl_mc_allocator_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&fsl_mc_allocator_driver); ++} +diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c +new file 
mode 100644 +index 0000000..f173b35 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-bus.c +@@ -0,0 +1,1347 @@ ++/* ++ * Freescale Management Complex (MC) bus driver ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../include/dpmng.h" ++#include "../include/mc-sys.h" ++#include "dprc-cmd.h" ++ ++/* ++ * IOMMU stream ID flags ++ */ ++#define STREAM_ID_PL_MASK BIT(9) /* privilege level */ ++#define STREAM_ID_BMT_MASK BIT(8) /* bypass memory translation */ ++#define STREAM_ID_VA_MASK BIT(7) /* virtual address translation ++ * (two-stage translation) */ ++#define STREAM_ID_ICID_MASK (BIT(7) - 1) /* isolation context ID ++ * (translation context) */ ++ ++#define MAX_STREAM_ID_ICID STREAM_ID_ICID_MASK ++ ++static struct kmem_cache *mc_dev_cache; ++ ++/** ++ * fsl_mc_bus_match - device to driver matching callback ++ * @dev: the MC object device structure to match against ++ * @drv: the device driver to search for matching MC object device id ++ * structures ++ * ++ * Returns 1 on success, 0 otherwise. ++ */ ++static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv) ++{ ++ const struct fsl_mc_device_match_id *id; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); ++ bool found = false; ++ ++ /* When driver_override is set, only bind to the matching driver */ ++ if (mc_dev->driver_override) { ++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name); ++ goto out; ++ } ++ ++ if (!mc_drv->match_id_table) ++ goto out; ++ ++ /* ++ * If the object is not 'plugged' don't match. ++ * Only exception is the root DPRC, which is a special case. ++ * ++ * NOTE: Only when this function is invoked for the root DPRC, ++ * mc_dev->mc_io is not NULL ++ */ ++ if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 && ++ !mc_dev->mc_io) ++ goto out; ++ ++ /* ++ * Traverse the match_id table of the given driver, trying to find ++ * a matching for the given MC object device. ++ */ ++ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { ++ if (id->vendor == mc_dev->obj_desc.vendor && ++ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) { ++ found = true; ++ ++ break; ++ } ++ } ++ ++out: ++ dev_dbg(dev, "%smatched\n", found ? 
"" : "not "); ++ return found; ++} ++ ++/** ++ * fsl_mc_bus_uevent - callback invoked when a device is added ++ */ ++static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) ++{ ++ pr_debug("%s invoked\n", __func__); ++ return 0; ++} ++ ++static ssize_t driver_override_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ const char *driver_override, *old = mc_dev->driver_override; ++ char *cp; ++ ++ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) ++ return -EINVAL; ++ ++ if (count > PATH_MAX) ++ return -EINVAL; ++ ++ driver_override = kstrndup(buf, count, GFP_KERNEL); ++ if (!driver_override) ++ return -ENOMEM; ++ ++ cp = strchr(driver_override, '\n'); ++ if (cp) ++ *cp = '\0'; ++ ++ if (strlen(driver_override)) { ++ mc_dev->driver_override = driver_override; ++ } else { ++ kfree(driver_override); ++ mc_dev->driver_override = NULL; ++ } ++ ++ kfree(old); ++ ++ return count; ++} ++ ++static ssize_t driver_override_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ ++ return sprintf(buf, "%s\n", mc_dev->driver_override); ++} ++ ++static DEVICE_ATTR_RW(driver_override); ++ ++static ssize_t rescan_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ unsigned int irq_count; ++ struct fsl_mc_device *root_mc_dev; ++ struct fsl_mc_bus *root_mc_bus; ++ ++ if (!is_root_dprc(dev)) ++ return -EINVAL; ++ ++ root_mc_dev = to_fsl_mc_device(dev); ++ root_mc_bus = to_fsl_mc_bus(root_mc_dev); ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) { ++ mutex_lock(&root_mc_bus->scan_mutex); ++ dprc_scan_objects(root_mc_dev, NULL, &irq_count); ++ mutex_unlock(&root_mc_bus->scan_mutex); ++ } ++ ++ return count; ++} ++ ++static DEVICE_ATTR_WO(rescan); ++ ++static struct attribute *fsl_mc_dev_attrs[] = { ++ &dev_attr_driver_override.attr, ++ &dev_attr_rescan.attr, ++ NULL, ++}; ++ ++static const struct attribute_group fsl_mc_dev_group = { ++ .attrs = fsl_mc_dev_attrs, ++}; ++ ++static const struct attribute_group *fsl_mc_dev_groups[] = { ++ &fsl_mc_dev_group, ++ NULL, ++}; ++ ++static int scan_fsl_mc_bus(struct device *dev, void *data) ++{ ++ unsigned int irq_count; ++ struct fsl_mc_device *root_mc_dev; ++ struct fsl_mc_bus *root_mc_bus; ++ ++ if (is_root_dprc(dev)) { ++ root_mc_dev = to_fsl_mc_device(dev); ++ root_mc_bus = to_fsl_mc_bus(root_mc_dev); ++ mutex_lock(&root_mc_bus->scan_mutex); ++ dprc_scan_objects(root_mc_dev, NULL, &irq_count); ++ mutex_unlock(&root_mc_bus->scan_mutex); ++ } ++ ++ return 0; ++} ++ ++static ssize_t bus_rescan_store(struct bus_type *bus, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) ++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus); ++ ++ return count; ++} ++static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); ++ ++static struct attribute *fsl_mc_bus_attrs[] = { ++ &bus_attr_rescan.attr, ++ NULL, ++}; ++ ++static const struct attribute_group fsl_mc_bus_group = { ++ .attrs = fsl_mc_bus_attrs, ++}; ++ ++static const struct attribute_group *fsl_mc_bus_groups[] = { ++ &fsl_mc_bus_group, ++ NULL, ++}; ++ ++struct bus_type fsl_mc_bus_type = { ++ .name = "fsl-mc", ++ .match = fsl_mc_bus_match, ++ .uevent = fsl_mc_bus_uevent, ++ .dev_groups = fsl_mc_dev_groups, ++ .bus_groups = fsl_mc_bus_groups, ++}; 
++EXPORT_SYMBOL_GPL(fsl_mc_bus_type); ++ ++static int fsl_mc_driver_probe(struct device *dev) ++{ ++ struct fsl_mc_driver *mc_drv; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ int error; ++ ++ if (WARN_ON(!dev->driver)) ++ return -EINVAL; ++ ++ mc_drv = to_fsl_mc_driver(dev->driver); ++ if (WARN_ON(!mc_drv->probe)) ++ return -EINVAL; ++ ++ error = mc_drv->probe(mc_dev); ++ if (error < 0) { ++ dev_err(dev, "MC object device probe callback failed: %d\n", ++ error); ++ return error; ++ } ++ ++ return 0; ++} ++ ++static int fsl_mc_driver_remove(struct device *dev) ++{ ++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ int error; ++ ++ if (WARN_ON(!dev->driver)) ++ return -EINVAL; ++ ++ error = mc_drv->remove(mc_dev); ++ if (error < 0) { ++ dev_err(dev, ++ "MC object device remove callback failed: %d\n", ++ error); ++ return error; ++ } ++ ++ return 0; ++} ++ ++static void fsl_mc_driver_shutdown(struct device *dev) ++{ ++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ ++ mc_drv->shutdown(mc_dev); ++} ++ ++/** ++ * __fsl_mc_driver_register - registers a child device driver with the ++ * MC bus ++ * ++ * This function is implicitly invoked from the registration function of ++ * fsl_mc device drivers, which is generated by the ++ * module_fsl_mc_driver() macro. ++ */ ++int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver, ++ struct module *owner) ++{ ++ int error; ++ ++ mc_driver->driver.owner = owner; ++ mc_driver->driver.bus = &fsl_mc_bus_type; ++ ++ if (mc_driver->probe) ++ mc_driver->driver.probe = fsl_mc_driver_probe; ++ ++ if (mc_driver->remove) ++ mc_driver->driver.remove = fsl_mc_driver_remove; ++ ++ if (mc_driver->shutdown) ++ mc_driver->driver.shutdown = fsl_mc_driver_shutdown; ++ ++ error = driver_register(&mc_driver->driver); ++ if (error < 0) { ++ pr_err("driver_register() failed for %s: %d\n", ++ mc_driver->driver.name, error); ++ return error; ++ } ++ ++ pr_info("MC object device driver %s registered\n", ++ mc_driver->driver.name); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); ++ ++/** ++ * fsl_mc_driver_unregister - unregisters a device driver from the ++ * MC bus ++ */ ++void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver) ++{ ++ driver_unregister(&mc_driver->driver); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister); ++ ++bool fsl_mc_interrupts_supported(void) ++{ ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ ++ return mc->gic_supported; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_interrupts_supported); ++ ++static int get_dprc_attr(struct fsl_mc_io *mc_io, ++ int container_id, struct dprc_attributes *attr) ++{ ++ uint16_t dprc_handle; ++ int error; ++ ++ error = dprc_open(mc_io, 0, container_id, &dprc_handle); ++ if (error < 0) { ++ pr_err("dprc_open() failed: %d\n", error); ++ return error; ++ } ++ ++ memset(attr, 0, sizeof(struct dprc_attributes)); ++ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr); ++ if (error < 0) { ++ pr_err("dprc_get_attributes() failed: %d\n", error); ++ goto common_cleanup; ++ } ++ ++ error = 0; ++ ++common_cleanup: ++ (void)dprc_close(mc_io, 0, dprc_handle); ++ return error; ++} ++ ++static int get_dprc_icid(struct fsl_mc_io *mc_io, ++ int container_id, uint16_t *icid) ++{ ++ struct dprc_attributes attr; ++ int error; ++ ++ error = get_dprc_attr(mc_io, container_id, &attr); ++ if (error == 0) ++ *icid = attr.icid; ++ ++ return 
error; ++} ++ ++static int get_dprc_version(struct fsl_mc_io *mc_io, ++ int container_id, uint16_t *major, uint16_t *minor) ++{ ++ struct dprc_attributes attr; ++ int error; ++ ++ error = get_dprc_attr(mc_io, container_id, &attr); ++ if (error == 0) { ++ *major = attr.version.major; ++ *minor = attr.version.minor; ++ } ++ ++ return error; ++} ++ ++static int translate_mc_addr(enum fsl_mc_region_types mc_region_type, ++ uint64_t mc_offset, phys_addr_t *phys_addr) ++{ ++ int i; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ ++ if (mc->num_translation_ranges == 0) { ++ /* ++ * Do identity mapping: ++ */ ++ *phys_addr = mc_offset; ++ return 0; ++ } ++ ++ for (i = 0; i < mc->num_translation_ranges; i++) { ++ struct fsl_mc_addr_translation_range *range = ++ &mc->translation_ranges[i]; ++ ++ if (mc_region_type == range->mc_region_type && ++ mc_offset >= range->start_mc_offset && ++ mc_offset < range->end_mc_offset) { ++ *phys_addr = range->start_phys_addr + ++ (mc_offset - range->start_mc_offset); ++ return 0; ++ } ++ } ++ ++ return -EFAULT; ++} ++ ++static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, ++ struct fsl_mc_device *mc_bus_dev) ++{ ++ int i; ++ int error; ++ struct resource *regions; ++ struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc; ++ struct device *parent_dev = mc_dev->dev.parent; ++ enum fsl_mc_region_types mc_region_type; ++ ++ if (strcmp(obj_desc->type, "dprc") == 0 || ++ strcmp(obj_desc->type, "dpmcp") == 0) { ++ mc_region_type = FSL_MC_PORTAL; ++ } else if (strcmp(obj_desc->type, "dpio") == 0) { ++ mc_region_type = FSL_QBMAN_PORTAL; ++ } else { ++ /* ++ * This function should not have been called for this MC object ++ * type, as this object type is not supposed to have MMIO ++ * regions ++ */ ++ WARN_ON(true); ++ return -EINVAL; ++ } ++ ++ regions = kmalloc_array(obj_desc->region_count, ++ sizeof(regions[0]), GFP_KERNEL); ++ if (!regions) ++ return -ENOMEM; ++ ++ for (i = 0; i < obj_desc->region_count; i++) { ++ struct dprc_region_desc region_desc; ++ ++ error = dprc_get_obj_region(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, ++ obj_desc->type, ++ obj_desc->id, i, ®ion_desc); ++ if (error < 0) { ++ dev_err(parent_dev, ++ "dprc_get_obj_region() failed: %d\n", error); ++ goto error_cleanup_regions; ++ } ++ ++ WARN_ON(region_desc.size == 0); ++ error = translate_mc_addr(mc_region_type, ++ region_desc.base_offset, ++ ®ions[i].start); ++ if (error < 0) { ++ dev_err(parent_dev, ++ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", ++ region_desc.base_offset, ++ obj_desc->type, obj_desc->id, i); ++ goto error_cleanup_regions; ++ } ++ ++ regions[i].end = regions[i].start + region_desc.size - 1; ++ regions[i].name = "fsl-mc object MMIO region"; ++ regions[i].flags = IORESOURCE_IO; ++ if (region_desc.flags & DPRC_REGION_CACHEABLE) ++ regions[i].flags |= IORESOURCE_CACHEABLE; ++ } ++ ++ mc_dev->regions = regions; ++ return 0; ++ ++error_cleanup_regions: ++ kfree(regions); ++ return error; ++} ++ ++/** ++ * Add a newly discovered MC object device to be visible in Linux ++ */ ++int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, ++ struct fsl_mc_io *mc_io, ++ struct device *parent_dev, ++ const char *driver_override, ++ struct fsl_mc_device **new_mc_dev) ++{ ++ int error; ++ struct fsl_mc_device *mc_dev = NULL; ++ struct fsl_mc_bus *mc_bus = NULL; ++ struct fsl_mc_device *parent_mc_dev; ++ ++ if (parent_dev->bus == &fsl_mc_bus_type) ++ parent_mc_dev = to_fsl_mc_device(parent_dev); ++ else ++ parent_mc_dev = NULL; ++ ++ if 
(strcmp(obj_desc->type, "dprc") == 0) { ++ /* ++ * Allocate an MC bus device object: ++ */ ++ mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL); ++ if (!mc_bus) ++ return -ENOMEM; ++ ++ mc_dev = &mc_bus->mc_dev; ++ } else { ++ /* ++ * Allocate a regular fsl_mc_device object: ++ */ ++ mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL); ++ if (!mc_dev) ++ return -ENOMEM; ++ } ++ ++ mc_dev->obj_desc = *obj_desc; ++ mc_dev->mc_io = mc_io; ++ if (driver_override) { ++ /* ++ * We trust driver_override, so we don't need to use ++ * kstrndup() here ++ */ ++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL); ++ if (!mc_dev->driver_override) { ++ error = -ENOMEM; ++ goto error_cleanup_dev; ++ } ++ } ++ ++ device_initialize(&mc_dev->dev); ++ INIT_LIST_HEAD(&mc_dev->dev.msi_list); ++ mc_dev->dev.parent = parent_dev; ++ mc_dev->dev.bus = &fsl_mc_bus_type; ++ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id); ++ ++ if (strcmp(obj_desc->type, "dprc") == 0) { ++ struct fsl_mc_io *mc_io2; ++ ++ mc_dev->flags |= FSL_MC_IS_DPRC; ++ ++ /* ++ * To get the DPRC's ICID, we need to open the DPRC ++ * in get_dprc_icid(). For child DPRCs, we do so using the ++ * parent DPRC's MC portal instead of the child DPRC's MC ++ * portal, in case the child DPRC is already opened with ++ * its own portal (e.g., the DPRC used by AIOP). ++ * ++ * NOTE: There cannot be more than one active open for a ++ * given MC object, using the same MC portal. ++ */ ++ if (parent_mc_dev) { ++ /* ++ * device being added is a child DPRC device ++ */ ++ mc_io2 = parent_mc_dev->mc_io; ++ } else { ++ /* ++ * device being added is the root DPRC device ++ */ ++ if (WARN_ON(!mc_io)) { ++ error = -EINVAL; ++ goto error_cleanup_dev; ++ } ++ ++ mc_io2 = mc_io; ++ } ++ ++ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid); ++ if (error < 0) ++ goto error_cleanup_dev; ++ } else { ++ /* ++ * A non-DPRC MC object device has to be a child of another ++ * MC object (specifically a DPRC object) ++ */ ++ mc_dev->icid = parent_mc_dev->icid; ++ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; ++ mc_dev->dev.dma_mask = &mc_dev->dma_mask; ++ } ++ ++ /* ++ * Get MMIO regions for the device from the MC: ++ * ++ * NOTE: the root DPRC is a special case as its MMIO region is ++ * obtained from the device tree ++ */ ++ if (parent_mc_dev && obj_desc->region_count != 0) { ++ error = fsl_mc_device_get_mmio_regions(mc_dev, ++ parent_mc_dev); ++ if (error < 0) ++ goto error_cleanup_dev; ++ } ++ ++ /* ++ * Objects are coherent, unless 'no shareability' flag set. 
++ * FIXME: fill up @dma_base, @size, @iommu ++ */ ++ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)) ++ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true); ++ ++ /* ++ * The device-specific probe callback will get invoked by device_add() ++ */ ++ error = device_add(&mc_dev->dev); ++ if (error < 0) { ++ dev_err(parent_dev, ++ "device_add() failed for device %s: %d\n", ++ dev_name(&mc_dev->dev), error); ++ goto error_cleanup_dev; ++ } ++ ++ (void)get_device(&mc_dev->dev); ++ dev_dbg(parent_dev, "Added MC object device %s\n", ++ dev_name(&mc_dev->dev)); ++ ++ *new_mc_dev = mc_dev; ++ return 0; ++ ++error_cleanup_dev: ++ kfree(mc_dev->regions); ++ if (mc_bus) ++ devm_kfree(parent_dev, mc_bus); ++ else ++ kmem_cache_free(mc_dev_cache, mc_dev); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_device_add); ++ ++/** ++ * fsl_mc_device_remove - Remove a MC object device from being visible to ++ * Linux ++ * ++ * @mc_dev: Pointer to a MC object device object ++ */ ++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) ++{ ++ struct fsl_mc_bus *mc_bus = NULL; ++ ++ kfree(mc_dev->regions); ++ ++ /* ++ * The device-specific remove callback will get invoked by device_del() ++ */ ++ device_del(&mc_dev->dev); ++ put_device(&mc_dev->dev); ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ ++ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) ++ fsl_mc_bus_type.dev_root = NULL; ++ } else ++ WARN_ON(mc_dev->mc_io != NULL); ++ ++ kfree(mc_dev->driver_override); ++ mc_dev->driver_override = NULL; ++ if (mc_bus) ++ devm_kfree(mc_dev->dev.parent, mc_bus); ++ else ++ kmem_cache_free(mc_dev_cache, mc_dev); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_device_remove); ++ ++static int mc_bus_msi_prepare(struct irq_domain *domain, struct device *dev, ++ int nvec, msi_alloc_info_t *info) ++{ ++ int error; ++ u32 its_dev_id; ++ struct dprc_attributes dprc_attr; ++ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(dev); ++ ++ if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC))) ++ return -EINVAL; ++ ++ error = dprc_get_attributes(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, &dprc_attr); ++ if (error < 0) { ++ dev_err(&mc_bus_dev->dev, ++ "dprc_get_attributes() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ /* ++ * Build the device Id to be passed to the GIC-ITS: ++ * ++ * NOTE: This device id corresponds to the IOMMU stream ID ++ * associated with the DPRC object. ++ */ ++ its_dev_id = mc_bus_dev->icid; ++ if (its_dev_id > STREAM_ID_ICID_MASK) { ++ dev_err(&mc_bus_dev->dev, ++ "Invalid ICID: %#x\n", its_dev_id); ++ return -ERANGE; ++ } ++ ++ if (dprc_attr.options & DPRC_CFG_OPT_AIOP) ++ its_dev_id |= STREAM_ID_PL_MASK | STREAM_ID_BMT_MASK; ++ ++ return __its_msi_prepare(domain, its_dev_id, dev, nvec, info); ++} ++ ++static void mc_bus_mask_msi_irq(struct irq_data *d) ++{ ++ /* Bus specefic Mask */ ++ irq_chip_mask_parent(d); ++} ++ ++static void mc_bus_unmask_msi_irq(struct irq_data *d) ++{ ++ /* Bus specefic unmask */ ++ irq_chip_unmask_parent(d); ++} ++ ++static void program_msi_at_mc(struct fsl_mc_device *mc_bus_dev, ++ struct fsl_mc_device_irq *irq) ++{ ++ int error; ++ struct fsl_mc_device *owner_mc_dev = irq->mc_dev; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ struct dprc_irq_cfg irq_cfg; ++ ++ /* ++ * irq->msi_paddr is 0x0 when this function is invoked in the ++ * free_irq() code path. In this case, for the MC, we don't ++ * really need to "unprogram" the MSI, so we just return. 
++ * This helps avoid subtle ordering problems in the MC ++ * bus IRQ teardown logic. ++ * FIXME: evaluate whether there is a better way to address ++ * the underlying issue (upstreamability concern) ++ */ ++ if (irq->msi_paddr == 0x0) ++ return; ++ ++ if (WARN_ON(!owner_mc_dev)) ++ return; ++ ++ irq_cfg.paddr = irq->msi_paddr; ++ irq_cfg.val = irq->msi_value; ++ irq_cfg.irq_num = irq->irq_number; ++ ++ if (owner_mc_dev == mc_bus_dev) { ++ /* ++ * IRQ is for the mc_bus_dev's DPRC itself ++ */ ++ error = dprc_set_irq(mc_bus->atomic_mc_io, ++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, ++ mc_bus->atomic_dprc_handle, ++ irq->dev_irq_index, ++ &irq_cfg); ++ if (error < 0) { ++ dev_err(&owner_mc_dev->dev, ++ "dprc_set_irq() failed: %d\n", error); ++ } ++ } else { ++ error = dprc_set_obj_irq(mc_bus->atomic_mc_io, ++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, ++ mc_bus->atomic_dprc_handle, ++ owner_mc_dev->obj_desc.type, ++ owner_mc_dev->obj_desc.id, ++ irq->dev_irq_index, ++ &irq_cfg); ++ if (error < 0) { ++ dev_err(&owner_mc_dev->dev, ++ "dprc_obj_set_irq() failed: %d\n", error); ++ } ++ } ++} ++ ++/* ++ * This function is invoked from devm_request_irq(), ++ * devm_request_threaded_irq(), dev_free_irq() ++ */ ++static void mc_bus_msi_domain_write_msg(struct irq_data *irq_data, ++ struct msi_msg *msg) ++{ ++ struct msi_desc *msi_entry = irq_data->msi_desc; ++ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_entry->dev); ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ struct fsl_mc_device_irq *irq_res = ++ &mc_bus->irq_resources[msi_entry->msi_attrib.entry_nr]; ++ ++ /* ++ * NOTE: This function is invoked with interrupts disabled ++ */ ++ ++ if (irq_res->irq_number == irq_data->irq) { ++ irq_res->msi_paddr = ++ ((u64)msg->address_hi << 32) | msg->address_lo; ++ ++ irq_res->msi_value = msg->data; ++ ++ /* ++ * Program the MSI (paddr, value) pair in the device: ++ */ ++ program_msi_at_mc(mc_bus_dev, irq_res); ++ } ++} ++ ++static struct irq_chip mc_bus_msi_irq_chip = { ++ .name = "fsl-mc-bus-msi", ++ .irq_unmask = mc_bus_unmask_msi_irq, ++ .irq_mask = mc_bus_mask_msi_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_write_msi_msg = mc_bus_msi_domain_write_msg, ++}; ++ ++static struct msi_domain_ops mc_bus_msi_ops = { ++ .msi_prepare = mc_bus_msi_prepare, ++}; ++ ++static struct msi_domain_info mc_bus_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), ++ .ops = &mc_bus_msi_ops, ++ .chip = &mc_bus_msi_irq_chip, ++}; ++ ++static int create_mc_irq_domain(struct platform_device *mc_pdev, ++ struct irq_domain **new_irq_domain) ++{ ++ int error; ++ struct device_node *its_of_node; ++ struct irq_domain *its_domain; ++ struct irq_domain *irq_domain; ++ struct device_node *mc_of_node = mc_pdev->dev.of_node; ++ ++ its_of_node = of_parse_phandle(mc_of_node, "msi-parent", 0); ++ if (!its_of_node) { ++ dev_err(&mc_pdev->dev, ++ "msi-parent phandle missing for %s\n", ++ mc_of_node->full_name); ++ return -ENOENT; ++ } ++ ++ /* ++ * Extract MSI parent node: ++ */ ++ its_domain = irq_find_host(its_of_node); ++ if (!its_domain) { ++ dev_err(&mc_pdev->dev, "Unable to find parent domain\n"); ++ error = -ENOENT; ++ goto cleanup_its_of_node; ++ } ++ ++ irq_domain = msi_create_irq_domain(mc_of_node, &mc_bus_msi_domain_info, ++ its_domain->parent); ++ if (!irq_domain) { ++ dev_err(&mc_pdev->dev, "Failed to allocate msi_domain\n"); ++ error = -ENOMEM; ++ goto cleanup_its_of_node; ++ } ++ ++ dev_dbg(&mc_pdev->dev, "Allocated MSI 
domain\n"); ++ *new_irq_domain = irq_domain; ++ return 0; ++ ++cleanup_its_of_node: ++ of_node_put(its_of_node); ++ return error; ++} ++ ++/* ++ * Initialize the interrupt pool associated with a MC bus. ++ * It allocates a block of IRQs from the GIC-ITS ++ */ ++int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, ++ unsigned int irq_count) ++{ ++ unsigned int i; ++ struct msi_desc *msi_entry; ++ struct msi_desc *next_msi_entry; ++ struct fsl_mc_device_irq *irq_resources; ++ struct fsl_mc_device_irq *irq_res; ++ int error; ++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; ++ ++ /* ++ * Detect duplicate invocations of this function: ++ */ ++ if (WARN_ON(!list_empty(&mc_bus_dev->dev.msi_list))) ++ return -EINVAL; ++ ++ if (WARN_ON(irq_count == 0 || ++ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)) ++ return -EINVAL; ++ ++ irq_resources = ++ devm_kzalloc(&mc_bus_dev->dev, ++ sizeof(*irq_resources) * irq_count, ++ GFP_KERNEL); ++ if (!irq_resources) ++ return -ENOMEM; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq_res = &irq_resources[i]; ++ msi_entry = alloc_msi_entry(&mc_bus_dev->dev); ++ if (!msi_entry) { ++ dev_err(&mc_bus_dev->dev, "Failed to allocate msi entry\n"); ++ error = -ENOMEM; ++ goto cleanup_msi_entries; ++ } ++ ++ msi_entry->msi_attrib.is_msix = 1; ++ msi_entry->msi_attrib.is_64 = 1; ++ msi_entry->msi_attrib.entry_nr = i; ++ msi_entry->nvec_used = 1; ++ list_add_tail(&msi_entry->list, &mc_bus_dev->dev.msi_list); ++ ++ /* ++ * NOTE: irq_res->msi_paddr will be set by the ++ * mc_bus_msi_domain_write_msg() callback ++ */ ++ irq_res->resource.type = res_pool->type; ++ irq_res->resource.data = irq_res; ++ irq_res->resource.parent_pool = res_pool; ++ INIT_LIST_HEAD(&irq_res->resource.node); ++ list_add_tail(&irq_res->resource.node, &res_pool->free_list); ++ } ++ ++ /* ++ * NOTE: Calling this function will trigger the invocation of the ++ * mc_bus_msi_prepare() callback ++ */ ++ error = msi_domain_alloc_irqs(mc->irq_domain, ++ &mc_bus_dev->dev, irq_count); ++ ++ if (error) { ++ dev_err(&mc_bus_dev->dev, "Failed to allocate IRQs\n"); ++ goto cleanup_msi_entries; ++ } ++ ++ for_each_msi_entry(msi_entry, &mc_bus_dev->dev) { ++ u32 irq_num = msi_entry->irq; ++ ++ irq_res = &irq_resources[msi_entry->msi_attrib.entry_nr]; ++ irq_res->irq_number = irq_num; ++ irq_res->resource.id = irq_num; ++ } ++ ++ res_pool->max_count = irq_count; ++ res_pool->free_count = irq_count; ++ mc_bus->irq_resources = irq_resources; ++ return 0; ++ ++cleanup_msi_entries: ++ list_for_each_entry_safe(msi_entry, next_msi_entry, ++ &mc_bus_dev->dev.msi_list, list) { ++ list_del(&msi_entry->list); ++ kfree(msi_entry); ++ } ++ ++ devm_kfree(&mc_bus_dev->dev, irq_resources); ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); ++ ++/** ++ * Teardown the interrupt pool associated with an MC bus. ++ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. 
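 *
 * Usage sketch (illustrative; in this series the DPRC driver is the
 * expected owner of the pool, and the irq_count shown is just the
 * pool's upper bound):
 *
 *	error = fsl_mc_populate_irq_pool(mc_bus,
 *					 FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
 *	if (error < 0)
 *		return error;
 *	...
 *	fsl_mc_cleanup_irq_pool(mc_bus);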
++ */ ++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) ++{ ++ struct msi_desc *msi_entry; ++ struct msi_desc *next_msi_entry; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ return; ++ ++ if (WARN_ON(res_pool->max_count == 0)) ++ return; ++ ++ if (WARN_ON(res_pool->free_count != res_pool->max_count)) ++ return; ++ ++ msi_domain_free_irqs(mc->irq_domain, &mc_bus->mc_dev.dev); ++ list_for_each_entry_safe(msi_entry, next_msi_entry, ++ &mc_bus->mc_dev.dev.msi_list, list) { ++ list_del(&msi_entry->list); ++ kfree(msi_entry); ++ } ++ ++ devm_kfree(&mc_bus->mc_dev.dev, mc_bus->irq_resources); ++ res_pool->max_count = 0; ++ res_pool->free_count = 0; ++ mc_bus->irq_resources = NULL; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); ++ ++static int parse_mc_ranges(struct device *dev, ++ int *paddr_cells, ++ int *mc_addr_cells, ++ int *mc_size_cells, ++ const __be32 **ranges_start, ++ uint8_t *num_ranges) ++{ ++ const __be32 *prop; ++ int range_tuple_cell_count; ++ int ranges_len; ++ int tuple_len; ++ struct device_node *mc_node = dev->of_node; ++ ++ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len); ++ if (!(*ranges_start) || !ranges_len) { ++ dev_warn(dev, ++ "missing or empty ranges property for device tree node '%s'\n", ++ mc_node->name); ++ ++ *num_ranges = 0; ++ return 0; ++ } ++ ++ *paddr_cells = of_n_addr_cells(mc_node); ++ ++ prop = of_get_property(mc_node, "#address-cells", NULL); ++ if (prop) ++ *mc_addr_cells = be32_to_cpup(prop); ++ else ++ *mc_addr_cells = *paddr_cells; ++ ++ prop = of_get_property(mc_node, "#size-cells", NULL); ++ if (prop) ++ *mc_size_cells = be32_to_cpup(prop); ++ else ++ *mc_size_cells = of_n_size_cells(mc_node); ++ ++ range_tuple_cell_count = *paddr_cells + *mc_addr_cells + ++ *mc_size_cells; ++ ++ tuple_len = range_tuple_cell_count * sizeof(__be32); ++ if (ranges_len % tuple_len != 0) { ++ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name); ++ return -EINVAL; ++ } ++ ++ *num_ranges = ranges_len / tuple_len; ++ return 0; ++} ++ ++static int get_mc_addr_translation_ranges(struct device *dev, ++ struct fsl_mc_addr_translation_range ++ **ranges, ++ uint8_t *num_ranges) ++{ ++ int error; ++ int paddr_cells; ++ int mc_addr_cells; ++ int mc_size_cells; ++ int i; ++ const __be32 *ranges_start; ++ const __be32 *cell; ++ ++ error = parse_mc_ranges(dev, ++ &paddr_cells, ++ &mc_addr_cells, ++ &mc_size_cells, ++ &ranges_start, ++ num_ranges); ++ if (error < 0) ++ return error; ++ ++ if (!(*num_ranges)) { ++ /* ++ * Missing or empty ranges property ("ranges;") for the ++ * 'fsl,qoriq-mc' node. In this case, identity mapping ++ * will be used. 
++ */ ++ *ranges = NULL; ++ return 0; ++ } ++ ++ *ranges = devm_kcalloc(dev, *num_ranges, ++ sizeof(struct fsl_mc_addr_translation_range), ++ GFP_KERNEL); ++ if (!(*ranges)) ++ return -ENOMEM; ++ ++ cell = ranges_start; ++ for (i = 0; i < *num_ranges; ++i) { ++ struct fsl_mc_addr_translation_range *range = &(*ranges)[i]; ++ ++ range->mc_region_type = of_read_number(cell, 1); ++ range->start_mc_offset = of_read_number(cell + 1, ++ mc_addr_cells - 1); ++ cell += mc_addr_cells; ++ range->start_phys_addr = of_read_number(cell, paddr_cells); ++ cell += paddr_cells; ++ range->end_mc_offset = range->start_mc_offset + ++ of_read_number(cell, mc_size_cells); ++ ++ cell += mc_size_cells; ++ } ++ ++ return 0; ++} ++ ++/** ++ * fsl_mc_bus_probe - callback invoked when the root MC bus is being ++ * added ++ */ ++static int fsl_mc_bus_probe(struct platform_device *pdev) ++{ ++ struct dprc_obj_desc obj_desc; ++ int error; ++ struct fsl_mc *mc; ++ struct fsl_mc_device *mc_bus_dev = NULL; ++ struct fsl_mc_io *mc_io = NULL; ++ int container_id; ++ phys_addr_t mc_portal_phys_addr; ++ uint32_t mc_portal_size; ++ struct mc_version mc_version; ++ struct resource res; ++ ++ dev_info(&pdev->dev, "Root MC bus device probed"); ++ ++ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); ++ if (!mc) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, mc); ++ error = create_mc_irq_domain(pdev, &mc->irq_domain); ++ if (error < 0) { ++ dev_warn(&pdev->dev, ++ "WARNING: MC bus driver will run without interrupt support\n"); ++ } else { ++ mc->gic_supported = true; ++ } ++ ++ /* ++ * Get physical address of MC portal for the root DPRC: ++ */ ++ error = of_address_to_resource(pdev->dev.of_node, 0, &res); ++ if (error < 0) { ++ dev_err(&pdev->dev, ++ "of_address_to_resource() failed for %s\n", ++ pdev->dev.of_node->full_name); ++ goto error_cleanup_irq_domain; ++ } ++ ++ mc_portal_phys_addr = res.start; ++ mc_portal_size = resource_size(&res); ++ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr, ++ mc_portal_size, NULL, 0, &mc_io); ++ if (error < 0) ++ goto error_cleanup_irq_domain; ++ ++ error = mc_get_version(mc_io, 0, &mc_version); ++ if (error != 0) { ++ dev_err(&pdev->dev, ++ "mc_get_version() failed with error %d\n", error); ++ goto error_cleanup_mc_io; ++ } ++ ++ dev_info(&pdev->dev, ++ "Freescale Management Complex Firmware version: %u.%u.%u\n", ++ mc_version.major, mc_version.minor, mc_version.revision); ++ ++ error = get_mc_addr_translation_ranges(&pdev->dev, ++ &mc->translation_ranges, ++ &mc->num_translation_ranges); ++ if (error < 0) ++ goto error_cleanup_mc_io; ++ ++ error = dpmng_get_container_id(mc_io, 0, &container_id); ++ if (error < 0) { ++ dev_err(&pdev->dev, ++ "dpmng_get_container_id() failed: %d\n", error); ++ goto error_cleanup_mc_io; ++ } ++ ++ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); ++ error = get_dprc_version(mc_io, container_id, ++ &obj_desc.ver_major, &obj_desc.ver_minor); ++ if (error < 0) ++ goto error_cleanup_mc_io; ++ ++ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE; ++ strcpy(obj_desc.type, "dprc"); ++ obj_desc.id = container_id; ++ obj_desc.irq_count = 1; ++ obj_desc.region_count = 0; ++ ++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL, ++ &mc_bus_dev); ++ if (error < 0) ++ goto error_cleanup_mc_io; ++ ++ mc->root_mc_bus_dev = mc_bus_dev; ++ return 0; ++ ++error_cleanup_mc_io: ++ fsl_destroy_mc_io(mc_io); ++ ++error_cleanup_irq_domain: ++ if (mc->gic_supported) ++ irq_domain_remove(mc->irq_domain); ++ ++ return error; ++} ++ ++/** ++ * fsl_mc_bus_remove - 
callback invoked when the root MC bus is being ++ * removed ++ */ ++static int fsl_mc_bus_remove(struct platform_device *pdev) ++{ ++ struct fsl_mc *mc = platform_get_drvdata(pdev); ++ ++ if (WARN_ON(&mc->root_mc_bus_dev->dev != fsl_mc_bus_type.dev_root)) ++ return -EINVAL; ++ ++ if (mc->gic_supported) ++ irq_domain_remove(mc->irq_domain); ++ ++ fsl_mc_device_remove(mc->root_mc_bus_dev); ++ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); ++ mc->root_mc_bus_dev->mc_io = NULL; ++ ++ dev_info(&pdev->dev, "Root MC bus device removed"); ++ return 0; ++} ++ ++static const struct of_device_id fsl_mc_bus_match_table[] = { ++ {.compatible = "fsl,qoriq-mc",}, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table); ++ ++static struct platform_driver fsl_mc_bus_driver = { ++ .driver = { ++ .name = "fsl_mc_bus", ++ .owner = THIS_MODULE, ++ .pm = NULL, ++ .of_match_table = fsl_mc_bus_match_table, ++ }, ++ .probe = fsl_mc_bus_probe, ++ .remove = fsl_mc_bus_remove, ++}; ++ ++static int __init fsl_mc_bus_driver_init(void) ++{ ++ int error; ++ ++ mc_dev_cache = kmem_cache_create("fsl_mc_device", ++ sizeof(struct fsl_mc_device), 0, 0, ++ NULL); ++ if (!mc_dev_cache) { ++ pr_err("Could not create fsl_mc_device cache\n"); ++ return -ENOMEM; ++ } ++ ++ error = bus_register(&fsl_mc_bus_type); ++ if (error < 0) { ++ pr_err("fsl-mc bus type registration failed: %d\n", error); ++ goto error_cleanup_cache; ++ } ++ ++ pr_info("fsl-mc bus type registered\n"); ++ ++ error = platform_driver_register(&fsl_mc_bus_driver); ++ if (error < 0) { ++ pr_err("platform_driver_register() failed: %d\n", error); ++ goto error_cleanup_bus; ++ } ++ ++ error = dprc_driver_init(); ++ if (error < 0) ++ goto error_cleanup_driver; ++ ++ error = fsl_mc_allocator_driver_init(); ++ if (error < 0) ++ goto error_cleanup_dprc_driver; ++ ++ return 0; ++ ++error_cleanup_dprc_driver: ++ dprc_driver_exit(); ++ ++error_cleanup_driver: ++ platform_driver_unregister(&fsl_mc_bus_driver); ++ ++error_cleanup_bus: ++ bus_unregister(&fsl_mc_bus_type); ++ ++error_cleanup_cache: ++ kmem_cache_destroy(mc_dev_cache); ++ return error; ++} ++ ++postcore_initcall(fsl_mc_bus_driver_init); ++ ++static void __exit fsl_mc_bus_driver_exit(void) ++{ ++ if (WARN_ON(!mc_dev_cache)) ++ return; ++ ++ fsl_mc_allocator_driver_exit(); ++ dprc_driver_exit(); ++ platform_driver_unregister(&fsl_mc_bus_driver); ++ bus_unregister(&fsl_mc_bus_type); ++ kmem_cache_destroy(mc_dev_cache); ++ pr_info("MC bus unregistered\n"); ++} ++ ++module_exit(fsl_mc_bus_driver_exit); ++ ++MODULE_AUTHOR("Freescale Semiconductor Inc."); ++MODULE_DESCRIPTION("Freescale Management Complex (MC) bus driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/fsl-mc/bus/mc-ioctl.h b/drivers/staging/fsl-mc/bus/mc-ioctl.h +new file mode 100644 +index 0000000..d5c1bc3 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h +@@ -0,0 +1,25 @@ ++/* ++ * Freescale Management Complex (MC) ioclt interface ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * Lijun Pan ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++#ifndef _FSL_MC_IOCTL_H_ ++#define _FSL_MC_IOCTL_H_ ++ ++#include ++ ++#define RESTOOL_IOCTL_TYPE 'R' ++ ++#define RESTOOL_GET_ROOT_DPRC_INFO \ ++ _IOR(RESTOOL_IOCTL_TYPE, 0x1, uint32_t) ++ ++#define RESTOOL_SEND_MC_COMMAND \ ++ _IOWR(RESTOOL_IOCTL_TYPE, 0x4, struct mc_command) ++ ++#endif /* _FSL_MC_IOCTL_H_ */ +diff --git a/drivers/staging/fsl-mc/bus/mc-restool.c b/drivers/staging/fsl-mc/bus/mc-restool.c +new file mode 100644 +index 0000000..d261c1a +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-restool.c +@@ -0,0 +1,312 @@ ++/* ++ * Freescale Management Complex (MC) restool driver ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * Lijun Pan ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mc-ioctl.h" ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpmng.h" ++ ++/** ++ * Maximum number of DPRCs that can be opened at the same time ++ */ ++#define MAX_DPRC_HANDLES 64 ++ ++/** ++ * struct fsl_mc_restool - Management Complex (MC) resource manager object ++ * @tool_mc_io: pointer to the MC I/O object used by the restool ++ */ ++struct fsl_mc_restool { ++ struct fsl_mc_io *tool_mc_io; ++}; ++ ++/** ++ * struct global_state - indicating the number of static and dynamic instance ++ * @dynamic_instance_count - number of dynamically created instances ++ * @static_instance_in_use - static instance is in use or not ++ * @mutex - mutex lock to serialze the operations ++ */ ++struct global_state { ++ uint32_t dynamic_instance_count; ++ bool static_instance_in_use; ++ struct mutex mutex; ++}; ++ ++static struct fsl_mc_restool fsl_mc_restool = { 0 }; ++static struct global_state global_state = { 0 }; ++ ++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep) ++{ ++ struct fsl_mc_device *root_mc_dev; ++ int error = 0; ++ struct fsl_mc_restool *fsl_mc_restool_new = NULL; ++ ++ mutex_lock(&global_state.mutex); ++ ++ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) { ++ error = -EINVAL; ++ goto error; ++ } ++ ++ if (!global_state.static_instance_in_use) { ++ global_state.static_instance_in_use = true; ++ filep->private_data = &fsl_mc_restool; ++ } else { ++ fsl_mc_restool_new = kmalloc(sizeof(struct fsl_mc_restool), ++ GFP_KERNEL); ++ if (fsl_mc_restool_new == NULL) { ++ error = -ENOMEM; ++ goto error; ++ } ++ memset(fsl_mc_restool_new, 0, sizeof(*fsl_mc_restool_new)); ++ ++ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ error = fsl_mc_portal_allocate(root_mc_dev, 0, ++ &fsl_mc_restool_new->tool_mc_io); ++ if (error < 0) { ++ pr_err("Not able to allocate MC portal\n"); ++ goto error; ++ } ++ ++global_state.dynamic_instance_count; ++ filep->private_data = fsl_mc_restool_new; ++ } ++ ++ mutex_unlock(&global_state.mutex); ++ return 0; ++error: ++ if (fsl_mc_restool_new != NULL && ++ fsl_mc_restool_new->tool_mc_io != NULL) { ++ fsl_mc_portal_free(fsl_mc_restool_new->tool_mc_io); ++ fsl_mc_restool_new->tool_mc_io = NULL; ++ } ++ ++ kfree(fsl_mc_restool_new); ++ mutex_unlock(&global_state.mutex); ++ return error; ++} ++ ++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep) ++{ ++ struct fsl_mc_restool *fsl_mc_restool_local = filep->private_data; ++ ++ if (WARN_ON(filep->private_data == 
NULL)) ++ return -EINVAL; ++ ++ mutex_lock(&global_state.mutex); ++ ++ if (WARN_ON(global_state.dynamic_instance_count == 0 && ++ !global_state.static_instance_in_use)) { ++ mutex_unlock(&global_state.mutex); ++ return -EINVAL; ++ } ++ ++ /* Globally clean up opened/untracked handles */ ++ fsl_mc_portal_reset(fsl_mc_restool_local->tool_mc_io); ++ ++ pr_debug("dynamic instance count: %d\n", ++ global_state.dynamic_instance_count); ++ pr_debug("static instance count: %d\n", ++ global_state.static_instance_in_use); ++ ++ /* ++ * must check ++ * whether fsl_mc_restool_local is dynamic or global instance ++ * Otherwise it will free up the reserved portal by accident ++ * or even not free up the dynamic allocated portal ++ * if 2 or more instances running concurrently ++ */ ++ if (fsl_mc_restool_local == &fsl_mc_restool) { ++ pr_debug("this is reserved portal"); ++ pr_debug("reserved portal not in use\n"); ++ global_state.static_instance_in_use = false; ++ } else { ++ pr_debug("this is dynamically allocated portal"); ++ pr_debug("free one dynamically allocated portal\n"); ++ fsl_mc_portal_free(fsl_mc_restool_local->tool_mc_io); ++ kfree(filep->private_data); ++ --global_state.dynamic_instance_count; ++ } ++ ++ filep->private_data = NULL; ++ mutex_unlock(&global_state.mutex); ++ return 0; ++} ++ ++static int restool_get_root_dprc_info(unsigned long arg) ++{ ++ int error = -EINVAL; ++ uint32_t root_dprc_id; ++ struct fsl_mc_device *root_mc_dev; ++ ++ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ root_dprc_id = root_mc_dev->obj_desc.id; ++ error = copy_to_user((void __user *)arg, &root_dprc_id, ++ sizeof(root_dprc_id)); ++ if (error < 0) { ++ pr_err("copy_to_user() failed with error %d\n", error); ++ goto error; ++ } ++ ++ return 0; ++error: ++ return error; ++} ++ ++static int restool_send_mc_command(unsigned long arg, ++ struct fsl_mc_restool *fsl_mc_restool) ++{ ++ int error = -EINVAL; ++ struct mc_command mc_cmd; ++ ++ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)); ++ if (error < 0) { ++ pr_err("copy_to_user() failed with error %d\n", error); ++ goto error; ++ } ++ ++ /* ++ * Send MC command to the MC: ++ */ ++ error = mc_send_command(fsl_mc_restool->tool_mc_io, &mc_cmd); ++ if (error < 0) ++ goto error; ++ ++ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)); ++ if (error < 0) { ++ pr_err("copy_to_user() failed with error %d\n", error); ++ goto error; ++ } ++ ++ return 0; ++error: ++ return error; ++} ++ ++static long ++fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ int error = -EINVAL; ++ ++ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) ++ goto out; ++ ++ switch (cmd) { ++ case RESTOOL_GET_ROOT_DPRC_INFO: ++ error = restool_get_root_dprc_info(arg); ++ break; ++ ++ case RESTOOL_SEND_MC_COMMAND: ++ error = restool_send_mc_command(arg, file->private_data); ++ break; ++ default: ++ error = -EINVAL; ++ } ++out: ++ return error; ++} ++ ++static const struct file_operations fsl_mc_restool_dev_fops = { ++ .owner = THIS_MODULE, ++ .open = fsl_mc_restool_dev_open, ++ .release = fsl_mc_restool_dev_release, ++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl, ++ .compat_ioctl = fsl_mc_restool_dev_ioctl, ++}; ++ ++static struct miscdevice fsl_mc_restool_dev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "mc_restool", ++ .fops = &fsl_mc_restool_dev_fops ++}; ++ ++static int __init fsl_mc_restool_driver_init(void) ++{ ++ struct fsl_mc_device *root_mc_dev; ++ int error = -EINVAL; ++ bool restool_dev_registered = false; 
++ ++ mutex_init(&global_state.mutex); ++ ++ if (WARN_ON(fsl_mc_restool.tool_mc_io != NULL)) ++ goto error; ++ ++ if (WARN_ON(global_state.dynamic_instance_count != 0)) ++ goto error; ++ ++ if (WARN_ON(global_state.static_instance_in_use)) ++ goto error; ++ ++ if (fsl_mc_bus_type.dev_root == NULL) { ++ pr_err("fsl-mc bus not found, restool driver registration failed\n"); ++ goto error; ++ } ++ ++ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ error = fsl_mc_portal_allocate(root_mc_dev, 0, ++ &fsl_mc_restool.tool_mc_io); ++ if (error < 0) { ++ pr_err("Not able to allocate MC portal\n"); ++ goto error; ++ } ++ ++ error = misc_register(&fsl_mc_restool_dev); ++ if (error < 0) { ++ pr_err("misc_register() failed: %d\n", error); ++ goto error; ++ } ++ ++ restool_dev_registered = true; ++ pr_info("%s driver registered\n", fsl_mc_restool_dev.name); ++ return 0; ++error: ++ if (restool_dev_registered) ++ misc_deregister(&fsl_mc_restool_dev); ++ ++ if (fsl_mc_restool.tool_mc_io != NULL) { ++ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); ++ fsl_mc_restool.tool_mc_io = NULL; ++ } ++ ++ return error; ++} ++ ++module_init(fsl_mc_restool_driver_init); ++ ++static void __exit fsl_mc_restool_driver_exit(void) ++{ ++ if (WARN_ON(fsl_mc_restool.tool_mc_io == NULL)) ++ return; ++ ++ if (WARN_ON(global_state.dynamic_instance_count != 0)) ++ return; ++ ++ if (WARN_ON(global_state.static_instance_in_use)) ++ return; ++ ++ misc_deregister(&fsl_mc_restool_dev); ++ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); ++ fsl_mc_restool.tool_mc_io = NULL; ++ pr_info("%s driver unregistered\n", fsl_mc_restool_dev.name); ++} ++ ++module_exit(fsl_mc_restool_driver_exit); ++ ++MODULE_AUTHOR("Freescale Semiconductor Inc."); ++MODULE_DESCRIPTION("Freescale's MC restool driver"); ++MODULE_LICENSE("GPL"); ++ +diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c +new file mode 100644 +index 0000000..d3b6940 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-sys.c +@@ -0,0 +1,677 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * I/O services to send MC commands to the MC hardware ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/mc.h" ++#include ++#include ++#include ++#include ++#include ++#include "dpmcp.h" ++ ++/** ++ * Timeout in milliseconds to wait for the completion of an MC command ++ * 5000 ms is barely enough for dpsw/dpdmux creation ++ * TODO: if MC firmware could response faster, we should decrease this value ++ */ ++#define MC_CMD_COMPLETION_TIMEOUT_MS 5000 ++ ++/* ++ * usleep_range() min and max values used to throttle down polling ++ * iterations while waiting for MC command completion ++ */ ++#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10 ++#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500 ++ ++#define MC_CMD_HDR_READ_CMDID(_hdr) \ ++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S)) ++ ++/** ++ * dpmcp_irq0_handler - Regular ISR for DPMCP interrupt 0 ++ * ++ * @irq: IRQ number of the interrupt being handled ++ * @arg: Pointer to device structure ++ */ ++static irqreturn_t dpmcp_irq0_handler(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *dpmcp_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_io *mc_io = dpmcp_dev->mc_io; ++ ++ dev_dbg(dev, "DPMCP IRQ %d triggered on CPU %u\n", irq_num, ++ smp_processor_id()); ++ ++ if (WARN_ON(dpmcp_dev->irqs[0]->irq_number != (uint32_t)irq_num)) ++ goto out; ++ ++ if (WARN_ON(!mc_io)) ++ goto out; ++ ++ complete(&mc_io->mc_command_done_completion); ++out: ++ return IRQ_HANDLED; ++} ++ ++/* ++ * Disable and clear interrupts for a given DPMCP object ++ */ ++static int disable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ ++ /* ++ * Disable generation of the DPMCP interrupt: ++ */ ++ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, 0); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_enable() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ /* ++ * Disable all DPMCP interrupt causes: ++ */ ++ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, 0x0); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_mask() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ return 0; ++} ++ ++static void unregister_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) ++{ ++ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; ++ ++ devm_free_irq(&dpmcp_dev->dev, irq->irq_number, &dpmcp_dev->dev); ++} ++ ++static int register_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; ++ ++ error = devm_request_irq(&dpmcp_dev->dev, ++ irq->irq_number, ++ dpmcp_irq0_handler, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ "FSL MC DPMCP irq0", ++ &dpmcp_dev->dev); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "devm_request_irq() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ return 0; ++} ++ ++static 
int enable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ ++ /* ++ * Enable MC command completion event to trigger DPMCP interrupt: ++ */ ++ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, ++ DPMCP_IRQ_EVENT_CMD_DONE); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_mask() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ /* ++ * Enable generation of the interrupt: ++ */ ++ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, 1); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_enable() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Setup MC command completion interrupt for the DPMCP device associated with a ++ * given fsl_mc_io object ++ */ ++int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io) ++{ ++ int error; ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) ++ return -EINVAL; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(!fsl_mc_interrupts_supported())) ++ return -EINVAL; ++ ++ if (WARN_ON(dpmcp_dev->obj_desc.irq_count != 1)) ++ return -EINVAL; ++ ++ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) ++ return -EINVAL; ++ ++ error = fsl_mc_allocate_irqs(dpmcp_dev); ++ if (error < 0) ++ return error; ++ ++ error = disable_dpmcp_irq(dpmcp_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = register_dpmcp_irq_handler(dpmcp_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = enable_dpmcp_irq(dpmcp_dev); ++ if (error < 0) ++ goto error_unregister_irq_handler; ++ ++ mc_io->mc_command_done_irq_armed = true; ++ return 0; ++ ++error_unregister_irq_handler: ++ unregister_dpmcp_irq_handler(dpmcp_dev); ++ ++error_free_irqs: ++ fsl_mc_free_irqs(dpmcp_dev); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_io_setup_dpmcp_irq); ++ ++/* ++ * Tear down interrupts for the DPMCP device associated with a given fsl_mc_io ++ * object ++ */ ++static void teardown_dpmcp_irq(struct fsl_mc_io *mc_io) ++{ ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return; ++ if (WARN_ON(!fsl_mc_interrupts_supported())) ++ return; ++ if (WARN_ON(!dpmcp_dev->irqs)) ++ return; ++ ++ mc_io->mc_command_done_irq_armed = false; ++ (void)disable_dpmcp_irq(dpmcp_dev); ++ unregister_dpmcp_irq_handler(dpmcp_dev); ++ fsl_mc_free_irqs(dpmcp_dev); ++} ++ ++/** ++ * Creates an MC I/O object ++ * ++ * @dev: device to be associated with the MC I/O object ++ * @mc_portal_phys_addr: physical address of the MC portal to use ++ * @mc_portal_size: size in bytes of the MC portal ++ * @resource: Pointer to MC bus object allocator resource associated ++ * with this MC I/O object or NULL if none. ++ * @flags: flags for the new MC I/O object ++ * @new_mc_io: Area to return pointer to newly created MC I/O object ++ * ++ * Returns '0' on Success; Error code otherwise. 
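 *
 * Example (sketch mirroring the root-DPRC usage in fsl_mc_bus_probe(),
 * where the portal address and size come from the device tree and no
 * DPMCP object is attached yet):
 *
 *	struct fsl_mc_io *mc_io;
 *	int error;
 *
 *	error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
 *				 mc_portal_size, NULL, 0, &mc_io);
 *	if (error < 0)
 *		return error;
 *	...
 *	fsl_destroy_mc_io(mc_io);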
++ */ ++int __must_check fsl_create_mc_io(struct device *dev, ++ phys_addr_t mc_portal_phys_addr, ++ uint32_t mc_portal_size, ++ struct fsl_mc_device *dpmcp_dev, ++ uint32_t flags, struct fsl_mc_io **new_mc_io) ++{ ++ int error; ++ struct fsl_mc_io *mc_io; ++ void __iomem *mc_portal_virt_addr; ++ struct resource *res; ++ ++ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL); ++ if (!mc_io) ++ return -ENOMEM; ++ ++ mc_io->dev = dev; ++ mc_io->flags = flags; ++ mc_io->portal_phys_addr = mc_portal_phys_addr; ++ mc_io->portal_size = mc_portal_size; ++ mc_io->mc_command_done_irq_armed = false; ++ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) { ++ spin_lock_init(&mc_io->spinlock); ++ } else { ++ mutex_init(&mc_io->mutex); ++ init_completion(&mc_io->mc_command_done_completion); ++ } ++ ++ res = devm_request_mem_region(dev, ++ mc_portal_phys_addr, ++ mc_portal_size, ++ "mc_portal"); ++ if (!res) { ++ dev_err(dev, ++ "devm_request_mem_region failed for MC portal %#llx\n", ++ mc_portal_phys_addr); ++ return -EBUSY; ++ } ++ ++ mc_portal_virt_addr = devm_ioremap_nocache(dev, ++ mc_portal_phys_addr, ++ mc_portal_size); ++ if (!mc_portal_virt_addr) { ++ dev_err(dev, ++ "devm_ioremap_nocache failed for MC portal %#llx\n", ++ mc_portal_phys_addr); ++ return -ENXIO; ++ } ++ ++ mc_io->portal_virt_addr = mc_portal_virt_addr; ++ if (dpmcp_dev) { ++ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev); ++ if (error < 0) ++ goto error_destroy_mc_io; ++ ++ if (!(flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) && ++ fsl_mc_interrupts_supported()) { ++ error = fsl_mc_io_setup_dpmcp_irq(mc_io); ++ if (error < 0) ++ goto error_destroy_mc_io; ++ } ++ } ++ ++ *new_mc_io = mc_io; ++ return 0; ++ ++error_destroy_mc_io: ++ fsl_destroy_mc_io(mc_io); ++ return error; ++ ++} ++EXPORT_SYMBOL_GPL(fsl_create_mc_io); ++ ++/** ++ * Destroys an MC I/O object ++ * ++ * @mc_io: MC I/O object to destroy ++ */ ++void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) ++{ ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (dpmcp_dev) ++ fsl_mc_io_unset_dpmcp(mc_io); ++ ++ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr); ++ devm_release_mem_region(mc_io->dev, ++ mc_io->portal_phys_addr, ++ mc_io->portal_size); ++ ++ mc_io->portal_virt_addr = NULL; ++ devm_kfree(mc_io->dev, mc_io); ++} ++EXPORT_SYMBOL_GPL(fsl_destroy_mc_io); ++ ++int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, ++ struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(mc_io->dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(dpmcp_dev->mc_io)) ++ return -EINVAL; ++ ++ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { ++ error = dpmcp_open(mc_io, ++ 0, ++ dpmcp_dev->obj_desc.id, ++ &dpmcp_dev->mc_handle); ++ if (error < 0) ++ return error; ++ } ++ ++ mc_io->dpmcp_dev = dpmcp_dev; ++ dpmcp_dev->mc_io = mc_io; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp); ++ ++void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io) ++{ ++ int error; ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return; ++ ++ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) ++ return; ++ ++ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { ++ if (dpmcp_dev->irqs) ++ teardown_dpmcp_irq(mc_io); ++ ++ error = dpmcp_close(mc_io, ++ 0, ++ dpmcp_dev->mc_handle); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n", ++ error); ++ } ++ } ++ ++ mc_io->dpmcp_dev = NULL; ++ dpmcp_dev->mc_io = NULL; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp); ++ ++static int 
mc_status_to_error(enum mc_cmd_status status) ++{ ++ static const int mc_status_to_error_map[] = { ++ [MC_CMD_STATUS_OK] = 0, ++ [MC_CMD_STATUS_AUTH_ERR] = -EACCES, ++ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM, ++ [MC_CMD_STATUS_DMA_ERR] = -EIO, ++ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO, ++ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT, ++ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL, ++ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM, ++ [MC_CMD_STATUS_BUSY] = -EBUSY, ++ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP, ++ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV, ++ }; ++ ++ if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map))) ++ return -EINVAL; ++ ++ return mc_status_to_error_map[status]; ++} ++ ++static const char *mc_status_to_string(enum mc_cmd_status status) ++{ ++ static const char *const status_strings[] = { ++ [MC_CMD_STATUS_OK] = "Command completed successfully", ++ [MC_CMD_STATUS_READY] = "Command ready to be processed", ++ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error", ++ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege", ++ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error", ++ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error", ++ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out", ++ [MC_CMD_STATUS_NO_RESOURCE] = "No resources", ++ [MC_CMD_STATUS_NO_MEMORY] = "No memory available", ++ [MC_CMD_STATUS_BUSY] = "Device is busy", ++ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation", ++ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state" ++ }; ++ ++ if ((unsigned int)status >= ARRAY_SIZE(status_strings)) ++ return "Unknown MC error"; ++ ++ return status_strings[status]; ++} ++ ++/** ++ * mc_write_command - writes a command to a Management Complex (MC) portal ++ * ++ * @portal: pointer to an MC portal ++ * @cmd: pointer to a filled command ++ */ ++static inline void mc_write_command(struct mc_command __iomem *portal, ++ struct mc_command *cmd) ++{ ++ int i; ++ ++ /* copy command parameters into the portal */ ++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) ++ writeq(cmd->params[i], &portal->params[i]); ++ ++ /* submit the command by writing the header */ ++ writeq(cmd->header, &portal->header); ++} ++ ++/** ++ * mc_read_response - reads the response for the last MC command from a ++ * Management Complex (MC) portal ++ * ++ * @portal: pointer to an MC portal ++ * @resp: pointer to command response buffer ++ * ++ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. 
++ */ ++static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * ++ portal, ++ struct mc_command *resp) ++{ ++ int i; ++ enum mc_cmd_status status; ++ ++ /* Copy command response header from MC portal: */ ++ resp->header = readq(&portal->header); ++ status = MC_CMD_HDR_READ_STATUS(resp->header); ++ if (status != MC_CMD_STATUS_OK) ++ return status; ++ ++ /* Copy command response data from MC portal: */ ++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) ++ resp->params[i] = readq(&portal->params[i]); ++ ++ return status; ++} ++ ++static int mc_completion_wait(struct fsl_mc_io *mc_io, struct mc_command *cmd, ++ enum mc_cmd_status *mc_status) ++{ ++ enum mc_cmd_status status; ++ unsigned long jiffies_left; ++ unsigned long timeout_jiffies = ++ msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); ++ ++ if (WARN_ON(!mc_io->dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) ++ return -EINVAL; ++ ++ for (;;) { ++ status = mc_read_response(mc_io->portal_virt_addr, cmd); ++ if (status != MC_CMD_STATUS_READY) ++ break; ++ ++ jiffies_left = wait_for_completion_timeout( ++ &mc_io->mc_command_done_completion, ++ timeout_jiffies); ++ if (jiffies_left == 0) ++ return -ETIMEDOUT; ++ } ++ ++ *mc_status = status; ++ return 0; ++} ++ ++static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, ++ struct mc_command *cmd, ++ enum mc_cmd_status *mc_status) ++{ ++ enum mc_cmd_status status; ++ unsigned long jiffies_until_timeout = ++ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); ++ ++ for (;;) { ++ status = mc_read_response(mc_io->portal_virt_addr, cmd); ++ if (status != MC_CMD_STATUS_READY) ++ break; ++ ++ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, ++ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); ++ ++ if (time_after_eq(jiffies, jiffies_until_timeout)) ++ return -ETIMEDOUT; ++ } ++ ++ *mc_status = status; ++ return 0; ++} ++ ++static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, ++ struct mc_command *cmd, ++ enum mc_cmd_status *mc_status) ++{ ++ enum mc_cmd_status status; ++ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000; ++ ++ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) % ++ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0); ++ ++ for (;;) { ++ status = mc_read_response(mc_io->portal_virt_addr, cmd); ++ if (status != MC_CMD_STATUS_READY) ++ break; ++ ++ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); ++ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; ++ if (timeout_usecs == 0) ++ return -ETIMEDOUT; ++ } ++ ++ *mc_status = status; ++ return 0; ++} ++ ++/** ++ * Sends a command to the MC device using the given MC I/O object ++ * ++ * @mc_io: MC I/O object to be used ++ * @cmd: command to be sent ++ * ++ * Returns '0' on Success; Error code otherwise. 
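 *
 * Typical use (sketch): higher-level wrappers such as dprc_open() fill
 * in cmd.header (command id, flags and object token, cf. the
 * MC_CMD_HDR_READ_* accessors above) and cmd.params, then call:
 *
 *	error = mc_send_command(mc_io, &cmd);
 *	if (error < 0)
 *		return error;
 *
 * and on success decode any results from cmd.params.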
++ */ ++int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) ++{ ++ int error; ++ enum mc_cmd_status status; ++ unsigned long irq_flags = 0; ++ bool dpmcp_completion_intr_disabled = ++ (MC_CMD_HDR_READ_FLAGS(cmd->header) & MC_CMD_FLAG_INTR_DIS); ++ ++ if (WARN_ON(in_irq() && ++ (!dpmcp_completion_intr_disabled || ++ !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))) ++ return -EINVAL; ++ ++ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) ++ spin_lock_irqsave(&mc_io->spinlock, irq_flags); ++ else ++ mutex_lock(&mc_io->mutex); ++ ++ /* ++ * Send command to the MC hardware: ++ */ ++ mc_write_command(mc_io->portal_virt_addr, cmd); ++ ++ /* ++ * Wait for response from the MC hardware: ++ */ ++ if (mc_io->mc_command_done_irq_armed && !dpmcp_completion_intr_disabled) ++ error = mc_completion_wait(mc_io, cmd, &status); ++ else if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) ++ error = mc_polling_wait_preemptible(mc_io, cmd, &status); ++ else ++ error = mc_polling_wait_atomic(mc_io, cmd, &status); ++ ++ if (error < 0) { ++ if (error == -ETIMEDOUT) { ++ pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", ++ mc_io->portal_phys_addr, ++ (unsigned int) ++ MC_CMD_HDR_READ_TOKEN(cmd->header), ++ (unsigned int) ++ MC_CMD_HDR_READ_CMDID(cmd->header)); ++ } ++ goto common_exit; ++ ++ } ++ ++ if (status != MC_CMD_STATUS_OK) { ++ pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", ++ mc_io->portal_phys_addr, ++ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), ++ (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), ++ mc_status_to_string(status), ++ (unsigned int)status); ++ ++ error = mc_status_to_error(status); ++ goto common_exit; ++ } ++ ++ error = 0; ++ ++common_exit: ++ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) ++ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); ++ else ++ mutex_unlock(&mc_io->mutex); ++ ++ return error; ++} ++EXPORT_SYMBOL(mc_send_command); +diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h +new file mode 100644 +index 0000000..1ec04e4 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h +@@ -0,0 +1,62 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
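[Annotation] mc_send_command() above serializes access to the portal (a spinlock with interrupts saved for atomic portals, a mutex otherwise), writes the command, picks one of the three wait strategies, and folds a non-OK MC status into a negative errno. From the caller's side the exchange is one synchronous call. A hedged sketch of caller-side error handling only; building cmd->header and cmd->params with the real MC_CMD_* helpers is deliberately elided, since those helpers live in a header that is not part of this hunk:

/* Hypothetical caller sketch: command construction omitted. */
static int example_exchange(struct fsl_mc_io *mc_io, struct mc_command *cmd)
{
	int err = mc_send_command(mc_io, cmd);

	if (err == -ETIMEDOUT) {
		/* the MC never left the READY status within the timeout */
		pr_warn("MC command timed out\n");
		return err;
	}
	if (err < 0)
		return err;	/* MC status already translated to an errno */

	/* on success the response parameters are back in cmd->params[] */
	return 0;
}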
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPBP_CMD_H ++#define _FSL_DPBP_CMD_H ++ ++/* DPBP Version */ ++#define DPBP_VER_MAJOR 2 ++#define DPBP_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPBP_CMDID_CLOSE 0x800 ++#define DPBP_CMDID_OPEN 0x804 ++#define DPBP_CMDID_CREATE 0x904 ++#define DPBP_CMDID_DESTROY 0x900 ++ ++#define DPBP_CMDID_ENABLE 0x002 ++#define DPBP_CMDID_DISABLE 0x003 ++#define DPBP_CMDID_GET_ATTR 0x004 ++#define DPBP_CMDID_RESET 0x005 ++#define DPBP_CMDID_IS_ENABLED 0x006 ++ ++#define DPBP_CMDID_SET_IRQ 0x010 ++#define DPBP_CMDID_GET_IRQ 0x011 ++#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPBP_CMDID_SET_IRQ_MASK 0x014 ++#define DPBP_CMDID_GET_IRQ_MASK 0x015 ++#define DPBP_CMDID_GET_IRQ_STATUS 0x016 ++#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 ++#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 ++#endif /* _FSL_DPBP_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h +new file mode 100644 +index 0000000..9856bb8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpbp.h +@@ -0,0 +1,438 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
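[Annotation] dpbp-cmd.h above pins the DPBP binary interface at version 2.2 through DPBP_VER_MAJOR/DPBP_VER_MINOR. A driver would typically compare these constants against the version the firmware reports (the DPBP attributes declared later in dpbp.h carry a major/minor pair). A hedged sketch of such a check, assuming an already-filled struct dpbp_attr; whether a minor mismatch is fatal is a driver policy choice, not something this header mandates:

/* Hypothetical compatibility check against DPBP_VER_MAJOR/MINOR. */
static int dpbp_check_version_example(const struct dpbp_attr *attr)
{
	if (attr->version.major != DPBP_VER_MAJOR) {
		pr_err("DPBP major version mismatch: found %u, expected %u\n",
		       attr->version.major, DPBP_VER_MAJOR);
		return -EINVAL;
	}
	if (attr->version.minor != DPBP_VER_MINOR)
		pr_info("DPBP minor version %u differs from expected %u\n",
			attr->version.minor, DPBP_VER_MINOR);
	return 0;
}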
++ */ ++#ifndef __FSL_DPBP_H ++#define __FSL_DPBP_H ++ ++/* Data Path Buffer Pool API ++ * Contains initialization APIs and runtime control APIs for DPBP ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpbp_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpbp_id: DPBP unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpbp_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpbp_id, ++ uint16_t *token); ++ ++/** ++ * dpbp_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpbp_cfg - Structure representing DPBP configuration ++ * @options: place holder ++ */ ++struct dpbp_cfg { ++ uint32_t options; ++}; ++ ++/** ++ * dpbp_create() - Create the DPBP object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPBP object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpbp_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpbp_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpbp_destroy() - Destroy the DPBP object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpbp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpbp_enable() - Enable the DPBP. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpbp_disable() - Disable the DPBP. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpbp_is_enabled() - Check if the DPBP is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpbp_reset() - Reset the DPBP, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpbp_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpbp_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpbp_irq_cfg *irq_cfg); ++ ++/** ++ * dpbp_get_irq() - Get IRQ information from the DPBP. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpbp_irq_cfg *irq_cfg); ++ ++/** ++ * dpbp_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpbp_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpbp_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpbp_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpbp_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpbp_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpbp_attr - Structure representing DPBP attributes ++ * @id: DPBP object ID ++ * @version: DPBP version ++ * @bpid: Hardware buffer pool ID; should be used as an argument in ++ * acquire/release operations on buffers ++ */ ++struct dpbp_attr { ++ int id; ++ /** ++ * struct version - Structure representing DPBP version ++ * @major: DPBP major version ++ * @minor: DPBP minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint16_t bpid; ++}; ++ ++/** ++ * dpbp_get_attributes - Retrieve DPBP attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_attr *attr); ++ ++/** ++ * DPBP notifications options ++ */ ++ ++/** ++ * BPSCN write will attempt to allocate into a cache (coherent write) ++ */ ++#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 ++ ++/** ++ * struct dpbp_notification_cfg - Structure representing DPBP notifications ++ * towards software ++ * @depletion_entry: below this threshold the pool is "depleted"; ++ * set it to '0' to disable it ++ * @depletion_exit: greater than or equal to this threshold the pool exit its ++ * "depleted" state ++ * @surplus_entry: above this threshold the pool is in "surplus" state; ++ * set it to '0' to disable it ++ * @surplus_exit: less than or equal to this threshold the pool exit its ++ * "surplus" state ++ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' ++ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), ++ * must be 16B aligned. ++ * @message_ctx: The context that will be part of the BPSCN message and will ++ * be written to 'message_iova' ++ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values ++ */ ++struct dpbp_notification_cfg { ++ uint32_t depletion_entry; ++ uint32_t depletion_exit; ++ uint32_t surplus_entry; ++ uint32_t surplus_exit; ++ uint64_t message_iova; ++ uint64_t message_ctx; ++ uint16_t options; ++}; ++ ++/** ++ * dpbp_set_notifications() - Set notifications towards software ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @cfg: notifications configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_set_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg); ++ ++/** ++ * dpbp_get_notifications() - Get the notifications configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @cfg: notifications configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
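[Annotation] The dpbp_set_irq() / dpbp_set_irq_mask() / dpbp_set_irq_enable() group above follows the interrupt model shared by every object in this patch: each interrupt index has up to 32 causes, the mask selects which causes may assert the IRQ, and the enable bit gates the interrupt as a whole; dpbp_get_irq_status() and dpbp_clear_irq_status() then read and write-1-clear the per-cause status. A hedged arming sketch, assuming a valid portal, token, and an irq_cfg already filled with the message-interrupt address/data pair; irq_index 0, the all-ones mask and cmd_flags of 0 are illustrative choices only:

/* Hypothetical IRQ arming sequence using the dpbp.h declarations above. */
static int dpbp_arm_irq_example(struct fsl_mc_io *mc_io, uint16_t token,
				struct dpbp_irq_cfg *irq_cfg)
{
	int err;

	err = dpbp_set_irq(mc_io, 0, token, 0, irq_cfg);
	if (err)
		return err;

	/* allow every cause ... */
	err = dpbp_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
	if (err)
		return err;

	/* ... and enable the interrupt as a whole */
	return dpbp_set_irq_enable(mc_io, 0, token, 0, 1);
}

An interrupt handler would then call dpbp_get_irq_status() to learn which causes fired and dpbp_clear_irq_status() with the same bits to acknowledge them.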
++ */ ++int dpbp_get_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg); ++ ++#endif /* __FSL_DPBP_H */ +diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/include/dpcon-cmd.h +new file mode 100644 +index 0000000..ecb40d0 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpcon-cmd.h +@@ -0,0 +1,162 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
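[Annotation] Taken together, the dpbp.h declarations above describe a simple object lifecycle: open a control session by ID to obtain a token, enable the pool, read its attributes (the bpid is what buffer acquire/release operations use), and close the session when done. A hedged sketch, assuming a valid struct fsl_mc_io and an existing DPBP object ID; cmd_flags is left 0 here purely for illustration:

/* Hypothetical bring-up sequence using the dpbp.h API declared above. */
static int dpbp_bringup_example(struct fsl_mc_io *mc_io, int dpbp_id,
				uint16_t *bpid_out)
{
	struct dpbp_attr attr;
	uint16_t token;
	int err;

	err = dpbp_open(mc_io, 0, dpbp_id, &token);
	if (err)
		return err;

	err = dpbp_enable(mc_io, 0, token);
	if (err)
		goto out_close;

	err = dpbp_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto out_disable;

	*bpid_out = attr.bpid;	/* hardware pool ID for acquire/release */
	/* keep the session open while the pool is in use */
	return 0;

out_disable:
	dpbp_disable(mc_io, 0, token);
out_close:
	dpbp_close(mc_io, 0, token);
	return err;
}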
++ */ ++#ifndef _FSL_DPCON_CMD_H ++#define _FSL_DPCON_CMD_H ++ ++/* DPCON Version */ ++#define DPCON_VER_MAJOR 2 ++#define DPCON_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPCON_CMDID_CLOSE 0x800 ++#define DPCON_CMDID_OPEN 0x808 ++#define DPCON_CMDID_CREATE 0x908 ++#define DPCON_CMDID_DESTROY 0x900 ++ ++#define DPCON_CMDID_ENABLE 0x002 ++#define DPCON_CMDID_DISABLE 0x003 ++#define DPCON_CMDID_GET_ATTR 0x004 ++#define DPCON_CMDID_RESET 0x005 ++#define DPCON_CMDID_IS_ENABLED 0x006 ++ ++#define DPCON_CMDID_SET_IRQ 0x010 ++#define DPCON_CMDID_GET_IRQ 0x011 ++#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPCON_CMDID_SET_IRQ_MASK 0x014 ++#define DPCON_CMDID_GET_IRQ_MASK 0x015 ++#define DPCON_CMDID_GET_IRQ_STATUS 0x016 ++#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPCON_CMDID_SET_NOTIFICATION 0x100 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_OPEN(cmd, dpcon_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_CREATE(cmd, cfg) \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, 
offset, width, type, arg_name */ ++#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ ++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ ++} while (0) ++ ++#endif /* _FSL_DPCON_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h +new file mode 100644 +index 0000000..2555be5 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpcon.h +@@ -0,0 +1,407 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
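[Annotation] The dpcon-cmd.h macros above are all built from MC_CMD_OP()/MC_RSP_OP() invocations, each naming a 64-bit command parameter, a bit offset, a field width, a type and the value to pack or extract, so the per-command macros are just declarative field maps. MC_CMD_OP itself is defined in the core mc-cmd.h header, which is not part of this hunk; the helper below only illustrates the kind of bit packing such a macro performs and is not the actual implementation:

/* Illustrative stand-alone packer with the same (param, offset, width, value)
 * shape as MC_CMD_OP(); NOT the real mc-cmd.h definition.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_PARAMS 7	/* illustrative parameter count */

static void demo_cmd_op(uint64_t params[DEMO_NUM_PARAMS], int param,
			int offset, int width, uint64_t val)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	params[param] &= ~(mask << offset);	/* clear the field */
	params[param] |= (val & mask) << offset;	/* insert the value */
}

int main(void)
{
	uint64_t params[DEMO_NUM_PARAMS] = { 0 };

	/* mirrors DPCON_CMD_SET_IRQ_ENABLE: en at (0,0,8), irq_index at (0,32,8) */
	demo_cmd_op(params, 0, 0, 8, 1);
	demo_cmd_op(params, 0, 32, 8, 0);

	printf("param0 = %#llx\n", (unsigned long long)params[0]);
	return 0;
}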
++ */ ++#ifndef __FSL_DPCON_H ++#define __FSL_DPCON_H ++ ++/* Data Path Concentrator API ++ * Contains initialization APIs and runtime control APIs for DPCON ++ */ ++ ++struct fsl_mc_io; ++ ++/** General DPCON macros */ ++ ++/** ++ * Use it to disable notifications; see dpcon_set_notification() ++ */ ++#define DPCON_INVALID_DPIO_ID (int)(-1) ++ ++/** ++ * dpcon_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpcon_id: DPCON unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpcon_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpcon_id, ++ uint16_t *token); ++ ++/** ++ * dpcon_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpcon_cfg - Structure representing DPCON configuration ++ * @num_priorities: Number of priorities for the DPCON channel (1-8) ++ */ ++struct dpcon_cfg { ++ uint8_t num_priorities; ++}; ++ ++/** ++ * dpcon_create() - Create the DPCON object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPCON object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpcon_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpcon_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpcon_destroy() - Destroy the DPCON object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */ ++int dpcon_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpcon_enable() - Enable the DPCON ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpcon_disable() - Disable the DPCON ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpcon_is_enabled() - Check if the DPCON is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpcon_reset() - Reset the DPCON, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpcon_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpcon_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpcon_irq_cfg *irq_cfg); ++ ++/** ++ * dpcon_get_irq() - Get IRQ information from the DPCON. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpcon_irq_cfg *irq_cfg); ++ ++/** ++ * dpcon_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. 
The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpcon_get_irq_enable() - Get overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpcon_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpcon_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpcon_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @status: interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpcon_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpcon_attr - Structure representing DPCON attributes ++ * @id: DPCON object ID ++ * @version: DPCON version ++ * @qbman_ch_id: Channel ID to be used by dequeue operation ++ * @num_priorities: Number of priorities for the DPCON channel (1-8) ++ */ ++struct dpcon_attr { ++ int id; ++ /** ++ * struct version - DPCON version ++ * @major: DPCON major version ++ * @minor: DPCON minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint16_t qbman_ch_id; ++ uint8_t num_priorities; ++}; ++ ++/** ++ * dpcon_get_attributes() - Retrieve DPCON attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @attr: Object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_attr *attr); ++ ++/** ++ * struct dpcon_notification_cfg - Structure representing notification parameters ++ * @dpio_id: DPIO object ID; must be configured with a notification channel; ++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; ++ * @priority: Priority selection within the DPIO channel; valid values ++ * are 0-7, depending on the number of priorities in that channel ++ * @user_ctx: User context value provided with each CDAN message ++ */ ++struct dpcon_notification_cfg { ++ int dpio_id; ++ uint8_t priority; ++ uint64_t user_ctx; ++}; ++ ++/** ++ * dpcon_set_notification() - Set DPCON notification destination ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @cfg: Notification parameters ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_set_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_notification_cfg *cfg); ++ ++#endif /* __FSL_DPCON_H */ +diff --git a/drivers/staging/fsl-mc/include/dpmac-cmd.h b/drivers/staging/fsl-mc/include/dpmac-cmd.h +new file mode 100644 +index 0000000..c123aab +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpmac-cmd.h +@@ -0,0 +1,192 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
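[Annotation] dpcon.h above closes with dpcon_set_notification(), which binds the channel to a DPIO object and priority so that CDAN messages reach software; DPCON_INVALID_DPIO_ID, defined near the top of the header, turns notifications back off. A hedged sketch, again with cmd_flags left 0 for illustration and a caller-supplied DPIO ID:

/* Hypothetical notification setup/teardown using the dpcon.h API above. */
static int dpcon_notify_example(struct fsl_mc_io *mc_io, uint16_t token,
				int dpio_id, uint64_t user_ctx)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id  = dpio_id,	/* DPIO that will receive CDANs */
		.priority = 0,		/* valid range depends on the channel */
		.user_ctx = user_ctx,	/* echoed back in each CDAN message */
	};

	return dpcon_set_notification(mc_io, 0, token, &cfg);
}

static int dpcon_notify_disable_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id = DPCON_INVALID_DPIO_ID,	/* disables notifications */
	};

	return dpcon_set_notification(mc_io, 0, token, &cfg);
}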
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPMAC_CMD_H ++#define _FSL_DPMAC_CMD_H ++ ++/* DPMAC Version */ ++#define DPMAC_VER_MAJOR 3 ++#define DPMAC_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPMAC_CMDID_CLOSE 0x800 ++#define DPMAC_CMDID_OPEN 0x80c ++#define DPMAC_CMDID_CREATE 0x90c ++#define DPMAC_CMDID_DESTROY 0x900 ++ ++#define DPMAC_CMDID_GET_ATTR 0x004 ++#define DPMAC_CMDID_RESET 0x005 ++ ++#define DPMAC_CMDID_SET_IRQ 0x010 ++#define DPMAC_CMDID_GET_IRQ 0x011 ++#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPMAC_CMDID_SET_IRQ_MASK 0x014 ++#define DPMAC_CMDID_GET_IRQ_MASK 0x015 ++#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 ++#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPMAC_CMDID_MDIO_READ 0x0c0 ++#define DPMAC_CMDID_MDIO_WRITE 0x0c1 ++#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 ++#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 ++#define DPMAC_CMDID_GET_COUNTER 0x0c4 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CREATE(cmd, cfg) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_addr, irq_val, user_irq_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \ ++ MC_CMD_OP(cmd, 2, 0, 32, int, user_irq_id); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ(cmd, type, irq_addr, irq_val, user_irq_id) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, user_irq_id); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ ++ MC_CMD_OP(cmd, 0, 32, 
8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ ++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ ++ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_MDIO_READ(cmd, data) \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_COUNTER(cmd, type) \ ++ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) ++ ++#endif /* _FSL_DPMAC_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/dpmac.h b/drivers/staging/fsl-mc/include/dpmac.h +new file mode 100644 +index 0000000..88091b5 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpmac.h +@@ -0,0 +1,528 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMAC_H ++#define __FSL_DPMAC_H ++ ++/* Data Path MAC API ++ * Contains initialization APIs and runtime control APIs for DPMAC ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpmac_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dpmac_id: DPMAC unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmac_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_open(struct fsl_mc_io *mc_io, int dpmac_id, uint16_t *token); ++ ++/** ++ * dpmac_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpmac_close(struct fsl_mc_io *mc_io, uint16_t token); ++ ++/** ++ * enum dpmac_link_type - DPMAC link type ++ * @DPMAC_LINK_TYPE_NONE: No link ++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type ++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID ++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type ++ */ ++enum dpmac_link_type { ++ DPMAC_LINK_TYPE_NONE, ++ DPMAC_LINK_TYPE_FIXED, ++ DPMAC_LINK_TYPE_PHY, ++ DPMAC_LINK_TYPE_BACKPLANE ++}; ++ ++/** ++ * enum dpmac_eth_if - DPMAC Ethrnet interface ++ * @DPMAC_ETH_IF_MII: MII interface ++ * @DPMAC_ETH_IF_RMII: RMII interface ++ * @DPMAC_ETH_IF_SMII: SMII interface ++ * @DPMAC_ETH_IF_GMII: GMII interface ++ * @DPMAC_ETH_IF_RGMII: RGMII interface ++ * @DPMAC_ETH_IF_SGMII: SGMII interface ++ * @DPMAC_ETH_IF_XGMII: XGMII interface ++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface ++ * @DPMAC_ETH_IF_XAUI: XAUI interface ++ * @DPMAC_ETH_IF_XFI: XFI interface ++ */ ++enum dpmac_eth_if { ++ DPMAC_ETH_IF_MII, ++ DPMAC_ETH_IF_RMII, ++ DPMAC_ETH_IF_SMII, ++ DPMAC_ETH_IF_GMII, ++ DPMAC_ETH_IF_RGMII, ++ DPMAC_ETH_IF_SGMII, ++ DPMAC_ETH_IF_XGMII, ++ DPMAC_ETH_IF_QSGMII, ++ DPMAC_ETH_IF_XAUI, ++ DPMAC_ETH_IF_XFI ++}; ++ ++/** ++ * struct dpmac_cfg() - Structure representing DPMAC configuration ++ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, ++ * the MAC IDs are continuous. ++ * For example: 2 WRIOPs, 16 MACs in each: ++ * MAC IDs for the 1st WRIOP: 1-16, ++ * MAC IDs for the 2nd WRIOP: 17-32. ++ */ ++struct dpmac_cfg { ++ int mac_id; ++}; ++ ++/** ++ * dpmac_create() - Create the DPMAC object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPMAC object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpmac_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_create(struct fsl_mc_io *mc_io, ++ const struct dpmac_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpmac_destroy(struct fsl_mc_io *mc_io, uint16_t token); ++ ++/* DPMAC IRQ Index and Events */ ++ ++/* IRQ index */ ++#define DPMAC_IRQ_INDEX 0 ++/* IRQ event - indicates a change in link state */ ++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 ++/* irq event - Indicates that the link state changed */ ++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 ++ ++/** ++ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_addr: Address that must be written to ++ * signal a message-based interrupt ++ * @irq_val: Value to write into irq_addr address ++ * @user_irq_id: A user defined number associated with this IRQ ++ * ++ * Return: '0' on Success; Error code otherwise. 
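[Annotation] The struct dpmac_cfg comment above documents how MAC IDs are numbered when several WRIOP instances are present: the IDs are continuous and 1-based, so with 16 MACs per WRIOP the first instance owns 1-16 and the second 17-32. A tiny stand-alone check of that arithmetic; the helper name and the 16-MACs-per-WRIOP figure are taken only from the example in the comment:

/* Stand-alone illustration of the continuous, 1-based MAC ID numbering
 * described for struct dpmac_cfg.
 */
#include <stdio.h>

static int dpmac_id_for(int wriop_index, int local_mac, int macs_per_wriop)
{
	return wriop_index * macs_per_wriop + local_mac + 1;
}

int main(void)
{
	/* 2 WRIOPs, 16 MACs each: first WRIOP -> 1..16, second -> 17..32 */
	printf("%d %d %d %d\n",
	       dpmac_id_for(0, 0, 16),	/* 1  */
	       dpmac_id_for(0, 15, 16),	/* 16 */
	       dpmac_id_for(1, 0, 16),	/* 17 */
	       dpmac_id_for(1, 15, 16));	/* 32 */
	return 0;
}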
++ */ ++int dpmac_set_irq(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint64_t irq_addr, ++ uint32_t irq_val, ++ int user_irq_id); ++ ++/** ++ * dpmac_get_irq() - Get IRQ information from the DPMAC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_addr: Returned address that must be written to ++ * signal the message-based interrupt ++ * @irq_val: Value to write into irq_addr address ++ * @user_irq_id: A user defined number associated with this IRQ ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ uint64_t *irq_addr, ++ uint32_t *irq_val, ++ int *user_irq_id); ++ ++/** ++ * dpmac_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpmac_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpmac_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpmac_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpmac_get_irq_status() - Get the current status of any pending interrupts. 
++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpmac_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpmac_attr - Structure representing DPMAC attributes ++ * @id: DPMAC object ID ++ * @phy_id: PHY ID ++ * @link_type: link type ++ * @eth_if: Ethernet interface ++ * @max_rate: Maximum supported rate - in Mbps ++ * @version: DPMAC version ++ */ ++struct dpmac_attr { ++ int id; ++ int phy_id; ++ enum dpmac_link_type link_type; ++ enum dpmac_eth_if eth_if; ++ uint32_t max_rate; ++ /** ++ * struct version - Structure representing DPMAC version ++ * @major: DPMAC major version ++ * @minor: DPMAC minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dpmac_get_attributes - Retrieve DPMAC attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ struct dpmac_attr *attr); ++ ++/** ++ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters ++ * @phy_addr: MDIO device address ++ * @reg: Address of the register within the Clause 45 PHY device from which data ++ * is to be read ++ * @data: Data read/write from/to MDIO ++ */ ++struct dpmac_mdio_cfg { ++ uint8_t phy_addr; ++ uint8_t reg; ++ uint16_t data; ++}; ++ ++/** ++ * dpmac_mdio_read() - Perform MDIO read transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_mdio_read(struct fsl_mc_io *mc_io, uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++ ++/** ++ * dpmac_mdio_write() - Perform MDIO write transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. 
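The same dpmac_mdio_cfg structure carries both directions: the caller fills phy_addr and reg, and on a read the MC returns the value in the data field. A small illustrative wrapper, not part of the patch, assuming a valid DPMAC token:

static int dpmac_read_phy_reg(struct fsl_mc_io *mc_io, uint16_t token,
                              uint8_t phy_addr, uint8_t reg, uint16_t *val)
{
        struct dpmac_mdio_cfg cfg = {
                .phy_addr = phy_addr,
                .reg      = reg,
        };
        int err;

        err = dpmac_mdio_read(mc_io, token, &cfg);
        if (err)
                return err;

        *val = cfg.data;        /* data is filled in by the read transaction */
        return 0;
}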
++ */
++int dpmac_mdio_write(struct fsl_mc_io *mc_io, uint16_t token,
++ struct dpmac_mdio_cfg *cfg);
++
++/* DPMAC link configuration/state options */
++
++/* Enable auto-negotiation */
++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/* Enable half-duplex mode */
++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/* Enable pause frames */
++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
++/* Enable asymmetric pause frames */
++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
++ * @rate: Link's rate - in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ */
++struct dpmac_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpmac_get_link_cfg() - Get Ethernet link configuration
++ * @mc_io: Pointer to opaque I/O object
++ * @token: Token of DPMAC object
++ * @cfg: Returned structure with the link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, uint16_t token,
++ struct dpmac_link_cfg *cfg);
++
++/**
++ * struct dpmac_link_state - DPMAC link configuration request
++ * @rate: Rate in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ * @up: Link state
++ */
++struct dpmac_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpmac_set_link_state() - Set the Ethernet link status
++ * @mc_io: Pointer to opaque I/O object
++ * @token: Token of DPMAC object
++ * @link_state: Link state configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_link_state(struct fsl_mc_io *mc_io, uint16_t token,
++ struct dpmac_link_state *link_state);
++
++/**
++ * enum dpmac_counter - DPMAC counter types
++ * @DPMAC_CNT_ING_FRAME_64: counts 64-octet frame, good or bad.
++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-octet frame, good or bad.
++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-octet frame, good or bad.
++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-octet frame, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-octet frame, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-octet frame, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-octet frame and larger
++ * (up to max frame length specified),
++ * good or bad.
++ * @DPMAC_CNT_ING_FRAG: counts packets shorter than 64 octets, received
++ * with a wrong CRC
++ * @DPMAC_CNT_ING_JABBER: counts packet longer than the maximum frame length
++ * specified, with a bad frame check sequence.
++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped packet due to internal errors.
++ * Occurs when a receive FIFO overflows.
++ * Includes also packets truncated as a result of
++ * the receive FIFO overflow.
++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frame with an alignment error
++ * (optionally used for wrong SFD)
++ * @DPMAC_CNT_EGR_UNDERSIZED: counts packet transmitted that was less than 64
++ * octets long with a good CRC.
++ * @DPMAC_CNT_ING_OVERSIZED: counts packet longer than the maximum frame length
++ * specified, with a good frame check sequence.
++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frame (regular and PFC).
++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frame transmitted
++ * (regular and PFC).
++ * @DPMAC_CNT_ING_BYTE: counts octet received except preamble for all valid
++ * frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frame ++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frame ++ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad packet received. ++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frame ++ * @DPMAC_CNT_ING_ERR_FRAME: counts frame received with an error ++ * (except for undersized/fragment frame) ++ * @DPMAC_CNT_EGR_BYTE: counts octet transmitted except preamble for all valid ++ * frames and valid pause frames transmitted. ++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frame ++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frame ++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frame ++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frame transmitted with an error ++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frame received without error, including ++ * pause frames. ++ */ ++enum dpmac_counter { ++ DPMAC_CNT_ING_FRAME_64, ++ DPMAC_CNT_ING_FRAME_127, ++ DPMAC_CNT_ING_FRAME_255, ++ DPMAC_CNT_ING_FRAME_511, ++ DPMAC_CNT_ING_FRAME_1023, ++ DPMAC_CNT_ING_FRAME_1518, ++ DPMAC_CNT_ING_FRAME_1519_MAX, ++ DPMAC_CNT_ING_FRAG, ++ DPMAC_CNT_ING_JABBER, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ DPMAC_CNT_ING_ALIGN_ERR, ++ DPMAC_CNT_EGR_UNDERSIZED, ++ DPMAC_CNT_ING_OVERSIZED, ++ DPMAC_CNT_ING_VALID_PAUSE_FRAME, ++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, ++ DPMAC_CNT_ING_BYTE, ++ DPMAC_CNT_ING_MCAST_FRAME, ++ DPMAC_CNT_ING_BCAST_FRAME, ++ DPMAC_CNT_ING_ALL_FRAME, ++ DPMAC_CNT_ING_UCAST_FRAME, ++ DPMAC_CNT_ING_ERR_FRAME, ++ DPMAC_CNT_EGR_BYTE, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ DPMAC_CNT_EGR_BCAST_FRAME, ++ DPMAC_CNT_EGR_UCAST_FRAME, ++ DPMAC_CNT_EGR_ERR_FRAME, ++ DPMAC_CNT_ING_GOOD_FRAME ++}; ++ ++/** ++ * dpmac_get_counter() - Read a specific DPMAC counter ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @type: The requested counter ++ * @counter: Returned counter value ++ * ++ * Return: The requested counter; '0' otherwise. ++ */ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, uint16_t token, ++ enum dpmac_counter type, ++ uint64_t *counter); ++ ++#endif /* __FSL_DPMAC_H */ +diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h +new file mode 100644 +index 0000000..d1c4588 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpmng.h +@@ -0,0 +1,80 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
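One plausible way to read a handful of ingress statistics with this API; the choice of counters and the pr_info() reporting are purely illustrative and assume the usual kernel headers plus a valid DPMAC token:

static int dpmac_dump_rx_stats(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint64_t bytes = 0, frames = 0, errors = 0;
        int err;

        err = dpmac_get_counter(mc_io, token, DPMAC_CNT_ING_BYTE, &bytes);
        if (!err)
                err = dpmac_get_counter(mc_io, token, DPMAC_CNT_ING_ALL_FRAME,
                                        &frames);
        if (!err)
                err = dpmac_get_counter(mc_io, token, DPMAC_CNT_ING_ERR_FRAME,
                                        &errors);
        if (err)
                return err;

        pr_info("rx: %llu bytes, %llu frames, %llu errors\n",
                (unsigned long long)bytes, (unsigned long long)frames,
                (unsigned long long)errors);
        return 0;
}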
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMNG_H ++#define __FSL_DPMNG_H ++ ++/* Management Complex General API ++ * Contains general API for the Management Complex firmware ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * struct mc_version ++ * @major: Major version number: incremented on API compatibility changes ++ * @minor: Minor version number: incremented on API additions (that are ++ * backward compatible); reset when major version is incremented ++ * @revision: Internal revision number: incremented on implementation changes ++ * and/or bug fixes that have no impact on API ++ */ ++struct mc_version { ++ uint32_t major; ++ uint32_t minor; ++ uint32_t revision; ++}; ++ ++/** ++ * mc_get_version() - Retrieves the Management Complex firmware ++ * version information ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @mc_ver_info: Returned version information structure ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int mc_get_version(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ struct mc_version *mc_ver_info); ++ ++/** ++ * dpmng_get_container_id() - Get container ID associated with a given portal. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @container_id: Requested container ID ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmng_get_container_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int *container_id); ++ ++#endif /* __FSL_DPMNG_H */ +diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h +new file mode 100644 +index 0000000..810ded0 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dprc.h +@@ -0,0 +1,990 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
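A minimal sketch of how these two dpmng calls are typically combined when probing the bus; passing cmd_flags of 0 (no MC_CMD_FLAG_* bits set) is an assumption, and mc_io is a portal obtained elsewhere:

static int dpmng_probe_example(struct fsl_mc_io *mc_io)
{
        struct mc_version ver;
        int container_id;
        int err;

        err = mc_get_version(mc_io, 0, &ver);
        if (err)
                return err;

        err = dpmng_get_container_id(mc_io, 0, &container_id);
        if (err)
                return err;

        pr_info("MC firmware %u.%u.%u, portal's container %d\n",
                ver.major, ver.minor, ver.revision, container_id);
        return 0;
}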
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPRC_H ++#define _FSL_DPRC_H ++ ++#include "mc-cmd.h" ++ ++/* Data Path Resource Container API ++ * Contains DPRC API for managing and querying DPAA resources ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * Set this value as the icid value in dprc_cfg structure when creating a ++ * container, in case the ICID is not selected by the user and should be ++ * allocated by the DPRC from the pool of ICIDs. ++ */ ++#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0)) ++ ++/** ++ * Set this value as the portal_id value in dprc_cfg structure when creating a ++ * container, in case the portal ID is not specifically selected by the ++ * user and should be allocated by the DPRC from the pool of portal ids. ++ */ ++#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) ++ ++/** ++ * dprc_open() - Open DPRC object for use ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @container_id: Container ID to open ++ * @token: Returned token of DPRC object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Required before any operation on the object. ++ */ ++int dprc_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int container_id, ++ uint16_t *token); ++ ++/** ++ * dprc_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * Container general options ++ * ++ * These options may be selected at container creation by the container creator ++ * and can be retrieved using dprc_get_attributes() ++ */ ++ ++/* Spawn Policy Option allowed - Indicates that the new container is allowed ++ * to spawn and have its own child containers. 
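The open/close pair brackets every other DPRC call: dprc_open() turns a container ID into a token for this portal, and dprc_close() ends the control session. A minimal sketch, assuming cmd_flags of 0 and a container ID obtained e.g. from dpmng_get_container_id():

static int dprc_session_example(struct fsl_mc_io *mc_io, int container_id)
{
        uint16_t token;
        int err;

        err = dprc_open(mc_io, 0, container_id, &token);
        if (err)
                return err;

        /* ... dprc_get_attributes(), dprc_get_obj_count(), etc. ... */

        return dprc_close(mc_io, 0, token);
}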
++ */
++#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
++
++/* General Container allocation policy - Indicates that the new container is
++ * allowed to allocate requested resources from its parent container; if not
++ * set, the container is only allowed to use resources in its own pools; Note
++ * that this is a container's global policy, but the parent container may
++ * override it and set specific quota per resource type.
++ */
++#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
++
++/* Object initialization allowed - software context associated with this
++ * container is allowed to invoke object initialization operations.
++ */
++#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
++
++/* Topology change allowed - software context associated with this
++ * container is allowed to invoke topology operations, such as attach/detach
++ * of network objects.
++ */
++#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
++
++/* AIOP - Indicates that the container belongs to AIOP. */
++#define DPRC_CFG_OPT_AIOP 0x00000020
++
++/* IRQ Config - Indicates that the container is allowed to configure its IRQs. */
++#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
++
++/**
++ * struct dprc_cfg - Container configuration options
++ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
++ * ICID value is allocated by the DPRC
++ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
++ * portal ID is allocated by the DPRC
++ * @options: Combination of 'DPRC_CFG_OPT_' options
++ * @label: Object's label
++ */
++struct dprc_cfg {
++ uint16_t icid;
++ int portal_id;
++ uint64_t options;
++ char label[16];
++};
++
++/**
++ * dprc_create_container() - Create child container
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @cfg: Child container configuration
++ * @child_container_id: Returned child container ID
++ * @child_portal_offset: Returned child portal offset from MC portal base
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_create_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprc_cfg *cfg,
++ int *child_container_id,
++ uint64_t *child_portal_offset);
++
++/**
++ * dprc_destroy_container() - Destroy child container.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the container to destroy
++ *
++ * This function terminates the child container, so following this call the
++ * child container ID becomes invalid.
++ *
++ * Notes:
++ * - All resources and objects of the destroyed container are returned to the
++ * parent container or destroyed if they were created by the destroyed container.
++ * - This function destroys all the child containers of the specified
++ * container prior to destroying the container itself.
++ *
++ * warning: Only the parent container is allowed to destroy a child container;
++ * Container 0 can't be destroyed.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ */
++int dprc_destroy_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id);
++
++/**
++ * dprc_reset_container - Reset child container.
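Put together with the pool-allocation macros above, creating a child container can look roughly like the following sketch; the option bits and the "child" label are example choices, not values taken from the patch:

static int dprc_spawn_child(struct fsl_mc_io *mc_io, uint16_t parent_token,
                            int *child_id)
{
        struct dprc_cfg cfg = {
                .icid      = DPRC_GET_ICID_FROM_POOL,
                .portal_id = DPRC_GET_PORTAL_ID_FROM_POOL,
                .options   = DPRC_CFG_OPT_ALLOC_ALLOWED |
                             DPRC_CFG_OPT_SPAWN_ALLOWED,
                .label     = "child",   /* arbitrary example label */
        };
        uint64_t portal_offset;

        return dprc_create_container(mc_io, 0, parent_token, &cfg,
                                     child_id, &portal_offset);
}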
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the container to reset ++ * ++ * In case a software context crashes or becomes non-responsive, the parent ++ * may wish to reset its resources container before the software context is ++ * restarted. ++ * ++ * This routine informs all objects assigned to the child container that the ++ * container is being reset, so they may perform any cleanup operations that are ++ * needed. All objects handles that were owned by the child container shall be ++ * closed. ++ * ++ * Note that such request may be submitted even if the child software context ++ * has not crashed, but the resulting object cleanup operations will not be ++ * aware of that. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_reset_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id); ++ ++/* IRQ */ ++ ++/* IRQ index */ ++#define DPRC_IRQ_INDEX 0 ++ ++/* Number of dprc's IRQs */ ++#define DPRC_NUM_OF_IRQS 1 ++ ++/* DPRC IRQ events */ ++ ++/* IRQ event - Indicates that a new object added to the container */ ++#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001 ++ ++/* IRQ event - Indicates that an object was removed from the container */ ++#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002 ++ ++/* IRQ event - Indicates that resources added to the container */ ++#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 ++ ++/* IRQ event - Indicates that resources removed from the container */ ++#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 ++ ++/* IRQ event - Indicates that one of the descendant containers that opened by ++ * this container is destroyed ++ */ ++#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 ++ ++/* IRQ event - Indicates that on one of the container's opened object is ++ * destroyed ++ */ ++#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 ++ ++/* Irq event - Indicates that object is created at the container */ ++#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040 ++ ++/** ++ * struct dprc_irq_cfg - IRQ configuration ++ * @paddr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dprc_irq_cfg { ++ uint64_t paddr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_get_irq() - Get IRQ information from the DPRC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dprc_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dprc_get_irq_enable() - Get overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dprc_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting irq ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dprc_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dprc_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dprc_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dprc_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dprc_attributes - Container attributes ++ * @container_id: Container's ID ++ * @icid: Container's ICID ++ * @portal_id: Container's portal ID ++ * @options: Container's options as set at container's creation ++ * @version: DPRC version ++ */ ++struct dprc_attributes { ++ int container_id; ++ uint16_t icid; ++ int portal_id; ++ uint64_t options; ++ /** ++ * struct version - DPRC version ++ * @major: DPRC major version ++ * @minor: DPRC minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dprc_get_attributes() - Obtains container attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @attributes: Returned container attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_attributes *attributes); ++ ++/** ++ * dprc_set_res_quota() - Set allocation policy for a specific resource/object ++ * type in a child container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the child container ++ * @type: Resource/object type ++ * @quota: Sets the maximum number of resources of the selected type ++ * that the child container is allowed to allocate from its parent; ++ * when quota is set to -1, the policy is the same as container's ++ * general policy. ++ * ++ * Allocation policy determines whether or not a container may allocate ++ * resources from its parent. Each container has a 'global' allocation policy ++ * that is set when the container is created. ++ * ++ * This function sets allocation policy for a specific resource type. ++ * The default policy for all resource types matches the container's 'global' ++ * allocation policy. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Only the parent container is allowed to change a child policy. 
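As a small illustration of dprc_get_attributes() on an already-open container; the pr_info() formatting is incidental and not part of the patch:

static int dprc_show_attributes(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dprc_attributes attr;
        int err;

        err = dprc_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        pr_info("dprc.%d: icid %u, portal %d, DPRC v%u.%u\n",
                attr.container_id, attr.icid, attr.portal_id,
                attr.version.major, attr.version.minor);
        return 0;
}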
++ */ ++int dprc_set_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t quota); ++ ++/** ++ * dprc_get_res_quota() - Gets the allocation policy of a specific ++ * resource/object type in a child container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the child container ++ * @type: resource/object type ++ * @quota: Returnes the maximum number of resources of the selected type ++ * that the child container is allowed to allocate from the parent; ++ * when quota is set to -1, the policy is the same as container's ++ * general policy. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t *quota); ++ ++/* Resource request options */ ++ ++/* Explicit resource ID request - The requested objects/resources ++ * are explicit and sequential (in case of resources). ++ * The base ID is given at res_req at base_align field ++ */ ++#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 ++ ++/* Aligned resources request - Relevant only for resources ++ * request (and not objects). Indicates that resources base ID should be ++ * sequential and aligned to the value given at dprc_res_req base_align field ++ */ ++#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 ++ ++/* Plugged Flag - Relevant only for object assignment request. ++ * Indicates that after all objects assigned. An interrupt will be invoked at ++ * the relevant GPP. The assigned object will be marked as plugged. ++ * plugged objects can't be assigned from their container ++ */ ++#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 ++ ++/** ++ * struct dprc_res_req - Resource request descriptor, to be used in assignment ++ * or un-assignment of resources and objects. ++ * @type: Resource/object type: Represent as a NULL terminated string. ++ * This string may received by using dprc_get_pool() to get resource ++ * type and dprc_get_obj() to get object type; ++ * Note: it is not possible to assign/un-assign DPRC objects ++ * @num: Number of resources ++ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options ++ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT ++ * is set at option), this field represents the required base ID ++ * for resource allocation; In case of aligned assignment ++ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field ++ * indicates the required alignment for the resource ID(s) - ++ * use 0 if there is no alignment or explicit ID requirements ++ */ ++struct dprc_res_req { ++ char type[16]; ++ uint32_t num; ++ uint32_t options; ++ int id_base_align; ++}; ++ ++/** ++ * dprc_assign() - Assigns objects or resource to a child container. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @container_id: ID of the child container ++ * @res_req: Describes the type and amount of resources to ++ * assign to the given container ++ * ++ * Assignment is usually done by a parent (this DPRC) to one of its child ++ * containers. ++ * ++ * According to the DPRC allocation policy, the assigned resources may be taken ++ * (allocated) from the container's ancestors, if not enough resources are ++ * available in the container itself. 
++ * ++ * The type of assignment depends on the dprc_res_req options, as follows: ++ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have ++ * the explicit base ID specified at the id_base_align field of res_req. ++ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be ++ * aligned to the value given at id_base_align field of res_req. ++ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, ++ * and indicates that the object must be set to the plugged state. ++ * ++ * A container may use this function with its own ID in order to change a ++ * object state to plugged or unplugged. ++ * ++ * If IRQ information has been set in the child DPRC, it will signal an ++ * interrupt following every change in its object assignment. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_assign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int container_id, ++ struct dprc_res_req *res_req); ++ ++/** ++ * dprc_unassign() - Un-assigns objects or resources from a child container ++ * and moves them into this (parent) DPRC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the child container ++ * @res_req: Describes the type and amount of resources to un-assign from ++ * the child container ++ * ++ * Un-assignment of objects can succeed only if the object is not in the ++ * plugged or opened state. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_unassign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ struct dprc_res_req *res_req); ++ ++/** ++ * dprc_get_pool_count() - Get the number of dprc's pools ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPRC object ++ * @pool_count: Returned number of resource pools in the dprc ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_pool_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *pool_count); ++ ++/** ++ * dprc_get_pool() - Get the type (string) of a certain dprc's pool ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @pool_index: Index of the pool to be queried (< pool_count) ++ * @type: The type of the pool ++ * ++ * The pool types retrieved one by one by incrementing ++ * pool_index up to (not including) the value of pool_count returned ++ * from dprc_get_pool_count(). dprc_get_pool_count() must ++ * be called prior to dprc_get_pool(). ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_pool(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int pool_index, ++ char *type); ++ ++/** ++ * dprc_get_obj_count() - Obtains the number of objects in the DPRC ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_count: Number of objects assigned to the DPRC ++ * ++ * Return: '0' on Success; Error code otherwise. 
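As a rough illustration of dprc_assign() and the dprc_res_req descriptor in use, the sketch below hands two objects of an assumed type string "dpmcp" to a child container; the type name, the count, and the zeroed options (no explicit or aligned ID request) are illustrative assumptions only:

static int dprc_give_child_portals(struct fsl_mc_io *mc_io, uint16_t token,
                                   int child_container_id)
{
        struct dprc_res_req req = {
                .type          = "dpmcp",       /* assumed pool/object type */
                .num           = 2,
                .options       = 0,
                .id_base_align = 0,
        };

        return dprc_assign(mc_io, 0, token, child_container_id, &req);
}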
++ */ ++int dprc_get_obj_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *obj_count); ++ ++/* Objects Attributes Flags */ ++ ++/* Opened state - Indicates that an object is open by at least one owner */ ++#define DPRC_OBJ_STATE_OPEN 0x00000001 ++/* Plugged state - Indicates that the object is plugged */ ++#define DPRC_OBJ_STATE_PLUGGED 0x00000002 ++ ++/** ++ * Shareability flag - Object flag indicating no memory shareability. ++ * the object generates memory accesses that are non coherent with other ++ * masters; ++ * user is responsible for proper memory handling through IOMMU configuration. ++ */ ++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 ++ ++/** ++ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() ++ * @type: Type of object: NULL terminated string ++ * @id: ID of logical object resource ++ * @vendor: Object vendor identifier ++ * @ver_major: Major version number ++ * @ver_minor: Minor version number ++ * @irq_count: Number of interrupts supported by the object ++ * @region_count: Number of mappable regions supported by the object ++ * @state: Object state: combination of DPRC_OBJ_STATE_ states ++ * @label: Object label ++ * @flags: Object's flags ++ */ ++struct dprc_obj_desc { ++ char type[16]; ++ int id; ++ uint16_t vendor; ++ uint16_t ver_major; ++ uint16_t ver_minor; ++ uint8_t irq_count; ++ uint8_t region_count; ++ uint32_t state; ++ char label[16]; ++ uint16_t flags; ++}; ++ ++/** ++ * dprc_get_obj() - Get general information on an object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_index: Index of the object to be queried (< obj_count) ++ * @obj_desc: Returns the requested object descriptor ++ * ++ * The object descriptors are retrieved one by one by incrementing ++ * obj_index up to (not including) the value of obj_count returned ++ * from dprc_get_obj_count(). dprc_get_obj_count() must ++ * be called prior to dprc_get_obj(). ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_obj(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int obj_index, ++ struct dprc_obj_desc *obj_desc); ++ ++/** ++ * dprc_get_obj_desc() - Get object descriptor. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: The type of the object to get its descriptor. ++ * @obj_id: The id of the object to get its descriptor ++ * @obj_desc: The returned descriptor to fill and return to the user ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ */ ++int dprc_get_obj_desc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ struct dprc_obj_desc *obj_desc); ++ ++/** ++ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Type of the object to set its IRQ ++ * @obj_id: ID of the object to set its IRQ ++ * @irq_index: The interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
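The count/index pair naturally forms an enumeration loop. A sketch, assuming an open DPRC token and cmd_flags of 0:

static int dprc_list_objects(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dprc_obj_desc desc;
        int count, i, err;

        err = dprc_get_obj_count(mc_io, 0, token, &count);
        if (err)
                return err;

        for (i = 0; i < count; i++) {
                err = dprc_get_obj(mc_io, 0, token, i, &desc);
                if (err)
                        return err;
                pr_info("obj %d: %s.%d (v%u.%u)\n", i, desc.type, desc.id,
                        desc.ver_major, desc.ver_minor);
        }
        return 0;
}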
++ */ ++int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_get_obj_irq() - Get IRQ information from object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Type od the object to get its IRQ ++ * @obj_id: ID of the object to get its IRQ ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: The returned IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_get_res_count() - Obtains the number of free resources that are assigned ++ * to this container, by pool type ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @type: pool type ++ * @res_count: Returned number of free resources of the given ++ * resource type that are assigned to this DPRC ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_res_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ int *res_count); ++ ++/** ++ * enum dprc_iter_status - Iteration status ++ * @DPRC_ITER_STATUS_FIRST: Perform first iteration ++ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed ++ * @DPRC_ITER_STATUS_LAST: Indicates last iteration ++ */ ++enum dprc_iter_status { ++ DPRC_ITER_STATUS_FIRST = 0, ++ DPRC_ITER_STATUS_MORE = 1, ++ DPRC_ITER_STATUS_LAST = 2 ++}; ++ ++/** ++ * struct dprc_res_ids_range_desc - Resource ID range descriptor ++ * @base_id: Base resource ID of this range ++ * @last_id: Last resource ID of this range ++ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at ++ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, ++ * additional iterations are needed, until the returned marker is ++ * DPRC_ITER_STATUS_LAST ++ */ ++struct dprc_res_ids_range_desc { ++ int base_id; ++ int last_id; ++ enum dprc_iter_status iter_status; ++}; ++ ++/** ++ * dprc_get_res_ids() - Obtains IDs of free resources in the container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @type: pool type ++ * @range_desc: range descriptor ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_res_ids(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ struct dprc_res_ids_range_desc *range_desc); ++ ++/* Region flags */ ++/* Cacheable - Indicates that region should be mapped as cacheable */ ++#define DPRC_REGION_CACHEABLE 0x00000001 ++ ++/** ++ * enum dprc_region_type - Region type ++ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region ++ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region ++ */ ++enum dprc_region_type { ++ DPRC_REGION_TYPE_MC_PORTAL, ++ DPRC_REGION_TYPE_QBMAN_PORTAL ++}; ++ ++/** ++ * struct dprc_region_desc - Mappable region descriptor ++ * @base_offset: Region offset from region's base address. 
++ * For DPMCP and DPRC objects, region base is offset from SoC MC portals ++ * base address; For DPIO, region base is offset from SoC QMan portals ++ * base address ++ * @size: Region size (in bytes) ++ * @flags: Region attributes ++ * @type: Portal region type ++ */ ++struct dprc_region_desc { ++ uint32_t base_offset; ++ uint32_t size; ++ uint32_t flags; ++ enum dprc_region_type type; ++}; ++ ++/** ++ * dprc_get_obj_region() - Get region information for a specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Object type as returned in dprc_get_obj() ++ * @obj_id: Unique object instance as returned in dprc_get_obj() ++ * @region_index: The specific region to query ++ * @region_desc: Returns the requested region descriptor ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_obj_region(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t region_index, ++ struct dprc_region_desc *region_desc); ++ ++/** ++ * dprc_set_obj_label() - Set object label. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Object's type ++ * @obj_id: Object's ID ++ * @label: The required label. The maximum length is 16 chars. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_obj_label(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ char *label); ++ ++/** ++ * struct dprc_endpoint - Endpoint description for link connect/disconnect ++ * operations ++ * @type: Endpoint object type: NULL terminated string ++ * @id: Endpoint object ID ++ * @if_id: Interface ID; should be set for endpoints with multiple ++ * interfaces ("dpsw", "dpdmux"); for others, always set to 0 ++ */ ++struct dprc_endpoint { ++ char type[16]; ++ int id; ++ int if_id; ++}; ++ ++/** ++ * struct dprc_connection_cfg - Connection configuration. ++ * Used for virtual connections only ++ * @committed_rate: Committed rate (Mbits/s) ++ * @max_rate: Maximum rate (Mbits/s) ++ */ ++struct dprc_connection_cfg { ++ uint32_t committed_rate; ++ uint32_t max_rate; ++}; ++ ++/** ++ * dprc_connect() - Connect two endpoints to create a network link between them ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @endpoint1: Endpoint 1 configuration parameters ++ * @endpoint2: Endpoint 2 configuration parameters ++ * @cfg: Connection configuration. The connection configuration is ignored for ++ * connections made to DPMAC objects, where rate is retrieved from the ++ * MAC configuration. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_connect(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ const struct dprc_endpoint *endpoint2, ++ const struct dprc_connection_cfg *cfg); ++ ++/** ++ * dprc_disconnect() - Disconnect one endpoint to remove its network connection ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @endpoint: Endpoint configuration parameters ++ * ++ * Return: '0' on Success; Error code otherwise. 
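A hedged sketch of wiring one endpoint to another with dprc_connect(); the "dpni" and "dpmac" type strings and the zeroed connection rates (ignored for DPMAC peers, per the comment above) are assumptions made for illustration:

static int dprc_link_ni_to_mac(struct fsl_mc_io *mc_io, uint16_t token,
                               int dpni_id, int dpmac_id)
{
        struct dprc_endpoint ni  = { .type = "dpni",  .id = dpni_id };
        struct dprc_endpoint mac = { .type = "dpmac", .id = dpmac_id };
        struct dprc_connection_cfg cfg = { 0 };

        return dprc_connect(mc_io, 0, token, &ni, &mac, &cfg);
}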
++ */ ++int dprc_disconnect(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint); ++ ++/** ++* dprc_get_connection() - Get connected endpoint and link status if connection ++* exists. ++* @mc_io: Pointer to MC portal's I/O object ++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++* @token: Token of DPRC object ++* @endpoint1: Endpoint 1 configuration parameters ++* @endpoint2: Returned endpoint 2 configuration parameters ++* @state: Returned link state: ++* 1 - link is up; ++* 0 - link is down; ++* -1 - no connection (endpoint2 information is irrelevant) ++* ++* Return: '0' on Success; -ENAVAIL if connection does not exist. ++*/ ++int dprc_get_connection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ struct dprc_endpoint *endpoint2, ++ int *state); ++ ++#endif /* _FSL_DPRC_H */ ++ +diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h +new file mode 100644 +index 0000000..3e9af59 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h +@@ -0,0 +1,774 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPAA2_FD_H ++#define __FSL_DPAA2_FD_H ++ ++/** ++ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2 ++ * ++ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2. ++ * Frames can be enqueued and dequeued to Frame Queues which are consumed ++ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE) ++ * ++ * There are three types of frames: Single, Scatter Gather and Frame Lists. ++ * ++ * The set of APIs in this file must be used to create, manipulate and ++ * query Frame Descriptor. 
++ * ++ */ ++ ++/** ++ * struct dpaa2_fd - Place-holder for FDs. ++ * @words: for easier/faster copying the whole FD structure. ++ * @addr_lo: the lower 32 bits of the address in FD. ++ * @addr_hi: the upper 32 bits of the address in FD. ++ * @len: the length field in FD. ++ * @bpid_offset: represent the bpid and offset fields in FD ++ * @frc: frame context ++ * @ctrl: the 32bit control bits including dd, sc,... va, err. ++ * @flc_lo: the lower 32bit of flow context. ++ * @flc_hi: the upper 32bits of flow context. ++ * ++ * This structure represents the basic Frame Descriptor used in the system. ++ * We represent it via the simplest form that we need for now. Different ++ * overlays may be needed to support different options, etc. (It is impractical ++ * to define One True Struct, because the resulting encoding routines (lots of ++ * read-modify-writes) would be worst-case performance whether or not ++ * circumstances required them.) ++ */ ++struct dpaa2_fd { ++ union { ++ u32 words[8]; ++ struct dpaa2_fd_simple { ++ u32 addr_lo; ++ u32 addr_hi; ++ u32 len; ++ /* offset in the MS 16 bits, BPID in the LS 16 bits */ ++ u32 bpid_offset; ++ u32 frc; /* frame context */ ++ /* "err", "va", "cbmt", "asal", [...] */ ++ u32 ctrl; ++ /* flow context */ ++ u32 flc_lo; ++ u32 flc_hi; ++ } simple; ++ }; ++}; ++ ++enum dpaa2_fd_format { ++ dpaa2_fd_single = 0, ++ dpaa2_fd_list, ++ dpaa2_fd_sg ++}; ++ ++/* Accessors for SG entry fields ++ * ++ * These setters and getters assume little endian format. For converting ++ * between LE and cpu endianness, the specific conversion functions must be ++ * called before the SGE contents are accessed by the core (on Rx), ++ * respectively before the SG table is sent to hardware (on Tx) ++ */ ++ ++/** ++ * dpaa2_fd_get_addr() - get the addr field of frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the address in the frame descriptor. ++ */ ++static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd) ++{ ++ return (dma_addr_t)((((uint64_t)fd->simple.addr_hi) << 32) ++ + fd->simple.addr_lo); ++} ++ ++/** ++ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor ++ * @fd: the given frame descriptor. ++ * @addr: the address needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr) ++{ ++ fd->simple.addr_hi = upper_32_bits(addr); ++ fd->simple.addr_lo = lower_32_bits(addr); ++} ++ ++/** ++ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the frame context field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd) ++{ ++ return fd->simple.frc; ++} ++ ++/** ++ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor ++ * @fd: the given frame descriptor. ++ * @frc: the frame context needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc) ++{ ++ fd->simple.frc = frc; ++} ++ ++/** ++ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the flow context in the frame descriptor. ++ */ ++static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd) ++{ ++ return (dma_addr_t)((((uint64_t)fd->simple.flc_hi) << 32) + ++ fd->simple.flc_lo); ++} ++ ++/** ++ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor ++ * @fd: the given frame descriptor. 
++ * @flc_addr: the flow context needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr) ++{ ++ fd->simple.flc_hi = upper_32_bits(flc_addr); ++ fd->simple.flc_lo = lower_32_bits(flc_addr); ++} ++ ++/** ++ * dpaa2_fd_get_len() - Get the length in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the length field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd) ++{ ++ return fd->simple.len; ++} ++ ++/** ++ * dpaa2_fd_set_len() - Set the length field of frame descriptor ++ * @fd: the given frame descriptor. ++ * @len: the length needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len) ++{ ++ fd->simple.len = len; ++} ++ ++/** ++ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the offset. ++ */ ++static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd) ++{ ++ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF; ++} ++ ++/** ++ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor ++ * ++ * @fd: the given frame descriptor. ++ * @offset: the offset needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset) ++{ ++ fd->simple.bpid_offset &= 0xF000FFFF; ++ fd->simple.bpid_offset |= (u32)offset << 16; ++} ++ ++/** ++ * dpaa2_fd_get_format() - Get the format field in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_fd_format dpaa2_fd_get_format( ++ const struct dpaa2_fd *fd) ++{ ++ return (enum dpaa2_fd_format)((fd->simple.bpid_offset >> 28) & 0x3); ++} ++ ++/** ++ * dpaa2_fd_set_format() - Set the format field of frame descriptor ++ * ++ * @fd: the given frame descriptor. ++ * @format: the format needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd, ++ enum dpaa2_fd_format format) ++{ ++ fd->simple.bpid_offset &= 0xCFFFFFFF; ++ fd->simple.bpid_offset |= (u32)format << 28; ++} ++ ++/** ++ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the bpid. ++ */ ++static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd) ++{ ++ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF); ++} ++ ++/** ++ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor ++ * ++ * @fd: the given frame descriptor. ++ * @bpid: the bpid needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid) ++{ ++ fd->simple.bpid_offset &= 0xFFFF0000; ++ fd->simple.bpid_offset |= (u32)bpid; ++} ++ ++/** ++ * struct dpaa2_sg_entry - the scatter-gathering structure ++ * @addr_lo: the lower 32bit of address ++ * @addr_hi: the upper 32bit of address ++ * @len: the length in this sg entry. ++ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. ++ */ ++struct dpaa2_sg_entry { ++ u32 addr_lo; ++ u32 addr_hi; ++ u32 len; ++ u32 bpid_offset; ++}; ++ ++enum dpaa2_sg_format { ++ dpaa2_sg_single = 0, ++ dpaa2_sg_frame_data, ++ dpaa2_sg_sgt_ext ++}; ++ ++/** ++ * dpaa2_sg_get_addr() - Get the address from SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the address. 
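The setters above compose naturally when describing a single contiguous buffer. A sketch, not part of the patch, assuming linux/string.h for memset() and a DMA-mapped buffer address:

static void dpaa2_build_single_fd(struct dpaa2_fd *fd, dma_addr_t buf,
                                  u16 offset, u32 len, u16 bpid)
{
        memset(fd, 0, sizeof(*fd));
        dpaa2_fd_set_addr(fd, buf);
        dpaa2_fd_set_offset(fd, offset);        /* data starts 'offset' bytes in */
        dpaa2_fd_set_len(fd, len);
        dpaa2_fd_set_bpid(fd, bpid);            /* buffer pool to release into */
        dpaa2_fd_set_format(fd, dpaa2_fd_single);
}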
++ */ ++static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg) ++{ ++ return (dma_addr_t)((((u64)sg->addr_hi) << 32) + sg->addr_lo); ++} ++ ++/** ++ * dpaa2_sg_set_addr() - Set the address in SG entry ++ * @sg: the given scatter-gathering object. ++ * @addr: the address to be set. ++ */ ++static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr) ++{ ++ sg->addr_hi = upper_32_bits(addr); ++ sg->addr_lo = lower_32_bits(addr); ++} ++ ++ ++static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg) ++{ ++ return (sg->bpid_offset >> 30) & 0x1; ++} ++ ++/** ++ * dpaa2_sg_get_len() - Get the length in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the length. ++ */ ++static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg) ++{ ++ if (dpaa2_sg_short_len(sg)) ++ return sg->len & 0x1FFFF; ++ return sg->len; ++} ++ ++/** ++ * dpaa2_sg_set_len() - Set the length in SG entry ++ * @sg: the given scatter-gathering object. ++ * @len: the length to be set. ++ */ ++static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len) ++{ ++ sg->len = len; ++} ++ ++/** ++ * dpaa2_sg_get_offset() - Get the offset in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the offset. ++ */ ++static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg) ++{ ++ return (u16)(sg->bpid_offset >> 16) & 0x0FFF; ++} ++ ++/** ++ * dpaa2_sg_set_offset() - Set the offset in SG entry ++ * @sg: the given scatter-gathering object. ++ * @offset: the offset to be set. ++ */ ++static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg, ++ u16 offset) ++{ ++ sg->bpid_offset &= 0xF000FFFF; ++ sg->bpid_offset |= (u32)offset << 16; ++} ++ ++/** ++ * dpaa2_sg_get_format() - Get the SG format in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_sg_format ++ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg) ++{ ++ return (enum dpaa2_sg_format)((sg->bpid_offset >> 28) & 0x3); ++} ++ ++/** ++ * dpaa2_sg_set_format() - Set the SG format in SG entry ++ * @sg: the given scatter-gathering object. ++ * @format: the format to be set. ++ */ ++static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg, ++ enum dpaa2_sg_format format) ++{ ++ sg->bpid_offset &= 0xCFFFFFFF; ++ sg->bpid_offset |= (u32)format << 28; ++} ++ ++/** ++ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the bpid. ++ */ ++static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg) ++{ ++ return (u16)(sg->bpid_offset & 0x3FFF); ++} ++ ++/** ++ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry ++ * @sg: the given scatter-gathering object. ++ * @bpid: the bpid to be set. ++ */ ++static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid) ++{ ++ sg->bpid_offset &= 0xFFFFC000; ++ sg->bpid_offset |= (u32)bpid; ++} ++ ++/** ++ * dpaa2_sg_is_final() - Check final bit in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return bool. ++ */ ++static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg) ++{ ++ return !!(sg->bpid_offset >> 31); ++} ++ ++/** ++ * dpaa2_sg_set_final() - Set the final bit in SG entry ++ * @sg: the given scatter-gathering object. ++ * @final: the final boolean to be set. 
++ */ ++static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) ++{ ++ sg->bpid_offset &= 0x7FFFFFFF; ++ sg->bpid_offset |= (u32)final << 31; ++} ++ ++/* Endianness conversion helper functions ++ * The accelerator drivers which construct / read scatter gather entries ++ * need to call these in order to account for endianness mismatches between ++ * hardware and cpu ++ */ ++#ifdef __BIG_ENDIAN ++/** ++ * dpaa2_sg_cpu_to_le() - convert scatter gather entry from native cpu ++ * format little endian format. ++ * @sg: the given scatter gather entry. ++ */ ++static inline void dpaa2_sg_cpu_to_le(struct dpaa2_sg_entry *sg) ++{ ++ uint32_t *p = (uint32_t *)sg; ++ int i; ++ ++ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) ++ cpu_to_le32s(p++); ++} ++ ++/** ++ * dpaa2_sg_le_to_cpu() - convert scatter gather entry from little endian ++ * format to native cpu format. ++ * @sg: the given scatter gather entry. ++ */ ++static inline void dpaa2_sg_le_to_cpu(struct dpaa2_sg_entry *sg) ++{ ++ uint32_t *p = (uint32_t *)sg; ++ int i; ++ ++ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) ++ le32_to_cpus(p++); ++} ++#else ++#define dpaa2_sg_cpu_to_le(sg) ++#define dpaa2_sg_le_to_cpu(sg) ++#endif /* __BIG_ENDIAN */ ++ ++ ++/** ++ * struct dpaa2_fl_entry - structure for frame list entry. ++ * @addr_lo: the lower 32bit of address ++ * @addr_hi: the upper 32bit of address ++ * @len: the length in this sg entry. ++ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. ++ * @frc: frame context ++ * @ctrl: the 32bit control bits including dd, sc,... va, err. ++ * @flc_lo: the lower 32bit of flow context. ++ * @flc_hi: the upper 32bits of flow context. ++ * ++ * Frame List Entry (FLE) ++ * Identical to dpaa2_fd.simple layout, but some bits are different ++ */ ++struct dpaa2_fl_entry { ++ u32 addr_lo; ++ u32 addr_hi; ++ u32 len; ++ u32 bpid_offset; ++ u32 frc; ++ u32 ctrl; ++ u32 flc_lo; ++ u32 flc_hi; ++}; ++ ++enum dpaa2_fl_format { ++ dpaa2_fl_single = 0, ++ dpaa2_fl_res, ++ dpaa2_fl_sg ++}; ++ ++/** ++ * dpaa2_fl_get_addr() - Get address in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return address for the get function. ++ */ ++static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle) ++{ ++ return (dma_addr_t)((((uint64_t)fle->addr_hi) << 32) + fle->addr_lo); ++} ++ ++/** ++ * dpaa2_fl_set_addr() - Set the address in the frame list entry ++ * @fle: the given frame list entry. ++ * @addr: the address needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle, ++ dma_addr_t addr) ++{ ++ fle->addr_hi = upper_32_bits(addr); ++ fle->addr_lo = lower_32_bits(addr); ++} ++ ++/** ++ * dpaa2_fl_get_flc() - Get the flow context in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return flow context for the get function. ++ */ ++static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle) ++{ ++ return (dma_addr_t)((((uint64_t)fle->flc_hi) << 32) + fle->flc_lo); ++} ++ ++/** ++ * dpaa2_fl_set_flc() - Set the flow context in the frame list entry ++ * @fle: the given frame list entry. ++ * @flc_addr: the flow context address needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle, ++ dma_addr_t flc_addr) ++{ ++ fle->flc_hi = upper_32_bits(flc_addr); ++ fle->flc_lo = lower_32_bits(flc_addr); ++} ++ ++/** ++ * dpaa2_fl_get_len() - Get the length in the frame list entry ++ * @fle: the given frame list entry. 
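/*
 * Editorial sketch, not part of this patch: walking a scatter/gather table
 * as delivered by hardware with the SG helpers above. Entries sit in memory
 * in little-endian format, so each one is converted with
 * dpaa2_sg_le_to_cpu() (a no-op on little-endian CPUs) before being read.
 */
static inline u32 example_sg_total_len(struct dpaa2_sg_entry *sgt,
				       int max_entries)
{
	u32 total = 0;
	int i;

	for (i = 0; i < max_entries; i++) {
		dpaa2_sg_le_to_cpu(&sgt[i]);
		total += dpaa2_sg_get_len(&sgt[i]);	/* honours the short-length bit */
		if (dpaa2_sg_is_final(&sgt[i]))
			break;				/* last entry of the table */
	}
	return total;
}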
++ * ++ * Return length for the get function. ++ */ ++static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) ++{ ++ return fle->len; ++} ++ ++/** ++ * dpaa2_fl_set_len() - Set the length in the frame list entry ++ * @fle: the given frame list entry. ++ * @len: the length needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) ++{ ++ fle->len = len; ++} ++ ++/** ++ * dpaa2_fl_get_offset() - Get/Set the offset in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return offset for the get function. ++ */ ++static inline uint16_t dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) ++{ ++ return (uint16_t)(fle->bpid_offset >> 16) & 0x0FFF; ++} ++ ++/** ++ * dpaa2_fl_set_offset() - Set the offset in the frame list entry ++ * @fle: the given frame list entry. ++ * @offset: the offset needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, ++ uint16_t offset) ++{ ++ fle->bpid_offset &= 0xF000FFFF; ++ fle->bpid_offset |= (u32)(offset & 0x0FFF) << 16; ++} ++ ++/** ++ * dpaa2_fl_get_format() - Get the format in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return frame list format for the get function. ++ */ ++static inline enum dpaa2_fl_format dpaa2_fl_get_format( ++ const struct dpaa2_fl_entry *fle) ++{ ++ return (enum dpaa2_fl_format)((fle->bpid_offset >> 28) & 0x3); ++} ++ ++/** ++ * dpaa2_fl_set_format() - Set the format in the frame list entry ++ * @fle: the given frame list entry. ++ * @format: the frame list format needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, ++ enum dpaa2_fl_format format) ++{ ++ fle->bpid_offset &= 0xCFFFFFFF; ++ fle->bpid_offset |= (u32)(format & 0x3) << 28; ++} ++ ++/** ++ * dpaa2_fl_get_bpid() - Get the buffer pool id in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return bpid for the get function. ++ */ ++static inline uint16_t dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) ++{ ++ return (uint16_t)(fle->bpid_offset & 0x3FFF); ++} ++ ++/** ++ * dpaa2_fl_set_bpid() - Set the buffer pool id in the frame list entry ++ * @fle: the given frame list entry. ++ * @bpid: the buffer pool id needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, uint16_t bpid) ++{ ++ fle->bpid_offset &= 0xFFFFC000; ++ fle->bpid_offset |= (u32)bpid; ++} ++ ++/** dpaa2_fl_is_final() - check the final bit is set or not in the frame list. ++ * @fle: the given frame list entry. ++ * ++ * Return final bit settting. ++ */ ++static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) ++{ ++ return !!(fle->bpid_offset >> 31); ++} ++ ++/** ++ * dpaa2_fl_set_final() - Set the final bit in the frame list entry ++ * @fle: the given frame list entry. ++ * @final: the final bit needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) ++{ ++ fle->bpid_offset &= 0x7FFFFFFF; ++ fle->bpid_offset |= (u32)final << 31; ++} ++ ++/** ++ * struct dpaa2_dq - the qman result structure ++ * @dont_manipulate_directly: the 16 32bit data to represent the whole ++ * possible qman dequeue result. ++ * ++ * When frames are dequeued, the FDs show up inside "dequeue" result structures ++ * (if at all, not all dequeue results contain valid FDs). 
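/*
 * Editorial sketch, not part of this patch: a two-entry frame list of the
 * kind an accelerator driver might build (entry 0 = input buffer,
 * entry 1 = output buffer), using the frame list setters above.
 */
static inline void example_build_fl(struct dpaa2_fl_entry fle[2],
				    dma_addr_t in, u32 in_len,
				    dma_addr_t out, u32 out_len)
{
	memset(fle, 0, 2 * sizeof(*fle));

	dpaa2_fl_set_addr(&fle[0], in);
	dpaa2_fl_set_len(&fle[0], in_len);
	dpaa2_fl_set_format(&fle[0], dpaa2_fl_single);

	dpaa2_fl_set_addr(&fle[1], out);
	dpaa2_fl_set_len(&fle[1], out_len);
	dpaa2_fl_set_format(&fle[1], dpaa2_fl_single);
	dpaa2_fl_set_final(&fle[1], true);	/* mark the last entry of the list */
}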
This structure type ++ * is intentionally defined without internal detail, and the only reason it ++ * isn't declared opaquely (without size) is to allow the user to provide ++ * suitably-sized (and aligned) memory for these entries. ++ */ ++struct dpaa2_dq { ++ uint32_t dont_manipulate_directly[16]; ++}; ++ ++/* Parsing frame dequeue results */ ++/* FQ empty */ ++#define DPAA2_DQ_STAT_FQEMPTY 0x80 ++/* FQ held active */ ++#define DPAA2_DQ_STAT_HELDACTIVE 0x40 ++/* FQ force eligible */ ++#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20 ++/* Valid frame */ ++#define DPAA2_DQ_STAT_VALIDFRAME 0x10 ++/* FQ ODP enable */ ++#define DPAA2_DQ_STAT_ODPVALID 0x04 ++/* Volatile dequeue */ ++#define DPAA2_DQ_STAT_VOLATILE 0x02 ++/* volatile dequeue command is expired */ ++#define DPAA2_DQ_STAT_EXPIRED 0x01 ++ ++/** ++ * dpaa2_dq_flags() - Get the stat field of dequeue response ++ * @dq: the dequeue result. ++ */ ++uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull ++ * command. ++ * @dq: the dequeue result. ++ * ++ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. ++ */ ++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq) ++{ ++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE); ++} ++ ++/** ++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed. ++ * @dq: the dequeue result. ++ * ++ * Return boolean. ++ */ ++static inline int dpaa2_dq_is_pull_complete( ++ const struct dpaa2_dq *dq) ++{ ++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED); ++} ++ ++/** ++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response ++ * seqnum is valid only if VALIDFRAME flag is TRUE ++ * @dq: the dequeue result. ++ * ++ * Return seqnum. ++ */ ++uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_odpid() - Get the seqnum field in dequeue response ++ * odpid is valid only if ODPVAILD flag is TRUE. ++ * @dq: the dequeue result. ++ * ++ * Return odpid. ++ */ ++uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_fqid() - Get the fqid in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return fqid. ++ */ ++uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the byte count remaining in the FQ. ++ */ ++uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the frame count remaining in the FQ. ++ */ ++uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the frame queue context. ++ */ ++uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the frame descriptor. ++ */ ++const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq); ++ ++#endif /* __FSL_DPAA2_FD_H */ +diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h +new file mode 100644 +index 0000000..6ea2ff9 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h +@@ -0,0 +1,619 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. 
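/*
 * Editorial sketch, not part of this patch (and unrelated to the licence
 * text that follows): minimal validation of a dequeue result before touching
 * its frame descriptor, using the DPAA2_DQ_STAT_* flags declared above.
 */
static inline const struct dpaa2_fd *example_dq_to_fd(const struct dpaa2_dq *dq)
{
	if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
		return NULL;		/* this dequeue produced no frame */
	return dpaa2_dq_fd(dq);
}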
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPAA2_IO_H ++#define __FSL_DPAA2_IO_H ++ ++#include "fsl_dpaa2_fd.h" ++ ++struct dpaa2_io; ++struct dpaa2_io_store; ++ ++/** ++ * DOC: DPIO Service Management ++ * ++ * The DPIO service provides APIs for users to interact with the datapath ++ * by enqueueing and dequeing frame descriptors. ++ * ++ * The following set of APIs can be used to enqueue and dequeue frames ++ * as well as producing notification callbacks when data is available ++ * for dequeue. ++ */ ++ ++/** ++ * struct dpaa2_io_desc - The DPIO descriptor. ++ * @receives_notifications: Use notificaton mode. ++ * @has_irq: use irq-based proessing. ++ * @will_poll: use poll processing. ++ * @has_8prio: set for channel with 8 priority WQs. ++ * @cpu: the cpu index that at least interrupt handlers will execute on. ++ * @stash_affinity: the stash affinity for this portal favour 'cpu' ++ * @regs_cena: the cache enabled regs. ++ * @regs_cinh: the cache inhibited regs. ++ * @dpio_id: The dpio index. ++ * @qman_version: the qman version ++ * ++ * Describe the attributes and features of the DPIO object. ++ */ ++struct dpaa2_io_desc { ++ /* non-zero iff the DPIO has a channel */ ++ int receives_notifications; ++ /* non-zero if the DPIO portal interrupt is handled. If so, the ++ * caller/OS handles the interrupt and calls dpaa2_io_service_irq(). */ ++ int has_irq; ++ /* non-zero if the caller/OS is prepared to called the ++ * dpaa2_io_service_poll() routine as part of its run-to-completion (or ++ * scheduling) loop. If so, the DPIO service may dynamically switch some ++ * of its processing between polling-based and irq-based. It is illegal ++ * combination to have (!has_irq && !will_poll). */ ++ int will_poll; ++ /* ignored unless 'receives_notifications'. 
Non-zero iff the channel has ++ * 8 priority WQs, otherwise the channel has 2. */ ++ int has_8prio; ++ /* the cpu index that at least interrupt handlers will execute on. And ++ * if 'stash_affinity' is non-zero, the cache targeted by stash ++ * transactions is affine to this cpu. */ ++ int cpu; ++ /* non-zero if stash transactions for this portal favour 'cpu' over ++ * other CPUs. (Eg. zero if there's no stashing, or stashing is to ++ * shared cache.) */ ++ int stash_affinity; ++ /* Caller-provided flags, determined by bus-scanning and/or creation of ++ * DPIO objects via MC commands. */ ++ void *regs_cena; ++ void *regs_cinh; ++ int dpio_id; ++ uint32_t qman_version; ++}; ++ ++/** ++ * dpaa2_io_create() - create a dpaa2_io object. ++ * @desc: the dpaa2_io descriptor ++ * ++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual ++ * DPIO object. This handle can be used on it's own (like a one-portal "DPIO ++ * service") or later be added to a service-type "struct dpaa2_io" object. Note, ++ * the information required on 'cfg' is copied so the caller is free to do as ++ * they wish with the input parameter upon return. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure. ++ */ ++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc); ++ ++/** ++ * dpaa2_io_create_service() - Create an (initially empty) DPIO service. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure. ++ */ ++struct dpaa2_io *dpaa2_io_create_service(void); ++ ++/** ++ * dpaa2_io_default_service() - Use the driver's own global (and initially ++ * empty) DPIO service. ++ * ++ * This increments the reference count, so don't forget to use dpaa2_io_down() ++ * for each time this function is called. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure. ++ */ ++struct dpaa2_io *dpaa2_io_default_service(void); ++ ++/** ++ * dpaa2_io_down() - release the dpaa2_io object. ++ * @d: the dpaa2_io object to be released. ++ * ++ * The "struct dpaa2_io" type can represent an individual DPIO object (as ++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service", ++ * which can be used to group/encapsulate multiple DPIO objects. In all cases, ++ * each handle obtained should be released using this function. ++ */ ++void dpaa2_io_down(struct dpaa2_io *d); ++ ++/** ++ * dpaa2_io_service_add() - Add the given DPIO object to the given DPIO service. ++ * @service: the given DPIO service. ++ * @obj: the given DPIO object. ++ * ++ * 'service' must have been created by dpaa2_io_create_service() and 'obj' ++ * must have been created by dpaa2_io_create(). This increments the reference ++ * count on the object that 'obj' refers to, so the user could call ++ * dpaa2_io_down(obj) after this and the object will persist within the service ++ * (and will be destroyed when the service is destroyed). ++ * ++ * Return 0 for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_service_add(struct dpaa2_io *service, struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_get_descriptor() - Get the DPIO descriptor of the given DPIO object. ++ * @obj: the given DPIO object. ++ * @desc: the returned DPIO descriptor. ++ * ++ * This function will return failure if the given dpaa2_io struct represents a ++ * service rather than an individual DPIO object, otherwise it returns zero and ++ * the given 'cfg' structure is filled in. ++ * ++ * Return 0 for success, or -EINVAL for failure. 
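/*
 * Editorial sketch, not part of this patch: typical lifetime handling for
 * the driver-global DPIO service. dpaa2_io_default_service() takes a
 * reference, so every successful call is paired with dpaa2_io_down().
 */
static inline int example_with_default_service(void)
{
	struct dpaa2_io *io = dpaa2_io_default_service();

	if (!io)
		return -ENODEV;

	/* ... enqueue/dequeue through 'io' here ... */

	dpaa2_io_down(io);	/* drop the reference taken above */
	return 0;
}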
++ */ ++int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc); ++ ++/** ++ * dpaa2_io_poll() - Process any notifications and h/w-initiated events that ++ * are polling-driven. ++ * @obj: the given DPIO object. ++ * ++ * Obligatory for DPIO objects that have dpaa2_io_desc::will_poll non-zero. ++ * ++ * Return 0 for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_poll(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_irq() - Process any notifications and h/w-initiated events that are ++ * irq-driven. ++ * @obj: the given DPIO object. ++ * ++ * Obligatory for DPIO objects that have dpaa2_io_desc::has_irq non-zero. ++ * ++ * Return IRQ_HANDLED for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_irq(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_pause_poll() - Used to stop polling. ++ * @obj: the given DPIO object. ++ * ++ * If a polling application is going to stop polling for a period of time and ++ * supports interrupt processing, it can call this function to convert all ++ * processing to IRQ. (Eg. when sleeping.) ++ * ++ * Return -EINVAL. ++ */ ++int dpaa2_io_pause_poll(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_resume_poll() - Resume polling ++ * @obj: the given DPIO object. ++ * ++ * Return -EINVAL. ++ */ ++int dpaa2_io_resume_poll(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_service_notifications() - Get a mask of cpus that the DPIO service ++ * can receive notifications on. ++ * @s: the given DPIO object. ++ * @mask: the mask of cpus. ++ * ++ * Note that this is a run-time snapshot. If things like cpu-hotplug are ++ * supported in the target system, then an attempt to register notifications ++ * for a cpu that appears present in the given mask might fail if that cpu has ++ * gone offline in the mean time. ++ */ ++void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask); ++ ++/** ++ * dpaa2_io_service_stashing - Get a mask of cpus that the DPIO service has stash ++ * affinity to. ++ * @s: the given DPIO object. ++ * @mask: the mask of cpus. ++ */ ++void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask); ++ ++/** ++ * dpaa2_io_service_nonaffine() - Check the DPIO service's cpu affinity ++ * for stashing. ++ * @s: the given DPIO object. ++ * ++ * Return a boolean, whether or not the DPIO service has resources that have no ++ * particular cpu affinity for stashing. (Useful to know if you wish to operate ++ * on CPUs that the service has no affinity to, you would choose to use ++ * resources that are neutral, rather than affine to a different CPU.) Unlike ++ * other service-specific APIs, this one doesn't return an error if it is passed ++ * a non-service object. So don't do it. ++ */ ++int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s); ++ ++/*************************/ ++/* Notification handling */ ++/*************************/ ++ ++/** ++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure. ++ * @cb: the callback to be invoked when the notification arrives. ++ * @is_cdan: Zero/FALSE for FQDAN, non-zero/TRUE for CDAN. ++ * @id: FQID or channel ID, needed for rearm. ++ * @desired_cpu: the cpu on which the notifications will show up. ++ * @actual_cpu: the cpu the notification actually shows up. ++ * @migration_cb: callback function used for migration. ++ * @dpio_id: the dpio index. ++ * @qman64: the 64-bit context value shows up in the FQDAN/CDAN. ++ * @node: the list node. ++ * @dpio_private: the dpio object internal to dpio_service. ++ * ++ * When a FQDAN/CDAN registration is made (eg. 
by DPNI/DPCON/DPAI code), a ++ * context of the following type is used. The caller can embed it within a ++ * larger structure in order to add state that is tracked along with the ++ * notification (this may be useful when callbacks are invoked that pass this ++ * notification context as a parameter). ++ */ ++struct dpaa2_io_notification_ctx { ++ void (*cb)(struct dpaa2_io_notification_ctx *); ++ int is_cdan; ++ uint32_t id; ++ /* This specifies which cpu the user wants notifications to show up on ++ * (ie. to execute 'cb'). If notification-handling on that cpu is not ++ * available at the time of notification registration, the registration ++ * will fail. */ ++ int desired_cpu; ++ /* If the target platform supports cpu-hotplug or other features ++ * (related to power-management, one would expect) that can migrate IRQ ++ * handling of a given DPIO object, then this value will potentially be ++ * different to 'desired_cpu' at run-time. */ ++ int actual_cpu; ++ /* And if migration does occur and this callback is non-NULL, it will ++ * be invoked prior to any futher notification callbacks executing on ++ * 'newcpu'. Note that 'oldcpu' is what 'actual_cpu' was prior to the ++ * migration, and 'newcpu' is what it is now. Both could conceivably be ++ * different to 'desired_cpu'. */ ++ void (*migration_cb)(struct dpaa2_io_notification_ctx *, ++ int oldcpu, int newcpu); ++ /* These are returned from dpaa2_io_service_register(). ++ * 'dpio_id' is the dpaa2_io_desc::dpio_id value of the DPIO object that ++ * has been selected by the service for receiving the notifications. The ++ * caller can use this value in the MC command that attaches the FQ (or ++ * channel) of their DPNI (or DPCON, respectively) to this DPIO for ++ * notification-generation. ++ * 'qman64' is the 64-bit context value that needs to be sent in the ++ * same MC command in order to be programmed into the FQ or channel - ++ * this is the 64-bit value that shows up in the FQDAN/CDAN messages to ++ * the DPIO object, and the DPIO service specifies this value back to ++ * the caller so that the notifications that show up will be ++ * comprensible/demux-able to the DPIO service. */ ++ int dpio_id; ++ uint64_t qman64; ++ /* These fields are internal to the DPIO service once the context is ++ * registered. TBD: may require more internal state fields. */ ++ struct list_head node; ++ void *dpio_private; ++}; ++ ++/** ++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN ++ * notifications on the given DPIO service. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * The MC command to attach the caller's DPNI/DPCON/DPAI device to a ++ * DPIO object is performed after this function is called. In that way, (a) the ++ * DPIO service is "ready" to handle a notification arrival (which might happen ++ * before the "attach" command to MC has returned control of execution back to ++ * the caller), and (b) the DPIO service can provide back to the caller the ++ * 'dpio_id' and 'qman64' parameters that it should pass along in the MC command ++ * in order for the DPNI/DPCON/DPAI resources to be configured to produce the ++ * right notification fields to the DPIO service. ++ * ++ * Return 0 for success, or -ENODEV for failure. ++ */ ++int dpaa2_io_service_register(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++/** ++ * dpaa2_io_service_deregister - The opposite of 'register'. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. 
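/*
 * Editorial sketch, not part of this patch: registering for FQDAN
 * notifications on a frame queue. The context is embedded in a larger,
 * driver-owned structure, as the comment above suggests; the dpio_id and
 * qman64 values written back by the service are then passed along in the
 * driver-specific MC command that attaches the FQ (only hinted at here).
 */
struct example_rx_queue {
	struct dpaa2_io_notification_ctx nctx;
	u32 fqid;
};

static void example_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct example_rx_queue *q = container_of(nctx, struct example_rx_queue,
						  nctx);

	/* schedule pull-dequeue / NAPI-style work for q->fqid here */
	(void)q;
}

static inline int example_register_fqdan(struct dpaa2_io *service,
					 struct example_rx_queue *q, int cpu)
{
	q->nctx.cb = example_fqdan_cb;
	q->nctx.is_cdan = 0;		/* FQDAN, not CDAN */
	q->nctx.id = q->fqid;
	q->nctx.desired_cpu = cpu;

	/* on success, q->nctx.dpio_id and q->nctx.qman64 feed the MC attach */
	return dpaa2_io_service_register(service, &q->nctx);
}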
++ * ++ * Note that 'register' should be called *before* ++ * making the MC call to attach the notification-producing device to the ++ * notification-handling DPIO service, the 'unregister' function should be ++ * called *after* making the MC call to detach the notification-producing ++ * device. ++ * ++ * Return 0 for success. ++ */ ++int dpaa2_io_service_deregister(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++/** ++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is ++ * considered "disarmed". Ie. the user can issue pull dequeue operations on that ++ * traffic source for as long as it likes. Eventually it may wish to "rearm" ++ * that source to allow it to produce another FQDAN/CDAN, that's what this ++ * function achieves. ++ * ++ * Return 0 for success, or -ENODEV if no service available, -EBUSY/-EIO for not ++ * being able to implement the rearm the notifiaton due to setting CDAN or ++ * scheduling fq. ++ */ ++int dpaa2_io_service_rearm(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++/** ++ * dpaa2_io_from_registration() - Get the DPIO object from the given notification ++ * context. ++ * @ctx: the given notifiation context. ++ * @ret: the returned DPIO object. ++ * ++ * Like 'dpaa2_io_service_get_persistent()' (see below), except that the ++ * returned handle is not selected based on a 'cpu' argument, but is the same ++ * DPIO object that the given notification context is registered against. The ++ * returned handle carries a reference count, so a corresponding dpaa2_io_down() ++ * would be required when the reference is no longer needed. ++ * ++ * Return 0 for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, ++ struct dpaa2_io **ret); ++ ++/**********************************/ ++/* General usage of DPIO services */ ++/**********************************/ ++ ++/** ++ * dpaa2_io_service_get_persistent() - Get the DPIO resource from the given ++ * notification context and cpu. ++ * @service: the DPIO service. ++ * @cpu: the cpu that the DPIO resource has stashing affinity to. ++ * @ret: the returned DPIO resource. ++ * ++ * The various DPIO interfaces can accept a "struct dpaa2_io" handle that refers ++ * to an individual DPIO object or to a whole service. In the latter case, an ++ * internal choice is made for each operation. This function supports the former ++ * case, by selecting an individual DPIO object *from* the service in order for ++ * it to be used multiple times to provide "persistence". The returned handle ++ * also carries a reference count, so a corresponding dpaa2_io_down() would be ++ * required when the reference is no longer needed. Note, a parameter of -1 for ++ * 'cpu' will select a DPIO resource that has no particular stashing affinity to ++ * any cpu (eg. one that stashes to platform cache). ++ * ++ * Return 0 for success, or -ENODEV for failure. ++ */ ++int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, ++ struct dpaa2_io **ret); ++ ++/*****************/ ++/* Pull dequeues */ ++/*****************/ ++ ++/** ++ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq. ++ * @d: the given DPIO service. ++ * @fqid: the given frame queue id. ++ * @s: the dpaa2_io_store object for the result. 
++ * ++ * To support DCA/order-preservation, it will be necessary to support an ++ * alternative form, because they must ultimately dequeue to DQRR rather than a ++ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will ++ * "complete" using a caller-provided callback (from DQRR processing) rather ++ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. ++ * the alternative form will likely take a callback parameter rather than a ++ * store parameter. Ignoring it for now to keep the picture clearer. ++ * ++ * Return 0 for success, or error code for failure. ++ */ ++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, ++ struct dpaa2_io_store *s); ++ ++/** ++ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel. ++ * @d: the given DPIO service. ++ * @channelid: the given channel id. ++ * @s: the dpaa2_io_store object for the result. ++ * ++ * To support DCA/order-preservation, it will be necessary to support an ++ * alternative form, because they must ultimately dequeue to DQRR rather than a ++ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will ++ * "complete" using a caller-provided callback (from DQRR processing) rather ++ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. ++ * the alternative form will likely take a callback parameter rather than a ++ * store parameter. Ignoring it for now to keep the picture clearer. ++ * ++ * Return 0 for success, or error code for failure. ++ */ ++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, ++ struct dpaa2_io_store *s); ++ ++/************/ ++/* Enqueues */ ++/************/ ++ ++/** ++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue. ++ * @d: the given DPIO service. ++ * @fqid: the given frame queue id. ++ * @fd: the frame descriptor which is enqueued. ++ * ++ * This definition bypasses some features that are not expected to be priority-1 ++ * features, and may not be needed at all via current assumptions (QBMan's ++ * feature set is wider than the MC object model is intendeding to support, ++ * initially at least). Plus, keeping them out (for now) keeps the API view ++ * simpler. Missing features are; ++ * - enqueue confirmation (results DMA'd back to the user) ++ * - ORP ++ * - DCA/order-preservation (see note in "pull dequeues") ++ * - enqueue consumption interrupts ++ * ++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, ++ * or -ENODEV if there is no dpio service. ++ */ ++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, ++ uint32_t fqid, ++ const struct dpaa2_fd *fd); ++ ++/** ++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. ++ * @d: the given DPIO service. ++ * @qdid: the given queuing destination id. ++ * @prio: the given queuing priority. ++ * @qdbin: the given queuing destination bin. ++ * @fd: the frame descriptor which is enqueued. ++ * ++ * This definition bypasses some features that are not expected to be priority-1 ++ * features, and may not be needed at all via current assumptions (QBMan's ++ * feature set is wider than the MC object model is intendeding to support, ++ * initially at least). Plus, keeping them out (for now) keeps the API view ++ * simpler. 
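/*
 * Editorial sketch, not part of this patch: enqueuing a prepared frame
 * descriptor with dpaa2_io_service_enqueue_fq(), retrying a bounded number
 * of times while the enqueue ring reports -EBUSY.
 */
static inline int example_enqueue(struct dpaa2_io *d, u32 fqid,
				  const struct dpaa2_fd *fd)
{
	int i, err;

	for (i = 0; i < 10; i++) {
		err = dpaa2_io_service_enqueue_fq(d, fqid, fd);
		if (err != -EBUSY)
			return err;	/* 0 on success, or a hard error */
	}
	return -EBUSY;			/* ring stayed full; caller should back off */
}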
Missing features are; ++ * - enqueue confirmation (results DMA'd back to the user) ++ * - ORP ++ * - DCA/order-preservation (see note in "pull dequeues") ++ * - enqueue consumption interrupts ++ * ++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, ++ * or -ENODEV if there is no dpio service. ++ */ ++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, ++ uint32_t qdid, uint8_t prio, uint16_t qdbin, ++ const struct dpaa2_fd *fd); ++ ++/*******************/ ++/* Buffer handling */ ++/*******************/ ++ ++/** ++ * dpaa2_io_service_release() - Release buffers to a buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the buffer pool id. ++ * @buffers: the buffers to be released. ++ * @num_buffers: the number of the buffers to be released. ++ * ++ * Return 0 for success, and negative error code for failure. ++ */ ++int dpaa2_io_service_release(struct dpaa2_io *d, ++ uint32_t bpid, ++ const uint64_t *buffers, ++ unsigned int num_buffers); ++ ++/** ++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the buffer pool id. ++ * @buffers: the buffer addresses for acquired buffers. ++ * @num_buffers: the expected number of the buffers to acquire. ++ * ++ * Return a negative error code if the command failed, otherwise it returns ++ * the number of buffers acquired, which may be less than the number requested. ++ * Eg. if the buffer pool is empty, this will return zero. ++ */ ++int dpaa2_io_service_acquire(struct dpaa2_io *d, ++ uint32_t bpid, ++ uint64_t *buffers, ++ unsigned int num_buffers); ++ ++/***************/ ++/* DPIO stores */ ++/***************/ ++ ++/* These are reusable memory blocks for retrieving dequeue results into, and to ++ * assist with parsing those results once they show up. They also hide the ++ * details of how to use "tokens" to make detection of DMA results possible (ie. ++ * comparing memory before the DMA and after it) while minimising the needless ++ * clearing/rewriting of those memory locations between uses. ++ */ ++ ++/** ++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue ++ * result. ++ * @max_frames: the maximum number of dequeued result for frames, must be <= 16. ++ * @dev: the device to allow mapping/unmapping the DMAable region. ++ * ++ * Constructor - max_frames must be <= 16. The user provides the ++ * device struct to allow mapping/unmapping of the DMAable region. Area for ++ * storage will be allocated during create. The size of this storage is ++ * "max_frames*sizeof(struct dpaa2_dq)". The 'dpaa2_io_store' returned is a ++ * wrapper structure allocated within the DPIO code, which owns and manages ++ * allocated store. ++ * ++ * Return dpaa2_io_store struct for successfuly created storage memory, or NULL ++ * if not getting the stroage for dequeue result in create API. ++ */ ++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, ++ struct device *dev); ++ ++/** ++ * dpaa2_io_store_destroy() - Destroy the dma memory storage for dequeue ++ * result. ++ * @s: the storage memory to be destroyed. ++ * ++ * Frees to specified storage memory. ++ */ ++void dpaa2_io_store_destroy(struct dpaa2_io_store *s); ++ ++/** ++ * dpaa2_io_store_next() - Determine when the next dequeue result is available. ++ * @s: the dpaa2_io_store object. ++ * @is_last: indicate whether this is the last frame in the pull command. 
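/*
 * Editorial sketch, not part of this patch: a volatile (pull) dequeue using
 * a dpaa2_io_store. dpaa2_io_store_next() is declared just below; it returns
 * NULL with is_last set when the queue was empty, and NULL without is_last
 * while the result has not yet been DMA'd back, so this sketch simply spins.
 */
static inline int example_pull(struct dpaa2_io *d, struct device *dev, u32 fqid)
{
	struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);
	struct dpaa2_dq *dq;
	int is_last = 0, err;

	if (!s)
		return -ENOMEM;

	err = dpaa2_io_service_pull_fq(d, fqid, s);
	if (err)
		goto out;

	do {
		dq = dpaa2_io_store_next(s, &is_last);
		if (dq && (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME)) {
			const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);

			/* consume 'fd' here */
			(void)fd;
		}
	} while (!is_last);
out:
	dpaa2_io_store_destroy(s);
	return err;
}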
++ * ++ * Once dpaa2_io_store has been passed to a function that performs dequeues to ++ * it, like dpaa2_ni_rx(), this function can be used to determine when the next ++ * frame result is available. Once this function returns non-NULL, a subsequent ++ * call to it will try to find the *next* dequeue result. ++ * ++ * Note that if a pull-dequeue has a null result because the target FQ/channel ++ * was empty, then this function will return NULL rather than expect the caller ++ * to always check for this on his own side. As such, "is_last" can be used to ++ * differentiate between "end-of-empty-dequeue" and "still-waiting". ++ * ++ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue. ++ */ ++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last); ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++/** ++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq. ++ * @d: the given DPIO object. ++ * @fqid: the id of frame queue to be queried. ++ * @fcnt: the queried frame count. ++ * @bcnt: the queried byte count. ++ * ++ * Knowing the FQ count at run-time can be useful in debugging situations. ++ * The instantaneous frame- and byte-count are hereby returned. ++ * ++ * Return 0 for a successful query, and negative error code if query fails. ++ */ ++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, ++ uint32_t *fcnt, uint32_t *bcnt); ++ ++/** ++ * dpaa2_io_query_bp_count() - Query the number of buffers currenty in a ++ * buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the index of buffer pool to be queried. ++ * @num: the queried number of buffers in the buffer pool. ++ * ++ * Return 0 for a sucessful query, and negative error code if query fails. ++ */ ++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, ++ uint32_t *num); ++#endif ++#endif /* __FSL_DPAA2_IO_H */ +diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h +new file mode 100644 +index 0000000..00f0b74 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/mc-cmd.h +@@ -0,0 +1,133 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_MC_CMD_H ++#define __FSL_MC_CMD_H ++ ++#define MC_CMD_NUM_OF_PARAMS 7 ++ ++#define MAKE_UMASK64(_width) \ ++ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \ ++ (uint64_t)-1)) ++ ++static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) ++{ ++ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); ++} ++ ++static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) ++{ ++ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); ++} ++ ++struct mc_command { ++ uint64_t header; ++ uint64_t params[MC_CMD_NUM_OF_PARAMS]; ++}; ++ ++enum mc_cmd_status { ++ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ ++ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ ++ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ ++ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ ++ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ ++ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ ++ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ ++ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ ++ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ ++ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */ ++ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ ++ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ ++}; ++ ++/* ++ * MC command flags ++ */ ++ ++/* High priority flag */ ++#define MC_CMD_FLAG_PRI 0x00008000 ++/* Command completion flag */ ++#define MC_CMD_FLAG_INTR_DIS 0x01000000 ++ ++/* TODO Remove following two defines after completion of flib 8.0.0 ++integration */ ++#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */ ++#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */ ++ ++#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */ ++#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */ ++#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */ ++#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */ ++#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */ ++#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/ ++#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */ ++#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/ ++#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */ ++ ++#define MC_CMD_HDR_READ_STATUS(_hdr) \ ++ ((enum mc_cmd_status)mc_dec((_hdr), \ ++ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S)) ++ ++#define MC_CMD_HDR_READ_TOKEN(_hdr) \ ++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) ++ ++#define MC_CMD_HDR_READ_FLAGS(_hdr) \ ++ ((uint32_t)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S)) ++ ++#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ ++ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) ++ ++#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ ++ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) ++ ++#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ ((_cmd).params[_param] |= 
mc_enc((_offset), (_width), _arg)) ++ ++#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) ++ ++static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ uint64_t hdr; ++ ++ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id); ++ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S, ++ (cmd_flags & MC_CMD_HDR_FLAGS_MASK)); ++ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token); ++ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S, ++ MC_CMD_STATUS_READY); ++ ++ return hdr; ++} ++ ++#endif /* __FSL_MC_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h +new file mode 100644 +index 0000000..1246ca8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/mc-private.h +@@ -0,0 +1,168 @@ ++/* ++ * Freescale Management Complex (MC) bus private declarations ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++#ifndef _FSL_MC_PRIVATE_H_ ++#define _FSL_MC_PRIVATE_H_ ++ ++#include "../include/mc.h" ++#include ++#include ++ ++#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc" ++ ++#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \ ++ (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \ ++ (_mc_dev)->obj_desc.id == (_obj_desc)->id) ++ ++#define FSL_MC_IS_ALLOCATABLE(_obj_type) \ ++ (strcmp(_obj_type, "dpbp") == 0 || \ ++ strcmp(_obj_type, "dpmcp") == 0 || \ ++ strcmp(_obj_type, "dpcon") == 0) ++ ++/** ++ * Maximum number of total IRQs that can be pre-allocated for an MC bus' ++ * IRQ pool ++ */ ++#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256 ++ ++/** ++ * Maximum number of extra IRQs pre-reallocated for an MC bus' IRQ pool, ++ * to be used by dynamically created MC objects ++ */ ++#define FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS 64 ++ ++/** ++ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device ++ * @root_mc_bus_dev: MC object device representing the root DPRC ++ * @irq_domain: IRQ domain for the fsl-mc bus type ++ * @gic_supported: boolean flag that indicates if the GIC interrupt controller ++ * is supported. 
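/*
 * Editorial sketch, not part of this patch and unrelated to the fsl_mc
 * declarations that follow: how the mc-cmd.h helpers above compose. The
 * command id, token and packed argument are placeholder values; the actual
 * portal I/O (mc_send_command(), declared in mc-sys.h) happens elsewhere.
 */
static inline enum mc_cmd_status example_build_and_check(struct mc_command *cmd,
							 uint16_t cmd_id,
							 uint16_t token)
{
	cmd->header = mc_encode_cmd_header(cmd_id, MC_CMD_FLAG_PRI, token);

	/* pack a 32-bit argument into bits 31..0 of params[0] */
	MC_CMD_OP(*cmd, 0, 0, 32, uint32_t, 0x12345678);

	/* after the command completes, the MC writes its status back here */
	return MC_CMD_HDR_READ_STATUS(cmd->header);
}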
++ * @num_translation_ranges: number of entries in addr_translation_ranges ++ * @addr_translation_ranges: array of bus to system address translation ranges ++ */ ++struct fsl_mc { ++ struct fsl_mc_device *root_mc_bus_dev; ++ struct irq_domain *irq_domain; ++ bool gic_supported; ++ uint8_t num_translation_ranges; ++ struct fsl_mc_addr_translation_range *translation_ranges; ++}; ++ ++/** ++ * enum mc_region_types - Types of MC MMIO regions ++ */ ++enum fsl_mc_region_types { ++ FSL_MC_PORTAL = 0x0, ++ FSL_QBMAN_PORTAL, ++ ++ /* ++ * New offset types must be added above this entry ++ */ ++ FSL_NUM_MC_OFFSET_TYPES ++}; ++ ++/** ++ * struct fsl_mc_addr_translation_range - bus to system address translation ++ * range ++ * @mc_region_type: Type of MC region for the range being translated ++ * @start_mc_offset: Start MC offset of the range being translated ++ * @end_mc_offset: MC offset of the first byte after the range (last MC ++ * offset of the range is end_mc_offset - 1) ++ * @start_phys_addr: system physical address corresponding to start_mc_addr ++ */ ++struct fsl_mc_addr_translation_range { ++ enum fsl_mc_region_types mc_region_type; ++ uint64_t start_mc_offset; ++ uint64_t end_mc_offset; ++ phys_addr_t start_phys_addr; ++}; ++ ++/** ++ * struct fsl_mc_resource_pool - Pool of MC resources of a given ++ * type ++ * @type: type of resources in the pool ++ * @max_count: maximum number of resources in the pool ++ * @free_count: number of free resources in the pool ++ * @mutex: mutex to serialize access to the pool's free list ++ * @free_list: anchor node of list of free resources in the pool ++ * @mc_bus: pointer to the MC bus that owns this resource pool ++ */ ++struct fsl_mc_resource_pool { ++ enum fsl_mc_pool_type type; ++ int16_t max_count; ++ int16_t free_count; ++ struct mutex mutex; /* serializes access to free_list */ ++ struct list_head free_list; ++ struct fsl_mc_bus *mc_bus; ++}; ++ ++/** ++ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC ++ * @mc_dev: fsl-mc device for the bus device itself. ++ * @resource_pools: array of resource pools (one pool per resource type) ++ * for this MC bus. These resources represent allocatable entities ++ * from the physical DPRC. ++ * @atomic_mc_io: mc_io object to be used to send DPRC commands to the MC ++ * in atomic context (e.g., when programming MSIs in program_msi_at_mc()). ++ * @atomic_dprc_handle: DPRC handle opened using the atomic_mc_io's portal. ++ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool. 
++ * @scan_mutex: Serializes bus scanning ++ * @dprc_attr: DPRC attributes ++ */ ++struct fsl_mc_bus { ++ struct fsl_mc_device mc_dev; ++ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; ++ struct fsl_mc_device_irq *irq_resources; ++ struct fsl_mc_io *atomic_mc_io; ++ uint16_t atomic_dprc_handle; ++ struct mutex scan_mutex; /* serializes bus scanning */ ++ struct dprc_attributes dprc_attr; ++}; ++ ++#define to_fsl_mc_bus(_mc_dev) \ ++ container_of(_mc_dev, struct fsl_mc_bus, mc_dev) ++ ++int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, ++ struct fsl_mc_io *mc_io, ++ struct device *parent_dev, ++ const char *driver_override, ++ struct fsl_mc_device **new_mc_dev); ++ ++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev); ++ ++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, ++ unsigned int *total_irq_count); ++ ++int __init dprc_driver_init(void); ++ ++void __exit dprc_driver_exit(void); ++ ++int __init fsl_mc_allocator_driver_init(void); ++ ++void __exit fsl_mc_allocator_driver_exit(void); ++ ++int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_resource ++ **new_resource); ++ ++void fsl_mc_resource_free(struct fsl_mc_resource *resource); ++ ++int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, ++ unsigned int irq_count); ++ ++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); ++ ++void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev); ++ ++void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev); ++ ++#endif /* _FSL_MC_PRIVATE_H_ */ +diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h +new file mode 100644 +index 0000000..b08df85 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/mc-sys.h +@@ -0,0 +1,128 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * Interface of the I/O services to send MC commands to the MC hardware ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef _FSL_MC_SYS_H ++#define _FSL_MC_SYS_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * Bit masks for a MC I/O object (struct fsl_mc_io) flags ++ */ ++#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001 ++ ++struct fsl_mc_resource; ++struct mc_command; ++ ++/** ++ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command() ++ * @dev: device associated with this Mc I/O object ++ * @flags: flags for mc_send_command() ++ * @portal_size: MC command portal size in bytes ++ * @portal_phys_addr: MC command portal physical address ++ * @portal_virt_addr: MC command portal virtual address ++ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. ++ * @mc_command_done_irq_armed: Flag indicating that the MC command done IRQ ++ * is currently armed. ++ * @mc_command_done_completion: Completion variable to be signaled when an MC ++ * command sent to the MC fw is completed. ++ * ++ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not ++ * set: ++ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC ++ * portal, if the fsl_mc_io object was created with the ++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this ++ * fsl_mc_io object must be made only from non-atomic context. ++ * @mc_command_done_completion: Linux completion variable to be signaled ++ * when a DPMCP command completion interrupts is received. ++ * @mc_command_done_irq_armed: Boolean flag that indicates if interrupts have ++ * been successfully configured for the corresponding DPMCP object. ++ * ++ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is ++ * set: ++ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC ++ * portal, if the fsl_mc_io object was created with the ++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this ++ * fsl_mc_io object can be made from atomic or non-atomic context. 
++ */ ++struct fsl_mc_io { ++ struct device *dev; ++ uint16_t flags; ++ uint16_t portal_size; ++ phys_addr_t portal_phys_addr; ++ void __iomem *portal_virt_addr; ++ struct fsl_mc_device *dpmcp_dev; ++ union { ++ /* ++ * These fields are only meaningful if the ++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set ++ */ ++ struct { ++ struct mutex mutex; /* serializes mc_send_command() */ ++ struct completion mc_command_done_completion; ++ bool mc_command_done_irq_armed; ++ }; ++ ++ /* ++ * This field is only meaningful if the ++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set ++ */ ++ spinlock_t spinlock; /* serializes mc_send_command() */ ++ }; ++}; ++ ++int __must_check fsl_create_mc_io(struct device *dev, ++ phys_addr_t mc_portal_phys_addr, ++ uint32_t mc_portal_size, ++ struct fsl_mc_device *dpmcp_dev, ++ uint32_t flags, struct fsl_mc_io **new_mc_io); ++ ++void fsl_destroy_mc_io(struct fsl_mc_io *mc_io); ++ ++int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, ++ struct fsl_mc_device *dpmcp_dev); ++ ++void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io); ++ ++int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io); ++ ++int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd); ++ ++#endif /* _FSL_MC_SYS_H */ +diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h +new file mode 100644 +index 0000000..bbeb121 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/mc.h +@@ -0,0 +1,244 @@ ++/* ++ * Freescale Management Complex (MC) bus public interface ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++#ifndef _FSL_MC_H_ ++#define _FSL_MC_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "../include/dprc.h" ++ ++#define FSL_MC_VENDOR_FREESCALE 0x1957 ++ ++struct fsl_mc_device; ++struct fsl_mc_io; ++ ++/** ++ * struct fsl_mc_driver - MC object device driver object ++ * @driver: Generic device driver ++ * @match_id_table: table of supported device matching Ids ++ * @probe: Function called when a device is added ++ * @remove: Function called when a device is removed ++ * @shutdown: Function called at shutdown time to quiesce the device ++ * @suspend: Function called when a device is stopped ++ * @resume: Function called when a device is resumed ++ * ++ * Generic DPAA device driver object for device drivers that are registered ++ * with a DPRC bus. This structure is to be embedded in each device-specific ++ * driver structure. ++ */ ++struct fsl_mc_driver { ++ struct device_driver driver; ++ const struct fsl_mc_device_match_id *match_id_table; ++ int (*probe)(struct fsl_mc_device *dev); ++ int (*remove)(struct fsl_mc_device *dev); ++ void (*shutdown)(struct fsl_mc_device *dev); ++ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); ++ int (*resume)(struct fsl_mc_device *dev); ++}; ++ ++#define to_fsl_mc_driver(_drv) \ ++ container_of(_drv, struct fsl_mc_driver, driver) ++ ++/** ++ * struct fsl_mc_device_match_id - MC object device Id entry for driver matching ++ * @vendor: vendor ID ++ * @obj_type: MC object type ++ * @ver_major: MC object version major number ++ * @ver_minor: MC object version minor number ++ * ++ * Type of entries in the "device Id" table for MC object devices supported by ++ * a MC object device driver. 
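/*
 * Editorial sketch, not part of this patch: creating a command portal I/O
 * object and issuing one command through it. Passing
 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL selects the spinlock-protected path
 * described above; the NULL dpmcp device and the assumption that
 * mc_send_command() returns 0 on success are simplifications for this sketch.
 */
static inline int example_send_one(struct device *dev, phys_addr_t portal_pa,
				   uint32_t portal_size, struct mc_command *cmd)
{
	struct fsl_mc_io *mc_io;
	int err;

	err = fsl_create_mc_io(dev, portal_pa, portal_size, NULL,
			       FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
	if (err)
		return err;

	err = mc_send_command(mc_io, cmd);	/* 0 assumed on MC success */

	fsl_destroy_mc_io(mc_io);
	return err;
}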
The last entry of the table has vendor set to 0x0 ++ */ ++struct fsl_mc_device_match_id { ++ uint16_t vendor; ++ const char obj_type[16]; ++ uint32_t ver_major; ++ uint32_t ver_minor; ++}; ++ ++/** ++ * enum fsl_mc_pool_type - Types of allocatable MC bus resources ++ * ++ * Entries in these enum are used as indices in the array of resource ++ * pools of an fsl_mc_bus object. ++ */ ++enum fsl_mc_pool_type { ++ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ ++ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ ++ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ ++ FSL_MC_POOL_IRQ, ++ ++ /* ++ * NOTE: New resource pool types must be added before this entry ++ */ ++ FSL_MC_NUM_POOL_TYPES ++}; ++ ++/** ++ * struct fsl_mc_resource - MC generic resource ++ * @type: type of resource ++ * @id: unique MC resource Id within the resources of the same type ++ * @data: pointer to resource-specific data if the resource is currently ++ * allocated, or NULL if the resource is not currently allocated. ++ * @parent_pool: pointer to the parent resource pool from which this ++ * resource is allocated from. ++ * @node: Node in the free list of the corresponding resource pool ++ * ++ * NOTE: This structure is to be embedded as a field of specific ++ * MC resource structures. ++ */ ++struct fsl_mc_resource { ++ enum fsl_mc_pool_type type; ++ int32_t id; ++ void *data; ++ struct fsl_mc_resource_pool *parent_pool; ++ struct list_head node; ++}; ++ ++/** ++ * struct fsl_mc_device_irq - MC object device message-based interrupt ++ * @msi_paddr: message-based interrupt physical address ++ * @msi_value: message-based interrupt data value ++ * @irq_number: Linux IRQ number assigned to the interrupt ++ * @mc_dev: MC object device that owns this interrupt ++ * @dev_irq_index: device-relative IRQ index ++ * @resource: MC generic resource associated with the interrupt ++ */ ++struct fsl_mc_device_irq { ++ phys_addr_t msi_paddr; ++ uint32_t msi_value; ++ uint32_t irq_number; ++ struct fsl_mc_device *mc_dev; ++ uint8_t dev_irq_index; ++ struct fsl_mc_resource resource; ++}; ++ ++#define to_fsl_mc_irq(_mc_resource) \ ++ container_of(_mc_resource, struct fsl_mc_device_irq, resource) ++ ++/** ++ * Bit masks for a MC object device (struct fsl_mc_device) flags ++ */ ++#define FSL_MC_IS_DPRC 0x0001 ++ ++/** ++ * root dprc's parent is a platform device ++ * that platform device's bus type is platform_bus_type. ++ */ ++#define is_root_dprc(dev) \ ++ ((to_fsl_mc_device(dev)->flags & FSL_MC_IS_DPRC) && \ ++ ((dev)->bus == &fsl_mc_bus_type) && \ ++ ((dev)->parent->bus == &platform_bus_type)) ++ ++/** ++ * Default DMA mask for devices on a fsl-mc bus ++ */ ++#define FSL_MC_DEFAULT_DMA_MASK (~0ULL) ++ ++/** ++ * struct fsl_mc_device - MC object device object ++ * @dev: Linux driver model device object ++ * @dma_mask: Default DMA mask ++ * @flags: MC object device flags ++ * @icid: Isolation context ID for the device ++ * @mc_handle: MC handle for the corresponding MC object opened ++ * @mc_io: Pointer to MC IO object assigned to this device or ++ * NULL if none. ++ * @obj_desc: MC description of the DPAA device ++ * @regions: pointer to array of MMIO region entries ++ * @irqs: pointer to array of pointers to interrupts allocated to this device ++ * @resource: generic resource associated with this MC object device, if any. ++ * @driver_override: Driver name to force a match ++ * ++ * Generic device object for MC object devices that are "attached" to a ++ * MC bus. 
++ * ++ * NOTES: ++ * - For a non-DPRC object its icid is the same as its parent DPRC's icid. ++ * - The SMMU notifier callback gets invoked after device_add() has been ++ * called for an MC object device, but before the device-specific probe ++ * callback gets called. ++ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC ++ * portals. For all other MC objects, their device drivers are responsible for ++ * allocating MC portals for them by calling fsl_mc_portal_allocate(). ++ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are ++ * treated as resources that can be allocated/deallocated from the ++ * corresponding resource pool in the object's parent DPRC, using the ++ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects ++ * are known as "allocatable" objects. For them, the corresponding ++ * fsl_mc_device's 'resource' points to the associated resource object. ++ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), ++ * 'resource' is NULL. ++ */ ++struct fsl_mc_device { ++ struct device dev; ++ uint64_t dma_mask; ++ uint16_t flags; ++ uint16_t icid; ++ uint16_t mc_handle; ++ struct fsl_mc_io *mc_io; ++ struct dprc_obj_desc obj_desc; ++ struct resource *regions; ++ struct fsl_mc_device_irq **irqs; ++ struct fsl_mc_resource *resource; ++ const char *driver_override; ++}; ++ ++#define to_fsl_mc_device(_dev) \ ++ container_of(_dev, struct fsl_mc_device, dev) ++ ++/* ++ * module_fsl_mc_driver() - Helper macro for drivers that don't do ++ * anything special in module init/exit. This eliminates a lot of ++ * boilerplate. Each module may only use this macro once, and ++ * calling it replaces module_init() and module_exit() ++ */ ++#define module_fsl_mc_driver(__fsl_mc_driver) \ ++ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ ++ fsl_mc_driver_unregister) ++ ++/* ++ * Macro to avoid include chaining to get THIS_MODULE ++ */ ++#define fsl_mc_driver_register(drv) \ ++ __fsl_mc_driver_register(drv, THIS_MODULE) ++ ++int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, ++ struct module *owner); ++ ++void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); ++ ++bool fsl_mc_interrupts_supported(void); ++ ++int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, ++ uint16_t mc_io_flags, ++ struct fsl_mc_io **new_mc_io); ++ ++void fsl_mc_portal_free(struct fsl_mc_io *mc_io); ++ ++int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); ++ ++int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_device **new_mc_adev); ++ ++void fsl_mc_object_free(struct fsl_mc_device *mc_adev); ++ ++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); ++ ++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); ++ ++extern struct bus_type fsl_mc_bus_type; ++ ++#endif /* _FSL_MC_H_ */ +diff --git a/drivers/staging/fsl-mc/include/net.h b/drivers/staging/fsl-mc/include/net.h +new file mode 100644 +index 0000000..7480f6a +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/net.h +@@ -0,0 +1,481 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_NET_H ++#define __FSL_NET_H ++ ++#define LAST_HDR_INDEX 0xFFFFFFFF ++ ++/*****************************************************************************/ ++/* Protocol fields */ ++/*****************************************************************************/ ++ ++/************************* Ethernet fields *********************************/ ++#define NH_FLD_ETH_DA (1) ++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) ++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) ++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) ++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) ++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) ++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) ++ ++#define NH_FLD_ETH_ADDR_SIZE 6 ++ ++/*************************** VLAN fields ***********************************/ ++#define NH_FLD_VLAN_VPRI (1) ++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) ++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) ++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) ++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) ++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) ++ ++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ ++ NH_FLD_VLAN_CFI | \ ++ NH_FLD_VLAN_VID) ++ ++/************************ IP (generic) fields ******************************/ ++#define NH_FLD_IP_VER (1) ++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) ++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) ++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) ++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) ++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) ++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) ++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) ++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) ++ ++#define NH_FLD_IP_PROTO_SIZE 1 ++ ++/***************************** IPV4 fields *********************************/ ++#define NH_FLD_IPV4_VER (1) ++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) ++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) ++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) ++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER 
<< 4) ++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) ++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) ++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) ++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) ++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) ++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) ++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) ++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) ++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) ++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) ++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) ++ ++#define NH_FLD_IPV4_ADDR_SIZE 4 ++#define NH_FLD_IPV4_PROTO_SIZE 1 ++ ++/***************************** IPV6 fields *********************************/ ++#define NH_FLD_IPV6_VER (1) ++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) ++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) ++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) ++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) ++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) ++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) ++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) ++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) ++ ++#define NH_FLD_IPV6_ADDR_SIZE 16 ++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 ++ ++/***************************** ICMP fields *********************************/ ++#define NH_FLD_ICMP_TYPE (1) ++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) ++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) ++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) ++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) ++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) ++ ++#define NH_FLD_ICMP_CODE_SIZE 1 ++#define NH_FLD_ICMP_TYPE_SIZE 1 ++ ++/***************************** IGMP fields *********************************/ ++#define NH_FLD_IGMP_VERSION (1) ++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) ++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) ++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) ++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) ++ ++/***************************** TCP fields **********************************/ ++#define NH_FLD_TCP_PORT_SRC (1) ++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) ++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) ++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) ++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) ++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) ++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) ++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) ++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) ++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) ++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) ++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) ++ ++#define NH_FLD_TCP_PORT_SIZE 2 ++ ++/***************************** UDP fields **********************************/ ++#define NH_FLD_UDP_PORT_SRC (1) ++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) ++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) ++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) ++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) ++ ++#define NH_FLD_UDP_PORT_SIZE 2 ++ ++/*************************** UDP-lite fields *******************************/ ++#define NH_FLD_UDP_LITE_PORT_SRC (1) ++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) ++#define NH_FLD_UDP_LITE_ALL_FIELDS \ ++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) ++ ++#define NH_FLD_UDP_LITE_PORT_SIZE 2 ++ 
++/*************************** UDP-encap-ESP fields **************************/ ++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) ++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) ++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) ++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) ++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) ++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) ++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ ++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) ++ ++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 ++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 ++ ++/***************************** SCTP fields *********************************/ ++#define NH_FLD_SCTP_PORT_SRC (1) ++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) ++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) ++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) ++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) ++ ++#define NH_FLD_SCTP_PORT_SIZE 2 ++ ++/***************************** DCCP fields *********************************/ ++#define NH_FLD_DCCP_PORT_SRC (1) ++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) ++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) ++ ++#define NH_FLD_DCCP_PORT_SIZE 2 ++ ++/***************************** IPHC fields *********************************/ ++#define NH_FLD_IPHC_CID (1) ++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) ++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) ++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) ++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) ++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) ++ ++/***************************** SCTP fields *********************************/ ++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) ++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) ++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) ++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) ++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) ++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) ++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) ++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) ++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) ++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) ++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ ++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) ++ ++/*************************** L2TPV2 fields *********************************/ ++#define NH_FLD_L2TPV2_TYPE_BIT (1) ++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) ++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) ++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) ++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) ++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) ++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) ++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) ++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) ++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) ++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) ++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) ++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) ++#define NH_FLD_L2TPV2_ALL_FIELDS \ ++ ((NH_FLD_L2TPV2_TYPE_BIT 
<< 13) - 1) ++ ++/*************************** L2TPV3 fields *********************************/ ++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) ++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) ++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) ++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) ++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) ++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) ++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) ++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) ++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) ++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ ++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) ++ ++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) ++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) ++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) ++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) ++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ ++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) ++ ++/**************************** PPP fields ***********************************/ ++#define NH_FLD_PPP_PID (1) ++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) ++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) ++ ++/************************** PPPoE fields ***********************************/ ++#define NH_FLD_PPPOE_VER (1) ++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) ++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) ++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) ++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) ++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) ++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) ++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) ++ ++/************************* PPP-Mux fields **********************************/ ++#define NH_FLD_PPPMUX_PID (1) ++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) ++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) ++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) ++ ++/*********************** PPP-Mux sub-frame fields **************************/ ++#define NH_FLD_PPPMUX_SUBFRM_PFF (1) ++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) ++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) ++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) ++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) ++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ ++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) ++ ++/*************************** LLC fields ************************************/ ++#define NH_FLD_LLC_DSAP (1) ++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) ++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) ++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) ++ ++/*************************** NLPID fields **********************************/ ++#define NH_FLD_NLPID_NLPID (1) ++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) ++ ++/*************************** SNAP fields ***********************************/ ++#define NH_FLD_SNAP_OUI (1) ++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) ++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) ++ ++/*************************** LLC SNAP fields *******************************/ ++#define NH_FLD_LLC_SNAP_TYPE (1) ++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) ++ ++#define NH_FLD_ARP_HTYPE (1) ++#define 
NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) ++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) ++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) ++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) ++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) ++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) ++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) ++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) ++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) ++ ++/*************************** RFC2684 fields ********************************/ ++#define NH_FLD_RFC2684_LLC (1) ++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) ++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) ++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) ++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) ++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) ++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) ++ ++/*************************** User defined fields ***************************/ ++#define NH_FLD_USER_DEFINED_SRCPORT (1) ++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) ++#define NH_FLD_USER_DEFINED_ALL_FIELDS \ ++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) ++ ++/*************************** Payload fields ********************************/ ++#define NH_FLD_PAYLOAD_BUFFER (1) ++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) ++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) ++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) ++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) ++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) ++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) ++ ++/*************************** GRE fields ************************************/ ++#define NH_FLD_GRE_TYPE (1) ++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) ++ ++/*************************** MINENCAP fields *******************************/ ++#define NH_FLD_MINENCAP_SRC_IP (1) ++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) ++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) ++#define NH_FLD_MINENCAP_ALL_FIELDS \ ++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) ++ ++/*************************** IPSEC AH fields *******************************/ ++#define NH_FLD_IPSEC_AH_SPI (1) ++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) ++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) ++ ++/*************************** IPSEC ESP fields ******************************/ ++#define NH_FLD_IPSEC_ESP_SPI (1) ++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) ++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) ++ ++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 ++ ++/*************************** MPLS fields ***********************************/ ++#define NH_FLD_MPLS_LABEL_STACK (1) ++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ ++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) ++ ++/*************************** MACSEC fields *********************************/ ++#define NH_FLD_MACSEC_SECTAG (1) ++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) ++ ++/*************************** GTP fields ************************************/ ++#define NH_FLD_GTP_TEID (1) ++ ++ ++/* Protocol options */ ++ ++/* Ethernet options */ ++#define NH_OPT_ETH_BROADCAST 1 ++#define NH_OPT_ETH_MULTICAST 2 ++#define NH_OPT_ETH_UNICAST 3 ++#define NH_OPT_ETH_BPDU 4 ++ ++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) ++/* also applicable for broadcast */ ++ ++/* VLAN options */ ++#define 
NH_OPT_VLAN_CFI 1 ++ ++/* IPV4 options */ ++#define NH_OPT_IPV4_UNICAST 1 ++#define NH_OPT_IPV4_MULTICAST 2 ++#define NH_OPT_IPV4_BROADCAST 3 ++#define NH_OPT_IPV4_OPTION 4 ++#define NH_OPT_IPV4_FRAG 5 ++#define NH_OPT_IPV4_INITIAL_FRAG 6 ++ ++/* IPV6 options */ ++#define NH_OPT_IPV6_UNICAST 1 ++#define NH_OPT_IPV6_MULTICAST 2 ++#define NH_OPT_IPV6_OPTION 3 ++#define NH_OPT_IPV6_FRAG 4 ++#define NH_OPT_IPV6_INITIAL_FRAG 5 ++ ++/* General IP options (may be used for any version) */ ++#define NH_OPT_IP_FRAG 1 ++#define NH_OPT_IP_INITIAL_FRAG 2 ++#define NH_OPT_IP_OPTION 3 ++ ++/* Minenc. options */ ++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 ++ ++/* GRE. options */ ++#define NH_OPT_GRE_ROUTING_PRESENT 1 ++ ++/* TCP options */ ++#define NH_OPT_TCP_OPTIONS 1 ++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 ++#define NH_OPT_TCP_CONTROL_LOW_BITS 3 ++ ++/* CAPWAP options */ ++#define NH_OPT_CAPWAP_DTLS 1 ++ ++enum net_prot { ++ NET_PROT_NONE = 0, ++ NET_PROT_PAYLOAD, ++ NET_PROT_ETH, ++ NET_PROT_VLAN, ++ NET_PROT_IPV4, ++ NET_PROT_IPV6, ++ NET_PROT_IP, ++ NET_PROT_TCP, ++ NET_PROT_UDP, ++ NET_PROT_UDP_LITE, ++ NET_PROT_IPHC, ++ NET_PROT_SCTP, ++ NET_PROT_SCTP_CHUNK_DATA, ++ NET_PROT_PPPOE, ++ NET_PROT_PPP, ++ NET_PROT_PPPMUX, ++ NET_PROT_PPPMUX_SUBFRM, ++ NET_PROT_L2TPV2, ++ NET_PROT_L2TPV3_CTRL, ++ NET_PROT_L2TPV3_SESS, ++ NET_PROT_LLC, ++ NET_PROT_LLC_SNAP, ++ NET_PROT_NLPID, ++ NET_PROT_SNAP, ++ NET_PROT_MPLS, ++ NET_PROT_IPSEC_AH, ++ NET_PROT_IPSEC_ESP, ++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ ++ NET_PROT_MACSEC, ++ NET_PROT_GRE, ++ NET_PROT_MINENCAP, ++ NET_PROT_DCCP, ++ NET_PROT_ICMP, ++ NET_PROT_IGMP, ++ NET_PROT_ARP, ++ NET_PROT_CAPWAP_DATA, ++ NET_PROT_CAPWAP_CTRL, ++ NET_PROT_RFC2684, ++ NET_PROT_ICMPV6, ++ NET_PROT_FCOE, ++ NET_PROT_FIP, ++ NET_PROT_ISCSI, ++ NET_PROT_GTP, ++ NET_PROT_USER_DEFINED_L2, ++ NET_PROT_USER_DEFINED_L3, ++ NET_PROT_USER_DEFINED_L4, ++ NET_PROT_USER_DEFINED_L5, ++ NET_PROT_USER_DEFINED_SHIM1, ++ NET_PROT_USER_DEFINED_SHIM2, ++ ++ NET_PROT_DUMMY_LAST ++}; ++ ++/*! IEEE8021.Q */ ++#define NH_IEEE8021Q_ETYPE 0x8100 ++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ ++ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ ++ (((uint32_t)(pcp & 0x07)) << 13) | \ ++ (((uint32_t)(dei & 0x01)) << 12) | \ ++ (((uint32_t)(vlan_id & 0xFFF)))) ++ ++#endif /* __FSL_NET_H */ +diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst +new file mode 100644 +index 0000000..909ed7a +--- /dev/null ++++ b/scripts/Makefile.dtbinst +@@ -0,0 +1,51 @@ ++# ========================================================================== ++# Installing dtb files ++# ++# Installs all dtb files listed in $(dtb-y) either in the ++# INSTALL_DTBS_PATH directory or the default location: ++# ++# $INSTALL_PATH/dtbs/$KERNELRELEASE ++# ++# Traverse through subdirectories listed in $(dts-dirs). 
++# ========================================================================== ++ ++src := $(obj) ++ ++PHONY := __dtbs_install ++__dtbs_install: ++ ++export dtbinst-root ?= $(obj) ++ ++include include/config/auto.conf ++include scripts/Kbuild.include ++include $(srctree)/$(obj)/Makefile ++ ++PHONY += __dtbs_install_prep ++__dtbs_install_prep: ++ifeq ("$(dtbinst-root)", "$(obj)") ++ $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi ++ $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi ++ $(Q)mkdir -p $(INSTALL_DTBS_PATH) ++endif ++ ++dtbinst-files := $(dtb-y) ++dtbinst-dirs := $(dts-dirs) ++ ++# Helper targets for Installing DTBs into the boot directory ++quiet_cmd_dtb_install = INSTALL $< ++ cmd_dtb_install = mkdir -p $(2); cp $< $(2) ++ ++install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj)) ++ ++$(dtbinst-files) $(dtbinst-dirs): | __dtbs_install_prep ++ ++$(dtbinst-files): %.dtb: $(obj)/%.dtb ++ $(call cmd,dtb_install,$(install-dir)) ++ ++$(dtbinst-dirs): ++ $(Q)$(MAKE) $(dtbinst)=$(obj)/$@ ++ ++PHONY += $(dtbinst-files) $(dtbinst-dirs) ++__dtbs_install: $(dtbinst-files) $(dtbinst-dirs) ++ ++.PHONY: $(PHONY) +-- +2.1.0.27.g96db324 + diff --git a/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch b/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch new file mode 100644 index 00000000..6a6e36f5 --- /dev/null +++ b/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch @@ -0,0 +1,11095 @@ +From fdf22b15468bed6aac4e52e83903d8e010fbe60b Mon Sep 17 00:00:00 2001 +From: Shengzhou Liu +Date: Fri, 23 Sep 2016 14:58:06 +0800 +Subject: [PATCH 2/2] Backport some kernel patches based on 3.18.25 + +Fixup dpaa2-eth, phy, pcie, gicv3, sdhc, i2c. +Verified on LS2080A/LS2088A RDB. 
+--- + Documentation/devicetree/bindings/arm/gic.txt | 8 +- + .../devicetree/bindings/clock/qoriq-clock.txt | 64 +- + Documentation/devicetree/bindings/i2c/i2c-imx.txt | 11 + + .../devicetree/bindings/i2c/i2c-mux-pca954x.txt | 3 + + .../bindings/memory-controllers/fsl/ifc.txt | 3 + + Documentation/devicetree/of_selftest.txt | 20 +- + Documentation/devicetree/todo.txt | 1 - + arch/arm64/Kconfig | 1 + + arch/arm64/include/asm/device.h | 1 + + arch/arm64/include/asm/dma-mapping.h | 16 +- + arch/powerpc/include/asm/mpc85xx.h | 94 -- + arch/powerpc/platforms/85xx/mpc85xx_mds.c | 2 +- + arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 2 +- + arch/powerpc/platforms/85xx/p1022_ds.c | 2 +- + arch/powerpc/platforms/85xx/p1022_rdk.c | 2 +- + arch/powerpc/platforms/85xx/smp.c | 2 +- + arch/powerpc/platforms/85xx/twr_p102x.c | 2 +- + arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 2 +- + arch/x86/pci/xen.c | 4 + + drivers/clk/Kconfig | 10 +- + drivers/clk/Makefile | 2 +- + drivers/clk/clk-qoriq.c | 1256 ++++++++++++++++++++ + drivers/cpufreq/Kconfig.powerpc | 2 +- + drivers/i2c/busses/Kconfig | 4 +- + drivers/i2c/busses/i2c-imx.c | 373 +++++- + drivers/i2c/muxes/i2c-mux-pca9541.c | 4 +- + drivers/i2c/muxes/i2c-mux-pca954x.c | 57 +- + drivers/iommu/fsl_pamu.c | 2 +- + drivers/iommu/io-pgtable-arm.c | 15 +- + drivers/irqchip/Kconfig | 8 + + drivers/irqchip/Makefile | 1 + + drivers/irqchip/irq-gic-common.c | 18 +- + drivers/irqchip/irq-gic-common.h | 2 +- + drivers/irqchip/irq-gic-v2m.c | 333 ++++++ + drivers/irqchip/irq-gic-v3-its.c | 6 +- + drivers/irqchip/irq-gic-v3.c | 66 +- + drivers/irqchip/irq-gic.c | 90 +- + drivers/irqchip/irq-hip04.c | 9 +- + drivers/memory/Kconfig | 2 +- + drivers/memory/fsl_ifc.c | 77 +- + drivers/mfd/vexpress-sysreg.c | 2 +- + drivers/mmc/card/block.c | 4 + + drivers/mmc/host/Kconfig | 10 +- + drivers/mmc/host/sdhci-esdhc.h | 9 +- + drivers/mmc/host/sdhci-of-esdhc.c | 680 +++++++++-- + drivers/mmc/host/sdhci.c | 250 ++-- + drivers/mmc/host/sdhci.h | 42 + + drivers/mtd/nand/Kconfig | 2 +- + drivers/mtd/nand/fsl_ifc_nand.c | 301 ++--- + drivers/net/ethernet/freescale/gianfar.c | 6 +- + drivers/net/phy/Kconfig | 14 +- + drivers/net/phy/Makefile | 4 +- + drivers/net/phy/at803x.c | 4 + + drivers/net/phy/fixed.c | 336 ------ + drivers/net/phy/fixed_phy.c | 370 ++++++ + drivers/net/phy/marvell.c | 11 + + drivers/net/phy/mdio_bus.c | 34 +- + drivers/net/phy/phy.c | 19 +- + drivers/net/phy/phy_device.c | 90 +- + drivers/net/phy/realtek.c | 82 +- + drivers/of/base.c | 53 +- + drivers/of/dynamic.c | 13 - + drivers/of/fdt.c | 30 +- + drivers/of/pdt.c | 27 +- + drivers/of/selftest.c | 71 +- + drivers/pci/Makefile | 1 + + drivers/pci/access.c | 87 ++ + drivers/pci/host/Kconfig | 2 +- + drivers/pci/host/pci-layerscape.c | 86 +- + drivers/pci/host/pcie-designware.c | 14 + + drivers/pci/host/pcie-designware.h | 1 + + drivers/pci/msi.c | 5 + + drivers/pci/pci.c | 1 + + drivers/pci/pcie/portdrv_core.c | 31 +- + drivers/pci/probe.c | 1 + + drivers/pci/remove.c | 2 + + drivers/pci/setup-bus.c | 1 + + drivers/pci/setup-irq.c | 1 + + drivers/soc/Kconfig | 13 + + drivers/soc/Makefile | 1 + + drivers/soc/fsl/Kconfig | 6 + + drivers/soc/fsl/Kconfig.arm | 25 + + drivers/soc/fsl/Makefile | 6 + + drivers/soc/fsl/guts.c | 123 ++ + drivers/soc/fsl/ls1/Kconfig | 11 + + drivers/soc/fsl/ls1/Makefile | 1 + + drivers/soc/fsl/ls1/ftm_alarm.c | 274 +++++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 273 +++-- + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 48 +- + 
drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 553 ++++----- + drivers/staging/fsl-dpaa2/mac/mac.c | 4 +- + drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +- + drivers/staging/fsl-mc/include/mc-private.h | 2 +- + drivers/usb/host/xhci.c | 6 +- + include/linux/fsl/guts.h | 99 +- + include/linux/fsl/svr.h | 95 ++ + include/linux/fsl_ifc.h | 116 +- + include/linux/interrupt.h | 14 + + include/linux/iommu.h | 1 + + include/linux/irq.h | 8 + + include/linux/irqchip/arm-gic-v3.h | 12 + + include/linux/irqchip/arm-gic.h | 2 + + include/linux/irqdomain.h | 1 + + include/linux/mmc/sdhci.h | 16 +- + include/linux/of.h | 11 +- + include/linux/of_pdt.h | 3 +- + include/linux/pci.h | 11 + + include/linux/phy.h | 1 + + include/linux/phy_fixed.h | 11 +- + kernel/irq/chip.c | 58 +- + kernel/irq/manage.c | 91 ++ + kernel/irq/msi.c | 13 +- + sound/soc/fsl/mpc8610_hpcd.c | 2 +- + sound/soc/fsl/p1022_ds.c | 2 +- + sound/soc/fsl/p1022_rdk.c | 2 +- + 115 files changed, 5570 insertions(+), 1621 deletions(-) + delete mode 100644 arch/powerpc/include/asm/mpc85xx.h + create mode 100644 drivers/clk/clk-qoriq.c + create mode 100644 drivers/irqchip/irq-gic-v2m.c + delete mode 100644 drivers/net/phy/fixed.c + create mode 100644 drivers/net/phy/fixed_phy.c + create mode 100644 drivers/soc/fsl/Kconfig + create mode 100644 drivers/soc/fsl/Kconfig.arm + create mode 100644 drivers/soc/fsl/Makefile + create mode 100644 drivers/soc/fsl/guts.c + create mode 100644 drivers/soc/fsl/ls1/Kconfig + create mode 100644 drivers/soc/fsl/ls1/Makefile + create mode 100644 drivers/soc/fsl/ls1/ftm_alarm.c + create mode 100644 include/linux/fsl/svr.h + +diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt +index c7d2fa1..e87d3d7 100644 +--- a/Documentation/devicetree/bindings/arm/gic.txt ++++ b/Documentation/devicetree/bindings/arm/gic.txt +@@ -31,12 +31,16 @@ Main node required properties: + The 3rd cell is the flags, encoded as follows: + bits[3:0] trigger type and level flags. + 1 = low-to-high edge triggered +- 2 = high-to-low edge triggered ++ 2 = high-to-low edge triggered (invalid for SPIs) + 4 = active high level-sensitive +- 8 = active low level-sensitive ++ 8 = active low level-sensitive (invalid for SPIs). + bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of + the 8 possible cpus attached to the GIC. A bit set to '1' indicated + the interrupt is wired to that CPU. Only valid for PPI interrupts. ++ Also note that the configurability of PPI interrupts is IMPLEMENTATION ++ DEFINED and as such not guaranteed to be present (most SoC available ++ in 2014 seem to ignore the setting of this flag and use the hardware ++ default value). + + - reg : Specifies base physical address(s) and size of the GIC registers. The + first region is the GIC distributor register base and size. The 2nd region is +diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt +index 5666812..128fc72 100644 +--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt ++++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt +@@ -1,6 +1,6 @@ +-* Clock Block on Freescale CoreNet Platforms ++* Clock Block on Freescale QorIQ Platforms + +-Freescale CoreNet chips take primary clocking input from the external ++Freescale QorIQ chips take primary clocking input from the external + SYSCLK signal. 
The SYSCLK input (frequency) is multiplied using + multiple phase locked loops (PLL) to create a variety of frequencies + which can then be passed to a variety of internal logic, including +@@ -13,14 +13,16 @@ which the chip complies. + Chassis Version Example Chips + --------------- ------------- + 1.0 p4080, p5020, p5040 +-2.0 t4240, b4860, t1040 ++2.0 t4240, b4860 + + 1. Clock Block Binding + + Required properties: +-- compatible: Should contain a specific clock block compatible string +- and a single chassis clock compatible string. +- Clock block strings include, but not limited to, one of the: ++- compatible: Should contain a chip-specific clock block compatible ++ string and (if applicable) may contain a chassis-version clock ++ compatible string. ++ ++ Chip-specific strings are of the form "fsl,-clockgen", such as: + * "fsl,p2041-clockgen" + * "fsl,p3041-clockgen" + * "fsl,p4080-clockgen" +@@ -29,15 +31,15 @@ Required properties: + * "fsl,t4240-clockgen" + * "fsl,b4420-clockgen" + * "fsl,b4860-clockgen" +- Chassis clock strings include: ++ * "fsl,ls1021a-clockgen" ++ Chassis-version clock strings include: + * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks + * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks + - reg: Describes the address of the device's resources within the + address space defined by its parent bus, and resource zero + represents the clock register set +-- clock-frequency: Input system clock frequency + +-Recommended properties: ++Optional properties: + - ranges: Allows valid translation between child's address space and + parent's. Must be present if the device has sub-nodes. + - #address-cells: Specifies the number of cells used to represent +@@ -46,8 +48,46 @@ Recommended properties: + - #size-cells: Specifies the number of cells used to represent + the size of an address. Must be present if the device has + sub-nodes and set to 1 if present ++- clock-frequency: Input system clock frequency (SYSCLK) ++- clocks: If clock-frequency is not specified, sysclk may be provided ++ as an input clock. Either clock-frequency or clocks must be ++ provided. ++ ++2. Clock Provider ++ ++The clockgen node should act as a clock provider, though in older device ++trees the children of the clockgen node are the clock providers. ++ ++When the clockgen node is a clock provider, #clock-cells = <2>. ++The first cell of the clock specifier is the clock type, and the ++second cell is the clock index for the specified type. ++ ++ Type# Name Index Cell ++ 0 sysclk must be 0 ++ 1 cmux index (n in CLKCnCSR) ++ 2 hwaccel index (n in CLKCGnHWACSR) ++ 3 fman 0 for fm1, 1 for fm2 ++ 4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4 ++ ++3. Example ++ ++ clockgen: global-utilities@e1000 { ++ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; ++ clock-frequency = <133333333>; ++ reg = <0xe1000 0x1000>; ++ #clock-cells = <2>; ++ }; ++ ++ fman@400000 { ++ ... ++ clocks = <&clockgen 3 0>; ++ ... ++ }; ++} ++4. Legacy Child Nodes + +-2. Clock Provider/Consumer Binding ++NOTE: These nodes are deprecated. Kernels should continue to support ++device trees with these nodes, but new device trees should not use them. + + Most of the bindings are from the common clock binding[1]. + [1] Documentation/devicetree/bindings/clock/clock-bindings.txt +@@ -79,7 +119,7 @@ Recommended properties: + - reg: Should be the offset and length of clock block base address. + The length should be 4. 
+ +-Example for clock block and clock provider: ++Legacy Example: + / { + clockgen: global-utilities@e1000 { + compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; +@@ -131,7 +171,7 @@ Example for clock block and clock provider: + }; + } + +-Example for clock consumer: ++Example for legacy clock consumer: + + / { + cpu0: PowerPC,e5500@0 { +diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt +index 4a8513e..52d37fd 100644 +--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt ++++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt +@@ -11,6 +11,8 @@ Required properties: + Optional properties: + - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz. + The absence of the propoerty indicates the default frequency 100 kHz. ++- dmas: A list of two dma specifiers, one for each entry in dma-names. ++- dma-names: should contain "tx" and "rx". + + Examples: + +@@ -26,3 +28,12 @@ i2c@70038000 { /* HS-I2C on i.MX51 */ + interrupts = <64>; + clock-frequency = <400000>; + }; ++ ++i2c0: i2c@40066000 { /* i2c0 on vf610 */ ++ compatible = "fsl,vf610-i2c"; ++ reg = <0x40066000 0x1000>; ++ interrupts =<0 71 0x04>; ++ dmas = <&edma0 0 50>, ++ <&edma0 0 51>; ++ dma-names = "rx","tx"; ++}; +diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +index 34a3fb6..cf53d5f 100644 +--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt ++++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +@@ -16,6 +16,9 @@ Required Properties: + Optional Properties: + + - reset-gpios: Reference to the GPIO connected to the reset input. ++ - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all ++ children in idle state. This is necessary for example, if there are several ++ multiplexers on the bus and the devices behind them use same I2C addresses. + + + Example: +diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt +index d5e3704..89427b0 100644 +--- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt ++++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt +@@ -18,6 +18,8 @@ Properties: + interrupt (NAND_EVTER_STAT). If there is only one, + that interrupt reports both types of event. + ++- little-endian : If this property is absent, the big-endian mode will ++ be in use as default for registers. + + - ranges : Each range corresponds to a single chipselect, and covers + the entire access window as configured. +@@ -34,6 +36,7 @@ Example: + #size-cells = <1>; + reg = <0x0 0xffe1e000 0 0x2000>; + interrupts = <16 2 19 2>; ++ little-endian; + + /* NOR, NAND Flashes and CPLD on board */ + ranges = <0x0 0x0 0x0 0xee000000 0x02000000 +diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt +index 1e3d5c9..57a808b 100644 +--- a/Documentation/devicetree/of_selftest.txt ++++ b/Documentation/devicetree/of_selftest.txt +@@ -63,7 +63,6 @@ struct device_node { + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; +- struct device_node *allnext; /* next in list of all nodes */ + ... + }; + +@@ -99,12 +98,6 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null + Figure 1: Generic structure of un-flattened device tree + + +-*allnext: it is used to link all the nodes of DT into a list. 
So, for the +- above tree the list would be as follows: +- +-root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2-> +-child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null +- + Before executing OF selftest, it is required to attach the test data to + machine's device tree (if present). So, when selftest_data_add() is called, + at first it reads the flattened device tree data linked into the kernel image +@@ -131,11 +124,6 @@ root ('/') + test-child01 null null null + + +-allnext list: +- +-root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2 +-->test-sibling3->null +- + Figure 2: Example test data tree to be attached to live tree. + + According to the scenario above, the live tree is already present so it isn't +@@ -204,8 +192,6 @@ detached and then moving up the parent nodes are removed, and eventually the + whole tree). selftest_data_remove() calls detach_node_and_children() that uses + of_detach_node() to detach the nodes from the live device tree. + +-To detach a node, of_detach_node() first updates all_next linked list, by +-attaching the previous node's allnext to current node's allnext pointer. And +-then, it either updates the child pointer of given node's parent to its +-sibling or attaches the previous sibling to the given node's sibling, as +-appropriate. That is it :) ++To detach a node, of_detach_node() either updates the child pointer of given ++node's parent to its sibling or attaches the previous sibling to the given ++node's sibling, as appropriate. That is it :) +diff --git a/Documentation/devicetree/todo.txt b/Documentation/devicetree/todo.txt +index c3cf065..b5139d1 100644 +--- a/Documentation/devicetree/todo.txt ++++ b/Documentation/devicetree/todo.txt +@@ -2,7 +2,6 @@ Todo list for devicetree: + + === General structure === + - Switch from custom lists to (h)list_head for nodes and properties structure +-- Remove of_allnodes list and iterate using list of child nodes alone + + === CONFIG_OF_DYNAMIC === + - Switch to RCU for tree updates and get rid of global spinlock +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 08e1287..329f5f4 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -13,6 +13,7 @@ config ARM64 + select ARM_ARCH_TIMER + select ARM_GIC + select AUDIT_ARCH_COMPAT_GENERIC ++ select ARM_GIC_V2M if PCI_MSI + select ARM_GIC_V3 + select ARM_GIC_V3_ITS if PCI_MSI + select BUILDTIME_EXTABLE_SORT +diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h +index cf98b36..243ef25 100644 +--- a/arch/arm64/include/asm/device.h ++++ b/arch/arm64/include/asm/device.h +@@ -21,6 +21,7 @@ struct dev_archdata { + #ifdef CONFIG_IOMMU_API + void *iommu; /* private IOMMU data */ + #endif ++ bool dma_coherent; + }; + + struct pdev_archdata { +diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h +index adeae3f..9ce3e68 100644 +--- a/arch/arm64/include/asm/dma-mapping.h ++++ b/arch/arm64/include/asm/dma-mapping.h +@@ -52,12 +52,20 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) + dev->archdata.dma_ops = ops; + } + +-static inline int set_arch_dma_coherent_ops(struct device *dev) ++static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, ++ struct iommu_ops *iommu, bool coherent) + { +- set_dma_ops(dev, &coherent_swiotlb_dma_ops); +- return 0; ++ dev->archdata.dma_coherent = coherent; ++ if (coherent) ++ set_dma_ops(dev, &coherent_swiotlb_dma_ops); ++} ++#define 
arch_setup_dma_ops arch_setup_dma_ops ++ ++/* do not use this function in a driver */ ++static inline bool is_device_dma_coherent(struct device *dev) ++{ ++ return dev->archdata.dma_coherent; + } +-#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops + + #include + +diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h +deleted file mode 100644 +index 3bef74a..0000000 +--- a/arch/powerpc/include/asm/mpc85xx.h ++++ /dev/null +@@ -1,94 +0,0 @@ +-/* +- * MPC85xx cpu type detection +- * +- * Copyright 2011-2012 Freescale Semiconductor, Inc. +- * +- * This is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- */ +- +-#ifndef __ASM_PPC_MPC85XX_H +-#define __ASM_PPC_MPC85XX_H +- +-#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ +-#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ +-#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ +- +-/* Some parts define SVR[0:23] as the SOC version */ +-#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ +- +-#define SVR_8533 0x803400 +-#define SVR_8535 0x803701 +-#define SVR_8536 0x803700 +-#define SVR_8540 0x803000 +-#define SVR_8541 0x807200 +-#define SVR_8543 0x803200 +-#define SVR_8544 0x803401 +-#define SVR_8545 0x803102 +-#define SVR_8547 0x803101 +-#define SVR_8548 0x803100 +-#define SVR_8555 0x807100 +-#define SVR_8560 0x807000 +-#define SVR_8567 0x807501 +-#define SVR_8568 0x807500 +-#define SVR_8569 0x808000 +-#define SVR_8572 0x80E000 +-#define SVR_P1010 0x80F100 +-#define SVR_P1011 0x80E500 +-#define SVR_P1012 0x80E501 +-#define SVR_P1013 0x80E700 +-#define SVR_P1014 0x80F101 +-#define SVR_P1017 0x80F700 +-#define SVR_P1020 0x80E400 +-#define SVR_P1021 0x80E401 +-#define SVR_P1022 0x80E600 +-#define SVR_P1023 0x80F600 +-#define SVR_P1024 0x80E402 +-#define SVR_P1025 0x80E403 +-#define SVR_P2010 0x80E300 +-#define SVR_P2020 0x80E200 +-#define SVR_P2040 0x821000 +-#define SVR_P2041 0x821001 +-#define SVR_P3041 0x821103 +-#define SVR_P4040 0x820100 +-#define SVR_P4080 0x820000 +-#define SVR_P5010 0x822100 +-#define SVR_P5020 0x822000 +-#define SVR_P5021 0X820500 +-#define SVR_P5040 0x820400 +-#define SVR_T4240 0x824000 +-#define SVR_T4120 0x824001 +-#define SVR_T4160 0x824100 +-#define SVR_C291 0x850000 +-#define SVR_C292 0x850020 +-#define SVR_C293 0x850030 +-#define SVR_B4860 0X868000 +-#define SVR_G4860 0x868001 +-#define SVR_G4060 0x868003 +-#define SVR_B4440 0x868100 +-#define SVR_G4440 0x868101 +-#define SVR_B4420 0x868102 +-#define SVR_B4220 0x868103 +-#define SVR_T1040 0x852000 +-#define SVR_T1041 0x852001 +-#define SVR_T1042 0x852002 +-#define SVR_T1020 0x852100 +-#define SVR_T1021 0x852101 +-#define SVR_T1022 0x852102 +-#define SVR_T2080 0x853000 +-#define SVR_T2081 0x853100 +- +-#define SVR_8610 0x80A000 +-#define SVR_8641 0x809000 +-#define SVR_8641D 0x809001 +- +-#define SVR_9130 0x860001 +-#define SVR_9131 0x860000 +-#define SVR_9132 0x861000 +-#define SVR_9232 0x861400 +- +-#define SVR_Unknown 0xFFFFFF +- +-#endif +diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c +index a392e94..f0be439 100644 +--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c ++++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -51,7 +52,6 
@@ + #include + #include + #include +-#include + #include "smp.h" + + #include "mpc85xx.h" +diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +index e358bed..50dcc00 100644 +--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c ++++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -27,7 +28,6 @@ + #include + #include + #include +-#include + + #include + #include +diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c +index 6ac986d..371df82 100644 +--- a/arch/powerpc/platforms/85xx/p1022_ds.c ++++ b/arch/powerpc/platforms/85xx/p1022_ds.c +@@ -16,6 +16,7 @@ + * kind, whether express or implied. + */ + ++#include + #include + #include + #include +@@ -25,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include "smp.h" + +diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c +index 7a180f0..4f8fc5f 100644 +--- a/arch/powerpc/platforms/85xx/p1022_rdk.c ++++ b/arch/powerpc/platforms/85xx/p1022_rdk.c +@@ -12,6 +12,7 @@ + * kind, whether express or implied. + */ + ++#include + #include + #include + #include +@@ -21,7 +22,6 @@ + #include + #include + #include +-#include + #include "smp.h" + + #include "mpc85xx.h" +diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c +index d7c1e69..3956455 100644 +--- a/arch/powerpc/platforms/85xx/smp.c ++++ b/arch/powerpc/platforms/85xx/smp.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -26,7 +27,6 @@ + #include + #include + #include +-#include + #include + #include + +diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c +index 1eadb6d..2799120 100644 +--- a/arch/powerpc/platforms/85xx/twr_p102x.c ++++ b/arch/powerpc/platforms/85xx/twr_p102x.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -23,7 +24,6 @@ + #include + #include + #include +-#include + + #include + #include +diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +index 55413a5..437a9c3 100644 +--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c ++++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -38,7 +39,6 @@ + #include + #include + #include +-#include + + #include "mpc86xx.h" + +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c +index 4f6844b..878fb8e 100644 +--- a/arch/x86/pci/xen.c ++++ b/arch/x86/pci/xen.c +@@ -296,12 +296,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + map_irq.entry_nr = nvec; + } else if (type == PCI_CAP_ID_MSIX) { + int pos; ++ unsigned long flags; + u32 table_offset, bir; + + pos = dev->msix_cap; + pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, + &table_offset); + bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); ++ flags = pci_resource_flags(dev, bir); ++ if (!flags || (flags & IORESOURCE_UNSET)) ++ return -EINVAL; + + map_irq.table_base = pci_resource_start(dev, bir); + map_irq.entry_nr = msidesc->msi_attrib.entry_nr; +diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig +index 455fd17..38c8814 100644 +--- a/drivers/clk/Kconfig ++++ b/drivers/clk/Kconfig +@@ -101,12 +101,12 @@ config COMMON_CLK_AXI_CLKGEN + Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx + FPGAs. 
It is commonly used in Analog Devices' reference designs. + +-config CLK_PPC_CORENET +- bool "Clock driver for PowerPC corenet platforms" +- depends on PPC_E500MC && OF ++config CLK_QORIQ ++ bool "Clock driver for Freescale QorIQ platforms" ++ depends on (PPC_E500MC || ARM || ARM64) && OF + ---help--- +- This adds the clock driver support for Freescale PowerPC corenet +- platforms using common clock framework. ++ This adds the clock driver support for Freescale QorIQ platforms ++ using common clock framework. + + config COMMON_CLK_XGENE + bool "Clock driver for APM XGene SoC" +diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile +index d5fba5b..4ff94cd 100644 +--- a/drivers/clk/Makefile ++++ b/drivers/clk/Makefile +@@ -30,7 +30,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o + obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o + obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o + obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o +-obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o ++obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o + obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o + obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o + obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o +diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c +new file mode 100644 +index 0000000..74051c9 +--- /dev/null ++++ b/drivers/clk/clk-qoriq.c +@@ -0,0 +1,1256 @@ ++/* ++ * Copyright 2013 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * clock driver for Freescale QorIQ SoCs. ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PLL_DIV1 0 ++#define PLL_DIV2 1 ++#define PLL_DIV3 2 ++#define PLL_DIV4 3 ++ ++#define PLATFORM_PLL 0 ++#define CGA_PLL1 1 ++#define CGA_PLL2 2 ++#define CGA_PLL3 3 ++#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */ ++#define CGB_PLL1 4 ++#define CGB_PLL2 5 ++ ++struct clockgen_pll_div { ++ struct clk *clk; ++ char name[32]; ++}; ++ ++struct clockgen_pll { ++ struct clockgen_pll_div div[4]; ++}; ++ ++#define CLKSEL_VALID 1 ++#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */ ++ ++struct clockgen_sourceinfo { ++ u32 flags; /* CLKSEL_xxx */ ++ int pll; /* CGx_PLLn */ ++ int div; /* PLL_DIVn */ ++}; ++ ++#define NUM_MUX_PARENTS 16 ++ ++struct clockgen_muxinfo { ++ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS]; ++}; ++ ++#define NUM_HWACCEL 5 ++#define NUM_CMUX 8 ++ ++struct clockgen; ++ ++/* ++ * cmux freq must be >= platform pll. 
++ * If not set, cmux freq must be >= platform pll/2 ++ */ ++#define CG_CMUX_GE_PLAT 1 ++ ++#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */ ++#define CG_VER3 4 /* version 3 cg: reg layout different */ ++#define CG_LITTLE_ENDIAN 8 ++ ++struct clockgen_chipinfo { ++ const char *compat, *guts_compat; ++ const struct clockgen_muxinfo *cmux_groups[2]; ++ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL]; ++ void (*init_periph)(struct clockgen *cg); ++ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */ ++ u32 pll_mask; /* 1 << n bit set if PLL n is valid */ ++ u32 flags; /* CG_xxx */ ++}; ++ ++struct clockgen { ++ struct device_node *node; ++ void __iomem *regs; ++ struct clockgen_chipinfo info; /* mutable copy */ ++ struct clk *sysclk; ++ struct clockgen_pll pll[6]; ++ struct clk *cmux[NUM_CMUX]; ++ struct clk *hwaccel[NUM_HWACCEL]; ++ struct clk *fman[2]; ++ struct ccsr_guts __iomem *guts; ++}; ++ ++static struct clockgen clockgen; ++ ++static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg) ++{ ++ if (cg->info.flags & CG_LITTLE_ENDIAN) ++ iowrite32(val, reg); ++ else ++ iowrite32be(val, reg); ++} ++ ++static u32 cg_in(struct clockgen *cg, u32 __iomem *reg) ++{ ++ u32 val; ++ ++ if (cg->info.flags & CG_LITTLE_ENDIAN) ++ val = ioread32(reg); ++ else ++ val = ioread32be(reg); ++ ++ return val; ++} ++ ++static const struct clockgen_muxinfo p2041_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p2041_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [4] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5020_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5020_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5040_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5040_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p4080_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p4080_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, ++ [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, ++ [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, ++ [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 }, ++ [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo 
t1023_cmux = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo t1040_cmux = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++ ++static const struct clockgen_muxinfo clockgen2_cmux_cga = { ++ { ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo clockgen2_cmux_cga12 = { ++ { ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo clockgen2_cmux_cgb = { ++ { ++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t1023_hwa1 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t1023_hwa2 = { ++ { ++ [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t2080_hwa1 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t2080_hwa2 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t4240_hwa1 = { ++ { ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t4240_hwa4 = { ++ { ++ [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, ++ [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, ++ [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, ++ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t4240_hwa5 = { ++ { ++ [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, ++ [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 }, ++ [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, ++ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ [6] = { CLKSEL_VALID, 
CGB_PLL1, PLL_DIV2 }, ++ [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, ++ }, ++}; ++ ++#define RCWSR7_FM1_CLK_SEL 0x40000000 ++#define RCWSR7_FM2_CLK_SEL 0x20000000 ++#define RCWSR7_HWA_ASYNC_DIV 0x04000000 ++ ++static void __init p2041_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init p4080_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++ ++ if (reg & RCWSR7_FM2_CLK_SEL) ++ cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; ++ else ++ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init p5020_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ int div = PLL_DIV2; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ if (reg & RCWSR7_HWA_ASYNC_DIV) ++ div = PLL_DIV4; ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init p5040_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ int div = PLL_DIV2; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ if (reg & RCWSR7_HWA_ASYNC_DIV) ++ div = PLL_DIV4; ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++ ++ if (reg & RCWSR7_FM2_CLK_SEL) ++ cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk; ++ else ++ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init t1023_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->hwaccel[1]; ++} ++ ++static void __init t1040_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk; ++} ++ ++static void __init t2080_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->hwaccel[0]; ++} ++ ++static void __init t4240_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->hwaccel[3]; ++ cg->fman[1] = cg->hwaccel[4]; ++} ++ ++static const struct clockgen_chipinfo chipinfo[] = { ++ { ++ .compat = "fsl,b4420-clockgen", ++ .guts_compat = "fsl,b4860-device-config", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .hwaccel = { ++ &t2080_hwa1 ++ }, ++ .cmux_to_group = { ++ 0, 1, 1, 1, -1 ++ }, ++ .pll_mask = 0x3f, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,b4860-clockgen", ++ .guts_compat = "fsl,b4860-device-config", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .hwaccel = { ++ &t2080_hwa1 ++ }, ++ .cmux_to_group = { ++ 0, 1, 1, 1, -1 ++ }, ++ .pll_mask = 0x3f, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,ls1021a-clockgen", ++ .cmux_groups = { ++ &t1023_cmux ++ }, ++ .cmux_to_group = { ++ 0, -1 ++ }, ++ .pll_mask = 0x03, ++ }, ++ { ++ .compat = "fsl,ls2080a-clockgen", ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x37, ++ .flags = CG_VER3 | CG_LITTLE_ENDIAN, ++ }, ++ { ++ .compat = "fsl,ls2088a-clockgen", ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x37, ++ .flags = 
CG_VER3 | CG_LITTLE_ENDIAN, ++ }, ++ { ++ .compat = "fsl,p2041-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p2041_init_periph, ++ .cmux_groups = { ++ &p2041_cmux_grp1, &p2041_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x07, ++ }, ++ { ++ .compat = "fsl,p3041-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p2041_init_periph, ++ .cmux_groups = { ++ &p2041_cmux_grp1, &p2041_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x07, ++ }, ++ { ++ .compat = "fsl,p4080-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p4080_init_periph, ++ .cmux_groups = { ++ &p4080_cmux_grp1, &p4080_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 0, 0, 1, 1, 1, 1 ++ }, ++ .pll_mask = 0x1f, ++ }, ++ { ++ .compat = "fsl,p5020-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p5020_init_periph, ++ .cmux_groups = { ++ &p2041_cmux_grp1, &p2041_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 1, -1 ++ }, ++ .pll_mask = 0x07, ++ }, ++ { ++ .compat = "fsl,p5040-clockgen", ++ .guts_compat = "fsl,p5040-device-config", ++ .init_periph = p5040_init_periph, ++ .cmux_groups = { ++ &p5040_cmux_grp1, &p5040_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x0f, ++ }, ++ { ++ .compat = "fsl,t1023-clockgen", ++ .guts_compat = "fsl,t1023-device-config", ++ .init_periph = t1023_init_periph, ++ .cmux_groups = { ++ &t1023_cmux ++ }, ++ .hwaccel = { ++ &t1023_hwa1, &t1023_hwa2 ++ }, ++ .cmux_to_group = { ++ 0, 0, -1 ++ }, ++ .pll_mask = 0x03, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,t1040-clockgen", ++ .guts_compat = "fsl,t1040-device-config", ++ .init_periph = t1040_init_periph, ++ .cmux_groups = { ++ &t1040_cmux ++ }, ++ .cmux_to_group = { ++ 0, 0, 0, 0, -1 ++ }, ++ .pll_mask = 0x07, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,t2080-clockgen", ++ .guts_compat = "fsl,t2080-device-config", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga12 ++ }, ++ .hwaccel = { ++ &t2080_hwa1, &t2080_hwa2 ++ }, ++ .cmux_to_group = { ++ 0, -1 ++ }, ++ .pll_mask = 0x07, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,t4240-clockgen", ++ .guts_compat = "fsl,t4240-device-config", ++ .init_periph = t4240_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga, &clockgen2_cmux_cgb ++ }, ++ .hwaccel = { ++ &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, -1 ++ }, ++ .pll_mask = 0x3f, ++ .flags = CG_PLL_8BIT, ++ }, ++ {}, ++}; ++ ++struct mux_hwclock { ++ struct clk_hw hw; ++ struct clockgen *cg; ++ const struct clockgen_muxinfo *info; ++ u32 __iomem *reg; ++ u8 parent_to_clksel[NUM_MUX_PARENTS]; ++ s8 clksel_to_parent[NUM_MUX_PARENTS]; ++ int num_parents; ++}; ++ ++#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw) ++#define CLKSEL_MASK 0x78000000 ++#define CLKSEL_SHIFT 27 ++ ++static int mux_set_parent(struct clk_hw *hw, u8 idx) ++{ ++ struct mux_hwclock *hwc = to_mux_hwclock(hw); ++ u32 clksel; ++ ++ if (idx >= hwc->num_parents) ++ return -EINVAL; ++ ++ clksel = hwc->parent_to_clksel[idx]; ++ cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg); ++ ++ return 0; ++} ++ ++static u8 mux_get_parent(struct clk_hw *hw) ++{ ++ struct mux_hwclock *hwc = to_mux_hwclock(hw); ++ u32 clksel; ++ s8 ret; ++ ++ clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; ++ ++ ret = hwc->clksel_to_parent[clksel]; ++ if (ret < 0) { ++ 
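++		/*
++		 * A negative entry means the clksel value read back from the
++		 * hardware was never mapped to a parent in create_mux_common()
++		 * (either the option is not marked CLKSEL_VALID or it was
++		 * filtered out by the rate checks), so report it and fall
++		 * back to parent 0.
++		 */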
pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg); ++ return 0; ++ } ++ ++ return ret; ++} ++ ++static const struct clk_ops cmux_ops = { ++ .get_parent = mux_get_parent, ++ .set_parent = mux_set_parent, ++}; ++ ++/* ++ * Don't allow setting for now, as the clock options haven't been ++ * sanitized for additional restrictions. ++ */ ++static const struct clk_ops hwaccel_ops = { ++ .get_parent = mux_get_parent, ++}; ++ ++static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg, ++ struct mux_hwclock *hwc, ++ int idx) ++{ ++ int pll, div; ++ ++ if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID)) ++ return NULL; ++ ++ pll = hwc->info->clksel[idx].pll; ++ div = hwc->info->clksel[idx].div; ++ ++ return &cg->pll[pll].div[div]; ++} ++ ++static struct clk * __init create_mux_common(struct clockgen *cg, ++ struct mux_hwclock *hwc, ++ const struct clk_ops *ops, ++ unsigned long min_rate, ++ unsigned long pct80_rate, ++ const char *fmt, int idx) ++{ ++ struct clk_init_data init = {}; ++ struct clk *clk; ++ const struct clockgen_pll_div *div; ++ const char *parent_names[NUM_MUX_PARENTS]; ++ char name[32]; ++ int i, j; ++ ++ snprintf(name, sizeof(name), fmt, idx); ++ ++ for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) { ++ unsigned long rate; ++ ++ hwc->clksel_to_parent[i] = -1; ++ ++ div = get_pll_div(cg, hwc, i); ++ if (!div) ++ continue; ++ ++ rate = clk_get_rate(div->clk); ++ ++ if (hwc->info->clksel[i].flags & CLKSEL_80PCT && ++ rate > pct80_rate) ++ continue; ++ if (rate < min_rate) ++ continue; ++ ++ parent_names[j] = div->name; ++ hwc->parent_to_clksel[j] = i; ++ hwc->clksel_to_parent[i] = j; ++ j++; ++ } ++ ++ init.name = name; ++ init.ops = ops; ++ init.parent_names = parent_names; ++ init.num_parents = hwc->num_parents = j; ++ init.flags = 0; ++ hwc->hw.init = &init; ++ hwc->cg = cg; ++ ++ clk = clk_register(NULL, &hwc->hw); ++ if (IS_ERR(clk)) { ++ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, ++ PTR_ERR(clk)); ++ kfree(hwc); ++ return NULL; ++ } ++ ++ return clk; ++} ++ ++static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) ++{ ++ struct mux_hwclock *hwc; ++ const struct clockgen_pll_div *div; ++ unsigned long plat_rate, min_rate; ++ u64 pct80_rate; ++ u32 clksel; ++ ++ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); ++ if (!hwc) ++ return NULL; ++ ++ if (cg->info.flags & CG_VER3) ++ hwc->reg = cg->regs + 0x70000 + 0x20 * idx; ++ else ++ hwc->reg = cg->regs + 0x20 * idx; ++ ++ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; ++ ++ /* ++ * Find the rate for the default clksel, and treat it as the ++ * maximum rated core frequency. If this is an incorrect ++ * assumption, certain clock options (possibly including the ++ * default clksel) may be inappropriately excluded on certain ++ * chips. 
++ */ ++ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; ++ div = get_pll_div(cg, hwc, clksel); ++ if (!div) ++ return NULL; ++ ++ pct80_rate = clk_get_rate(div->clk); ++ pct80_rate *= 8; ++ do_div(pct80_rate, 10); ++ ++ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); ++ ++ if (cg->info.flags & CG_CMUX_GE_PLAT) ++ min_rate = plat_rate; ++ else ++ min_rate = plat_rate / 2; ++ ++ return create_mux_common(cg, hwc, &cmux_ops, min_rate, ++ pct80_rate, "cg-cmux%d", idx); ++} ++ ++static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) ++{ ++ struct mux_hwclock *hwc; ++ ++ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); ++ if (!hwc) ++ return NULL; ++ ++ hwc->reg = cg->regs + 0x20 * idx + 0x10; ++ hwc->info = cg->info.hwaccel[idx]; ++ ++ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, ++ "cg-hwaccel%d", idx); ++} ++ ++static void __init create_muxes(struct clockgen *cg) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) { ++ if (cg->info.cmux_to_group[i] < 0) ++ break; ++ if (cg->info.cmux_to_group[i] >= ++ ARRAY_SIZE(cg->info.cmux_groups)) { ++ WARN_ON_ONCE(1); ++ continue; ++ } ++ ++ cg->cmux[i] = create_one_cmux(cg, i); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) { ++ if (!cg->info.hwaccel[i]) ++ continue; ++ ++ cg->hwaccel[i] = create_one_hwaccel(cg, i); ++ } ++} ++ ++static void __init clockgen_init(struct device_node *np); ++ ++/* Legacy nodes may get probed before the parent clockgen node */ ++static void __init legacy_init_clockgen(struct device_node *np) ++{ ++ if (!clockgen.node) ++ clockgen_init(of_get_parent(np)); ++} ++ ++/* Legacy node */ ++static void __init core_mux_init(struct device_node *np) ++{ ++ struct clk *clk; ++ struct resource res; ++ int idx, rc; ++ ++ legacy_init_clockgen(np); ++ ++ if (of_address_to_resource(np, 0, &res)) ++ return; ++ ++ idx = (res.start & 0xf0) >> 5; ++ clk = clockgen.cmux[idx]; ++ ++ rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); ++ if (rc) { ++ pr_err("%s: Couldn't register clk provider for node %s: %d\n", ++ __func__, np->name, rc); ++ return; ++ } ++} ++ ++static struct clk *sysclk_from_fixed(struct device_node *node, const char *name) ++{ ++ u32 rate; ++ ++ if (of_property_read_u32(node, "clock-frequency", &rate)) ++ return ERR_PTR(-ENODEV); ++ ++ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate); ++} ++ ++static struct clk *sysclk_from_parent(const char *name) ++{ ++ struct clk *clk; ++ const char *parent_name; ++ ++ clk = of_clk_get(clockgen.node, 0); ++ if (IS_ERR(clk)) ++ return clk; ++ ++ /* Register the input clock under the desired name. 
*/ ++ parent_name = __clk_get_name(clk); ++ clk = clk_register_fixed_factor(NULL, name, parent_name, ++ 0, 1, 1); ++ if (IS_ERR(clk)) ++ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, ++ PTR_ERR(clk)); ++ ++ return clk; ++} ++ ++static struct clk * __init create_sysclk(const char *name) ++{ ++ struct device_node *sysclk; ++ struct clk *clk; ++ ++ clk = sysclk_from_fixed(clockgen.node, name); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ clk = sysclk_from_parent(name); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ sysclk = of_get_child_by_name(clockgen.node, "sysclk"); ++ if (sysclk) { ++ clk = sysclk_from_fixed(sysclk, name); ++ if (!IS_ERR(clk)) ++ return clk; ++ } ++ ++ pr_err("%s: No input clock\n", __func__); ++ return NULL; ++} ++ ++/* Legacy node */ ++static void __init sysclk_init(struct device_node *node) ++{ ++ struct clk *clk; ++ ++ legacy_init_clockgen(node); ++ ++ clk = clockgen.sysclk; ++ if (clk) ++ of_clk_add_provider(node, of_clk_src_simple_get, clk); ++} ++ ++#define PLL_KILL BIT(31) ++ ++static void __init create_one_pll(struct clockgen *cg, int idx) ++{ ++ u32 __iomem *reg; ++ u32 mult; ++ struct clockgen_pll *pll = &cg->pll[idx]; ++ int i; ++ ++ if (!(cg->info.pll_mask & (1 << idx))) ++ return; ++ ++ if (cg->info.flags & CG_VER3) { ++ switch (idx) { ++ case PLATFORM_PLL: ++ reg = cg->regs + 0x60080; ++ break; ++ case CGA_PLL1: ++ reg = cg->regs + 0x80; ++ break; ++ case CGA_PLL2: ++ reg = cg->regs + 0xa0; ++ break; ++ case CGB_PLL1: ++ reg = cg->regs + 0x10080; ++ break; ++ case CGB_PLL2: ++ reg = cg->regs + 0x100a0; ++ break; ++ default: ++ WARN_ONCE(1, "index %d\n", idx); ++ return; ++ } ++ } else { ++ if (idx == PLATFORM_PLL) ++ reg = cg->regs + 0xc00; ++ else ++ reg = cg->regs + 0x800 + 0x20 * (idx - 1); ++ } ++ ++ /* Get the multiple of PLL */ ++ mult = cg_in(cg, reg); ++ ++ /* Check if this PLL is disabled */ ++ if (mult & PLL_KILL) { ++ pr_debug("%s(): pll %p disabled\n", __func__, reg); ++ return; ++ } ++ ++ if ((cg->info.flags & CG_VER3) || ++ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL)) ++ mult = (mult & GENMASK(8, 1)) >> 1; ++ else ++ mult = (mult & GENMASK(6, 1)) >> 1; ++ ++ for (i = 0; i < ARRAY_SIZE(pll->div); i++) { ++ struct clk *clk; ++ ++ snprintf(pll->div[i].name, sizeof(pll->div[i].name), ++ "cg-pll%d-div%d", idx, i + 1); ++ ++ clk = clk_register_fixed_factor(NULL, ++ pll->div[i].name, "cg-sysclk", 0, mult, i + 1); ++ if (IS_ERR(clk)) { ++ pr_err("%s: %s: register failed %ld\n", ++ __func__, pll->div[i].name, PTR_ERR(clk)); ++ continue; ++ } ++ ++ pll->div[i].clk = clk; ++ } ++} ++ ++static void __init create_plls(struct clockgen *cg) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cg->pll); i++) ++ create_one_pll(cg, i); ++} ++ ++static void __init legacy_pll_init(struct device_node *np, int idx) ++{ ++ struct clockgen_pll *pll; ++ struct clk_onecell_data *onecell_data; ++ struct clk **subclks; ++ int count, rc; ++ ++ legacy_init_clockgen(np); ++ ++ pll = &clockgen.pll[idx]; ++ count = of_property_count_strings(np, "clock-output-names"); ++ ++ BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4); ++ subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL); ++ if (!subclks) ++ return; ++ ++ onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL); ++ if (!onecell_data) ++ goto err_clks; ++ ++ if (count <= 3) { ++ subclks[0] = pll->div[0].clk; ++ subclks[1] = pll->div[1].clk; ++ subclks[2] = pll->div[3].clk; ++ } else { ++ subclks[0] = pll->div[0].clk; ++ subclks[1] = pll->div[1].clk; ++ subclks[2] = pll->div[2].clk; ++ subclks[3] = pll->div[3].clk; ++ 
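++		/*
++		 * Four or more "clock-output-names" entries: expose the
++		 * div1..div4 clocks in order.  The three-name case above
++		 * skips div3 and maps its last entry to the divide-by-4
++		 * clock, presumably matching older device trees that only
++		 * name the pll, pll-div2 and pll-div4 outputs.
++		 */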
} ++ ++ onecell_data->clks = subclks; ++ onecell_data->clk_num = count; ++ ++ rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); ++ if (rc) { ++ pr_err("%s: Couldn't register clk provider for node %s: %d\n", ++ __func__, np->name, rc); ++ goto err_cell; ++ } ++ ++ return; ++err_cell: ++ kfree(onecell_data); ++err_clks: ++ kfree(subclks); ++} ++ ++/* Legacy node */ ++static void __init pltfrm_pll_init(struct device_node *np) ++{ ++ legacy_pll_init(np, PLATFORM_PLL); ++} ++ ++/* Legacy node */ ++static void __init core_pll_init(struct device_node *np) ++{ ++ struct resource res; ++ int idx; ++ ++ if (of_address_to_resource(np, 0, &res)) ++ return; ++ ++ if ((res.start & 0xfff) == 0xc00) { ++ /* ++ * ls1021a devtree labels the platform PLL ++ * with the core PLL compatible ++ */ ++ pltfrm_pll_init(np); ++ } else { ++ idx = (res.start & 0xf0) >> 5; ++ legacy_pll_init(np, CGA_PLL1 + idx); ++ } ++} ++ ++static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data) ++{ ++ struct clockgen *cg = data; ++ struct clk *clk; ++ struct clockgen_pll *pll; ++ u32 type, idx; ++ ++ if (clkspec->args_count < 2) { ++ pr_err("%s: insufficient phandle args\n", __func__); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ type = clkspec->args[0]; ++ idx = clkspec->args[1]; ++ ++ switch (type) { ++ case 0: ++ if (idx != 0) ++ goto bad_args; ++ clk = cg->sysclk; ++ break; ++ case 1: ++ if (idx >= ARRAY_SIZE(cg->cmux)) ++ goto bad_args; ++ clk = cg->cmux[idx]; ++ break; ++ case 2: ++ if (idx >= ARRAY_SIZE(cg->hwaccel)) ++ goto bad_args; ++ clk = cg->hwaccel[idx]; ++ break; ++ case 3: ++ if (idx >= ARRAY_SIZE(cg->fman)) ++ goto bad_args; ++ clk = cg->fman[idx]; ++ break; ++ case 4: ++ pll = &cg->pll[PLATFORM_PLL]; ++ if (idx >= ARRAY_SIZE(pll->div)) ++ goto bad_args; ++ clk = pll->div[idx].clk; ++ break; ++ default: ++ goto bad_args; ++ } ++ ++ if (!clk) ++ return ERR_PTR(-ENOENT); ++ return clk; ++ ++bad_args: ++ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx); ++ return ERR_PTR(-EINVAL); ++} ++ ++#ifdef CONFIG_PPC ++ ++static const u32 a4510_svrs[] __initconst = { ++ (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */ ++ (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */ ++ (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */ ++ (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */ ++ (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */ ++ (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */ ++ (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */ ++ (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */ ++ (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */ ++ (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */ ++ (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */ ++ (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */ ++ (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */ ++}; ++ ++#define SVR_SECURITY 0x80000 /* The Security (E) bit */ ++ ++static bool __init has_erratum_a4510(void) ++{ ++ u32 svr = mfspr(SPRN_SVR); ++ int i; ++ ++ svr &= ~SVR_SECURITY; ++ ++ for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) { ++ if (svr == a4510_svrs[i]) ++ return true; ++ } ++ ++ return false; ++} ++#else ++static bool __init has_erratum_a4510(void) ++{ ++ return false; ++} ++#endif ++ ++static void __init clockgen_init(struct device_node *np) ++{ ++ int i, ret; ++ bool is_old_ls1021a = false; ++ ++ /* May have already been called by a legacy probe */ ++ if (clockgen.node) ++ return; ++ ++ clockgen.node = np; ++ clockgen.regs = of_iomap(np, 0); ++ if (!clockgen.regs && ++ of_device_is_compatible(of_root, "fsl,ls1021a")) { ++ /* Compatibility hack for old, broken device trees */ ++ clockgen.regs = ioremap(0x1ee1000, 0x1000); ++ is_old_ls1021a = 
true; ++ } ++ if (!clockgen.regs) { ++ pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name); ++ return; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(chipinfo); i++) { ++ if (of_device_is_compatible(np, chipinfo[i].compat)) ++ break; ++ if (is_old_ls1021a && ++ !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen")) ++ break; ++ } ++ ++ if (i == ARRAY_SIZE(chipinfo)) { ++ pr_err("%s: unknown clockgen node %s\n", __func__, ++ np->full_name); ++ goto err; ++ } ++ clockgen.info = chipinfo[i]; ++ ++ if (clockgen.info.guts_compat) { ++ struct device_node *guts; ++ ++ guts = of_find_compatible_node(NULL, NULL, ++ clockgen.info.guts_compat); ++ if (guts) { ++ clockgen.guts = of_iomap(guts, 0); ++ if (!clockgen.guts) { ++ pr_err("%s: Couldn't map %s regs\n", __func__, ++ guts->full_name); ++ } ++ } ++ ++ } ++ ++ if (has_erratum_a4510()) ++ clockgen.info.flags |= CG_CMUX_GE_PLAT; ++ ++ clockgen.sysclk = create_sysclk("cg-sysclk"); ++ create_plls(&clockgen); ++ create_muxes(&clockgen); ++ ++ if (clockgen.info.init_periph) ++ clockgen.info.init_periph(&clockgen); ++ ++ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen); ++ if (ret) { ++ pr_err("%s: Couldn't register clk provider for node %s: %d\n", ++ __func__, np->name, ret); ++ } ++ ++ return; ++err: ++ iounmap(clockgen.regs); ++ clockgen.regs = NULL; ++} ++ ++CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls2088a, "fsl,ls2088a-clockgen", clockgen_init); ++ ++/* Legacy nodes */ ++CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); ++CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); ++CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); ++CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); ++CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); ++CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); ++CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init); ++CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init); +diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc +index 72564b7..7ea2441 100644 +--- a/drivers/cpufreq/Kconfig.powerpc ++++ b/drivers/cpufreq/Kconfig.powerpc +@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE + config PPC_CORENET_CPUFREQ + tristate "CPU frequency scaling driver for Freescale E500MC SoCs" + depends on PPC_E500MC && OF && COMMON_CLK +- select CLK_PPC_CORENET ++ select CLK_QORIQ + help + This adds the CPUFreq driver support for Freescale e500mc, + e5500 and e6500 series SoCs which are capable of changing +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 06e99eb..bbf8ae4 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -526,10 +526,10 @@ config I2C_IBM_IIC + + config I2C_IMX + tristate "IMX I2C interface" +- depends on ARCH_MXC ++ depends on ARCH_MXC || ARCH_LAYERSCAPE + help + Say Y here if you want to use the IIC bus controller on +- the Freescale i.MX/MXC processors. ++ the Freescale i.MX/MXC and layerscape processors. + + This driver can also be built as a module. If so, the module + will be called i2c-imx. 
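[Editor's note] To make the register arithmetic in the clockgen driver above easier to follow, here is a small standalone sketch (illustrative only, not part of the patch; the sysclk and register values are made up) mirroring three of its calculations: extracting the PLL multiplier from the PLLCnGSR CFG field (bits [8:1] on CG_PLL_8BIT/CG_VER3 parts, [6:1] otherwise), deriving the rate of a cg-pllN-divM fixed-factor clock from sysclk, and decoding the CLKSEL field at bits [30:27] that mux_get_parent() reads, plus the 80% cap applied to CLKSEL_80PCT options.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the driver's CLKSEL field: bits [30:27] of the cmux register. */
    #define CLKSEL_MASK  0x78000000u
    #define CLKSEL_SHIFT 27

    /* PLL multiplier: bits [8:1] on 8-bit parts, [6:1] otherwise. */
    static uint32_t pll_mult(uint32_t gsr, int eight_bit)
    {
        uint32_t mask = eight_bit ? 0x1feu : 0x7eu;  /* GENMASK(8,1) / GENMASK(6,1) */

        return (gsr & mask) >> 1;
    }

    int main(void)
    {
        uint32_t sysclk = 100000000;  /* assumed 100 MHz input clock */
        uint32_t gsr = 0x00000028;    /* example PLLC1GSR value: mult = 20 */
        uint32_t csr = 0x08000000;    /* example cmux register: clksel = 1 */
        uint32_t mult = pll_mult(gsr, 1);
        unsigned int div;

        /* cg-pll0-divN is registered as a fixed factor of sysclk: mult / N. */
        for (div = 1; div <= 4; div++)
            printf("cg-pll0-div%u = %u Hz\n", div, sysclk * mult / div);

        /* mux_get_parent() recovers the selected option from CLKSEL. */
        printf("clksel = %u\n", (csr & CLKSEL_MASK) >> CLKSEL_SHIFT);

        /* The 80%% rule used when building the cmux parent list. */
        printf("80%% of pll0-div1 = %llu Hz\n",
               (unsigned long long)sysclk * mult * 8 / 10);
        return 0;
    }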
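[Editor's note] The i2c-imx changes in the next diff add DMA support for transfers of DMA_THRESHOLD (16) bytes or more, but the first and last bytes are still moved by the CPU: on writes the slave address and the final data byte go through I2DR directly, and on reads the last two bytes are read by the CPU so that TXAK and the STOP (or repeated START) can be sequenced. The standalone sketch below (illustrative only; dma_portion() is an invented helper, not a kernel API) shows how many bytes end up on the DMA channel for a given message.

    #include <stdbool.h>
    #include <stdio.h>

    #define DMA_THRESHOLD 16   /* same value the patch uses */

    /*
     * Bytes of an i2c_msg the DMA engine would move, per the patch:
     * writes: all but the last byte (the address byte is written by the CPU);
     * reads:  all but the last two bytes.
     */
    static int dma_portion(int len, bool is_read, bool have_dma)
    {
        if (!have_dma || len < DMA_THRESHOLD)
            return 0;                      /* PIO path */
        return is_read ? len - 2 : len - 1;
    }

    int main(void)
    {
        int lens[] = { 4, 16, 64 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
            printf("len %2d: write DMA %2d bytes, read DMA %2d bytes\n",
                   lens[i],
                   dma_portion(lens[i], false, true),
                   dma_portion(lens[i], true, true));
        return 0;
    }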
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c +index e9fb7cf..13f88f9 100644 +--- a/drivers/i2c/busses/i2c-imx.c ++++ b/drivers/i2c/busses/i2c-imx.c +@@ -33,6 +33,10 @@ + *******************************************************************************/ + + #include ++#include ++#include ++#include ++#include + #include + #include + #include +@@ -47,6 +51,7 @@ + #include + #include + #include ++#include + #include + + /** Defines ******************************************************************** +@@ -58,6 +63,15 @@ + /* Default value */ + #define IMX_I2C_BIT_RATE 100000 /* 100kHz */ + ++/* ++ * Enable DMA if transfer byte size is bigger than this threshold. ++ * As the hardware request, it must bigger than 4 bytes.\ ++ * I have set '16' here, maybe it's not the best but I think it's ++ * the appropriate. ++ */ ++#define DMA_THRESHOLD 16 ++#define DMA_TIMEOUT 1000 ++ + /* IMX I2C registers: + * the I2C register offset is different between SoCs, + * to provid support for all these chips, split the +@@ -83,6 +97,7 @@ + #define I2SR_IBB 0x20 + #define I2SR_IAAS 0x40 + #define I2SR_ICF 0x80 ++#define I2CR_DMAEN 0x02 + #define I2CR_RSTA 0x04 + #define I2CR_TXAK 0x08 + #define I2CR_MTX 0x10 +@@ -169,6 +184,17 @@ struct imx_i2c_hwdata { + unsigned i2cr_ien_opcode; + }; + ++struct imx_i2c_dma { ++ struct dma_chan *chan_tx; ++ struct dma_chan *chan_rx; ++ struct dma_chan *chan_using; ++ struct completion cmd_complete; ++ dma_addr_t dma_buf; ++ unsigned int dma_len; ++ enum dma_transfer_direction dma_transfer_dir; ++ enum dma_data_direction dma_data_dir; ++}; ++ + struct imx_i2c_struct { + struct i2c_adapter adapter; + struct clk *clk; +@@ -181,6 +207,8 @@ struct imx_i2c_struct { + unsigned int cur_clk; + unsigned int bitrate; + const struct imx_i2c_hwdata *hwdata; ++ ++ struct imx_i2c_dma *dma; + }; + + static const struct imx_i2c_hwdata imx1_i2c_hwdata = { +@@ -251,6 +279,162 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, + return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift)); + } + ++/* Functions for DMA support */ ++static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, ++ dma_addr_t phy_addr) ++{ ++ struct imx_i2c_dma *dma; ++ struct dma_slave_config dma_sconfig; ++ struct device *dev = &i2c_imx->adapter.dev; ++ int ret; ++ ++ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); ++ if (!dma) ++ return; ++ ++ dma->chan_tx = dma_request_slave_channel(dev, "tx"); ++ if (!dma->chan_tx) { ++ dev_dbg(dev, "can't request DMA tx channel\n"); ++ goto fail_al; ++ } ++ ++ dma_sconfig.dst_addr = phy_addr + ++ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); ++ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ dma_sconfig.dst_maxburst = 1; ++ dma_sconfig.direction = DMA_MEM_TO_DEV; ++ ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig); ++ if (ret < 0) { ++ dev_dbg(dev, "can't configure tx channel\n"); ++ goto fail_tx; ++ } ++ ++ dma->chan_rx = dma_request_slave_channel(dev, "rx"); ++ if (!dma->chan_rx) { ++ dev_dbg(dev, "can't request DMA rx channel\n"); ++ goto fail_tx; ++ } ++ ++ dma_sconfig.src_addr = phy_addr + ++ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); ++ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ dma_sconfig.src_maxburst = 1; ++ dma_sconfig.direction = DMA_DEV_TO_MEM; ++ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig); ++ if (ret < 0) { ++ dev_dbg(dev, "can't configure rx channel\n"); ++ goto fail_rx; ++ } ++ ++ i2c_imx->dma = dma; ++ init_completion(&dma->cmd_complete); ++ 
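++	/*
++	 * From here on i2c_imx->dma is non-NULL; that is the flag the
++	 * transfer paths test (together with DMA_THRESHOLD) to choose
++	 * between the DMA and PIO code paths.
++	 */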
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", ++ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); ++ ++ return; ++ ++fail_rx: ++ dma_release_channel(dma->chan_rx); ++fail_tx: ++ dma_release_channel(dma->chan_tx); ++fail_al: ++ devm_kfree(dev, dma); ++ dev_info(dev, "can't use DMA\n"); ++} ++ ++static void i2c_imx_dma_callback(void *arg) ++{ ++ struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg; ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ ++ dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf, ++ dma->dma_len, dma->dma_data_dir); ++ complete(&dma->cmd_complete); ++} ++ ++static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, ++ struct i2c_msg *msgs) ++{ ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ struct dma_async_tx_descriptor *txdesc; ++ struct device *dev = &i2c_imx->adapter.dev; ++ struct device *chan_dev = dma->chan_using->device->dev; ++ ++ dma->dma_buf = dma_map_single(chan_dev, msgs->buf, ++ dma->dma_len, dma->dma_data_dir); ++ if (dma_mapping_error(chan_dev, dma->dma_buf)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ goto err_map; ++ } ++ ++ txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf, ++ dma->dma_len, dma->dma_transfer_dir, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!txdesc) { ++ dev_err(dev, "Not able to get desc for DMA xfer\n"); ++ goto err_desc; ++ } ++ ++ txdesc->callback = i2c_imx_dma_callback; ++ txdesc->callback_param = i2c_imx; ++ if (dma_submit_error(dmaengine_submit(txdesc))) { ++ dev_err(dev, "DMA submit failed\n"); ++ goto err_submit; ++ } ++ ++ dma_async_issue_pending(dma->chan_using); ++ return 0; ++ ++err_submit: ++err_desc: ++ dma_unmap_single(chan_dev, dma->dma_buf, ++ dma->dma_len, dma->dma_data_dir); ++err_map: ++ return -EINVAL; ++} ++ ++static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) ++{ ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ ++ dma->dma_buf = 0; ++ dma->dma_len = 0; ++ ++ dma_release_channel(dma->chan_tx); ++ dma->chan_tx = NULL; ++ ++ dma_release_channel(dma->chan_rx); ++ dma->chan_rx = NULL; ++ ++ dma->chan_using = NULL; ++} ++ ++/* ++ * When a system reset does not cause all I2C devices to be reset, it is ++ * sometimes necessary to force the I2C module to become the I2C bus master ++ * out of reset and drive SCL A slave can hold bus low to cause bus hang. ++ * Thus, SDA can be driven low by another I2C device while this I2C module ++ * is coming out of reset and will stay low indefinitely. ++ * The I2C master has to generate 9 clock pulses to get the bus free or idle. 
++ */ ++static void imx_i2c_fixup(struct imx_i2c_struct *i2c_imx) ++{ ++ int k; ++ u32 delay_val = 1000000 / i2c_imx->cur_clk + 1; ++ ++ if (delay_val < 2) ++ delay_val = 2; ++ ++ for (k = 9; k; k--) { ++ imx_i2c_write_reg(I2CR_IEN, i2c_imx, IMX_I2C_I2CR); ++ imx_i2c_write_reg((I2CR_MSTA | I2CR_MTX) & (~I2CR_IEN), ++ i2c_imx, IMX_I2C_I2CR); ++ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); ++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); ++ udelay(delay_val << 1); ++ } ++} ++ + /** Functions for IMX I2C adapter driver *************************************** + *******************************************************************************/ + +@@ -276,8 +460,15 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) + if (!for_busy && !(temp & I2SR_IBB)) + break; + if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { ++ u8 status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ + dev_dbg(&i2c_imx->adapter.dev, + "<%s> I2C bus is busy\n", __func__); ++ if ((status & (I2SR_ICF | I2SR_IBB | I2CR_TXAK)) != 0) { ++ imx_i2c_write_reg(status & ~I2SR_IAL, i2c_imx, ++ IMX_I2C_I2CR); ++ imx_i2c_fixup(i2c_imx); ++ } + return -ETIMEDOUT; + } + schedule(); +@@ -382,6 +573,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) + i2c_imx->stopped = 0; + + temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; ++ temp &= ~I2CR_DMAEN; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + return result; + } +@@ -395,6 +587,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) + dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp &= ~(I2CR_MSTA | I2CR_MTX); ++ if (i2c_imx->dma) ++ temp &= ~I2CR_DMAEN; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } + if (is_imx1_i2c(i2c_imx)) { +@@ -435,6 +629,157 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) + return IRQ_NONE; + } + ++static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, ++ struct i2c_msg *msgs) ++{ ++ int result; ++ unsigned long time_left; ++ unsigned int temp = 0; ++ unsigned long orig_jiffies = jiffies; ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ struct device *dev = &i2c_imx->adapter.dev; ++ ++ dma->chan_using = dma->chan_tx; ++ dma->dma_transfer_dir = DMA_MEM_TO_DEV; ++ dma->dma_data_dir = DMA_TO_DEVICE; ++ dma->dma_len = msgs->len - 1; ++ result = i2c_imx_dma_xfer(i2c_imx, msgs); ++ if (result) ++ return result; ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp |= I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* ++ * Write slave address. ++ * The first byte must be transmitted by the CPU. ++ */ ++ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); ++ reinit_completion(&i2c_imx->dma->cmd_complete); ++ time_left = wait_for_completion_timeout( ++ &i2c_imx->dma->cmd_complete, ++ msecs_to_jiffies(DMA_TIMEOUT)); ++ if (time_left == 0) { ++ dmaengine_terminate_all(dma->chan_using); ++ return -ETIMEDOUT; ++ } ++ ++ /* Waiting for transfer complete. */ ++ while (1) { ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ if (temp & I2SR_ICF) ++ break; ++ if (time_after(jiffies, orig_jiffies + ++ msecs_to_jiffies(DMA_TIMEOUT))) { ++ dev_dbg(dev, "<%s> Timeout\n", __func__); ++ return -ETIMEDOUT; ++ } ++ schedule(); ++ } ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* The last data byte must be transferred by the CPU. 
*/ ++ imx_i2c_write_reg(msgs->buf[msgs->len-1], ++ i2c_imx, IMX_I2C_I2DR); ++ result = i2c_imx_trx_complete(i2c_imx); ++ if (result) ++ return result; ++ ++ return i2c_imx_acked(i2c_imx); ++} ++ ++static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, ++ struct i2c_msg *msgs, bool is_lastmsg) ++{ ++ int result; ++ unsigned long time_left; ++ unsigned int temp; ++ unsigned long orig_jiffies = jiffies; ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ struct device *dev = &i2c_imx->adapter.dev; ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp |= I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ dma->chan_using = dma->chan_rx; ++ dma->dma_transfer_dir = DMA_DEV_TO_MEM; ++ dma->dma_data_dir = DMA_FROM_DEVICE; ++ /* The last two data bytes must be transferred by the CPU. */ ++ dma->dma_len = msgs->len - 2; ++ result = i2c_imx_dma_xfer(i2c_imx, msgs); ++ if (result) ++ return result; ++ ++ reinit_completion(&i2c_imx->dma->cmd_complete); ++ time_left = wait_for_completion_timeout( ++ &i2c_imx->dma->cmd_complete, ++ msecs_to_jiffies(DMA_TIMEOUT)); ++ if (time_left == 0) { ++ dmaengine_terminate_all(dma->chan_using); ++ return -ETIMEDOUT; ++ } ++ ++ /* waiting for transfer complete. */ ++ while (1) { ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ if (temp & I2SR_ICF) ++ break; ++ if (time_after(jiffies, orig_jiffies + ++ msecs_to_jiffies(DMA_TIMEOUT))) { ++ dev_dbg(dev, "<%s> Timeout\n", __func__); ++ return -ETIMEDOUT; ++ } ++ schedule(); ++ } ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* read n-1 byte data */ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp |= I2CR_TXAK; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); ++ /* read n byte data */ ++ result = i2c_imx_trx_complete(i2c_imx); ++ if (result) ++ return result; ++ ++ if (is_lastmsg) { ++ /* ++ * It must generate STOP before read I2DR to prevent ++ * controller from generating another clock cycle ++ */ ++ dev_dbg(dev, "<%s> clear MSTA\n", __func__); ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~(I2CR_MSTA | I2CR_MTX); ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ i2c_imx_bus_busy(i2c_imx, 0); ++ i2c_imx->stopped = 1; ++ } else { ++ /* ++ * For i2c master receiver repeat restart operation like: ++ * read -> repeat MSTA -> read/write ++ * The controller must set MTX before read the last byte in ++ * the first read operation, otherwise the first read cost ++ * one extra clock cycle. 
++ */ ++ temp = readb(i2c_imx->base + IMX_I2C_I2CR); ++ temp |= I2CR_MTX; ++ writeb(temp, i2c_imx->base + IMX_I2C_I2CR); ++ } ++ msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); ++ ++ return 0; ++} ++ + static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) + { + int i, result; +@@ -504,6 +849,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo + + dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); + ++ if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data) ++ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); ++ + /* read data */ + for (i = 0; i < msgs->len; i++) { + u8 len = 0; +@@ -577,6 +925,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, + + dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + ++ /* workround for ERR010027: ensure that the I2C BUS is idle ++ before switching to master mode and attempting a Start cycle ++ */ ++ result = i2c_imx_bus_busy(i2c_imx, 0); ++ if (result) ++ goto fail0; ++ + /* Start I2C transfer */ + result = i2c_imx_start(i2c_imx); + if (result) +@@ -618,8 +973,12 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, + #endif + if (msgs[i].flags & I2C_M_RD) + result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg); +- else +- result = i2c_imx_write(i2c_imx, &msgs[i]); ++ else { ++ if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD) ++ result = i2c_imx_dma_write(i2c_imx, &msgs[i]); ++ else ++ result = i2c_imx_write(i2c_imx, &msgs[i]); ++ } + if (result) + goto fail0; + } +@@ -654,6 +1013,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev); + void __iomem *base; + int irq, ret; ++ dma_addr_t phy_addr; + + dev_dbg(&pdev->dev, "<%s>\n", __func__); + +@@ -668,6 +1028,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + if (IS_ERR(base)) + return PTR_ERR(base); + ++ phy_addr = (dma_addr_t)res->start; + i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct), + GFP_KERNEL); + if (!i2c_imx) +@@ -701,7 +1062,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + return ret; + } + /* Request IRQ */ +- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, ++ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, + pdev->name, i2c_imx); + if (ret) { + dev_err(&pdev->dev, "can't claim irq %d\n", irq); +@@ -743,6 +1104,9 @@ static int i2c_imx_probe(struct platform_device *pdev) + i2c_imx->adapter.name); + dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); + ++ /* Init DMA config if support*/ ++ i2c_imx_dma_request(i2c_imx, phy_addr); ++ + return 0; /* Return OK */ + + clk_disable: +@@ -758,6 +1122,9 @@ static int i2c_imx_remove(struct platform_device *pdev) + dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); + i2c_del_adapter(&i2c_imx->adapter); + ++ if (i2c_imx->dma) ++ i2c_imx_dma_free(i2c_imx); ++ + /* setup chip registers to defaults */ + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); +diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c +index cb77277..0c8d4d2 100644 +--- a/drivers/i2c/muxes/i2c-mux-pca9541.c ++++ b/drivers/i2c/muxes/i2c-mux-pca9541.c +@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val) + buf[0] = command; + buf[1] = val; + msg.buf = buf; +- ret = adap->algo->master_xfer(adap, &msg, 1); ++ ret = __i2c_transfer(adap, &msg, 1); + } else { + union i2c_smbus_data data; + +@@ -144,7 +144,7 @@ static int 
pca9541_reg_read(struct i2c_client *client, u8 command) + .buf = &val + } + }; +- ret = adap->algo->master_xfer(adap, msg, 2); ++ ret = __i2c_transfer(adap, msg, 2); + if (ret == 2) + ret = val; + else if (ret >= 0) +diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c +index ec11b40..28540a4 100644 +--- a/drivers/i2c/muxes/i2c-mux-pca954x.c ++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -62,6 +63,7 @@ struct pca954x { + struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS]; + + u8 last_chan; /* last register value */ ++ u8 disable_mux; /* do not disable mux if val not 0 */ + }; + + struct chip_desc { +@@ -133,7 +135,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap, + msg.len = 1; + buf[0] = val; + msg.buf = buf; +- ret = adap->algo->master_xfer(adap, &msg, 1); ++ ret = __i2c_transfer(adap, &msg, 1); + } else { + union i2c_smbus_data data; + ret = adap->algo->smbus_xfer(adap, client->addr, +@@ -173,6 +175,13 @@ static int pca954x_deselect_mux(struct i2c_adapter *adap, + { + struct pca954x *data = i2c_get_clientdata(client); + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (data->disable_mux != 0) ++ data->last_chan = chips[data->type].nchans; ++ else ++ data->last_chan = 0; ++ return pca954x_reg_write(adap, client, data->disable_mux); ++#endif + /* Deselect active channel */ + data->last_chan = 0; + return pca954x_reg_write(adap, client, data->last_chan); +@@ -186,6 +195,8 @@ static int pca954x_probe(struct i2c_client *client, + { + struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); + struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev); ++ struct device_node *of_node = client->dev.of_node; ++ bool idle_disconnect_dt; + struct gpio_desc *gpio; + int num, force, class; + struct pca954x *data; +@@ -198,27 +209,55 @@ static int pca954x_probe(struct i2c_client *client, + if (!data) + return -ENOMEM; + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ /* The point here is that you must not disable a mux if there ++ * are no pullups on the input or you mess up the I2C. This ++ * needs to be put into the DTS really as the kernel cannot ++ * know this otherwise. ++ */ ++ data->type = id->driver_data; ++ data->disable_mux = of_node && ++ of_property_read_bool(of_node, "i2c-mux-never-disable") && ++ chips[data->type].muxtype == pca954x_ismux ? ++ chips[data->type].enable : 0; ++ /* force the first selection */ ++ if (data->disable_mux != 0) ++ data->last_chan = chips[data->type].nchans; ++ else ++ data->last_chan = 0; ++#endif + i2c_set_clientdata(client, data); + + /* Get the mux out of reset if a reset GPIO is specified. */ +- gpio = devm_gpiod_get(&client->dev, "reset"); +- if (!IS_ERR(gpio)) +- gpiod_direction_output(gpio, 0); ++ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW); ++ if (IS_ERR(gpio)) ++ return PTR_ERR(gpio); + + /* Write the mux register at addr to verify + * that the mux is in fact present. This also + * initializes the mux to disconnected state. 
+ */ ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) { ++#else + if (i2c_smbus_write_byte(client, 0) < 0) { ++#endif + dev_warn(&client->dev, "probe failed\n"); + return -ENODEV; + } + ++#ifndef CONFIG_ARCH_LAYERSCAPE + data->type = id->driver_data; + data->last_chan = 0; /* force the first selection */ ++#endif ++ ++ idle_disconnect_dt = of_node && ++ of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); + + /* Now create an adapter for each channel */ + for (num = 0; num < chips[data->type].nchans; num++) { ++ bool idle_disconnect_pd = false; ++ + force = 0; /* dynamic adap number */ + class = 0; /* no class by default */ + if (pdata) { +@@ -229,12 +268,13 @@ static int pca954x_probe(struct i2c_client *client, + } else + /* discard unconfigured channels */ + break; ++ idle_disconnect_pd = pdata->modes[num].deselect_on_exit; + } + + data->virt_adaps[num] = + i2c_add_mux_adapter(adap, &client->dev, client, + force, num, class, pca954x_select_chan, +- (pdata && pdata->modes[num].deselect_on_exit) ++ (idle_disconnect_pd || idle_disconnect_dt) + ? pca954x_deselect_mux : NULL); + + if (data->virt_adaps[num] == NULL) { +@@ -280,6 +320,13 @@ static int pca954x_resume(struct device *dev) + struct i2c_client *client = to_i2c_client(dev); + struct pca954x *data = i2c_get_clientdata(client); + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (data->disable_mux != 0) ++ data->last_chan = chips[data->type].nchans; ++ else ++ data->last_chan = 0; ++ return i2c_smbus_write_byte(client, data->disable_mux); ++#endif + data->last_chan = 0; + return i2c_smbus_write_byte(client, 0); + } +diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c +index 80ac68d..9396c85 100644 +--- a/drivers/iommu/fsl_pamu.c ++++ b/drivers/iommu/fsl_pamu.c +@@ -31,7 +31,7 @@ + #include + #include + #include +-#include ++#include + + #include "fsl_pamu.h" + +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c +index 5a500ed..fd6dd22 100644 +--- a/drivers/iommu/io-pgtable-arm.c ++++ b/drivers/iommu/io-pgtable-arm.c +@@ -56,7 +56,8 @@ + ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ + * (d)->bits_per_level) + (d)->pg_shift) + +-#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) ++#define ARM_LPAE_PAGES_PER_PGD(d) \ ++ DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) + + /* + * Calculate the index at level l used to map virtual address a using the +@@ -66,7 +67,7 @@ + ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) + + #define ARM_LPAE_LVL_IDX(a,l,d) \ +- (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ++ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ + ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) + + /* Calculate the block/page mapping size at level l for pagetable in d. 
*/ +@@ -115,6 +116,8 @@ + #define ARM_32_LPAE_TCR_EAE (1 << 31) + #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) + ++#define ARM_LPAE_TCR_EPD1 (1 << 23) ++ + #define ARM_LPAE_TCR_TG0_4K (0 << 14) + #define ARM_LPAE_TCR_TG0_64K (1 << 14) + #define ARM_LPAE_TCR_TG0_16K (2 << 14) +@@ -283,6 +286,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, + if (prot & IOMMU_CACHE) + pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE + << ARM_LPAE_PTE_ATTRINDX_SHIFT); ++ else if (prot & IOMMU_MMIO) ++ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV ++ << ARM_LPAE_PTE_ATTRINDX_SHIFT); + } else { + pte = ARM_LPAE_PTE_HAP_FAULT; + if (prot & IOMMU_READ) +@@ -291,6 +297,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, + pte |= ARM_LPAE_PTE_HAP_WRITE; + if (prot & IOMMU_CACHE) + pte |= ARM_LPAE_PTE_MEMATTR_OIWB; ++ else if (prot & IOMMU_MMIO) ++ pte |= ARM_LPAE_PTE_MEMATTR_DEV; + else + pte |= ARM_LPAE_PTE_MEMATTR_NC; + } +@@ -620,6 +628,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) + } + + reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; ++ ++ /* Disable speculative walks through TTBR1 */ ++ reg |= ARM_LPAE_TCR_EPD1; + cfg->arm_lpae_s1_cfg.tcr = reg; + + /* MAIRs */ +diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig +index caf590c..e72e239 100644 +--- a/drivers/irqchip/Kconfig ++++ b/drivers/irqchip/Kconfig +@@ -5,8 +5,15 @@ config IRQCHIP + config ARM_GIC + bool + select IRQ_DOMAIN ++ select IRQ_DOMAIN_HIERARCHY + select MULTI_IRQ_HANDLER + ++config ARM_GIC_V2M ++ bool ++ depends on ARM_GIC ++ depends on PCI && PCI_MSI ++ select PCI_MSI_IRQ_DOMAIN ++ + config GIC_NON_BANKED + bool + +@@ -14,6 +21,7 @@ config ARM_GIC_V3 + bool + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER ++ select IRQ_DOMAIN_HIERARCHY + + config ARM_GIC_V3_ITS + bool +diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile +index ec3621d..1c4f9a4 100644 +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o + obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o + obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o + obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o ++obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o + obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o + obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o + obj-$(CONFIG_ARM_NVIC) += irq-nvic.o +diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c +index 61541ff..ad96ebb 100644 +--- a/drivers/irqchip/irq-gic-common.c ++++ b/drivers/irqchip/irq-gic-common.c +@@ -21,7 +21,7 @@ + + #include "irq-gic-common.h" + +-void gic_configure_irq(unsigned int irq, unsigned int type, ++int gic_configure_irq(unsigned int irq, unsigned int type, + void __iomem *base, void (*sync_access)(void)) + { + u32 enablemask = 1 << (irq % 32); +@@ -29,16 +29,17 @@ void gic_configure_irq(unsigned int irq, unsigned int type, + u32 confmask = 0x2 << ((irq % 16) * 2); + u32 confoff = (irq / 16) * 4; + bool enabled = false; +- u32 val; ++ u32 val, oldval; ++ int ret = 0; + + /* + * Read current configuration register, and insert the config + * for "irq", depending on "type". 
+ */ +- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); +- if (type == IRQ_TYPE_LEVEL_HIGH) ++ val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); ++ if (type & IRQ_TYPE_LEVEL_MASK) + val &= ~confmask; +- else if (type == IRQ_TYPE_EDGE_RISING) ++ else if (type & IRQ_TYPE_EDGE_BOTH) + val |= confmask; + + /* +@@ -54,15 +55,20 @@ void gic_configure_irq(unsigned int irq, unsigned int type, + + /* + * Write back the new configuration, and possibly re-enable +- * the interrupt. ++ * the interrupt. If we tried to write a new configuration and failed, ++ * return an error. + */ + writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); ++ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval) ++ ret = -EINVAL; + + if (enabled) + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); + + if (sync_access) + sync_access(); ++ ++ return ret; + } + + void __init gic_dist_config(void __iomem *base, int gic_irqs, +diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h +index b41f024..35a9884 100644 +--- a/drivers/irqchip/irq-gic-common.h ++++ b/drivers/irqchip/irq-gic-common.h +@@ -20,7 +20,7 @@ + #include + #include + +-void gic_configure_irq(unsigned int irq, unsigned int type, ++int gic_configure_irq(unsigned int irq, unsigned int type, + void __iomem *base, void (*sync_access)(void)); + void gic_dist_config(void __iomem *base, int gic_irqs, + void (*sync_access)(void)); +diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c +new file mode 100644 +index 0000000..fdf7065 +--- /dev/null ++++ b/drivers/irqchip/irq-gic-v2m.c +@@ -0,0 +1,333 @@ ++/* ++ * ARM GIC v2m MSI(-X) support ++ * Support for Message Signaled Interrupts for systems that ++ * implement ARM Generic Interrupt Controller: GICv2m. ++ * ++ * Copyright (C) 2014 Advanced Micro Devices, Inc. ++ * Authors: Suravee Suthikulpanit ++ * Harish Kasiviswanathan ++ * Brandon Anderson ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ */ ++ ++#define pr_fmt(fmt) "GICv2m: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++* MSI_TYPER: ++* [31:26] Reserved ++* [25:16] lowest SPI assigned to MSI ++* [15:10] Reserved ++* [9:0] Numer of SPIs assigned to MSI ++*/ ++#define V2M_MSI_TYPER 0x008 ++#define V2M_MSI_TYPER_BASE_SHIFT 16 ++#define V2M_MSI_TYPER_BASE_MASK 0x3FF ++#define V2M_MSI_TYPER_NUM_MASK 0x3FF ++#define V2M_MSI_SETSPI_NS 0x040 ++#define V2M_MIN_SPI 32 ++#define V2M_MAX_SPI 1019 ++ ++#define V2M_MSI_TYPER_BASE_SPI(x) \ ++ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) ++ ++#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) ++ ++struct v2m_data { ++ spinlock_t msi_cnt_lock; ++ struct msi_controller mchip; ++ struct resource res; /* GICv2m resource */ ++ void __iomem *base; /* GICv2m virt address */ ++ u32 spi_start; /* The SPI number that MSIs start */ ++ u32 nr_spis; /* The number of SPIs for MSIs */ ++ unsigned long *bm; /* MSI vector bitmap */ ++ struct irq_domain *domain; ++}; ++ ++static void gicv2m_mask_msi_irq(struct irq_data *d) ++{ ++ pci_msi_mask_irq(d); ++ irq_chip_mask_parent(d); ++} ++ ++static void gicv2m_unmask_msi_irq(struct irq_data *d) ++{ ++ pci_msi_unmask_irq(d); ++ irq_chip_unmask_parent(d); ++} ++ ++static struct irq_chip gicv2m_msi_irq_chip = { ++ .name = "MSI", ++ .irq_mask = gicv2m_mask_msi_irq, ++ .irq_unmask = gicv2m_unmask_msi_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_write_msi_msg = pci_msi_domain_write_msg, ++}; ++ ++static struct msi_domain_info gicv2m_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_PCI_MSIX), ++ .chip = &gicv2m_msi_irq_chip, ++}; ++ ++static int gicv2m_set_affinity(struct irq_data *irq_data, ++ const struct cpumask *mask, bool force) ++{ ++ int ret; ++ ++ ret = irq_chip_set_affinity_parent(irq_data, mask, force); ++ if (ret == IRQ_SET_MASK_OK) ++ ret = IRQ_SET_MASK_OK_DONE; ++ ++ return ret; ++} ++ ++static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ++{ ++ struct v2m_data *v2m = irq_data_get_irq_chip_data(data); ++ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; ++ ++ msg->address_hi = (u32) (addr >> 32); ++ msg->address_lo = (u32) (addr); ++ msg->data = data->hwirq; ++} ++ ++static struct irq_chip gicv2m_irq_chip = { ++ .name = "GICv2m", ++ .irq_mask = irq_chip_mask_parent, ++ .irq_unmask = irq_chip_unmask_parent, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_set_affinity = gicv2m_set_affinity, ++ .irq_compose_msi_msg = gicv2m_compose_msi_msg, ++}; ++ ++static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, ++ unsigned int virq, ++ irq_hw_number_t hwirq) ++{ ++ struct of_phandle_args args; ++ struct irq_data *d; ++ int err; ++ ++ args.np = domain->parent->of_node; ++ args.args_count = 3; ++ args.args[0] = 0; ++ args.args[1] = hwirq - 32; ++ args.args[2] = IRQ_TYPE_EDGE_RISING; ++ ++ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); ++ if (err) ++ return err; ++ ++ /* Configure the interrupt line to be edge */ ++ d = irq_domain_get_irq_data(domain->parent, virq); ++ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); ++ return 0; ++} ++ ++static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) ++{ ++ int pos; ++ ++ pos = hwirq - v2m->spi_start; ++ if (pos < 0 || pos >= v2m->nr_spis) { ++ pr_err("Failed to teardown msi. 
Invalid hwirq %d\n", hwirq); ++ return; ++ } ++ ++ spin_lock(&v2m->msi_cnt_lock); ++ __clear_bit(pos, v2m->bm); ++ spin_unlock(&v2m->msi_cnt_lock); ++} ++ ++static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ struct v2m_data *v2m = domain->host_data; ++ int hwirq, offset, err = 0; ++ ++ spin_lock(&v2m->msi_cnt_lock); ++ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); ++ if (offset < v2m->nr_spis) ++ __set_bit(offset, v2m->bm); ++ else ++ err = -ENOSPC; ++ spin_unlock(&v2m->msi_cnt_lock); ++ ++ if (err) ++ return err; ++ ++ hwirq = v2m->spi_start + offset; ++ ++ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); ++ if (err) { ++ gicv2m_unalloc_msi(v2m, hwirq); ++ return err; ++ } ++ ++ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, ++ &gicv2m_irq_chip, v2m); ++ ++ return 0; ++} ++ ++static void gicv2m_irq_domain_free(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct v2m_data *v2m = irq_data_get_irq_chip_data(d); ++ ++ BUG_ON(nr_irqs != 1); ++ gicv2m_unalloc_msi(v2m, d->hwirq); ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++} ++ ++static const struct irq_domain_ops gicv2m_domain_ops = { ++ .alloc = gicv2m_irq_domain_alloc, ++ .free = gicv2m_irq_domain_free, ++}; ++ ++static bool is_msi_spi_valid(u32 base, u32 num) ++{ ++ if (base < V2M_MIN_SPI) { ++ pr_err("Invalid MSI base SPI (base:%u)\n", base); ++ return false; ++ } ++ ++ if ((num == 0) || (base + num > V2M_MAX_SPI)) { ++ pr_err("Number of SPIs (%u) exceed maximum (%u)\n", ++ num, V2M_MAX_SPI - V2M_MIN_SPI + 1); ++ return false; ++ } ++ ++ return true; ++} ++ ++static int __init gicv2m_init_one(struct device_node *node, ++ struct irq_domain *parent) ++{ ++ int ret; ++ struct v2m_data *v2m; ++ ++ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); ++ if (!v2m) { ++ pr_err("Failed to allocate struct v2m_data.\n"); ++ return -ENOMEM; ++ } ++ ++ ret = of_address_to_resource(node, 0, &v2m->res); ++ if (ret) { ++ pr_err("Failed to allocate v2m resource.\n"); ++ goto err_free_v2m; ++ } ++ ++ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); ++ if (!v2m->base) { ++ pr_err("Failed to map GICv2m resource\n"); ++ ret = -ENOMEM; ++ goto err_free_v2m; ++ } ++ ++ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) && ++ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) { ++ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n", ++ v2m->spi_start, v2m->nr_spis); ++ } else { ++ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); ++ ++ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer); ++ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer); ++ } ++ ++ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) { ++ ret = -EINVAL; ++ goto err_iounmap; ++ } ++ ++ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), ++ GFP_KERNEL); ++ if (!v2m->bm) { ++ ret = -ENOMEM; ++ goto err_iounmap; ++ } ++ ++ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m); ++ if (!v2m->domain) { ++ pr_err("Failed to create GICv2m domain\n"); ++ ret = -ENOMEM; ++ goto err_free_bm; ++ } ++ ++ v2m->domain->parent = parent; ++ v2m->mchip.of_node = node; ++ v2m->mchip.domain = pci_msi_create_irq_domain(node, ++ &gicv2m_msi_domain_info, ++ v2m->domain); ++ if (!v2m->mchip.domain) { ++ pr_err("Failed to create MSI domain\n"); ++ ret = -ENOMEM; ++ goto err_free_domains; ++ } ++ ++ spin_lock_init(&v2m->msi_cnt_lock); ++ ++ ret = 
of_pci_msi_chip_add(&v2m->mchip); ++ if (ret) { ++ pr_err("Failed to add msi_chip.\n"); ++ goto err_free_domains; ++ } ++ ++ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, ++ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, ++ v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); ++ ++ return 0; ++ ++err_free_domains: ++ if (v2m->mchip.domain) ++ irq_domain_remove(v2m->mchip.domain); ++ if (v2m->domain) ++ irq_domain_remove(v2m->domain); ++err_free_bm: ++ kfree(v2m->bm); ++err_iounmap: ++ iounmap(v2m->base); ++err_free_v2m: ++ kfree(v2m); ++ return ret; ++} ++ ++static struct of_device_id gicv2m_device_id[] = { ++ { .compatible = "arm,gic-v2m-frame", }, ++ {}, ++}; ++ ++int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) ++{ ++ int ret = 0; ++ struct device_node *child; ++ ++ for (child = of_find_matching_node(node, gicv2m_device_id); child; ++ child = of_find_matching_node(child, gicv2m_device_id)) { ++ if (!of_find_property(child, "msi-controller", NULL)) ++ continue; ++ ++ ret = gicv2m_init_one(child, parent); ++ if (ret) { ++ of_node_put(node); ++ break; ++ } ++ } ++ ++ return ret; ++} +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index 43c50ed..d689158 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -1293,7 +1293,8 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + + dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); + dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); +- return __its_msi_prepare(domain->parent, dev_alias.dev_id, dev, dev_alias.count, info); ++ return __its_msi_prepare(domain, dev_alias.dev_id, ++ dev, dev_alias.count, info); + } + + static struct msi_domain_ops its_pci_msi_ops = { +@@ -1535,13 +1536,14 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) + writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); + + if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { +- its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); ++ its->domain = irq_domain_add_tree(node, &its_domain_ops, its); + if (!its->domain) { + err = -ENOMEM; + goto out_free_tables; + } + + its->domain->parent = parent; ++ its->domain->bus_token = DOMAIN_BUS_NEXUS; + + its->msi_chip.domain = pci_msi_create_irq_domain(node, + &its_pci_msi_domain_info, +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +index 34feda3..fd8850d 100644 +--- a/drivers/irqchip/irq-gic-v3.c ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -238,7 +238,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + if (irq < 16) + return -EINVAL; + +- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ /* SPIs have restrictions on the supported types */ ++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + if (gic_irq_in_rdist(d)) { +@@ -249,9 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + rwp_wait = gic_dist_wait_for_rwp; + } + +- gic_configure_irq(irq, type, base, rwp_wait); +- +- return 0; ++ return gic_configure_irq(irq, type, base, rwp_wait); + } + + static u64 gic_mpidr_to_affinity(u64 mpidr) +@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + tlist |= 1 << (mpidr & 0xf); + + cpu = cpumask_next(cpu, mask); +- if (cpu == nr_cpu_ids) ++ if (cpu >= nr_cpu_ids) + goto out; + + mpidr = cpu_logical_map(cpu); +@@ -481,15 +481,19 @@ out: + return 
tlist; + } + ++#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ ++ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ ++ << ICC_SGI1R_AFFINITY_## level ##_SHIFT) ++ + static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) + { + u64 val; + +- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | +- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | +- irq << 24 | +- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | +- tlist); ++ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | ++ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | ++ irq << ICC_SGI1R_SGI_ID_SHIFT | ++ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | ++ tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +@@ -617,14 +621,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + /* PPIs */ + if (hw < 32) { + irq_set_percpu_devid(irq); +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_percpu_devid_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_percpu_devid_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + } + /* SPIs */ + if (hw >= 32 && hw < gic_data.irq_nr) { +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_fasteoi_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } + /* LPIs */ +@@ -667,9 +671,41 @@ static int gic_irq_domain_xlate(struct irq_domain *d, + return 0; + } + ++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *arg) ++{ ++ int i, ret; ++ irq_hw_number_t hwirq; ++ unsigned int type = IRQ_TYPE_NONE; ++ struct of_phandle_args *irq_data = arg; ++ ++ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, ++ irq_data->args_count, &hwirq, &type); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) ++ gic_irq_domain_map(domain, virq + i, hwirq + i); ++ ++ return 0; ++} ++ ++static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); ++ irq_set_handler(virq + i, NULL); ++ irq_domain_reset_irq_data(d); ++ } ++} ++ + static const struct irq_domain_ops gic_irq_domain_ops = { +- .map = gic_irq_domain_map, + .xlate = gic_irq_domain_xlate, ++ .alloc = gic_irq_domain_alloc, ++ .free = gic_irq_domain_free, + }; + + static int __init gic_of_init(struct device_node *node, struct device_node *parent) +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c +index 38493ff..ab0b1cb 100644 +--- a/drivers/irqchip/irq-gic.c ++++ b/drivers/irqchip/irq-gic.c +@@ -188,12 +188,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + { + void __iomem *base = gic_dist_base(d); + unsigned int gicirq = gic_irq(d); ++ int ret; + + /* Interrupt configuration for SGIs can't be changed */ + if (gicirq < 16) + return -EINVAL; + +- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ /* SPIs have restrictions on the supported types */ ++ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + raw_spin_lock(&irq_controller_lock); +@@ -201,11 +204,11 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + +- gic_configure_irq(gicirq, type, base, NULL); ++ ret = gic_configure_irq(gicirq, type, base, NULL); + + 
raw_spin_unlock(&irq_controller_lock); + +- return 0; ++ return ret; + } + + static int gic_retrigger(struct irq_data *d) +@@ -788,17 +791,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + { + if (hw < 32) { + irq_set_percpu_devid(irq); +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_percpu_devid_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_percpu_devid_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + } else { +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_fasteoi_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + + gic_routable_irq_domain_ops->map(d, irq, hw); + } +- irq_set_chip_data(irq, d->host_data); + return 0; + } + +@@ -858,6 +860,31 @@ static struct notifier_block gic_cpu_notifier = { + }; + #endif + ++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *arg) ++{ ++ int i, ret; ++ irq_hw_number_t hwirq; ++ unsigned int type = IRQ_TYPE_NONE; ++ struct of_phandle_args *irq_data = arg; ++ ++ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, ++ irq_data->args_count, &hwirq, &type); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) ++ gic_irq_domain_map(domain, virq + i, hwirq + i); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { ++ .xlate = gic_irq_domain_xlate, ++ .alloc = gic_irq_domain_alloc, ++ .free = irq_domain_free_irqs_top, ++}; ++ + static const struct irq_domain_ops gic_irq_domain_ops = { + .map = gic_irq_domain_map, + .unmap = gic_irq_domain_unmap, +@@ -948,18 +975,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, + gic_cpu_map[i] = 0xff; + + /* +- * For primary GICs, skip over SGIs. +- * For secondary GICs, skip over PPIs, too. +- */ +- if (gic_nr == 0 && (irq_start & 31) > 0) { +- hwirq_base = 16; +- if (irq_start != -1) +- irq_start = (irq_start & ~31) + 16; +- } else { +- hwirq_base = 32; +- } +- +- /* + * Find out how many interrupts are supported. + * The GIC only supports up to 1020 interrupt sources. + */ +@@ -969,10 +984,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, + gic_irqs = 1020; + gic->gic_irqs = gic_irqs; + +- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ ++ if (node) { /* DT case */ ++ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops; ++ ++ if (!of_property_read_u32(node, "arm,routable-irqs", ++ &nr_routable_irqs)) { ++ ops = &gic_irq_domain_ops; ++ gic_irqs = nr_routable_irqs; ++ } ++ ++ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic); ++ } else { /* Non-DT case */ ++ /* ++ * For primary GICs, skip over SGIs. ++ * For secondary GICs, skip over PPIs, too. 
++ */ ++ if (gic_nr == 0 && (irq_start & 31) > 0) { ++ hwirq_base = 16; ++ if (irq_start != -1) ++ irq_start = (irq_start & ~31) + 16; ++ } else { ++ hwirq_base = 32; ++ } ++ ++ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + +- if (of_property_read_u32(node, "arm,routable-irqs", +- &nr_routable_irqs)) { + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { +@@ -983,10 +1019,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, + + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); +- } else { +- gic->domain = irq_domain_add_linear(node, nr_routable_irqs, +- &gic_irq_domain_ops, +- gic); + } + + if (WARN_ON(!gic->domain)) +@@ -1037,6 +1069,10 @@ gic_of_init(struct device_node *node, struct device_node *parent) + irq = irq_of_parse_and_map(node, 0); + gic_cascade_irq(gic_cnt, irq); + } ++ ++ if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) ++ gicv2m_of_init(node, gic_data[gic_cnt].domain); ++ + gic_cnt++; + return 0; + } +diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c +index 9c8f833..5507a0c 100644 +--- a/drivers/irqchip/irq-hip04.c ++++ b/drivers/irqchip/irq-hip04.c +@@ -120,21 +120,24 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) + { + void __iomem *base = hip04_dist_base(d); + unsigned int irq = hip04_irq(d); ++ int ret; + + /* Interrupt configuration for SGIs can't be changed */ + if (irq < 16) + return -EINVAL; + +- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ /* SPIs have restrictions on the supported types */ ++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + raw_spin_lock(&irq_controller_lock); + +- gic_configure_irq(irq, type, base, NULL); ++ ret = gic_configure_irq(irq, type, base, NULL); + + raw_spin_unlock(&irq_controller_lock); + +- return 0; ++ return ret; + } + + #ifdef CONFIG_SMP +diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig +index 6d91c27..d6af99f 100644 +--- a/drivers/memory/Kconfig ++++ b/drivers/memory/Kconfig +@@ -83,6 +83,6 @@ config FSL_CORENET_CF + + config FSL_IFC + bool +- depends on FSL_SOC ++ depends on FSL_SOC || ARCH_LAYERSCAPE + + endif +diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c +index 3d5d792..1b182b1 100644 +--- a/drivers/memory/fsl_ifc.c ++++ b/drivers/memory/fsl_ifc.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -30,7 +31,9 @@ + #include + #include + #include +-#include ++#include ++#include ++#include + + struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + EXPORT_SYMBOL(fsl_ifc_ctrl_dev); +@@ -58,11 +61,11 @@ int fsl_ifc_find(phys_addr_t addr_base) + { + int i = 0; + +- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) ++ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) + return -ENODEV; + +- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { +- u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); ++ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { ++ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); + if (cspr & CSPR_V && (cspr & CSPR_BA) == + convert_ifc_address(addr_base)) + return i; +@@ -74,21 +77,21 @@ EXPORT_SYMBOL(fsl_ifc_find); + + static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) + { +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; + + /* + * Clear all the common status and event registers + */ +- if 
(in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) +- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); ++ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) ++ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); + + /* enable all error and events */ +- out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); ++ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en); + + /* enable all error and event interrupts */ +- out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); +- out_be32(&ifc->cm_erattr0, 0x0); +- out_be32(&ifc->cm_erattr1, 0x0); ++ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en); ++ ifc_out32(0x0, &ifc->cm_erattr0); ++ ifc_out32(0x0, &ifc->cm_erattr1); + + return 0; + } +@@ -103,7 +106,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) + irq_dispose_mapping(ctrl->nand_irq); + irq_dispose_mapping(ctrl->irq); + +- iounmap(ctrl->regs); ++ iounmap(ctrl->gregs); + + dev_set_drvdata(&dev->dev, NULL); + kfree(ctrl); +@@ -121,15 +124,15 @@ static DEFINE_SPINLOCK(nand_irq_lock); + + static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) + { +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + unsigned long flags; + u32 stat; + + spin_lock_irqsave(&nand_irq_lock, flags); + +- stat = in_be32(&ifc->ifc_nand.nand_evter_stat); ++ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat); + if (stat) { +- out_be32(&ifc->ifc_nand.nand_evter_stat, stat); ++ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat); + ctrl->nand_stat = stat; + wake_up(&ctrl->nand_wait); + } +@@ -156,21 +159,21 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) + static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) + { + struct fsl_ifc_ctrl *ctrl = data; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; + u32 err_axiid, err_srcid, status, cs_err, err_addr; + irqreturn_t ret = IRQ_NONE; + + /* read for chip select error */ +- cs_err = in_be32(&ifc->cm_evter_stat); ++ cs_err = ifc_in32(&ifc->cm_evter_stat); + if (cs_err) { + dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" + "any memory bank 0x%08X\n", cs_err); + /* clear the chip select error */ +- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); ++ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); + + /* read error attribute registers print the error information */ +- status = in_be32(&ifc->cm_erattr0); +- err_addr = in_be32(&ifc->cm_erattr1); ++ status = ifc_in32(&ifc->cm_erattr0); ++ err_addr = ifc_in32(&ifc->cm_erattr1); + + if (status & IFC_CM_ERATTR0_ERTYP_READ) + dev_err(ctrl->dev, "Read transaction error" +@@ -213,7 +216,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) + static int fsl_ifc_ctrl_probe(struct platform_device *dev) + { + int ret = 0; +- ++ int version, banks; ++ void __iomem *addr; + + dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); + +@@ -224,16 +228,41 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) + dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); + + /* IOMAP the entire IFC region */ +- fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); +- if (!fsl_ifc_ctrl_dev->regs) { ++ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); ++ if (!fsl_ifc_ctrl_dev->gregs) { + dev_err(&dev->dev, "failed to get memory region\n"); + ret = -ENODEV; + goto err; + } + ++ if (of_property_read_bool(dev->dev.of_node, "little-endian")) { ++ fsl_ifc_ctrl_dev->little_endian = true; ++ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE 
endian\n"); ++ } else { ++ fsl_ifc_ctrl_dev->little_endian = false; ++ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); ++ } ++ ++ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & ++ FSL_IFC_VERSION_MASK; ++ ++ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; ++ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", ++ version >> 24, (version >> 16) & 0xf, banks); ++ ++ fsl_ifc_ctrl_dev->version = version; ++ fsl_ifc_ctrl_dev->banks = banks; ++ ++ addr = fsl_ifc_ctrl_dev->gregs; ++ if (version >= FSL_IFC_VERSION_2_0_0) ++ addr += PGOFFSET_64K; ++ else ++ addr += PGOFFSET_4K; ++ fsl_ifc_ctrl_dev->rregs = addr; ++ + /* get the Controller level irq */ + fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); +- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { ++ if (fsl_ifc_ctrl_dev->irq == 0) { + dev_err(&dev->dev, "failed to get irq resource " + "for IFC\n"); + ret = -ENODEV; +diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c +index 9e21e4f..8f43ab8 100644 +--- a/drivers/mfd/vexpress-sysreg.c ++++ b/drivers/mfd/vexpress-sysreg.c +@@ -223,7 +223,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) + vexpress_config_set_master(vexpress_sysreg_get_master()); + + /* Confirm board type against DT property, if available */ +- if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { ++ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) { + u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER); + u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; + +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 10ecc0a..d356dbc 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -2402,6 +2402,10 @@ static const struct mmc_fixup blk_fixups[] = + * + * N.B. This doesn't affect SD cards. + */ ++ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, ++ MMC_QUIRK_BLK_NO_CMD23), ++ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, ++ MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, +diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig +index 1386065..b8c9b73 100644 +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -66,7 +66,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + has the effect of scrambling the addresses and formats of data + accessed in sizes other than the datum size. + +- This is the case for the Freescale eSDHC and Nintendo Wii SDHCI. ++ This is the case for the Nintendo Wii SDHCI. + + config MMC_SDHCI_PCI + tristate "SDHCI support on PCI bus" +@@ -130,8 +130,10 @@ config MMC_SDHCI_OF_ARASAN + config MMC_SDHCI_OF_ESDHC + tristate "SDHCI OF support for the Freescale eSDHC controller" + depends on MMC_SDHCI_PLTFM +- depends on PPC_OF +- select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER ++ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE ++ select MMC_SDHCI_IO_ACCESSORS ++ select FSL_SOC_DRIVERS ++ select FSL_GUTS + help + This selects the Freescale eSDHC controller support. 
+ +@@ -142,7 +144,7 @@ config MMC_SDHCI_OF_ESDHC + config MMC_SDHCI_OF_HLWD + tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" + depends on MMC_SDHCI_PLTFM +- depends on PPC_OF ++ depends on PPC + select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + help + This selects the Secure Digital Host Controller Interface (SDHCI) +diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h +index a870c42..f2baede 100644 +--- a/drivers/mmc/host/sdhci-esdhc.h ++++ b/drivers/mmc/host/sdhci-esdhc.h +@@ -21,16 +21,23 @@ + #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ + SDHCI_QUIRK_NO_BUSY_IRQ | \ + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ +- SDHCI_QUIRK_PIO_NEEDS_DELAY) ++ SDHCI_QUIRK_PIO_NEEDS_DELAY | \ ++ SDHCI_QUIRK_NO_HISPD_BIT) ++ ++#define ESDHC_PROCTL 0x28 + + #define ESDHC_SYSTEM_CONTROL 0x2c + #define ESDHC_CLOCK_MASK 0x0000fff0 + #define ESDHC_PREDIV_SHIFT 8 + #define ESDHC_DIVIDER_SHIFT 4 ++#define ESDHC_CLOCK_CRDEN 0x00000008 + #define ESDHC_CLOCK_PEREN 0x00000004 + #define ESDHC_CLOCK_HCKEN 0x00000002 + #define ESDHC_CLOCK_IPGEN 0x00000001 + ++#define ESDHC_PRESENT_STATE 0x24 ++#define ESDHC_CLOCK_STABLE 0x00000008 ++ + /* pltfm-specific */ + #define ESDHC_HOST_CONTROL_LE 0x20 + +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c +index 8872c85..4a4a693 100644 +--- a/drivers/mmc/host/sdhci-of-esdhc.c ++++ b/drivers/mmc/host/sdhci-of-esdhc.c +@@ -18,128 +18,334 @@ + #include + #include + #include ++#include ++#include + #include + #include "sdhci-pltfm.h" + #include "sdhci-esdhc.h" + + #define VENDOR_V_22 0x12 + #define VENDOR_V_23 0x13 +-static u32 esdhc_readl(struct sdhci_host *host, int reg) ++ ++struct sdhci_esdhc { ++ u8 vendor_ver; ++ u8 spec_ver; ++ u32 soc_ver; ++ u8 soc_rev; ++}; ++ ++/** ++ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register ++ * to make it compatible with SD spec. ++ * ++ * @host: pointer to sdhci_host ++ * @spec_reg: SD spec register address ++ * @value: 32bit eSDHC register value on spec_reg address ++ * ++ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC ++ * registers are 32 bits. There are differences in register size, register ++ * address, register function, bit position and function between eSDHC spec ++ * and SD spec. ++ * ++ * Return a fixed up register value ++ */ ++static u32 esdhc_readl_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value) + { ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; + u32 ret; + +- ret = in_be32(host->ioaddr + reg); + /* + * The bit of ADMA flag in eSDHC is not compatible with standard + * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is + * supported by eSDHC. + * And for many FSL eSDHC controller, the reset value of field +- * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, ++ * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA, + * only these vendor version is greater than 2.2/0x12 support ADMA. +- * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the +- * the verdor version number, oxFE is SDHCI_HOST_VERSION. 
+ */ +- if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { +- u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +- if (tmp > VENDOR_V_22) +- ret |= SDHCI_CAN_DO_ADMA2; ++ if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) { ++ if (esdhc->vendor_ver > VENDOR_V_22) { ++ ret = value | SDHCI_CAN_DO_ADMA2; ++ return ret; ++ } + } +- ++ ret = value; + return ret; + } + +-static u16 esdhc_readw(struct sdhci_host *host, int reg) ++static u16 esdhc_readw_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value) + { + u16 ret; +- int base = reg & ~0x3; +- int shift = (reg & 0x2) * 8; ++ int shift = (spec_reg & 0x2) * 8; + +- if (unlikely(reg == SDHCI_HOST_VERSION)) +- ret = in_be32(host->ioaddr + base) & 0xffff; ++ if (spec_reg == SDHCI_HOST_VERSION) ++ ret = value & 0xffff; + else +- ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; ++ ret = (value >> shift) & 0xffff; + return ret; + } + +-static u8 esdhc_readb(struct sdhci_host *host, int reg) ++static u8 esdhc_readb_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value) + { +- int base = reg & ~0x3; +- int shift = (reg & 0x3) * 8; +- u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; ++ u8 ret; ++ u8 dma_bits; ++ int shift = (spec_reg & 0x3) * 8; ++ ++ ret = (value >> shift) & 0xff; + + /* + * "DMA select" locates at offset 0x28 in SD specification, but on + * P5020 or P3041, it locates at 0x29. + */ +- if (reg == SDHCI_HOST_CONTROL) { +- u32 dma_bits; +- +- dma_bits = in_be32(host->ioaddr + reg); ++ if (spec_reg == SDHCI_HOST_CONTROL) { + /* DMA select is 22,23 bits in Protocol Control Register */ +- dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; +- ++ dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK; + /* fixup the result */ + ret &= ~SDHCI_CTRL_DMA_MASK; + ret |= dma_bits; + } +- + return ret; + } + +-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) ++/** ++ * esdhc_write*_fixup - Fixup the SD spec register value so that it could be ++ * written into eSDHC register. ++ * ++ * @host: pointer to sdhci_host ++ * @spec_reg: SD spec register address ++ * @value: 8/16/32bit SD spec register value that would be written ++ * @old_value: 32bit eSDHC register value on spec_reg address ++ * ++ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC ++ * registers are 32 bits. There are differences in register size, register ++ * address, register function, bit position and function between eSDHC spec ++ * and SD spec. ++ * ++ * Return a fixed up register value ++ */ ++static u32 esdhc_writel_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value, u32 old_value) + { ++ u32 ret; ++ + /* +- * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] +- * when SYSCTL[RSTD]) is set for some special operations. +- * No any impact other operation. ++ * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] ++ * when SYSCTL[RSTD] is set for some special operations. ++ * No any impact on other operation. 
+ */ +- if (reg == SDHCI_INT_ENABLE) +- val |= SDHCI_INT_BLK_GAP; +- sdhci_be32bs_writel(host, val, reg); ++ if (spec_reg == SDHCI_INT_ENABLE) ++ ret = value | SDHCI_INT_BLK_GAP; ++ else ++ ret = value; ++ ++ return ret; + } + +-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) ++static u32 esdhc_writew_fixup(struct sdhci_host *host, ++ int spec_reg, u16 value, u32 old_value) + { +- if (reg == SDHCI_BLOCK_SIZE) { ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ int shift = (spec_reg & 0x2) * 8; ++ u32 ret; ++ ++ switch (spec_reg) { ++ case SDHCI_TRANSFER_MODE: ++ /* ++ * Postpone this write, we must do it together with a ++ * command write that is down below. Return old value. ++ */ ++ pltfm_host->xfer_mode_shadow = value; ++ return old_value; ++ case SDHCI_COMMAND: ++ ret = (value << 16) | pltfm_host->xfer_mode_shadow; ++ return ret; ++ } ++ ++ ret = old_value & (~(0xffff << shift)); ++ ret |= (value << shift); ++ ++ if (spec_reg == SDHCI_BLOCK_SIZE) { + /* + * Two last DMA bits are reserved, and first one is used for + * non-standard blksz of 4096 bytes that we don't support + * yet. So clear the DMA boundary bits. + */ +- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); ++ ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0)); + } +- sdhci_be32bs_writew(host, val, reg); ++ return ret; + } + +-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) ++static u32 esdhc_writeb_fixup(struct sdhci_host *host, ++ int spec_reg, u8 value, u32 old_value) + { ++ u32 ret; ++ u32 dma_bits; ++ u8 tmp; ++ int shift = (spec_reg & 0x3) * 8; ++ ++ /* ++ * eSDHC doesn't have a standard power control register, so we do ++ * nothing here to avoid incorrect operation. ++ */ ++ if (spec_reg == SDHCI_POWER_CONTROL) ++ return old_value; + /* + * "DMA select" location is offset 0x28 in SD specification, but on + * P5020 or P3041, it's located at 0x29. + */ +- if (reg == SDHCI_HOST_CONTROL) { +- u32 dma_bits; +- ++ if (spec_reg == SDHCI_HOST_CONTROL) { + /* + * If host control register is not standard, exit + * this function + */ + if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) +- return; ++ return old_value; + + /* DMA select is 22,23 bits in Protocol Control Register */ +- dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; +- clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, +- dma_bits); +- val &= ~SDHCI_CTRL_DMA_MASK; +- val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; ++ dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5; ++ ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits; ++ tmp = (value & (~SDHCI_CTRL_DMA_MASK)) | ++ (old_value & SDHCI_CTRL_DMA_MASK); ++ ret = (ret & (~0xff)) | tmp; ++ ++ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */ ++ ret &= ~ESDHC_HOST_CONTROL_RES; ++ return ret; + } + +- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). 
*/ +- if (reg == SDHCI_HOST_CONTROL) +- val &= ~ESDHC_HOST_CONTROL_RES; +- sdhci_be32bs_writeb(host, val, reg); ++ ret = (old_value & (~(0xff << shift))) | (value << shift); ++ return ret; ++} ++ ++static u32 esdhc_be_readl(struct sdhci_host *host, int reg) ++{ ++ u32 ret; ++ u32 value; ++ ++ value = ioread32be(host->ioaddr + reg); ++ ret = esdhc_readl_fixup(host, reg, value); ++ ++ return ret; ++} ++ ++static u32 esdhc_le_readl(struct sdhci_host *host, int reg) ++{ ++ u32 ret; ++ u32 value; ++ ++ value = ioread32(host->ioaddr + reg); ++ ret = esdhc_readl_fixup(host, reg, value); ++ ++ return ret; ++} ++ ++static u16 esdhc_be_readw(struct sdhci_host *host, int reg) ++{ ++ u16 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_readw_fixup(host, reg, value); ++ return ret; ++} ++ ++static u16 esdhc_le_readw(struct sdhci_host *host, int reg) ++{ ++ u16 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_readw_fixup(host, reg, value); ++ return ret; ++} ++ ++static u8 esdhc_be_readb(struct sdhci_host *host, int reg) ++{ ++ u8 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_readb_fixup(host, reg, value); ++ return ret; ++} ++ ++static u8 esdhc_le_readb(struct sdhci_host *host, int reg) ++{ ++ u8 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_readb_fixup(host, reg, value); ++ return ret; ++} ++ ++static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg) ++{ ++ u32 value; ++ ++ value = esdhc_writel_fixup(host, reg, val, 0); ++ iowrite32be(value, host->ioaddr + reg); ++} ++ ++static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg) ++{ ++ u32 value; ++ ++ value = esdhc_writel_fixup(host, reg, val, 0); ++ iowrite32(value, host->ioaddr + reg); ++} ++ ++static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_writew_fixup(host, reg, val, value); ++ if (reg != SDHCI_TRANSFER_MODE) ++ iowrite32be(ret, host->ioaddr + base); ++} ++ ++static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_writew_fixup(host, reg, val, value); ++ if (reg != SDHCI_TRANSFER_MODE) ++ iowrite32(ret, host->ioaddr + base); ++} ++ ++static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_writeb_fixup(host, reg, val, value); ++ iowrite32be(ret, host->ioaddr + base); ++} ++ ++static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_writeb_fixup(host, reg, val, value); ++ iowrite32(ret, host->ioaddr + base); + } + + /* +@@ -149,37 +355,116 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) + * For Continue, apply soft reset for data(SYSCTL[RSTD]); + * and re-issue the entire read transaction from beginning. 
+ */ +-static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) ++static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) + { +- u32 tmp; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; + bool applicable; + dma_addr_t dmastart; + dma_addr_t dmanow; + +- tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +- + applicable = (intmask & SDHCI_INT_DATA_END) && +- (intmask & SDHCI_INT_BLK_GAP) && +- (tmp == VENDOR_V_23); +- if (!applicable) ++ (intmask & SDHCI_INT_BLK_GAP) && ++ (esdhc->vendor_ver == VENDOR_V_23); ++ if (applicable) { ++ ++ sdhci_reset(host, SDHCI_RESET_DATA); ++ host->data->error = 0; ++ dmastart = sg_dma_address(host->data->sg); ++ dmanow = dmastart + host->data->bytes_xfered; ++ /* ++ * Force update to the next DMA block boundary. ++ */ ++ dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + ++ SDHCI_DEFAULT_BOUNDARY_SIZE; ++ host->data->bytes_xfered = dmanow - dmastart; ++ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); ++ + return; ++ } + +- host->data->error = 0; +- dmastart = sg_dma_address(host->data->sg); +- dmanow = dmastart + host->data->bytes_xfered; + /* +- * Force update to the next DMA block boundary. ++ * Check for A-004388: eSDHC DMA might not stop if error ++ * occurs on system transaction ++ * Impact list: ++ * T4240-4160-R1.0 B4860-4420-R1.0-R2.0 P1010-1014-R1.0 ++ * P3041-R1.0-R2.0-R1.1 P2041-2040-R1.0-R1.1-R2.0 ++ * P5020-5010-R2.0-R1.0 P5040-5021-R2.0-R2.1 + */ +- dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + +- SDHCI_DEFAULT_BOUNDARY_SIZE; +- host->data->bytes_xfered = dmanow - dmastart; +- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); ++ if (!(((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) || ++ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)))) ++ return; ++ ++ sdhci_reset(host, SDHCI_RESET_DATA); ++ ++ if (host->flags & SDHCI_USE_ADMA) { ++ u32 mod, i, offset; ++ u8 *desc; ++ dma_addr_t addr; ++ struct scatterlist *sg; ++ __le32 *dataddr; ++ __le32 *cmdlen; ++ ++ /* ++ * If block count was enabled, in case read transfer there ++ * is no data was corrupted ++ */ ++ mod = sdhci_readl(host, SDHCI_TRANSFER_MODE); ++ if ((mod & SDHCI_TRNS_BLK_CNT_EN) && ++ (host->data->flags & MMC_DATA_READ)) ++ host->data->error = 0; ++ ++ BUG_ON(!host->data); ++ desc = host->adma_table; ++ for_each_sg(host->data->sg, sg, host->sg_count, i) { ++ addr = sg_dma_address(sg); ++ offset = (4 - (addr & 0x3)) & 0x3; ++ if (offset) ++ desc += 8; ++ desc += 8; ++ } ++ ++ /* ++ * Add an extra zero descriptor next to the ++ * terminating 
descriptor. ++ */ ++ desc += 8; ++ WARN_ON((desc - (u8 *)(host->adma_table)) > (128 * 2 + 1) * 4); ++ ++ dataddr = (__le32 __force *)(desc + 4); ++ cmdlen = (__le32 __force *)desc; ++ ++ cmdlen[0] = cpu_to_le32(0); ++ dataddr[0] = cpu_to_le32(0); ++ } ++ ++ if ((host->flags & SDHCI_USE_SDMA) && ++ (host->data->flags & MMC_DATA_READ)) ++ host->data->error = 0; ++ ++ return; + } + + static int esdhc_of_enable_dma(struct sdhci_host *host) + { +- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); ++ u32 value; ++ ++ value = sdhci_readl(host, ESDHC_DMA_SYSCTL); ++ value |= ESDHC_DMA_SNOOP; ++ sdhci_writel(host, value, ESDHC_DMA_SYSCTL); + return 0; + } + +@@ -199,15 +484,22 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) + + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + { +- int pre_div = 2; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; ++ int pre_div = 1; + int div = 1; + u32 temp; ++ u32 timeout; + + host->mmc->actual_clock = 0; + + if (clock == 0) + return; + ++ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ ++ if (esdhc->vendor_ver < VENDOR_V_23) ++ pre_div = 2; ++ + /* Workaround to reduce the clock frequency for p1010 esdhc */ + if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { + if (clock > 20000000) +@@ -218,7 +510,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +- | ESDHC_CLOCK_MASK); ++ | ESDHC_CLOCK_CRDEN | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) +@@ -229,7 +521,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); +- ++ host->mmc->actual_clock = host->max_clk / pre_div / div; + pre_div >>= 1; + div--; + +@@ -238,70 +530,117 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); +- mdelay(1); +-} + +-static void esdhc_of_platform_init(struct sdhci_host *host) +-{ +- u32 vvn; +- +- vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +- vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +- if (vvn == VENDOR_V_22) +- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; ++ /* Wait max 20 ms */ ++ timeout = 20; ++ while (!(sdhci_readl(host, ESDHC_PRESENT_STATE) & ESDHC_CLOCK_STABLE)) { ++ if (timeout == 0) { ++ pr_err("%s: Internal clock never stabilised.\n", ++ mmc_hostname(host->mmc)); ++ return; ++ } ++ timeout--; ++ mdelay(1); ++ } + +- if (vvn > VENDOR_V_22) +- host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; ++ temp |= ESDHC_CLOCK_CRDEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + } + + static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) + { + u32 ctrl; + ++ ctrl = sdhci_readl(host, ESDHC_PROCTL); ++ ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK); + switch (width) { + case MMC_BUS_WIDTH_8: +- ctrl = ESDHC_CTRL_8BITBUS; ++ ctrl |= ESDHC_CTRL_8BITBUS; + break; + + case MMC_BUS_WIDTH_4: +- ctrl = ESDHC_CTRL_4BITBUS; ++ ctrl |= ESDHC_CTRL_4BITBUS; + break; + + default: +- ctrl = 0; + break; + } + +- clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, +- ESDHC_CTRL_BUSWIDTH_MASK, ctrl); ++ 
sdhci_writel(host, ctrl, ESDHC_PROCTL); + } + +-static const struct sdhci_ops sdhci_esdhc_ops = { +- .read_l = esdhc_readl, +- .read_w = esdhc_readw, +- .read_b = esdhc_readb, +- .write_l = esdhc_writel, +- .write_w = esdhc_writew, +- .write_b = esdhc_writeb, +- .set_clock = esdhc_of_set_clock, +- .enable_dma = esdhc_of_enable_dma, +- .get_max_clock = esdhc_of_get_max_clock, +- .get_min_clock = esdhc_of_get_min_clock, +- .platform_init = esdhc_of_platform_init, +- .adma_workaround = esdhci_of_adma_workaround, +- .set_bus_width = esdhc_pltfm_set_bus_width, +- .reset = sdhci_reset, +- .set_uhs_signaling = sdhci_set_uhs_signaling, +-}; ++/* ++ * A-003980: SDHC: Glitch is generated on the card clock with software reset ++ * or clock divider change ++ * Workaround: ++ * A simple workaround is to disable the SD card clock before the software ++ * reset, and enable it when the module resumes normal operation. The Host ++ * and the SD card are in a master-slave relationship. The Host provides ++ * clock and control transfer across the interface. Therefore, any existing ++ * operation is discarded when the Host controller is reset. ++ */ ++static int esdhc_of_reset_workaround(struct sdhci_host *host, u8 mask) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; ++ bool disable_clk_before_reset = false; ++ u32 temp; + +-#ifdef CONFIG_PM ++ /* ++ * Check for A-003980 ++ * Impact list: ++ * T4240-4160-R1.0-R2.0 B4860-4420-R1.0-R2.0 P5040-5021-R1.0-R2.0-R2.1 ++ * P5020-5010-R1.0-R2.0 P3041-R1.0-R1.1-R2.0 P2041-2040-R1.0-R1.1-R2.0 ++ * P1010-1014-R1.0 ++ */ ++ if (((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) || ++ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)) || ++ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10))) ++ disable_clk_before_reset = true; ++ ++ if (disable_clk_before_reset && (mask & SDHCI_RESET_ALL)) { ++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); ++ temp &= ~ESDHC_CLOCK_CRDEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); ++ sdhci_reset(host, mask); ++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); ++ temp |= ESDHC_CLOCK_CRDEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); ++ return 1; ++ } ++ return 0; ++} ++ ++static void esdhc_reset(struct sdhci_host *host, u8 mask) ++{ ++ if (!esdhc_of_reset_workaround(host, mask)) ++ sdhci_reset(host, mask); + ++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++} ++ ++#ifdef CONFIG_PM + static u32 esdhc_proctl; + static int esdhc_of_suspend(struct device *dev) + { + struct 
sdhci_host *host = dev_get_drvdata(dev); + +- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); ++ esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL); + + return sdhci_suspend_host(host); + } +@@ -311,11 +650,8 @@ static int esdhc_of_resume(struct device *dev) + struct sdhci_host *host = dev_get_drvdata(dev); + int ret = sdhci_resume_host(host); + +- if (ret == 0) { +- /* Isn't this already done by sdhci_resume_host() ? --rmk */ +- esdhc_of_enable_dma(host); +- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); +- } ++ if (ret == 0) ++ sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); + + return ret; + } +@@ -329,30 +665,120 @@ static const struct dev_pm_ops esdhc_pmops = { + #define ESDHC_PMOPS NULL + #endif + +-static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { +- /* +- * card detection could be handled via GPIO +- * eSDHC cannot support End Attribute in NOP ADMA descriptor +- */ ++static const struct sdhci_ops sdhci_esdhc_be_ops = { ++ .read_l = esdhc_be_readl, ++ .read_w = esdhc_be_readw, ++ .read_b = esdhc_be_readb, ++ .write_l = esdhc_be_writel, ++ .write_w = esdhc_be_writew, ++ .write_b = esdhc_be_writeb, ++ .set_clock = esdhc_of_set_clock, ++ .enable_dma = esdhc_of_enable_dma, ++ .get_max_clock = esdhc_of_get_max_clock, ++ .get_min_clock = esdhc_of_get_min_clock, ++ .adma_workaround = esdhc_of_adma_workaround, ++ .set_bus_width = esdhc_pltfm_set_bus_width, ++ .reset = esdhc_reset, ++ .set_uhs_signaling = sdhci_set_uhs_signaling, ++}; ++ ++static const struct sdhci_ops sdhci_esdhc_le_ops = { ++ .read_l = esdhc_le_readl, ++ .read_w = esdhc_le_readw, ++ .read_b = esdhc_le_readb, ++ .write_l = esdhc_le_writel, ++ .write_w = esdhc_le_writew, ++ .write_b = esdhc_le_writeb, ++ .set_clock = esdhc_of_set_clock, ++ .enable_dma = esdhc_of_enable_dma, ++ .get_max_clock = esdhc_of_get_max_clock, ++ .get_min_clock = esdhc_of_get_min_clock, ++ .adma_workaround = esdhc_of_adma_workaround, ++ .set_bus_width = esdhc_pltfm_set_bus_width, ++ .reset = esdhc_reset, ++ .set_uhs_signaling = sdhci_set_uhs_signaling, ++}; ++ ++static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = { + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION + | SDHCI_QUIRK_NO_CARD_NO_RESET + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +- .ops = &sdhci_esdhc_ops, ++ .ops = &sdhci_esdhc_be_ops, + }; + ++static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = { ++ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION ++ | SDHCI_QUIRK_NO_CARD_NO_RESET ++ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, ++ .ops = &sdhci_esdhc_le_ops, ++}; ++ ++static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_pltfm_host *pltfm_host; ++ struct sdhci_esdhc *esdhc; ++ u16 host_ver; ++ u32 svr; ++ ++ pltfm_host = sdhci_priv(host); ++ esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc), ++ GFP_KERNEL); ++ pltfm_host->priv = esdhc; ++ ++ svr = guts_get_svr(); ++ esdhc->soc_ver = SVR_SOC_VER(svr); ++ esdhc->soc_rev = SVR_REV(svr); ++ ++ host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); ++ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> ++ SDHCI_VENDOR_VER_SHIFT; ++ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; ++} ++ + static int sdhci_esdhc_probe(struct platform_device *pdev) + { + struct sdhci_host *host; + struct device_node *np; ++ struct sdhci_pltfm_host *pltfm_host; ++ struct sdhci_esdhc *esdhc; + int ret; + +- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); ++ np = pdev->dev.of_node; ++ ++ if (of_get_property(np, 
"little-endian", NULL)) ++ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0); ++ else ++ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0); ++ + if (IS_ERR(host)) + return PTR_ERR(host); + ++ esdhc_init(pdev, host); ++ + sdhci_get_of_property(pdev); + +- np = pdev->dev.of_node; ++ pltfm_host = sdhci_priv(host); ++ esdhc = pltfm_host->priv; ++ if (esdhc->vendor_ver == VENDOR_V_22) ++ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; ++ ++ if (esdhc->vendor_ver > VENDOR_V_22) ++ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; ++ ++ if (of_device_is_compatible(np, "fsl,p5040-esdhc") || ++ of_device_is_compatible(np, "fsl,p5020-esdhc") || ++ of_device_is_compatible(np, "fsl,p4080-esdhc") || ++ of_device_is_compatible(np, "fsl,p1020-esdhc") || ++ of_device_is_compatible(np, "fsl,t1040-esdhc") || ++ of_device_is_compatible(np, "fsl,ls1021a-esdhc") || ++ of_device_is_compatible(np, "fsl,ls2080a-esdhc") || ++ of_device_is_compatible(np, "fsl,ls2085a-esdhc") || ++ of_device_is_compatible(np, "fsl,ls1043a-esdhc")) ++ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; ++ ++ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc")) ++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; ++ + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { + /* + * Freescale messed up with P2020 as it has a non-standard +@@ -362,13 +788,19 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) + } + + /* call to generic mmc_of_parse to support additional capabilities */ +- mmc_of_parse(host->mmc); ++ ret = mmc_of_parse(host->mmc); ++ if (ret) ++ goto err; ++ + mmc_of_parse_voltage(np, &host->ocr_mask); + + ret = sdhci_add_host(host); + if (ret) +- sdhci_pltfm_free(pdev); ++ goto err; + ++ return 0; ++ err: ++ sdhci_pltfm_free(pdev); + return ret; + } + +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 023c201..8af38a6 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -44,8 +44,6 @@ + + #define MAX_TUNING_LOOP 40 + +-#define ADMA_SIZE ((128 * 2 + 1) * 4) +- + static unsigned int debug_quirks = 0; + static unsigned int debug_quirks2; + +@@ -119,10 +117,17 @@ static void sdhci_dumpregs(struct sdhci_host *host) + pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_CONTROL2)); + +- if (host->flags & SDHCI_USE_ADMA) +- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", +- readl(host->ioaddr + SDHCI_ADMA_ERROR), +- readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); ++ if (host->flags & SDHCI_USE_ADMA) { ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", ++ readl(host->ioaddr + SDHCI_ADMA_ERROR), ++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), ++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); ++ else ++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", ++ readl(host->ioaddr + SDHCI_ADMA_ERROR), ++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); ++ } + + pr_debug(DRIVER_NAME ": ===========================================\n"); + } +@@ -231,6 +236,9 @@ static void sdhci_init(struct sdhci_host *host, int soft) + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | + SDHCI_INT_RESPONSE; + ++ if (host->flags & SDHCI_AUTO_CMD12) ++ host->ier |= SDHCI_INT_ACMD12ERR; ++ + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + +@@ -448,18 +456,26 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) + local_irq_restore(*flags); + } + +-static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) ++static void 
sdhci_adma_write_desc(struct sdhci_host *host, void *desc, ++ dma_addr_t addr, int len, unsigned cmd) + { +- __le32 *dataddr = (__le32 __force *)(desc + 4); +- __le16 *cmdlen = (__le16 __force *)desc; ++ struct sdhci_adma2_64_desc *dma_desc = desc; ++ ++ /* 32-bit and 64-bit descriptors have these members in same position */ ++ dma_desc->cmd = cpu_to_le16(cmd); ++ dma_desc->len = cpu_to_le16(len); ++ dma_desc->addr_lo = cpu_to_le32((u32)addr); + +- /* SDHCI specification says ADMA descriptors should be 4 byte +- * aligned, so using 16 or 32bit operations should be safe. */ ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32); ++} + +- cmdlen[0] = cpu_to_le16(cmd); +- cmdlen[1] = cpu_to_le16(len); ++static void sdhci_adma_mark_end(void *desc) ++{ ++ struct sdhci_adma2_64_desc *dma_desc = desc; + +- dataddr[0] = cpu_to_le32(addr); ++ /* 32-bit and 64-bit descriptors have 'cmd' in same position */ ++ dma_desc->cmd |= cpu_to_le16(ADMA2_END); + } + + static int sdhci_adma_table_pre(struct sdhci_host *host, +@@ -467,8 +483,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + { + int direction; + +- u8 *desc; +- u8 *align; ++ void *desc; ++ void *align; + dma_addr_t addr; + dma_addr_t align_addr; + int len, offset; +@@ -489,17 +505,17 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + direction = DMA_TO_DEVICE; + + host->align_addr = dma_map_single(mmc_dev(host->mmc), +- host->align_buffer, 128 * 4, direction); ++ host->align_buffer, host->align_buffer_sz, direction); + if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) + goto fail; +- BUG_ON(host->align_addr & 0x3); ++ BUG_ON(host->align_addr & host->align_mask); + + host->sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, direction); + if (host->sg_count == 0) + goto unmap_align; + +- desc = host->adma_desc; ++ desc = host->adma_table; + align = host->align_buffer; + + align_addr = host->align_addr; +@@ -515,24 +531,27 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + * the (up to three) bytes that screw up the + * alignment. + */ +- offset = (4 - (addr & 0x3)) & 0x3; ++ offset = (host->align_sz - (addr & host->align_mask)) & ++ host->align_mask; + if (offset) { + if (data->flags & MMC_DATA_WRITE) { + buffer = sdhci_kmap_atomic(sg, &flags); +- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); ++ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > ++ (PAGE_SIZE - offset)); + memcpy(align, buffer, offset); + sdhci_kunmap_atomic(buffer, &flags); + } + + /* tran, valid */ +- sdhci_set_adma_desc(desc, align_addr, offset, 0x21); ++ sdhci_adma_write_desc(host, desc, align_addr, offset, ++ ADMA2_TRAN_VALID); + + BUG_ON(offset > 65536); + +- align += 4; +- align_addr += 4; ++ align += host->align_sz; ++ align_addr += host->align_sz; + +- desc += 8; ++ desc += host->desc_sz; + + addr += offset; + len -= offset; +@@ -541,23 +560,23 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + BUG_ON(len > 65536); + + /* tran, valid */ +- sdhci_set_adma_desc(desc, addr, len, 0x21); +- desc += 8; ++ sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID); ++ desc += host->desc_sz; + + /* + * If this triggers then we have a calculation bug + * somewhere. 
:/ + */ +- WARN_ON((desc - host->adma_desc) > ADMA_SIZE); ++ WARN_ON((desc - host->adma_table) >= host->adma_table_sz); + } + + if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { + /* + * Mark the last descriptor as the terminating descriptor + */ +- if (desc != host->adma_desc) { +- desc -= 8; +- desc[0] |= 0x2; /* end */ ++ if (desc != host->adma_table) { ++ desc -= host->desc_sz; ++ sdhci_adma_mark_end(desc); + } + } else { + /* +@@ -565,7 +584,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + */ + + /* nop, end, valid */ +- sdhci_set_adma_desc(desc, 0, 0, 0x3); ++ sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID); + } + + /* +@@ -573,14 +592,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + */ + if (data->flags & MMC_DATA_WRITE) { + dma_sync_single_for_device(mmc_dev(host->mmc), +- host->align_addr, 128 * 4, direction); ++ host->align_addr, host->align_buffer_sz, direction); + } + + return 0; + + unmap_align: + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, +- 128 * 4, direction); ++ host->align_buffer_sz, direction); + fail: + return -EINVAL; + } +@@ -592,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, + + struct scatterlist *sg; + int i, size; +- u8 *align; ++ void *align; + char *buffer; + unsigned long flags; + bool has_unaligned; +@@ -603,12 +622,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host, + direction = DMA_TO_DEVICE; + + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, +- 128 * 4, direction); ++ host->align_buffer_sz, direction); + + /* Do a quick scan of the SG list for any unaligned mappings */ + has_unaligned = false; + for_each_sg(data->sg, sg, host->sg_count, i) +- if (sg_dma_address(sg) & 3) { ++ if (sg_dma_address(sg) & host->align_mask) { + has_unaligned = true; + break; + } +@@ -620,15 +639,17 @@ static void sdhci_adma_table_post(struct sdhci_host *host, + align = host->align_buffer; + + for_each_sg(data->sg, sg, host->sg_count, i) { +- if (sg_dma_address(sg) & 0x3) { +- size = 4 - (sg_dma_address(sg) & 0x3); ++ if (sg_dma_address(sg) & host->align_mask) { ++ size = host->align_sz - ++ (sg_dma_address(sg) & host->align_mask); + + buffer = sdhci_kmap_atomic(sg, &flags); +- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); ++ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > ++ (PAGE_SIZE - size)); + memcpy(buffer, align, size); + sdhci_kunmap_atomic(buffer, &flags); + +- align += 4; ++ align += host->align_sz; + } + } + } +@@ -822,6 +843,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) + } else { + sdhci_writel(host, host->adma_addr, + SDHCI_ADMA_ADDRESS); ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ sdhci_writel(host, ++ (u64)host->adma_addr >> 32, ++ SDHCI_ADMA_ADDRESS_HI); + } + } else { + int sg_cnt; +@@ -855,10 +880,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if ((host->flags & SDHCI_REQ_USE_DMA) && +- (host->flags & SDHCI_USE_ADMA)) +- ctrl |= SDHCI_CTRL_ADMA32; +- else ++ (host->flags & SDHCI_USE_ADMA)) { ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ ctrl |= SDHCI_CTRL_ADMA64; ++ else ++ ctrl |= SDHCI_CTRL_ADMA32; ++ } else { + ctrl |= SDHCI_CTRL_SDMA; ++ } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } + +@@ -1797,6 +1826,10 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, + ctrl |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + ++ /* Some 
controller need to do more when switching */ ++ if (host->ops->voltage_switch) ++ host->ops->voltage_switch(host); ++ + /* 1.8V regulator output should be stable within 5 ms */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ctrl & SDHCI_CTRL_VDD_180) +@@ -2250,7 +2283,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) + if (intmask & SDHCI_INT_TIMEOUT) + host->cmd->error = -ETIMEDOUT; + else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | +- SDHCI_INT_INDEX)) ++ SDHCI_INT_INDEX | SDHCI_INT_ACMD12ERR)) + host->cmd->error = -EILSEQ; + + if (host->cmd->error) { +@@ -2292,32 +2325,36 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) + } + + #ifdef CONFIG_MMC_DEBUG +-static void sdhci_show_adma_error(struct sdhci_host *host) ++static void sdhci_adma_show_error(struct sdhci_host *host) + { + const char *name = mmc_hostname(host->mmc); +- u8 *desc = host->adma_desc; +- __le32 *dma; +- __le16 *len; +- u8 attr; ++ void *desc = host->adma_table; + + sdhci_dumpregs(host); + + while (true) { +- dma = (__le32 *)(desc + 4); +- len = (__le16 *)(desc + 2); +- attr = *desc; +- +- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", +- name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); ++ struct sdhci_adma2_64_desc *dma_desc = desc; ++ ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", ++ name, desc, le32_to_cpu(dma_desc->addr_hi), ++ le32_to_cpu(dma_desc->addr_lo), ++ le16_to_cpu(dma_desc->len), ++ le16_to_cpu(dma_desc->cmd)); ++ else ++ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", ++ name, desc, le32_to_cpu(dma_desc->addr_lo), ++ le16_to_cpu(dma_desc->len), ++ le16_to_cpu(dma_desc->cmd)); + +- desc += 8; ++ desc += host->desc_sz; + +- if (attr & 2) ++ if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) + break; + } + } + #else +-static void sdhci_show_adma_error(struct sdhci_host *host) { } ++static void sdhci_adma_show_error(struct sdhci_host *host) { } + #endif + + static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) +@@ -2380,7 +2417,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) + host->data->error = -EILSEQ; + else if (intmask & SDHCI_INT_ADMA_ERROR) { + pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); +- sdhci_show_adma_error(host); ++ sdhci_adma_show_error(host); + host->data->error = -EIO; + if (host->ops->adma_workaround) + host->ops->adma_workaround(host, intmask); +@@ -2859,6 +2896,16 @@ int sdhci_add_host(struct sdhci_host *host) + host->flags &= ~SDHCI_USE_ADMA; + } + ++ /* ++ * It is assumed that a 64-bit capable device has set a 64-bit DMA mask ++ * and *must* do 64-bit DMA. A driver has the opportunity to change ++ * that during the first call to ->enable_dma(). Similarly ++ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to ++ * implement. ++ */ ++ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) ++ host->flags |= SDHCI_USE_64_BIT_DMA; ++ + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) { + if (host->ops->enable_dma(host)) { +@@ -2870,33 +2917,59 @@ int sdhci_add_host(struct sdhci_host *host) + } + } + ++ /* SDMA does not support 64-bit DMA */ ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ host->flags &= ~SDHCI_USE_SDMA; ++ + if (host->flags & SDHCI_USE_ADMA) { + /* +- * We need to allocate descriptors for all sg entries +- * (128) and potentially one alignment transfer for +- * each of those entries. 
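The "It is assumed that a 64-bit capable device has set a 64-bit DMA mask" comment added in sdhci_add_host() above names ->enable_dma() as the driver's chance to override that choice. A hedged sketch of such a hook — the function name and the board-specific condition are illustrative, not part of this patch:

static int example_sdhci_enable_dma(struct sdhci_host *host)
{
	/* A board-specific quirk could clear the flag first, e.g.
	 *	host->flags &= ~SDHCI_USE_64_BIT_DMA;
	 * before the mask is chosen below. */
	u64 mask = (host->flags & SDHCI_USE_64_BIT_DMA) ?
			DMA_BIT_MASK(64) : DMA_BIT_MASK(32);

	return dma_set_mask_and_coherent(mmc_dev(host->mmc), mask);
}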
++ * The DMA descriptor table size is calculated as the maximum ++ * number of segments times 2, to allow for an alignment ++ * descriptor for each segment, plus 1 for a nop end descriptor, ++ * all multipled by the descriptor size. + */ +- host->adma_desc = dma_alloc_coherent(mmc_dev(mmc), +- ADMA_SIZE, &host->adma_addr, +- GFP_KERNEL); +- host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); +- if (!host->adma_desc || !host->align_buffer) { +- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, +- host->adma_desc, host->adma_addr); ++ if (host->flags & SDHCI_USE_64_BIT_DMA) { ++ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * ++ SDHCI_ADMA2_64_DESC_SZ; ++ host->align_buffer_sz = SDHCI_MAX_SEGS * ++ SDHCI_ADMA2_64_ALIGN; ++ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; ++ host->align_sz = SDHCI_ADMA2_64_ALIGN; ++ host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; ++ } else { ++ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * ++ SDHCI_ADMA2_32_DESC_SZ; ++ host->align_buffer_sz = SDHCI_MAX_SEGS * ++ SDHCI_ADMA2_32_ALIGN; ++ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; ++ host->align_sz = SDHCI_ADMA2_32_ALIGN; ++ host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; ++ } ++ host->adma_table = dma_alloc_coherent(mmc_dev(mmc), ++ host->adma_table_sz, ++ &host->adma_addr, ++ GFP_KERNEL); ++ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); ++ if (!host->adma_table || !host->align_buffer) { ++ if (host->adma_table) ++ dma_free_coherent(mmc_dev(mmc), ++ host->adma_table_sz, ++ host->adma_table, ++ host->adma_addr); + kfree(host->align_buffer); + pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; +- host->adma_desc = NULL; ++ host->adma_table = NULL; + host->align_buffer = NULL; +- } else if (host->adma_addr & 3) { ++ } else if (host->adma_addr & host->align_mask) { + pr_warn("%s: unable to allocate aligned ADMA descriptor\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; +- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, +- host->adma_desc, host->adma_addr); ++ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, ++ host->adma_table, host->adma_addr); + kfree(host->align_buffer); +- host->adma_desc = NULL; ++ host->adma_table = NULL; + host->align_buffer = NULL; + } + } +@@ -2995,7 +3068,8 @@ int sdhci_add_host(struct sdhci_host *host) + /* Auto-CMD23 stuff only works in ADMA or PIO. */ + if ((host->version >= SDHCI_SPEC_300) && + ((host->flags & SDHCI_USE_ADMA) || +- !(host->flags & SDHCI_USE_SDMA))) { ++ !(host->flags & SDHCI_USE_SDMA)) && ++ !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { + host->flags |= SDHCI_AUTO_CMD23; + DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); + } else { +@@ -3152,13 +3226,14 @@ int sdhci_add_host(struct sdhci_host *host) + SDHCI_MAX_CURRENT_MULTIPLIER; + } + +- /* If OCR set by external regulators, use it instead */ ++ /* If OCR set by host, use it instead. */ ++ if (host->ocr_mask) ++ ocr_avail = host->ocr_mask; ++ ++ /* If OCR set by external regulators, give it highest prio. */ + if (mmc->ocr_avail) + ocr_avail = mmc->ocr_avail; + +- if (host->ocr_mask) +- ocr_avail &= host->ocr_mask; +- + mmc->ocr_avail = ocr_avail; + mmc->ocr_avail_sdio = ocr_avail; + if (host->ocr_avail_sdio) +@@ -3185,11 +3260,11 @@ int sdhci_add_host(struct sdhci_host *host) + * can do scatter/gather or not. 
+ */ + if (host->flags & SDHCI_USE_ADMA) +- mmc->max_segs = 128; ++ mmc->max_segs = SDHCI_MAX_SEGS; + else if (host->flags & SDHCI_USE_SDMA) + mmc->max_segs = 1; + else /* PIO */ +- mmc->max_segs = 128; ++ mmc->max_segs = SDHCI_MAX_SEGS; + + /* + * Maximum number of sectors in one transfer. Limited by DMA boundary +@@ -3287,7 +3362,8 @@ int sdhci_add_host(struct sdhci_host *host) + + pr_info("%s: SDHCI controller on %s [%s] using %s\n", + mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), +- (host->flags & SDHCI_USE_ADMA) ? "ADMA" : ++ (host->flags & SDHCI_USE_ADMA) ? ++ (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : + (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); + + sdhci_enable_card_detection(host); +@@ -3355,12 +3431,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) + if (!IS_ERR(mmc->supply.vqmmc)) + regulator_disable(mmc->supply.vqmmc); + +- if (host->adma_desc) +- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, +- host->adma_desc, host->adma_addr); ++ if (host->adma_table) ++ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, ++ host->adma_table, host->adma_addr); + kfree(host->align_buffer); + +- host->adma_desc = NULL; ++ host->adma_table = NULL; + host->align_buffer = NULL; + } + +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index 31896a7..5220f36 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -227,6 +227,7 @@ + /* 55-57 reserved */ + + #define SDHCI_ADMA_ADDRESS 0x58 ++#define SDHCI_ADMA_ADDRESS_HI 0x5C + + /* 60-FB reserved */ + +@@ -266,6 +267,46 @@ + #define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) + #define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) + ++/* ADMA2 32-bit DMA descriptor size */ ++#define SDHCI_ADMA2_32_DESC_SZ 8 ++ ++/* ADMA2 32-bit DMA alignment */ ++#define SDHCI_ADMA2_32_ALIGN 4 ++ ++/* ADMA2 32-bit descriptor */ ++struct sdhci_adma2_32_desc { ++ __le16 cmd; ++ __le16 len; ++ __le32 addr; ++} __packed __aligned(SDHCI_ADMA2_32_ALIGN); ++ ++/* ADMA2 64-bit DMA descriptor size */ ++#define SDHCI_ADMA2_64_DESC_SZ 12 ++ ++/* ADMA2 64-bit DMA alignment */ ++#define SDHCI_ADMA2_64_ALIGN 8 ++ ++/* ++ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte ++ * aligned. ++ */ ++struct sdhci_adma2_64_desc { ++ __le16 cmd; ++ __le16 len; ++ __le32 addr_lo; ++ __le32 addr_hi; ++} __packed __aligned(4); ++ ++#define ADMA2_TRAN_VALID 0x21 ++#define ADMA2_NOP_END_VALID 0x3 ++#define ADMA2_END 0x2 ++ ++/* ++ * Maximum segments assuming a 512KiB maximum requisition size and a minimum ++ * 4KiB page size. 
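Plugging SDHCI_MAX_SEGS = 128 (512 KiB of request split into 4 KiB pages, per the comment above) into the sizing formulas added to sdhci_add_host() gives concrete numbers; the fragment below only re-derives those formulas and is not part of the patch:

#include <stdio.h>

#define SDHCI_MAX_SEGS		128
#define SDHCI_ADMA2_32_DESC_SZ	8
#define SDHCI_ADMA2_64_DESC_SZ	12
#define SDHCI_ADMA2_32_ALIGN	4
#define SDHCI_ADMA2_64_ALIGN	8

int main(void)
{
	/* One descriptor per segment, one alignment descriptor per segment,
	 * plus a single trailing nop/end descriptor. */
	printf("32-bit table: %d bytes\n",
	       (SDHCI_MAX_SEGS * 2 + 1) * SDHCI_ADMA2_32_DESC_SZ);	/* 2056 */
	printf("64-bit table: %d bytes\n",
	       (SDHCI_MAX_SEGS * 2 + 1) * SDHCI_ADMA2_64_DESC_SZ);	/* 3084 */
	printf("32-bit align: %d bytes\n",
	       SDHCI_MAX_SEGS * SDHCI_ADMA2_32_ALIGN);			/* 512 */
	printf("64-bit align: %d bytes\n",
	       SDHCI_MAX_SEGS * SDHCI_ADMA2_64_ALIGN);			/* 1024 */
	return 0;
}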
++ */ ++#define SDHCI_MAX_SEGS 128 ++ + struct sdhci_ops { + #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS + u32 (*read_l)(struct sdhci_host *host, int reg); +@@ -296,6 +337,7 @@ struct sdhci_ops { + void (*adma_workaround)(struct sdhci_host *host, u32 intmask); + void (*platform_init)(struct sdhci_host *host); + void (*card_event)(struct sdhci_host *host); ++ void (*voltage_switch)(struct sdhci_host *host); + }; + + #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig +index dd10646..34ce759 100644 +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -429,7 +429,7 @@ config MTD_NAND_FSL_ELBC + + config MTD_NAND_FSL_IFC + tristate "NAND support for Freescale IFC controller" +- depends on MTD_NAND && FSL_SOC ++ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) + select FSL_IFC + select MEMORY + help +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 2338124..c8be272 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -31,7 +31,6 @@ + #include + #include + +-#define FSL_IFC_V1_1_0 0x01010000 + #define ERR_BYTE 0xFF /* Value returned for read + bytes when read failed */ + #define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait +@@ -234,13 +233,13 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) + struct nand_chip *chip = mtd->priv; + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + int buf_num; + + ifc_nand_ctrl->page = page_addr; + /* Program ROW0/COL0 */ +- iowrite32be(page_addr, &ifc->ifc_nand.row0); +- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); ++ ifc_out32(page_addr, &ifc->ifc_nand.row0); ++ ifc_out32((oob ? 
IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); + + buf_num = page_addr & priv->bufnum_mask; + +@@ -297,28 +296,28 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; + struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + u32 eccstat[4]; + int i; + + /* set the chip select for NAND Transaction */ +- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT, +- &ifc->ifc_nand.nand_csel); ++ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT, ++ &ifc->ifc_nand.nand_csel); + + dev_vdbg(priv->dev, + "%s: fir0=%08x fcr0=%08x\n", + __func__, +- ioread32be(&ifc->ifc_nand.nand_fir0), +- ioread32be(&ifc->ifc_nand.nand_fcr0)); ++ ifc_in32(&ifc->ifc_nand.nand_fir0), ++ ifc_in32(&ifc->ifc_nand.nand_fcr0)); + + ctrl->nand_stat = 0; + + /* start read/write seq */ +- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); ++ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); + + /* wait for command complete flag or timeout */ + wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, +- IFC_TIMEOUT_MSECS * HZ/1000); ++ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); + + /* ctrl->nand_stat will be updated from IRQ context */ + if (!ctrl->nand_stat) +@@ -337,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + int sector_end = sector + chip->ecc.steps - 1; + + for (i = sector / 4; i <= sector_end / 4; i++) +- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]); ++ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); + + for (i = sector; i <= sector_end; i++) { + errors = check_read_ecc(mtd, ctrl, eccstat, i); +@@ -373,37 +372,37 @@ static void fsl_ifc_do_read(struct nand_chip *chip, + { + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + + /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ + if (mtd->writesize > 512) { +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | +- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); +- +- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | +- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | ++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | ++ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); ++ ++ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | ++ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), ++ &ifc->ifc_nand.nand_fcr0); + } else { +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | ++ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), ++ 
&ifc->ifc_nand.nand_fir0); ++ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); + + if (oob) +- iowrite32be(NAND_CMD_READOOB << +- IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(NAND_CMD_READOOB << ++ IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); + else +- iowrite32be(NAND_CMD_READ0 << +- IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(NAND_CMD_READ0 << ++ IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); + } + } + +@@ -413,7 +412,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + struct nand_chip *chip = mtd->priv; + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + + /* clear the read buffer */ + ifc_nand_ctrl->read_bytes = 0; +@@ -423,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + switch (command) { + /* READ0 read the entire buffer to use hardware ECC. */ + case NAND_CMD_READ0: +- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, 0, page_addr, 0); + + ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; +@@ -438,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + + /* READOOB reads only the OOB because no ECC is performed. */ + case NAND_CMD_READOOB: +- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, column, page_addr, 1); + + ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; +@@ -454,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + if (command == NAND_CMD_PARAM) + timing = IFC_FIR_OP_RBCD; + +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | +- (timing << IFC_NAND_FIR0_OP2_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(column, &ifc->ifc_nand.row3); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | ++ (timing << IFC_NAND_FIR0_OP2_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(column, &ifc->ifc_nand.row3); + + /* + * although currently it's 8 bytes for READID, we always read + * the maximum 256 bytes(for PARAM) + */ +- iowrite32be(256, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(256, &ifc->ifc_nand.nand_fbcr); + ifc_nand_ctrl->read_bytes = 256; + + set_addr(mtd, 0, 0, 0); +@@ -481,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + + /* ERASE2 uses the block and page address from ERASE1 */ + case NAND_CMD_ERASE2: +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), +- &ifc->ifc_nand.nand_fir0); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), ++ &ifc->ifc_nand.nand_fir0); + +- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | +- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | ++ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), ++ &ifc->ifc_nand.nand_fcr0); + +- iowrite32be(0, 
&ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); + ifc_nand_ctrl->read_bytes = 0; + fsl_ifc_run_command(mtd); + return; +@@ -507,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) | + (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT); + +- iowrite32be( +- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | +- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be( +- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | +- (IFC_FIR_OP_RDSTAT << +- IFC_NAND_FIR1_OP6_SHIFT) | +- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), +- &ifc->ifc_nand.nand_fir1); ++ ifc_out32( ++ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | ++ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | ++ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32( ++ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | ++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) | ++ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), ++ &ifc->ifc_nand.nand_fir1); + } else { + nand_fcr0 = ((NAND_CMD_PAGEPROG << + IFC_NAND_FCR0_CMD1_SHIFT) | +@@ -528,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + (NAND_CMD_STATUS << + IFC_NAND_FCR0_CMD3_SHIFT)); + +- iowrite32be( ++ ifc_out32( + (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | + (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | + (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT), + &ifc->ifc_nand.nand_fir0); +- iowrite32be( +- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | +- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | +- (IFC_FIR_OP_RDSTAT << +- IFC_NAND_FIR1_OP7_SHIFT) | +- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), +- &ifc->ifc_nand.nand_fir1); ++ ifc_out32( ++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | ++ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | ++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) | ++ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), ++ &ifc->ifc_nand.nand_fir1); + + if (column >= mtd->writesize) + nand_fcr0 |= +@@ -556,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + column -= mtd->writesize; + ifc_nand_ctrl->oob = 1; + } +- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0); + set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob); + return; + } +@@ -564,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ + case NAND_CMD_PAGEPROG: { + if (ifc_nand_ctrl->oob) { +- iowrite32be(ifc_nand_ctrl->index - +- ifc_nand_ctrl->column, +- &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(ifc_nand_ctrl->index - ++ ifc_nand_ctrl->column, ++ &ifc->ifc_nand.nand_fbcr); + } else { +- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); + } + + fsl_ifc_run_command(mtd); + return; + } + +- case NAND_CMD_STATUS: +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(1, 
&ifc->ifc_nand.nand_fbcr); ++ case NAND_CMD_STATUS: { ++ void __iomem *addr; ++ ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, 0, 0, 0); + ifc_nand_ctrl->read_bytes = 1; + +@@ -591,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + * The chip always seems to report that it is + * write-protected, even when it is not. + */ ++ addr = ifc_nand_ctrl->addr; + if (chip->options & NAND_BUSWIDTH_16) +- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP); ++ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr); + else +- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP); ++ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr); + return; ++ } + + case NAND_CMD_RESET: +- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); + fsl_ifc_run_command(mtd); + return; + +@@ -659,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd) + */ + if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { + offset = ifc_nand_ctrl->index++; +- return in_8(ifc_nand_ctrl->addr + offset); ++ return ifc_in8(ifc_nand_ctrl->addr + offset); + } + + dev_err(priv->dev, "%s: beyond end of buffer\n", __func__); +@@ -681,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd) + * next byte. + */ + if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { +- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); ++ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); + ifc_nand_ctrl->index += 2; + return (uint8_t) data; + } +@@ -723,22 +724,22 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) + { + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + u32 nand_fsr; + + /* Use READ_STATUS command, but wait for the device to be ready */ +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(1, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, 0, 0, 0); + ifc_nand_ctrl->read_bytes = 1; + + fsl_ifc_run_command(mtd); + +- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr); ++ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); + + /* + * The chip always seems to report that it is +@@ -825,67 +826,72 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) + static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) + { + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; ++ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; + uint32_t csor = 0, csor_8k = 0, csor_ext 
= 0; + uint32_t cs = priv->bank; + + /* Save CSOR and CSOR_ext */ +- csor = ioread32be(&ifc->csor_cs[cs].csor); +- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext); ++ csor = ifc_in32(&ifc_global->csor_cs[cs].csor); ++ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); + + /* chage PageSize 8K and SpareSize 1K*/ + csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; +- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor); +- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext); ++ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); ++ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); + + /* READID */ +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(0x0, &ifc->ifc_nand.row3); ++ &ifc_runtime->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc_runtime->ifc_nand.nand_fcr0); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); + +- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); + + /* Program ROW0/COL0 */ +- iowrite32be(0x0, &ifc->ifc_nand.row0); +- iowrite32be(0x0, &ifc->ifc_nand.col0); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); + + /* set the chip select for NAND Transaction */ +- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); ++ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, ++ &ifc_runtime->ifc_nand.nand_csel); + + /* start read seq */ +- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); ++ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, ++ &ifc_runtime->ifc_nand.nandseq_strt); + + /* wait for command complete flag or timeout */ + wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, +- IFC_TIMEOUT_MSECS * HZ/1000); ++ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); + + if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) + printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); + + /* Restore CSOR and CSOR_ext */ +- iowrite32be(csor, &ifc->csor_cs[cs].csor); +- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext); ++ ifc_out32(csor, &ifc_global->csor_cs[cs].csor); ++ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); + } + + static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + { + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; ++ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; + struct nand_chip *chip = &priv->chip; + struct nand_ecclayout *layout; +- u32 csor, ver; ++ u32 csor; + + /* Fill in fsl_ifc_mtd structure */ + priv->mtd.priv = chip; +- priv->mtd.owner = THIS_MODULE; ++ priv->mtd.dev.parent = priv->dev; + + /* fill in nand_chip structure */ + /* set up function call table */ +- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) ++ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) ++ & CSPR_PORT_SIZE_16) + chip->read_byte = fsl_ifc_read_byte16; + else + chip->read_byte = fsl_ifc_read_byte; +@@ -899,13 +905,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + chip->bbt_td = &bbt_main_descr; + chip->bbt_md = &bbt_mirror_descr; + +- iowrite32be(0x0, &ifc->ifc_nand.ncfgr); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); + + /* set up nand options */ + chip->bbt_options = NAND_BBT_USE_FLASH; + 
chip->options = NAND_NO_SUBPAGE_WRITE; + +- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { ++ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) ++ & CSPR_PORT_SIZE_16) { + chip->read_byte = fsl_ifc_read_byte16; + chip->options |= NAND_BUSWIDTH_16; + } else { +@@ -918,7 +925,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + chip->ecc.read_page = fsl_ifc_read_page; + chip->ecc.write_page = fsl_ifc_write_page; + +- csor = ioread32be(&ifc->csor_cs[priv->bank].csor); ++ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); + + /* Hardware generates ECC per 512 Bytes */ + chip->ecc.size = 512; +@@ -984,8 +991,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + chip->ecc.mode = NAND_ECC_SOFT; + } + +- ver = ioread32be(&ifc->ifc_rev); +- if (ver == FSL_IFC_V1_1_0) ++ if (ctrl->version == FSL_IFC_VERSION_1_1_0) + fsl_ifc_sram_init(priv); + + return 0; +@@ -1005,10 +1011,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) + return 0; + } + +-static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, ++static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank, + phys_addr_t addr) + { +- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr); ++ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); + + if (!(cspr & CSPR_V)) + return 0; +@@ -1022,7 +1028,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex); + + static int fsl_ifc_nand_probe(struct platform_device *dev) + { +- struct fsl_ifc_regs __iomem *ifc; ++ struct fsl_ifc_runtime __iomem *ifc; + struct fsl_ifc_mtd *priv; + struct resource res; + static const char *part_probe_types[] +@@ -1033,9 +1039,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) + struct mtd_part_parser_data ppdata; + + ppdata.of_node = dev->dev.of_node; +- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) ++ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) + return -ENODEV; +- ifc = fsl_ifc_ctrl_dev->regs; ++ ifc = fsl_ifc_ctrl_dev->rregs; + + /* get, allocate and map the memory resource */ + ret = of_address_to_resource(node, 0, &res); +@@ -1045,12 +1051,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) + } + + /* find which chip select it is connected to */ +- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) { +- if (match_bank(ifc, bank, res.start)) ++ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { ++ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) + break; + } + +- if (bank >= FSL_IFC_BANK_COUNT) { ++ if (bank >= fsl_ifc_ctrl_dev->banks) { + dev_err(&dev->dev, "%s: address did not match any chip selects\n", + __func__); + return -ENODEV; +@@ -1094,16 +1100,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) + + dev_set_drvdata(priv->dev, priv); + +- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN | +- IFC_NAND_EVTER_EN_FTOER_EN | +- IFC_NAND_EVTER_EN_WPER_EN, +- &ifc->ifc_nand.nand_evter_en); ++ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN | ++ IFC_NAND_EVTER_EN_FTOER_EN | ++ IFC_NAND_EVTER_EN_WPER_EN, ++ &ifc->ifc_nand.nand_evter_en); + + /* enable NAND Machine Interrupts */ +- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN | +- IFC_NAND_EVTER_INTR_FTOERIR_EN | +- IFC_NAND_EVTER_INTR_WPERIR_EN, +- &ifc->ifc_nand.nand_evter_intr_en); ++ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN | ++ IFC_NAND_EVTER_INTR_FTOERIR_EN | ++ IFC_NAND_EVTER_INTR_WPERIR_EN, ++ &ifc->ifc_nand.nand_evter_intr_en); + priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); + if (!priv->mtd.name) { + ret = -ENOMEM; +@@ -1163,6 +1169,7 @@ static const struct of_device_id 
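The wholesale iowrite32be()/ioread32be() to ifc_out32()/ifc_in32() substitution in this file only pays off if the accessors choose the byte order at run time: the IFC block is big-endian on PowerPC SoCs but little-endian on ARM Layerscape parts, hence the new ARCH_LAYERSCAPE Kconfig dependency above. A sketch of the assumed accessor shape — the little_endian flag lives in the shared fsl_ifc header, not in this patch:

static inline u32 ifc_in32(void __iomem *addr)
{
	return fsl_ifc_ctrl_dev->little_endian ?
			ioread32(addr) : ioread32be(addr);
}

static inline void ifc_out32(u32 val, void __iomem *addr)
{
	if (fsl_ifc_ctrl_dev->little_endian)
		iowrite32(val, addr);
	else
		iowrite32be(val, addr);
}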
fsl_ifc_nand_match[] = { + }, + {} + }; ++MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match); + + static struct platform_driver fsl_ifc_nand_driver = { + .driver = { +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index a4a7396..0359cfd 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -86,11 +86,11 @@ + #include + #include + #include ++#include + + #include + #ifdef CONFIG_PPC + #include +-#include + #endif + #include + #include +@@ -1720,8 +1720,10 @@ static void gfar_configure_serdes(struct net_device *dev) + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ +- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) ++ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { ++ put_device(&tbiphy->dev); + return; ++ } + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig +index 2973c60..cdc9f8a 100644 +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -65,6 +65,11 @@ config VITESSE_PHY + ---help--- + Currently supports the vsc8244 + ++config TERANETICS_PHY ++ tristate "Drivers for the Teranetics PHYs" ++ ---help--- ++ Currently supports the Teranetics TN2020 ++ + config SMSC_PHY + tristate "Drivers for SMSC PHYs" + ---help--- +@@ -124,8 +129,8 @@ config MICREL_PHY + Supports the KSZ9021, VSC8201, KS8001 PHYs. + + config FIXED_PHY +- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" +- depends on PHYLIB=y ++ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" ++ depends on PHYLIB + ---help--- + Adds the platform "fixed" MDIO Bus to cover the boards that use + PHYs that are not connected to the real MDIO bus. +@@ -207,6 +212,11 @@ config MDIO_BUS_MUX_MMIOREG + the FPGA's registers. + + Currently, only 8-bit registers are supported. ++config FSL_10GBASE_KR ++ tristate "Support for 10GBASE-KR on Freescale XFI interface" ++ depends on OF_MDIO ++ help ++ This module provides a driver for Freescale XFI's 10GBASE-KR. 
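The gianfar hunk above matters because the TBI PHY lookup earlier in gfar_configure_serdes() takes a reference on the PHY's struct device; once the early-return path exists, every exit has to drop it. Sketched with the surrounding lookup filled in from context (it is not shown in this excerpt, so treat it as illustrative):

struct phy_device *tbiphy = of_phy_find_device(priv->tbi_node);

if (!tbiphy)
	return;

if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
	put_device(&tbiphy->dev);	/* balance the reference from the lookup */
	return;
}

/* ... program the TBI PHY; the normal path must drop the reference too. */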
+ + config MDIO_BCM_UNIMAC + tristate "Broadcom UniMAC MDIO bus controller" +diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile +index b5c8f9f..8ad4ac6 100644 +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -10,6 +10,7 @@ obj-$(CONFIG_CICADA_PHY) += cicada.o + obj-$(CONFIG_LXT_PHY) += lxt.o + obj-$(CONFIG_QSEMI_PHY) += qsemi.o + obj-$(CONFIG_SMSC_PHY) += smsc.o ++obj-$(CONFIG_TERANETICS_PHY) += teranetics.o + obj-$(CONFIG_VITESSE_PHY) += vitesse.o + obj-$(CONFIG_BROADCOM_PHY) += broadcom.o + obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o +@@ -18,7 +19,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o + obj-$(CONFIG_ICPLUS_PHY) += icplus.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o +-obj-$(CONFIG_FIXED_PHY) += fixed.o ++obj-$(CONFIG_FIXED_PHY) += fixed_phy.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o + obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o + obj-$(CONFIG_NATIONAL_PHY) += national.o +@@ -32,6 +33,7 @@ obj-$(CONFIG_AMD_PHY) += amd.o + obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o + obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o + obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o ++obj-$(CONFIG_FSL_10GBASE_KR) += fsl_10gkr.o + obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o + obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o + obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o +diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c +index fdc1b41..a4f0886 100644 +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -307,6 +307,8 @@ static struct phy_driver at803x_driver[] = { + .flags = PHY_HAS_INTERRUPT, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, ++ .ack_interrupt = at803x_ack_interrupt, ++ .config_intr = at803x_config_intr, + .driver = { + .owner = THIS_MODULE, + }, +@@ -326,6 +328,8 @@ static struct phy_driver at803x_driver[] = { + .flags = PHY_HAS_INTERRUPT, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, ++ .ack_interrupt = at803x_ack_interrupt, ++ .config_intr = at803x_config_intr, + .driver = { + .owner = THIS_MODULE, + }, +diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c +deleted file mode 100644 +index 47872ca..0000000 +--- a/drivers/net/phy/fixed.c ++++ /dev/null +@@ -1,336 +0,0 @@ +-/* +- * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) +- * +- * Author: Vitaly Bordug +- * Anton Vorontsov +- * +- * Copyright (c) 2006-2007 MontaVista Software, Inc. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; either version 2 of the License, or (at your +- * option) any later version. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define MII_REGS_NUM 29 +- +-struct fixed_mdio_bus { +- int irqs[PHY_MAX_ADDR]; +- struct mii_bus *mii_bus; +- struct list_head phys; +-}; +- +-struct fixed_phy { +- int addr; +- u16 regs[MII_REGS_NUM]; +- struct phy_device *phydev; +- struct fixed_phy_status status; +- int (*link_update)(struct net_device *, struct fixed_phy_status *); +- struct list_head node; +-}; +- +-static struct platform_device *pdev; +-static struct fixed_mdio_bus platform_fmb = { +- .phys = LIST_HEAD_INIT(platform_fmb.phys), +-}; +- +-static int fixed_phy_update_regs(struct fixed_phy *fp) +-{ +- u16 bmsr = BMSR_ANEGCAPABLE; +- u16 bmcr = 0; +- u16 lpagb = 0; +- u16 lpa = 0; +- +- if (fp->status.duplex) { +- bmcr |= BMCR_FULLDPLX; +- +- switch (fp->status.speed) { +- case 1000: +- bmsr |= BMSR_ESTATEN; +- bmcr |= BMCR_SPEED1000; +- lpagb |= LPA_1000FULL; +- break; +- case 100: +- bmsr |= BMSR_100FULL; +- bmcr |= BMCR_SPEED100; +- lpa |= LPA_100FULL; +- break; +- case 10: +- bmsr |= BMSR_10FULL; +- lpa |= LPA_10FULL; +- break; +- default: +- pr_warn("fixed phy: unknown speed\n"); +- return -EINVAL; +- } +- } else { +- switch (fp->status.speed) { +- case 1000: +- bmsr |= BMSR_ESTATEN; +- bmcr |= BMCR_SPEED1000; +- lpagb |= LPA_1000HALF; +- break; +- case 100: +- bmsr |= BMSR_100HALF; +- bmcr |= BMCR_SPEED100; +- lpa |= LPA_100HALF; +- break; +- case 10: +- bmsr |= BMSR_10HALF; +- lpa |= LPA_10HALF; +- break; +- default: +- pr_warn("fixed phy: unknown speed\n"); +- return -EINVAL; +- } +- } +- +- if (fp->status.link) +- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; +- +- if (fp->status.pause) +- lpa |= LPA_PAUSE_CAP; +- +- if (fp->status.asym_pause) +- lpa |= LPA_PAUSE_ASYM; +- +- fp->regs[MII_PHYSID1] = 0; +- fp->regs[MII_PHYSID2] = 0; +- +- fp->regs[MII_BMSR] = bmsr; +- fp->regs[MII_BMCR] = bmcr; +- fp->regs[MII_LPA] = lpa; +- fp->regs[MII_STAT1000] = lpagb; +- +- return 0; +-} +- +-static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) +-{ +- struct fixed_mdio_bus *fmb = bus->priv; +- struct fixed_phy *fp; +- +- if (reg_num >= MII_REGS_NUM) +- return -1; +- +- /* We do not support emulating Clause 45 over Clause 22 register reads +- * return an error instead of bogus data. +- */ +- switch (reg_num) { +- case MII_MMD_CTRL: +- case MII_MMD_DATA: +- return -1; +- default: +- break; +- } +- +- list_for_each_entry(fp, &fmb->phys, node) { +- if (fp->addr == phy_addr) { +- /* Issue callback if user registered it. */ +- if (fp->link_update) { +- fp->link_update(fp->phydev->attached_dev, +- &fp->status); +- fixed_phy_update_regs(fp); +- } +- return fp->regs[reg_num]; +- } +- } +- +- return 0xFFFF; +-} +- +-static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, +- u16 val) +-{ +- return 0; +-} +- +-/* +- * If something weird is required to be done with link/speed, +- * network driver is able to assign a function to implement this. +- * May be useful for PHY's that need to be software-driven. 
+- */ +-int fixed_phy_set_link_update(struct phy_device *phydev, +- int (*link_update)(struct net_device *, +- struct fixed_phy_status *)) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct fixed_phy *fp; +- +- if (!link_update || !phydev || !phydev->bus) +- return -EINVAL; +- +- list_for_each_entry(fp, &fmb->phys, node) { +- if (fp->addr == phydev->addr) { +- fp->link_update = link_update; +- fp->phydev = phydev; +- return 0; +- } +- } +- +- return -ENOENT; +-} +-EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); +- +-int fixed_phy_add(unsigned int irq, int phy_addr, +- struct fixed_phy_status *status) +-{ +- int ret; +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct fixed_phy *fp; +- +- fp = kzalloc(sizeof(*fp), GFP_KERNEL); +- if (!fp) +- return -ENOMEM; +- +- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); +- +- fmb->irqs[phy_addr] = irq; +- +- fp->addr = phy_addr; +- fp->status = *status; +- +- ret = fixed_phy_update_regs(fp); +- if (ret) +- goto err_regs; +- +- list_add_tail(&fp->node, &fmb->phys); +- +- return 0; +- +-err_regs: +- kfree(fp); +- return ret; +-} +-EXPORT_SYMBOL_GPL(fixed_phy_add); +- +-void fixed_phy_del(int phy_addr) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct fixed_phy *fp, *tmp; +- +- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { +- if (fp->addr == phy_addr) { +- list_del(&fp->node); +- kfree(fp); +- return; +- } +- } +-} +-EXPORT_SYMBOL_GPL(fixed_phy_del); +- +-static int phy_fixed_addr; +-static DEFINE_SPINLOCK(phy_fixed_addr_lock); +- +-struct phy_device *fixed_phy_register(unsigned int irq, +- struct fixed_phy_status *status, +- struct device_node *np) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct phy_device *phy; +- int phy_addr; +- int ret; +- +- /* Get the next available PHY address, up to PHY_MAX_ADDR */ +- spin_lock(&phy_fixed_addr_lock); +- if (phy_fixed_addr == PHY_MAX_ADDR) { +- spin_unlock(&phy_fixed_addr_lock); +- return ERR_PTR(-ENOSPC); +- } +- phy_addr = phy_fixed_addr++; +- spin_unlock(&phy_fixed_addr_lock); +- +- ret = fixed_phy_add(PHY_POLL, phy_addr, status); +- if (ret < 0) +- return ERR_PTR(ret); +- +- phy = get_phy_device(fmb->mii_bus, phy_addr, false); +- if (!phy || IS_ERR(phy)) { +- fixed_phy_del(phy_addr); +- return ERR_PTR(-EINVAL); +- } +- +- of_node_get(np); +- phy->dev.of_node = np; +- +- ret = phy_device_register(phy); +- if (ret) { +- phy_device_free(phy); +- of_node_put(np); +- fixed_phy_del(phy_addr); +- return ERR_PTR(ret); +- } +- +- return phy; +-} +- +-static int __init fixed_mdio_bus_init(void) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- int ret; +- +- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); +- if (IS_ERR(pdev)) { +- ret = PTR_ERR(pdev); +- goto err_pdev; +- } +- +- fmb->mii_bus = mdiobus_alloc(); +- if (fmb->mii_bus == NULL) { +- ret = -ENOMEM; +- goto err_mdiobus_reg; +- } +- +- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); +- fmb->mii_bus->name = "Fixed MDIO Bus"; +- fmb->mii_bus->priv = fmb; +- fmb->mii_bus->parent = &pdev->dev; +- fmb->mii_bus->read = &fixed_mdio_read; +- fmb->mii_bus->write = &fixed_mdio_write; +- fmb->mii_bus->irq = fmb->irqs; +- +- ret = mdiobus_register(fmb->mii_bus); +- if (ret) +- goto err_mdiobus_alloc; +- +- return 0; +- +-err_mdiobus_alloc: +- mdiobus_free(fmb->mii_bus); +-err_mdiobus_reg: +- platform_device_unregister(pdev); +-err_pdev: +- return ret; +-} +-module_init(fixed_mdio_bus_init); +- +-static void __exit fixed_mdio_bus_exit(void) +-{ +- struct fixed_mdio_bus *fmb = 
&platform_fmb; +- struct fixed_phy *fp, *tmp; +- +- mdiobus_unregister(fmb->mii_bus); +- mdiobus_free(fmb->mii_bus); +- platform_device_unregister(pdev); +- +- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { +- list_del(&fp->node); +- kfree(fp); +- } +-} +-module_exit(fixed_mdio_bus_exit); +- +-MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); +-MODULE_AUTHOR("Vitaly Bordug"); +-MODULE_LICENSE("GPL"); +diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c +new file mode 100644 +index 0000000..88b8194 +--- /dev/null ++++ b/drivers/net/phy/fixed_phy.c +@@ -0,0 +1,370 @@ ++/* ++ * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) ++ * ++ * Author: Vitaly Bordug ++ * Anton Vorontsov ++ * ++ * Copyright (c) 2006-2007 MontaVista Software, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MII_REGS_NUM 29 ++ ++struct fixed_mdio_bus { ++ int irqs[PHY_MAX_ADDR]; ++ struct mii_bus *mii_bus; ++ struct list_head phys; ++}; ++ ++struct fixed_phy { ++ int addr; ++ u16 regs[MII_REGS_NUM]; ++ struct phy_device *phydev; ++ struct fixed_phy_status status; ++ int (*link_update)(struct net_device *, struct fixed_phy_status *); ++ struct list_head node; ++}; ++ ++static struct platform_device *pdev; ++static struct fixed_mdio_bus platform_fmb = { ++ .phys = LIST_HEAD_INIT(platform_fmb.phys), ++}; ++ ++static int fixed_phy_update_regs(struct fixed_phy *fp) ++{ ++ u16 bmsr = BMSR_ANEGCAPABLE; ++ u16 bmcr = 0; ++ u16 lpagb = 0; ++ u16 lpa = 0; ++ ++ if (fp->status.duplex) { ++ bmcr |= BMCR_FULLDPLX; ++ ++ switch (fp->status.speed) { ++ case 10000: ++ break; ++ case 1000: ++ bmsr |= BMSR_ESTATEN; ++ bmcr |= BMCR_SPEED1000; ++ lpagb |= LPA_1000FULL; ++ break; ++ case 100: ++ bmsr |= BMSR_100FULL; ++ bmcr |= BMCR_SPEED100; ++ lpa |= LPA_100FULL; ++ break; ++ case 10: ++ bmsr |= BMSR_10FULL; ++ lpa |= LPA_10FULL; ++ break; ++ default: ++ pr_warn("fixed phy: unknown speed\n"); ++ return -EINVAL; ++ } ++ } else { ++ switch (fp->status.speed) { ++ case 10000: ++ break; ++ case 1000: ++ bmsr |= BMSR_ESTATEN; ++ bmcr |= BMCR_SPEED1000; ++ lpagb |= LPA_1000HALF; ++ break; ++ case 100: ++ bmsr |= BMSR_100HALF; ++ bmcr |= BMCR_SPEED100; ++ lpa |= LPA_100HALF; ++ break; ++ case 10: ++ bmsr |= BMSR_10HALF; ++ lpa |= LPA_10HALF; ++ break; ++ default: ++ pr_warn("fixed phy: unknown speed\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (fp->status.link) ++ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; ++ ++ if (fp->status.pause) ++ lpa |= LPA_PAUSE_CAP; ++ ++ if (fp->status.asym_pause) ++ lpa |= LPA_PAUSE_ASYM; ++ ++ fp->regs[MII_PHYSID1] = 0; ++ fp->regs[MII_PHYSID2] = 0; ++ ++ fp->regs[MII_BMSR] = bmsr; ++ fp->regs[MII_BMCR] = bmcr; ++ fp->regs[MII_LPA] = lpa; ++ fp->regs[MII_STAT1000] = lpagb; ++ ++ return 0; ++} ++ ++static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) ++{ ++ struct fixed_mdio_bus *fmb = bus->priv; ++ struct fixed_phy *fp; ++ ++ if (reg_num >= MII_REGS_NUM) ++ return -1; ++ ++ /* We do not support emulating Clause 45 over Clause 22 register reads ++ * return an error instead of bogus data. 
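The new fixed_phy.c keeps the old fixed.c entry points, so existing callers are unaffected. A typical (hypothetical) consumer that fabricates a 1 Gb/s full-duplex link for a device-tree node would look roughly like this, using only functions defined in this file:

struct fixed_phy_status status = {
	.link   = 1,
	.speed  = 1000,
	.duplex = 1,
};
struct phy_device *phy;

phy = fixed_phy_register(PHY_POLL, &status, np);	/* np: device-tree node obtained by the caller */
if (IS_ERR(phy))
	return PTR_ERR(phy);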
++ */ ++ switch (reg_num) { ++ case MII_MMD_CTRL: ++ case MII_MMD_DATA: ++ return -1; ++ default: ++ break; ++ } ++ ++ list_for_each_entry(fp, &fmb->phys, node) { ++ if (fp->addr == phy_addr) { ++ /* Issue callback if user registered it. */ ++ if (fp->link_update) { ++ fp->link_update(fp->phydev->attached_dev, ++ &fp->status); ++ fixed_phy_update_regs(fp); ++ } ++ return fp->regs[reg_num]; ++ } ++ } ++ ++ return 0xFFFF; ++} ++ ++static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, ++ u16 val) ++{ ++ return 0; ++} ++ ++/* ++ * If something weird is required to be done with link/speed, ++ * network driver is able to assign a function to implement this. ++ * May be useful for PHY's that need to be software-driven. ++ */ ++int fixed_phy_set_link_update(struct phy_device *phydev, ++ int (*link_update)(struct net_device *, ++ struct fixed_phy_status *)) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp; ++ ++ if (!phydev || !phydev->bus) ++ return -EINVAL; ++ ++ list_for_each_entry(fp, &fmb->phys, node) { ++ if (fp->addr == phydev->addr) { ++ fp->link_update = link_update; ++ fp->phydev = phydev; ++ return 0; ++ } ++ } ++ ++ return -ENOENT; ++} ++EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); ++ ++int fixed_phy_update_state(struct phy_device *phydev, ++ const struct fixed_phy_status *status, ++ const struct fixed_phy_status *changed) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp; ++ ++ if (!phydev || !phydev->bus) ++ return -EINVAL; ++ ++ list_for_each_entry(fp, &fmb->phys, node) { ++ if (fp->addr == phydev->addr) { ++#define _UPD(x) if (changed->x) \ ++ fp->status.x = status->x ++ _UPD(link); ++ _UPD(speed); ++ _UPD(duplex); ++ _UPD(pause); ++ _UPD(asym_pause); ++#undef _UPD ++ fixed_phy_update_regs(fp); ++ return 0; ++ } ++ } ++ ++ return -ENOENT; ++} ++EXPORT_SYMBOL(fixed_phy_update_state); ++ ++int fixed_phy_add(unsigned int irq, int phy_addr, ++ struct fixed_phy_status *status) ++{ ++ int ret; ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp; ++ ++ fp = kzalloc(sizeof(*fp), GFP_KERNEL); ++ if (!fp) ++ return -ENOMEM; ++ ++ memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); ++ ++ fmb->irqs[phy_addr] = irq; ++ ++ fp->addr = phy_addr; ++ fp->status = *status; ++ ++ ret = fixed_phy_update_regs(fp); ++ if (ret) ++ goto err_regs; ++ ++ list_add_tail(&fp->node, &fmb->phys); ++ ++ return 0; ++ ++err_regs: ++ kfree(fp); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(fixed_phy_add); ++ ++void fixed_phy_del(int phy_addr) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp, *tmp; ++ ++ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { ++ if (fp->addr == phy_addr) { ++ list_del(&fp->node); ++ kfree(fp); ++ return; ++ } ++ } ++} ++EXPORT_SYMBOL_GPL(fixed_phy_del); ++ ++static int phy_fixed_addr; ++static DEFINE_SPINLOCK(phy_fixed_addr_lock); ++ ++struct phy_device *fixed_phy_register(unsigned int irq, ++ struct fixed_phy_status *status, ++ struct device_node *np) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct phy_device *phy; ++ int phy_addr; ++ int ret; ++ ++ /* Get the next available PHY address, up to PHY_MAX_ADDR */ ++ spin_lock(&phy_fixed_addr_lock); ++ if (phy_fixed_addr == PHY_MAX_ADDR) { ++ spin_unlock(&phy_fixed_addr_lock); ++ return ERR_PTR(-ENOSPC); ++ } ++ phy_addr = phy_fixed_addr++; ++ spin_unlock(&phy_fixed_addr_lock); ++ ++ ret = fixed_phy_add(PHY_POLL, phy_addr, status); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ ++ phy = get_phy_device(fmb->mii_bus, 
phy_addr, false); ++ if (!phy || IS_ERR(phy)) { ++ fixed_phy_del(phy_addr); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ of_node_get(np); ++ phy->dev.of_node = np; ++ ++ ret = phy_device_register(phy); ++ if (ret) { ++ phy_device_free(phy); ++ of_node_put(np); ++ fixed_phy_del(phy_addr); ++ return ERR_PTR(ret); ++ } ++ ++ return phy; ++} ++EXPORT_SYMBOL_GPL(fixed_phy_register); ++ ++static int __init fixed_mdio_bus_init(void) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ int ret; ++ ++ pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); ++ if (IS_ERR(pdev)) { ++ ret = PTR_ERR(pdev); ++ goto err_pdev; ++ } ++ ++ fmb->mii_bus = mdiobus_alloc(); ++ if (fmb->mii_bus == NULL) { ++ ret = -ENOMEM; ++ goto err_mdiobus_reg; ++ } ++ ++ snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); ++ fmb->mii_bus->name = "Fixed MDIO Bus"; ++ fmb->mii_bus->priv = fmb; ++ fmb->mii_bus->parent = &pdev->dev; ++ fmb->mii_bus->read = &fixed_mdio_read; ++ fmb->mii_bus->write = &fixed_mdio_write; ++ fmb->mii_bus->irq = fmb->irqs; ++ ++ ret = mdiobus_register(fmb->mii_bus); ++ if (ret) ++ goto err_mdiobus_alloc; ++ ++ return 0; ++ ++err_mdiobus_alloc: ++ mdiobus_free(fmb->mii_bus); ++err_mdiobus_reg: ++ platform_device_unregister(pdev); ++err_pdev: ++ return ret; ++} ++module_init(fixed_mdio_bus_init); ++ ++static void __exit fixed_mdio_bus_exit(void) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp, *tmp; ++ ++ mdiobus_unregister(fmb->mii_bus); ++ mdiobus_free(fmb->mii_bus); ++ platform_device_unregister(pdev); ++ ++ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { ++ list_del(&fp->node); ++ kfree(fp); ++ } ++} ++module_exit(fixed_mdio_bus_exit); ++ ++MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); ++MODULE_AUTHOR("Vitaly Bordug"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index 225c033..969a198 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -50,6 +50,7 @@ + #define MII_M1011_PHY_SCR 0x10 + #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 + ++#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16 + #define MII_M1145_PHY_EXT_SR 0x1b + #define MII_M1145_PHY_EXT_CR 0x14 + #define MII_M1145_RGMII_RX_DELAY 0x0080 +@@ -495,6 +496,16 @@ static int m88e1111_config_init(struct phy_device *phydev) + err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + if (err < 0) + return err; ++ ++ /* make sure copper is selected */ ++ err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE); ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, ++ err & (~0xff)); ++ if (err < 0) ++ return err; + } + + if (phydev->interface == PHY_INTERFACE_MODE_RTBI) { +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index 50051f2..accd605 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -288,8 +288,11 @@ int mdiobus_register(struct mii_bus *bus) + + error: + while (--i >= 0) { +- if (bus->phy_map[i]) +- device_unregister(&bus->phy_map[i]->dev); ++ struct phy_device *phydev = bus->phy_map[i]; ++ if (phydev) { ++ phy_device_remove(phydev); ++ phy_device_free(phydev); ++ } + } + device_del(&bus->dev); + return err; +@@ -305,9 +308,11 @@ void mdiobus_unregister(struct mii_bus *bus) + + device_del(&bus->dev); + for (i = 0; i < PHY_MAX_ADDR; i++) { +- if (bus->phy_map[i]) +- device_unregister(&bus->phy_map[i]->dev); +- bus->phy_map[i] = NULL; ++ struct phy_device *phydev = bus->phy_map[i]; ++ if (phydev) { ++ phy_device_remove(phydev); 
++ phy_device_free(phydev); ++ } + } + } + EXPORT_SYMBOL(mdiobus_unregister); +@@ -421,6 +426,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) + { + struct phy_device *phydev = to_phy_device(dev); + struct phy_driver *phydrv = to_phy_driver(drv); ++ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); ++ int i; + + if (of_driver_match_device(dev, drv)) + return 1; +@@ -428,8 +435,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) + if (phydrv->match_phy_device) + return phydrv->match_phy_device(phydev); + +- return (phydrv->phy_id & phydrv->phy_id_mask) == +- (phydev->phy_id & phydrv->phy_id_mask); ++ if (phydev->is_c45) { ++ for (i = 1; i < num_ids; i++) { ++ if (!(phydev->c45_ids.devices_in_package & (1 << i))) ++ continue; ++ ++ if ((phydrv->phy_id & phydrv->phy_id_mask) == ++ (phydev->c45_ids.device_ids[i] & ++ phydrv->phy_id_mask)) ++ return 1; ++ } ++ return 0; ++ } else { ++ return (phydrv->phy_id & phydrv->phy_id_mask) == ++ (phydev->phy_id & phydrv->phy_id_mask); ++ } + } + + #ifdef CONFIG_PM +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index 91d6d03..840075e 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -768,6 +768,7 @@ void phy_state_machine(struct work_struct *work) + container_of(dwork, struct phy_device, state_queue); + bool needs_aneg = false, do_suspend = false, do_resume = false; + int err = 0; ++ int old_link; + + mutex_lock(&phydev->lock); + +@@ -814,6 +815,9 @@ void phy_state_machine(struct work_struct *work) + needs_aneg = true; + break; + case PHY_NOLINK: ++ if (phy_interrupt_is_valid(phydev)) ++ break; ++ + err = phy_read_status(phydev); + if (err) + break; +@@ -851,11 +855,18 @@ void phy_state_machine(struct work_struct *work) + phydev->adjust_link(phydev->attached_dev); + break; + case PHY_RUNNING: +- /* Only register a CHANGE if we are +- * polling or ignoring interrupts ++ /* Only register a CHANGE if we are polling or ignoring ++ * interrupts and link changed since latest checking. + */ +- if (!phy_interrupt_is_valid(phydev)) +- phydev->state = PHY_CHANGELINK; ++ if (!phy_interrupt_is_valid(phydev)) { ++ old_link = phydev->link; ++ err = phy_read_status(phydev); ++ if (err) ++ break; ++ ++ if (old_link != phydev->link) ++ phydev->state = PHY_CHANGELINK; ++ } + break; + case PHY_CHANGELINK: + err = phy_read_status(phydev); +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 70a0d88..07b1aa9 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -205,6 +205,37 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, + } + EXPORT_SYMBOL(phy_device_create); + ++/* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. ++ * @bus: the target MII bus ++ * @addr: PHY address on the MII bus ++ * @dev_addr: MMD address in the PHY. ++ * @devices_in_package: where to store the devices in package information. ++ * ++ * Description: reads devices in package registers of a MMD at @dev_addr ++ * from PHY at @addr on @bus. ++ * ++ * Returns: 0 on success, -EIO on failure. 
++ */ ++static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, ++ u32 *devices_in_package) ++{ ++ int phy_reg, reg_addr; ++ ++ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2; ++ phy_reg = mdiobus_read(bus, addr, reg_addr); ++ if (phy_reg < 0) ++ return -EIO; ++ *devices_in_package = (phy_reg & 0xffff) << 16; ++ ++ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; ++ phy_reg = mdiobus_read(bus, addr, reg_addr); ++ if (phy_reg < 0) ++ return -EIO; ++ *devices_in_package |= (phy_reg & 0xffff); ++ ++ return 0; ++} ++ + /** + * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs. + * @bus: the target MII bus +@@ -223,31 +254,32 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, + int phy_reg; + int i, reg_addr; + const int num_ids = ARRAY_SIZE(c45_ids->device_ids); ++ u32 *devs = &c45_ids->devices_in_package; + +- /* Find first non-zero Devices In package. Device +- * zero is reserved, so don't probe it. ++ /* Find first non-zero Devices In package. Device zero is reserved ++ * for 802.3 c45 complied PHYs, so don't probe it at first. + */ +- for (i = 1; +- i < num_ids && c45_ids->devices_in_package == 0; +- i++) { +- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; +- phy_reg = mdiobus_read(bus, addr, reg_addr); +- if (phy_reg < 0) +- return -EIO; +- c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; +- +- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1; +- phy_reg = mdiobus_read(bus, addr, reg_addr); ++ for (i = 1; i < num_ids && *devs == 0; i++) { ++ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); + if (phy_reg < 0) + return -EIO; +- c45_ids->devices_in_package |= (phy_reg & 0xffff); + +- /* If mostly Fs, there is no device there, +- * let's get out of here. +- */ +- if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) { +- *phy_id = 0xffffffff; +- return 0; ++ if ((*devs & 0x1fffffff) == 0x1fffffff) { ++ /* If mostly Fs, there is no device there, ++ * then let's continue to probe more, as some ++ * 10G PHYs have zero Devices In package, ++ * e.g. Cortina CS4315/CS4340 PHY. ++ */ ++ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); ++ if (phy_reg < 0) ++ return -EIO; ++ /* no device there, let's get out of here */ ++ if ((*devs & 0x1fffffff) == 0x1fffffff) { ++ *phy_id = 0xffffffff; ++ return 0; ++ } else { ++ break; ++ } + } + } + +@@ -376,6 +408,24 @@ int phy_device_register(struct phy_device *phydev) + EXPORT_SYMBOL(phy_device_register); + + /** ++ * phy_device_remove - Remove a previously registered phy device from the MDIO bus ++ * @phydev: phy_device structure to remove ++ * ++ * This doesn't free the phy_device itself, it merely reverses the effects ++ * of phy_device_register(). Use phy_device_free() to free the device ++ * after calling this function. 
++ */ ++void phy_device_remove(struct phy_device *phydev) ++{ ++ struct mii_bus *bus = phydev->bus; ++ int addr = phydev->addr; ++ ++ device_del(&phydev->dev); ++ bus->phy_map[addr] = NULL; ++} ++EXPORT_SYMBOL(phy_device_remove); ++ ++/** + * phy_find_first - finds the first PHY device on the bus + * @bus: the target MII bus + */ +diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c +index 45483fd..badcf24 100644 +--- a/drivers/net/phy/realtek.c ++++ b/drivers/net/phy/realtek.c +@@ -22,8 +22,12 @@ + #define RTL821x_INER 0x12 + #define RTL821x_INER_INIT 0x6400 + #define RTL821x_INSR 0x13 ++#define RTL8211E_INER_LINK_STATUS 0x400 + +-#define RTL8211E_INER_LINK_STATUS 0x400 ++#define RTL8211F_INER_LINK_STATUS 0x0010 ++#define RTL8211F_INSR 0x1d ++#define RTL8211F_PAGE_SELECT 0x1f ++#define RTL8211F_TX_DELAY 0x100 + + MODULE_DESCRIPTION("Realtek PHY driver"); + MODULE_AUTHOR("Johnson Leung"); +@@ -38,6 +42,18 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev) + return (err < 0) ? err : 0; + } + ++static int rtl8211f_ack_interrupt(struct phy_device *phydev) ++{ ++ int err; ++ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43); ++ err = phy_read(phydev, RTL8211F_INSR); ++ /* restore to default page 0 */ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); ++ ++ return (err < 0) ? err : 0; ++} ++ + static int rtl8211b_config_intr(struct phy_device *phydev) + { + int err; +@@ -64,6 +80,41 @@ static int rtl8211e_config_intr(struct phy_device *phydev) + return err; + } + ++static int rtl8211f_config_intr(struct phy_device *phydev) ++{ ++ int err; ++ ++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) ++ err = phy_write(phydev, RTL821x_INER, ++ RTL8211F_INER_LINK_STATUS); ++ else ++ err = phy_write(phydev, RTL821x_INER, 0); ++ ++ return err; ++} ++ ++static int rtl8211f_config_init(struct phy_device *phydev) ++{ ++ int ret; ++ u16 reg; ++ ++ ret = genphy_config_init(phydev); ++ if (ret < 0) ++ return ret; ++ ++ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { ++ /* enable TXDLY */ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); ++ reg = phy_read(phydev, 0x11); ++ reg |= RTL8211F_TX_DELAY; ++ phy_write(phydev, 0x11, reg); ++ /* restore to default page 0 */ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); ++ } ++ ++ return 0; ++} ++ + static struct phy_driver realtek_drvs[] = { + { + .phy_id = 0x00008201, +@@ -86,6 +137,19 @@ static struct phy_driver realtek_drvs[] = { + .config_intr = &rtl8211b_config_intr, + .driver = { .owner = THIS_MODULE,}, + }, { ++ .phy_id = 0x001cc914, ++ .name = "RTL8211DN Gigabit Ethernet", ++ .phy_id_mask = 0x001fffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .config_aneg = genphy_config_aneg, ++ .read_status = genphy_read_status, ++ .ack_interrupt = rtl821x_ack_interrupt, ++ .config_intr = rtl8211e_config_intr, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .driver = { .owner = THIS_MODULE,}, ++ }, { + .phy_id = 0x001cc915, + .name = "RTL8211E Gigabit Ethernet", + .phy_id_mask = 0x001fffff, +@@ -98,6 +162,20 @@ static struct phy_driver realtek_drvs[] = { + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, ++ }, { ++ .phy_id = 0x001cc916, ++ .name = "RTL8211F Gigabit Ethernet", ++ .phy_id_mask = 0x001fffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .config_aneg = &genphy_config_aneg, ++ .config_init = &rtl8211f_config_init, ++ .read_status = &genphy_read_status, ++ .ack_interrupt = &rtl8211f_ack_interrupt, ++ .config_intr = 
&rtl8211f_config_intr, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .driver = { .owner = THIS_MODULE }, + }, + }; + +@@ -116,7 +194,9 @@ module_exit(realtek_exit); + + static struct mdio_device_id __maybe_unused realtek_tbl[] = { + { 0x001cc912, 0x001fffff }, ++ { 0x001cc914, 0x001fffff }, + { 0x001cc915, 0x001fffff }, ++ { 0x001cc916, 0x001fffff }, + { } + }; + +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 469d2b7..210c876 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -32,8 +32,8 @@ + + LIST_HEAD(aliases_lookup); + +-struct device_node *of_allnodes; +-EXPORT_SYMBOL(of_allnodes); ++struct device_node *of_root; ++EXPORT_SYMBOL(of_root); + struct device_node *of_chosen; + struct device_node *of_aliases; + struct device_node *of_stdout; +@@ -48,7 +48,7 @@ struct kset *of_kset; + */ + DEFINE_MUTEX(of_mutex); + +-/* use when traversing tree through the allnext, child, sibling, ++/* use when traversing tree through the child, sibling, + * or parent members of struct device_node. + */ + DEFINE_RAW_SPINLOCK(devtree_lock); +@@ -204,7 +204,7 @@ static int __init of_init(void) + mutex_unlock(&of_mutex); + + /* Symlink in /proc as required by userspace ABI */ +- if (of_allnodes) ++ if (of_root) + proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); + + return 0; +@@ -245,6 +245,23 @@ struct property *of_find_property(const struct device_node *np, + } + EXPORT_SYMBOL(of_find_property); + ++struct device_node *__of_find_all_nodes(struct device_node *prev) ++{ ++ struct device_node *np; ++ if (!prev) { ++ np = of_root; ++ } else if (prev->child) { ++ np = prev->child; ++ } else { ++ /* Walk back up looking for a sibling, or the end of the structure */ ++ np = prev; ++ while (np->parent && !np->sibling) ++ np = np->parent; ++ np = np->sibling; /* Might be null at the end of the tree */ ++ } ++ return np; ++} ++ + /** + * of_find_all_nodes - Get next node in global list + * @prev: Previous node or NULL to start iteration +@@ -259,10 +276,8 @@ struct device_node *of_find_all_nodes(struct device_node *prev) + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = prev ? prev->allnext : of_allnodes; +- for (; np != NULL; np = np->allnext) +- if (of_node_get(np)) +- break; ++ np = __of_find_all_nodes(prev); ++ of_node_get(np); + of_node_put(prev); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +@@ -736,7 +751,7 @@ struct device_node *of_find_node_by_path(const char *path) + unsigned long flags; + + if (strcmp(path, "/") == 0) +- return of_node_get(of_allnodes); ++ return of_node_get(of_root); + + /* The path could begin with an alias */ + if (*path != '/') { +@@ -761,7 +776,7 @@ struct device_node *of_find_node_by_path(const char *path) + /* Step down the tree matching path components */ + raw_spin_lock_irqsave(&devtree_lock, flags); + if (!np) +- np = of_node_get(of_allnodes); ++ np = of_node_get(of_root); + while (np && *path == '/') { + path++; /* Increment past '/' delimiter */ + np = __of_find_node_by_path(np, path); +@@ -790,8 +805,7 @@ struct device_node *of_find_node_by_name(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? 
from->allnext : of_allnodes; +- for (; np; np = np->allnext) ++ for_each_of_allnodes_from(from, np) + if (np->name && (of_node_cmp(np->name, name) == 0) + && of_node_get(np)) + break; +@@ -820,8 +834,7 @@ struct device_node *of_find_node_by_type(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) ++ for_each_of_allnodes_from(from, np) + if (np->type && (of_node_cmp(np->type, type) == 0) + && of_node_get(np)) + break; +@@ -852,12 +865,10 @@ struct device_node *of_find_compatible_node(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) { ++ for_each_of_allnodes_from(from, np) + if (__of_device_is_compatible(np, compatible, type, NULL) && + of_node_get(np)) + break; +- } + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +@@ -884,8 +895,7 @@ struct device_node *of_find_node_with_property(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) { ++ for_each_of_allnodes_from(from, np) { + for (pp = np->properties; pp; pp = pp->next) { + if (of_prop_cmp(pp->name, prop_name) == 0) { + of_node_get(np); +@@ -967,8 +977,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from, + *match = NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) { ++ for_each_of_allnodes_from(from, np) { + m = __of_match_node(matches, np); + if (m && of_node_get(np)) { + if (match) +@@ -1025,7 +1034,7 @@ struct device_node *of_find_node_by_phandle(phandle handle) + return NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- for (np = of_allnodes; np; np = np->allnext) ++ for_each_of_allnodes(np) + if (np->phandle == handle) + break; + of_node_get(np); +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c +index d499417..d43f305 100644 +--- a/drivers/of/dynamic.c ++++ b/drivers/of/dynamic.c +@@ -117,8 +117,6 @@ void __of_attach_node(struct device_node *np) + + np->child = NULL; + np->sibling = np->parent->child; +- np->allnext = np->parent->allnext; +- np->parent->allnext = np; + np->parent->child = np; + of_node_clear_flag(np, OF_DETACHED); + } +@@ -154,17 +152,6 @@ void __of_detach_node(struct device_node *np) + if (WARN_ON(!parent)) + return; + +- if (of_allnodes == np) +- of_allnodes = np->allnext; +- else { +- struct device_node *prev; +- for (prev = of_allnodes; +- prev->allnext != np; +- prev = prev->allnext) +- ; +- prev->allnext = np->allnext; +- } +- + if (parent->child == np) + parent->child = np->sibling; + else { +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c +index d134710..f6eda02 100644 +--- a/drivers/of/fdt.c ++++ b/drivers/of/fdt.c +@@ -145,15 +145,15 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, + * @mem: Memory chunk to use for allocating device nodes and properties + * @p: pointer to node in flat tree + * @dad: Parent struct device_node +- * @allnextpp: pointer to ->allnext from last allocated device_node + * @fpsize: Size of the node path up at the current depth. 
+ */ + static void * unflatten_dt_node(void *blob, + void *mem, + int *poffset, + struct device_node *dad, +- struct device_node ***allnextpp, +- unsigned long fpsize) ++ struct device_node **nodepp, ++ unsigned long fpsize, ++ bool dryrun) + { + const __be32 *p; + struct device_node *np; +@@ -200,7 +200,7 @@ static void * unflatten_dt_node(void *blob, + + np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, + __alignof__(struct device_node)); +- if (allnextpp) { ++ if (!dryrun) { + char *fn; + of_node_init(np); + np->full_name = fn = ((char *)np) + sizeof(*np); +@@ -222,8 +222,6 @@ static void * unflatten_dt_node(void *blob, + memcpy(fn, pathp, l); + + prev_pp = &np->properties; +- **allnextpp = np; +- *allnextpp = &np->allnext; + if (dad != NULL) { + np->parent = dad; + /* we temporarily use the next field as `last_child'*/ +@@ -254,7 +252,7 @@ static void * unflatten_dt_node(void *blob, + has_name = 1; + pp = unflatten_dt_alloc(&mem, sizeof(struct property), + __alignof__(struct property)); +- if (allnextpp) { ++ if (!dryrun) { + /* We accept flattened tree phandles either in + * ePAPR-style "phandle" properties, or the + * legacy "linux,phandle" properties. If both +@@ -296,7 +294,7 @@ static void * unflatten_dt_node(void *blob, + sz = (pa - ps) + 1; + pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, + __alignof__(struct property)); +- if (allnextpp) { ++ if (!dryrun) { + pp->name = "name"; + pp->length = sz; + pp->value = pp + 1; +@@ -308,7 +306,7 @@ static void * unflatten_dt_node(void *blob, + (char *)pp->value); + } + } +- if (allnextpp) { ++ if (!dryrun) { + *prev_pp = NULL; + np->name = of_get_property(np, "name", NULL); + np->type = of_get_property(np, "device_type", NULL); +@@ -324,11 +322,13 @@ static void * unflatten_dt_node(void *blob, + if (depth < 0) + depth = 0; + while (*poffset > 0 && depth > old_depth) +- mem = unflatten_dt_node(blob, mem, poffset, np, allnextpp, +- fpsize); ++ mem = unflatten_dt_node(blob, mem, poffset, np, NULL, ++ fpsize, dryrun); + + if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND) + pr_err("unflatten: error %d processing FDT\n", *poffset); ++ if (nodepp) ++ *nodepp = np; + + return mem; + } +@@ -352,7 +352,6 @@ static void __unflatten_device_tree(void *blob, + unsigned long size; + int start; + void *mem; +- struct device_node **allnextp = mynodes; + + pr_debug(" -> unflatten_device_tree()\n"); + +@@ -373,7 +372,7 @@ static void __unflatten_device_tree(void *blob, + + /* First pass, scan for size */ + start = 0; +- size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0); ++ size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true); + size = ALIGN(size, 4); + + pr_debug(" size is %lx, allocating...\n", size); +@@ -388,11 +387,10 @@ static void __unflatten_device_tree(void *blob, + + /* Second pass, do actual unflattening */ + start = 0; +- unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0); ++ unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false); + if (be32_to_cpup(mem + size) != 0xdeadbeef) + pr_warning("End of tree marker overwritten: %08x\n", + be32_to_cpup(mem + size)); +- *allnextp = NULL; + + pr_debug(" <- unflatten_device_tree()\n"); + } +@@ -1039,7 +1037,7 @@ bool __init early_init_dt_scan(void *params) + */ + void __init unflatten_device_tree(void) + { +- __unflatten_device_tree(initial_boot_params, &of_allnodes, ++ __unflatten_device_tree(initial_boot_params, &of_root, + early_init_dt_alloc_memory_arch); + + /* Get pointer to "/chosen" and "/aliases" nodes 
for use everywhere */ +diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c +index 36b4035..d2acae8 100644 +--- a/drivers/of/pdt.c ++++ b/drivers/of/pdt.c +@@ -25,8 +25,7 @@ + + static struct of_pdt_ops *of_pdt_prom_ops __initdata; + +-void __initdata (*of_pdt_build_more)(struct device_node *dp, +- struct device_node ***nextp); ++void __initdata (*of_pdt_build_more)(struct device_node *dp); + + #if defined(CONFIG_SPARC) + unsigned int of_pdt_unique_id __initdata; +@@ -192,8 +191,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, + } + + static struct device_node * __init of_pdt_build_tree(struct device_node *parent, +- phandle node, +- struct device_node ***nextp) ++ phandle node) + { + struct device_node *ret = NULL, *prev_sibling = NULL; + struct device_node *dp; +@@ -210,16 +208,12 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, + ret = dp; + prev_sibling = dp; + +- *(*nextp) = dp; +- *nextp = &dp->allnext; +- + dp->full_name = of_pdt_build_full_name(dp); + +- dp->child = of_pdt_build_tree(dp, +- of_pdt_prom_ops->getchild(node), nextp); ++ dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node)); + + if (of_pdt_build_more) +- of_pdt_build_more(dp, nextp); ++ of_pdt_build_more(dp); + + node = of_pdt_prom_ops->getsibling(node); + } +@@ -234,20 +228,17 @@ static void * __init kernel_tree_alloc(u64 size, u64 align) + + void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops) + { +- struct device_node **nextp; +- + BUG_ON(!ops); + of_pdt_prom_ops = ops; + +- of_allnodes = of_pdt_create_node(root_node, NULL); ++ of_root = of_pdt_create_node(root_node, NULL); + #if defined(CONFIG_SPARC) +- of_allnodes->path_component_name = ""; ++ of_root->path_component_name = ""; + #endif +- of_allnodes->full_name = "/"; ++ of_root->full_name = "/"; + +- nextp = &of_allnodes->allnext; +- of_allnodes->child = of_pdt_build_tree(of_allnodes, +- of_pdt_prom_ops->getchild(of_allnodes->phandle), &nextp); ++ of_root->child = of_pdt_build_tree(of_root, ++ of_pdt_prom_ops->getchild(of_root->phandle)); + + /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ + of_alias_scan(kernel_tree_alloc); +diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c +index e2d79af..e40089e 100644 +--- a/drivers/of/selftest.c ++++ b/drivers/of/selftest.c +@@ -148,7 +148,7 @@ static void __init of_selftest_dynamic(void) + + static int __init of_selftest_check_node_linkage(struct device_node *np) + { +- struct device_node *child, *allnext_index = np; ++ struct device_node *child; + int count = 0, rc; + + for_each_child_of_node(np, child) { +@@ -158,14 +158,6 @@ static int __init of_selftest_check_node_linkage(struct device_node *np) + return -EINVAL; + } + +- while (allnext_index && allnext_index != child) +- allnext_index = allnext_index->allnext; +- if (allnext_index != child) { +- pr_err("Node %s is ordered differently in sibling and allnode lists\n", +- child->name); +- return -EINVAL; +- } +- + rc = of_selftest_check_node_linkage(child); + if (rc < 0) + return rc; +@@ -180,12 +172,12 @@ static void __init of_selftest_check_tree_linkage(void) + struct device_node *np; + int allnode_count = 0, child_count; + +- if (!of_allnodes) ++ if (!of_root) + return; + + for_each_of_allnodes(np) + allnode_count++; +- child_count = of_selftest_check_node_linkage(of_allnodes); ++ child_count = of_selftest_check_node_linkage(of_root); + + selftest(child_count > 0, "Device node data structure is corrupted\n"); + selftest(child_count == 
allnode_count, "allnodes list size (%i) doesn't match" +@@ -775,33 +767,29 @@ static void update_node_properties(struct device_node *np, + */ + static int attach_node_and_children(struct device_node *np) + { +- struct device_node *next, *root = np, *dup; ++ struct device_node *next, *dup, *child; + +- /* skip root node */ +- np = np->child; +- /* storing a copy in temporary node */ +- dup = np; ++ dup = of_find_node_by_path(np->full_name); ++ if (dup) { ++ update_node_properties(np, dup); ++ return 0; ++ } + +- while (dup) { ++ /* Children of the root need to be remembered for removal */ ++ if (np->parent == of_root) { + if (WARN_ON(last_node_index >= NO_OF_NODES)) + return -EINVAL; +- nodes[last_node_index++] = dup; +- dup = dup->sibling; ++ nodes[last_node_index++] = np; + } +- dup = NULL; + +- while (np) { +- next = np->allnext; +- dup = of_find_node_by_path(np->full_name); +- if (dup) +- update_node_properties(np, dup); +- else { +- np->child = NULL; +- if (np->parent == root) +- np->parent = of_allnodes; +- of_attach_node(np); +- } +- np = next; ++ child = np->child; ++ np->child = NULL; ++ np->sibling = NULL; ++ of_attach_node(np); ++ while (child) { ++ next = child->sibling; ++ attach_node_and_children(child); ++ child = next; + } + + return 0; +@@ -846,10 +834,10 @@ static int __init selftest_data_add(void) + return -EINVAL; + } + +- if (!of_allnodes) { ++ if (!of_root) { + /* enabling flag for removing nodes */ + selftest_live_tree = true; +- of_allnodes = selftest_data_node; ++ of_root = selftest_data_node; + + for_each_of_allnodes(np) + __of_attach_node_sysfs(np); +@@ -859,7 +847,14 @@ static int __init selftest_data_add(void) + } + + /* attach the sub-tree to live tree */ +- return attach_node_and_children(selftest_data_node); ++ np = selftest_data_node->child; ++ while (np) { ++ struct device_node *next = np->sibling; ++ np->parent = of_root; ++ attach_node_and_children(np); ++ np = next; ++ } ++ return 0; + } + + /** +@@ -889,10 +884,10 @@ static void selftest_data_remove(void) + of_node_put(of_chosen); + of_aliases = NULL; + of_chosen = NULL; +- for_each_child_of_node(of_allnodes, np) ++ for_each_child_of_node(of_root, np) + detach_node_and_children(np); +- __of_detach_node_sysfs(of_allnodes); +- of_allnodes = NULL; ++ __of_detach_node_sysfs(of_root); ++ of_root = NULL; + return; + } + +diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile +index e04fe2d..e9815ac 100644 +--- a/drivers/pci/Makefile ++++ b/drivers/pci/Makefile +@@ -35,6 +35,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o + # + obj-$(CONFIG_ALPHA) += setup-irq.o + obj-$(CONFIG_ARM) += setup-irq.o ++obj-$(CONFIG_ARM64) += setup-irq.o + obj-$(CONFIG_UNICORE32) += setup-irq.o + obj-$(CONFIG_SUPERH) += setup-irq.o + obj-$(CONFIG_MIPS) += setup-irq.o +diff --git a/drivers/pci/access.c b/drivers/pci/access.c +index 7f249b9..b965c12 100644 +--- a/drivers/pci/access.c ++++ b/drivers/pci/access.c +@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte); + EXPORT_SYMBOL(pci_bus_write_config_word); + EXPORT_SYMBOL(pci_bus_write_config_dword); + ++int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++{ ++ void __iomem *addr; ++ ++ addr = bus->ops->map_bus(bus, devfn, where); ++ if (!addr) { ++ *val = ~0; ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ if (size == 1) ++ *val = readb(addr); ++ else if (size == 2) ++ *val = readw(addr); ++ else ++ *val = readl(addr); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_read); ++ ++int 
pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ void __iomem *addr; ++ ++ addr = bus->ops->map_bus(bus, devfn, where); ++ if (!addr) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (size == 1) ++ writeb(val, addr); ++ else if (size == 2) ++ writew(val, addr); ++ else ++ writel(val, addr); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_write); ++ ++int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++{ ++ void __iomem *addr; ++ ++ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); ++ if (!addr) { ++ *val = ~0; ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ *val = readl(addr); ++ ++ if (size <= 2) ++ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_read32); ++ ++int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ void __iomem *addr; ++ u32 mask, tmp; ++ ++ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); ++ if (!addr) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (size == 4) { ++ writel(val, addr); ++ return PCIBIOS_SUCCESSFUL; ++ } else { ++ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); ++ } ++ ++ tmp = readl(addr) & mask; ++ tmp |= val << ((where & 0x3) * 8); ++ writel(tmp, addr); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_write32); ++ + /** + * pci_bus_set_ops - Set raw operations of pci bus + * @bus: pci bus struct +diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig +index 96586b1..dafa3dc 100644 +--- a/drivers/pci/host/Kconfig ++++ b/drivers/pci/host/Kconfig +@@ -50,7 +50,7 @@ config PCI_RCAR_GEN2_PCIE + + config PCI_HOST_GENERIC + bool "Generic PCI host controller" +- depends on ARM && OF ++ depends on (ARM || ARM64) && OF + help + Say Y here if you want to support a simple generic PCI host + controller, such as the one emulated by kvmtool. 
+diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c +index d491b0f..baa1232 100644 +--- a/drivers/pci/host/pci-layerscape.c ++++ b/drivers/pci/host/pci-layerscape.c +@@ -36,12 +36,21 @@ + #define LTSSM_PCIE_L0 0x11 /* L0 state */ + #define LTSSM_PCIE_L2_IDLE 0x15 /* L2 idle state */ + ++#define PCIE_SRIOV_OFFSET 0x178 ++ ++/* CS2 */ ++#define PCIE_CS2_OFFSET 0x1000 /* For PCIe without SR-IOV */ ++#define PCIE_ENABLE_CS2 0x80000000 /* For PCIe with SR-IOV */ ++ + /* PEX Internal Configuration Registers */ + #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ + #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ ++#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ ++#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ + + /* PEX LUT registers */ + #define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ ++#define PCIE_LUT_CTRL0 0x7f8 + #define PCIE_LUT_UDR(n) (0x800 + (n) * 8) + #define PCIE_LUT_LDR(n) (0x804 + (n) * 8) + #define PCIE_LUT_MASK_ALL 0xffff +@@ -72,6 +81,8 @@ + #define CPLD_RST_PCIE_SLOT 0x14 + #define CPLD_RST_PCIESLOT 0x3 + ++#define PCIE_IATU_NUM 6 ++ + struct ls_pcie; + + struct ls_pcie_pm_data { +@@ -111,6 +122,8 @@ struct ls_pcie { + + #define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) + ++static void ls_pcie_host_init(struct pcie_port *pp); ++ + u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) + { + u32 index, streamid; +@@ -163,6 +176,42 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) + iowrite32(val, pcie->dbi + PCIE_STRFMR1); + } + ++/* Disable all bars in RC mode */ ++static void ls_pcie_disable_bars(struct ls_pcie *pcie) ++{ ++ u32 header; ++ ++ header = ioread32(pcie->dbi + PCIE_SRIOV_OFFSET); ++ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_SRIOV) { ++ iowrite32(PCIE_ENABLE_CS2, pcie->lut + PCIE_LUT_CTRL0); ++ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_0); ++ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_1); ++ iowrite32(0, pcie->dbi + PCI_ROM_ADDRESS1); ++ iowrite32(0, pcie->lut + PCIE_LUT_CTRL0); ++ } else { ++ iowrite32(0, ++ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0); ++ iowrite32(0, ++ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1); ++ iowrite32(0, ++ pcie->dbi + PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1); ++ } ++} ++ ++static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) ++{ ++ int i; ++ ++ for (i = 0; i < PCIE_IATU_NUM; i++) ++ dw_pcie_disable_outbound_atu(&pcie->pp, i); ++} ++ ++/* Forward error response of outbound non-posted requests */ ++static void ls_pcie_fix_error_response(struct ls_pcie *pcie) ++{ ++ iowrite32(PCIE_ABSERR_SETTING, pcie->dbi + PCIE_ABSERR); ++} ++ + static int ls1021_pcie_link_up(struct pcie_port *pp) + { + u32 state; +@@ -272,19 +321,24 @@ static void ls1021_pcie_host_init(struct pcie_port *pp) + } + pcie->index = index[1]; + +- dw_pcie_setup_rc(pp); ++ ls_pcie_host_init(pp); + +- ls_pcie_drop_msg_tlp(pcie); ++ dw_pcie_setup_rc(pp); + } + + static int ls_pcie_link_up(struct pcie_port *pp) + { + struct ls_pcie *pcie = to_ls_pcie(pp); +- u32 state; ++ u32 state, offset; + +- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> +- pcie->drvdata->ltssm_shift) & +- LTSSM_STATE_MASK; ++ if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) ++ offset = 0x407fc; ++ else ++ offset = PCIE_LUT_DBG; ++ ++ state = (ioread32(pcie->lut + offset) >> ++ pcie->drvdata->ltssm_shift) & ++ LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; +@@ -308,6 +362,10 @@ static void 
ls_pcie_host_init(struct pcie_port *pp) + ls_pcie_clear_multifunction(pcie); + ls_pcie_drop_msg_tlp(pcie); + iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); ++ ++ ls_pcie_disable_bars(pcie); ++ ls_pcie_disable_outbound_atus(pcie); ++ ls_pcie_fix_error_response(pcie); + } + + static int ls_pcie_msi_host_init(struct pcie_port *pp, +@@ -426,6 +484,11 @@ static int ls_pcie_host_pme_init(struct ls_pcie *pcie, + + pp = &pcie->pp; + ++ if (dw_pcie_link_up(&pcie->pp)) ++ pcie->in_slot = true; ++ else ++ pcie->in_slot = false; ++ + pcie->pme_irq = platform_get_irq_byname(pdev, "pme"); + if (pcie->pme_irq < 0) { + dev_err(&pdev->dev, +@@ -462,11 +525,6 @@ static int ls_pcie_host_pme_init(struct ls_pcie *pcie, + val |= PCIE_PEX_RCR_PMEIE; + iowrite16(val, pcie->dbi + PCIE_PEX_RCR); + +- if (dw_pcie_link_up(&pcie->pp)) +- pcie->in_slot = true; +- else +- pcie->in_slot = false; +- + return 0; + } + +@@ -590,12 +648,14 @@ static int ls_pcie_pm_do_resume(struct ls_pcie *pcie) + u32 state; + int i = 0; + u16 val; +- +- ls_pcie_host_init(&pcie->pp); ++ struct pcie_port *pp = &pcie->pp; + + if (!pcie->in_slot) + return 0; + ++ dw_pcie_setup_rc(pp); ++ ls_pcie_host_init(pp); ++ + /* Put RC in D0 */ + val = ioread16(pcie->dbi + PCIE_PM_SCR); + val &= PCIE_PM_SCR_PMEPS_D0; +diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c +index 8a9241b..0961ffc 100644 +--- a/drivers/pci/host/pcie-designware.c ++++ b/drivers/pci/host/pcie-designware.c +@@ -159,6 +159,13 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + } + ++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index) ++{ ++ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, ++ PCIE_ATU_VIEWPORT); ++ dw_pcie_writel_rc(pp, 0, PCIE_ATU_CR2); ++} ++ + int dw_pcie_link_up(struct pcie_port *pp) + { + if (pp->ops->link_up) +@@ -495,6 +502,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) + u32 membase; + u32 memlimit; + ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, ++ PCIE_ATU_TYPE_IO, pp->io_base, ++ pp->io_bus_addr, pp->io_size); ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, ++ PCIE_ATU_TYPE_MEM, pp->mem_base, ++ pp->mem_bus_addr, pp->mem_size); ++ + /* set the number of lanes */ + dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); + val &= ~PORT_LINK_MODE_MASK; +diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h +index 2f01284..fcd6431 100644 +--- a/drivers/pci/host/pcie-designware.h ++++ b/drivers/pci/host/pcie-designware.h +@@ -80,5 +80,6 @@ void dw_pcie_msi_init(struct pcie_port *pp); + int dw_pcie_link_up(struct pcie_port *pp); + void dw_pcie_setup_rc(struct pcie_port *pp); + int dw_pcie_host_init(struct pcie_port *pp); ++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index); + + #endif /* _PCIE_DESIGNWARE_H */ +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index 5dd4c96..5e64d37 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -667,11 +667,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) + { + resource_size_t phys_addr; + u32 table_offset; ++ unsigned long flags; + u8 bir; + + pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, + &table_offset); + bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); ++ flags = pci_resource_flags(dev, bir); ++ if (!flags || (flags & IORESOURCE_UNSET)) ++ return NULL; ++ + table_offset &= PCI_MSIX_TABLE_OFFSET; + phys_addr = pci_resource_start(dev, bir) + table_offset; + +diff 
--git a/drivers/pci/pci.c b/drivers/pci/pci.c +index ce0aa47..a6783a5 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -2467,6 +2467,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) + *pinp = pin; + return PCI_SLOT(dev->devfn); + } ++EXPORT_SYMBOL_GPL(pci_common_swizzle); + + /** + * pci_release_region - Release a PCI bar +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c +index 2f0ce66..95ef171 100644 +--- a/drivers/pci/pcie/portdrv_core.c ++++ b/drivers/pci/pcie/portdrv_core.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include "../pci.h" + #include "portdrv.h" +@@ -199,6 +200,28 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) + static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) + { + int i, irq = -1; ++ int ret; ++ struct device_node *np = NULL; ++ ++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) ++ irqs[i] = 0; ++ ++ if (dev->bus->dev.of_node) ++ np = dev->bus->dev.of_node; ++ ++ /* If root port doesn't support MSI/MSI-X/INTx in RC mode, ++ * request irq for aer ++ */ ++ if (IS_ENABLED(CONFIG_OF_IRQ) && np && ++ (mask & PCIE_PORT_SERVICE_PME)) { ++ ret = of_irq_get_byname(np, "aer"); ++ if (ret > 0) { ++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret; ++ if (dev->irq) ++ irq = dev->irq; ++ goto no_msi; ++ } ++ } + + /* + * If MSI cannot be used for PCIe PME or hotplug, we have to use +@@ -224,11 +247,13 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) + irq = dev->irq; + + no_msi: +- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) +- irqs[i] = irq; ++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { ++ if (!irqs[i]) ++ irqs[i] = irq; ++ } + irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; + +- if (irq < 0) ++ if (irq < 0 && irqs[PCIE_PORT_SERVICE_AER_SHIFT] < 0) + return -ENODEV; + return 0; + } +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 6bdeb75..0b16384 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -2024,6 +2024,7 @@ err_out: + kfree(b); + return NULL; + } ++EXPORT_SYMBOL_GPL(pci_create_root_bus); + + int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) + { +diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c +index 8bd76c9..8a280e9 100644 +--- a/drivers/pci/remove.c ++++ b/drivers/pci/remove.c +@@ -139,6 +139,7 @@ void pci_stop_root_bus(struct pci_bus *bus) + /* stop the host bridge */ + device_release_driver(&host_bridge->dev); + } ++EXPORT_SYMBOL_GPL(pci_stop_root_bus); + + void pci_remove_root_bus(struct pci_bus *bus) + { +@@ -158,3 +159,4 @@ void pci_remove_root_bus(struct pci_bus *bus) + /* remove the host bridge */ + device_unregister(&host_bridge->dev); + } ++EXPORT_SYMBOL_GPL(pci_remove_root_bus); +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c +index e3e17f3..8169597 100644 +--- a/drivers/pci/setup-bus.c ++++ b/drivers/pci/setup-bus.c +@@ -1750,3 +1750,4 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus) + __pci_bus_assign_resources(bus, &add_list, NULL); + BUG_ON(!list_empty(&add_list)); + } ++EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources); +diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c +index 4e2d595..95c225b 100644 +--- a/drivers/pci/setup-irq.c ++++ b/drivers/pci/setup-irq.c +@@ -65,3 +65,4 @@ void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), + for_each_pci_dev(dev) + pdev_fixup_irq(dev, swizzle, map_irq); + } ++EXPORT_SYMBOL_GPL(pci_fixup_irqs); +diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig 
+index 76d6bd4..d4bcacf 100644 +--- a/drivers/soc/Kconfig ++++ b/drivers/soc/Kconfig +@@ -4,4 +4,17 @@ source "drivers/soc/qcom/Kconfig" + source "drivers/soc/ti/Kconfig" + source "drivers/soc/versatile/Kconfig" + ++config FSL_SOC_DRIVERS ++ bool "Freescale Soc Drivers" ++ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE ++ default n ++ help ++ Say y here to enable Freescale Soc Device Drivers support. ++ The Soc Drivers provides the device driver that is a specific block ++ or feature on Freescale platform. ++ ++if FSL_SOC_DRIVERS ++ source "drivers/soc/fsl/Kconfig" ++endif ++ + endmenu +diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile +index 063113d..ef82e45 100644 +--- a/drivers/soc/Makefile ++++ b/drivers/soc/Makefile +@@ -6,3 +6,4 @@ obj-$(CONFIG_ARCH_QCOM) += qcom/ + obj-$(CONFIG_ARCH_TEGRA) += tegra/ + obj-$(CONFIG_SOC_TI) += ti/ + obj-$(CONFIG_PLAT_VERSATILE) += versatile/ ++obj-$(CONFIG_FSL_SOC_DRIVERS) += fsl/ +diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig +new file mode 100644 +index 0000000..92a085e +--- /dev/null ++++ b/drivers/soc/fsl/Kconfig +@@ -0,0 +1,6 @@ ++config FSL_GUTS ++ bool ++ ++if ARM || ARM64 ++source "drivers/soc/fsl/Kconfig.arm" ++endif +diff --git a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm +new file mode 100644 +index 0000000..5f2d214 +--- /dev/null ++++ b/drivers/soc/fsl/Kconfig.arm +@@ -0,0 +1,25 @@ ++# ++# Freescale ARM SOC Drivers ++# ++ ++config LS1_SOC_DRIVERS ++ bool "LS1021A Soc Drivers" ++ depends on SOC_LS1021A ++ default n ++ help ++ Say y here to enable Freescale LS1021A Soc Device Drivers support. ++ The Soc Drivers provides the device driver that is a specific block ++ or feature on LS1021A platform. ++ ++config LS_SOC_DRIVERS ++ bool "Layerscape Soc Drivers" ++ depends on ARCH_LAYERSCAPE ++ default n ++ help ++ Say y here to enable Freescale Layerscape Soc Device Drivers support. ++ The Soc Drivers provides the device driver that is a specific block ++ or feature on Layerscape platform. ++ ++if LS1_SOC_DRIVERS ++ source "drivers/soc/fsl/ls1/Kconfig" ++endif +diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile +new file mode 100644 +index 0000000..9fc17b3 +--- /dev/null ++++ b/drivers/soc/fsl/Makefile +@@ -0,0 +1,6 @@ ++# ++# Makefile for Freescale Soc specific device drivers. ++# ++ ++obj-$(CONFIG_LS1_SOC_DRIVERS) += ls1/ ++obj-$(CONFIG_FSL_GUTS) += guts.o +diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c +new file mode 100644 +index 0000000..11065c2 +--- /dev/null ++++ b/drivers/soc/fsl/guts.c +@@ -0,0 +1,123 @@ ++/* ++ * Freescale QorIQ Platforms GUTS Driver ++ * ++ * Copyright (C) 2016 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct guts { ++ struct ccsr_guts __iomem *regs; ++ bool little_endian; ++}; ++ ++static struct guts *guts; ++ ++u32 guts_get_svr(void) ++{ ++ u32 svr = 0; ++ ++ if ((!guts) || (!(guts->regs))) { ++#ifdef CONFIG_PPC ++ svr = mfspr(SPRN_SVR); ++#endif ++ return svr; ++ } ++ ++ if (guts->little_endian) ++ svr = ioread32(&guts->regs->svr); ++ else ++ svr = ioread32be(&guts->regs->svr); ++ ++ return svr; ++} ++EXPORT_SYMBOL_GPL(guts_get_svr); ++ ++static int guts_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ ++ guts = kzalloc(sizeof(*guts), GFP_KERNEL); ++ if (!guts) ++ return -ENOMEM; ++ ++ if (of_property_read_bool(np, "little-endian")) ++ guts->little_endian = true; ++ else ++ guts->little_endian = false; ++ ++ guts->regs = of_iomap(np, 0); ++ if (!(guts->regs)) ++ return -ENOMEM; ++ ++ of_node_put(np); ++ return 0; ++} ++ ++static int guts_remove(struct platform_device *pdev) ++{ ++ iounmap(guts->regs); ++ kfree(guts); ++ return 0; ++} ++ ++/* ++ * Table for matching compatible strings, for device tree ++ * guts node, for Freescale QorIQ SOCs. ++ */ ++static const struct of_device_id guts_of_match[] = { ++ /* For T4 & B4 SOCs */ ++ { .compatible = "fsl,qoriq-device-config-1.0", }, ++ /* For P Series SOCs */ ++ { .compatible = "fsl,qoriq-device-config-2.0", }, ++ { .compatible = "fsl,p1010-guts", }, ++ { .compatible = "fsl,p1020-guts", }, ++ { .compatible = "fsl,p1021-guts", }, ++ { .compatible = "fsl,p1022-guts", }, ++ { .compatible = "fsl,p1023-guts", }, ++ { .compatible = "fsl,p2020-guts", }, ++ /* For BSC Series SOCs */ ++ { .compatible = "fsl,bsc9131-guts", }, ++ { .compatible = "fsl,bsc9132-guts", }, ++ /* For Layerscape Series SOCs */ ++ { .compatible = "fsl,ls1021a-dcfg", }, ++ { .compatible = "fsl,ls1043a-dcfg", }, ++ { .compatible = "fsl,ls2080a-dcfg", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, guts_of_match); ++ ++static struct platform_driver guts_driver = { ++ .driver = { ++ .name = "fsl-guts", ++ .of_match_table = guts_of_match, ++ }, ++ .probe = guts_probe, ++ .remove = guts_remove, ++}; ++ ++static int __init guts_drv_init(void) ++{ ++ return platform_driver_register(&guts_driver); ++} ++subsys_initcall(guts_drv_init); ++ ++static void __exit guts_drv_exit(void) ++{ ++ platform_driver_unregister(&guts_driver); ++} ++module_exit(guts_drv_exit); ++ ++MODULE_AUTHOR("Freescale Semiconductor, Inc."); ++MODULE_DESCRIPTION("Freescale QorIQ Platforms GUTS Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/fsl/ls1/Kconfig b/drivers/soc/fsl/ls1/Kconfig +new file mode 100644 +index 0000000..c9b04c4 +--- /dev/null ++++ b/drivers/soc/fsl/ls1/Kconfig +@@ -0,0 +1,11 @@ ++# ++# LS-1 Soc drivers ++# ++config FTM_ALARM ++ bool "FTM alarm driver" ++ depends on SOC_LS1021A ++ default n ++ help ++ Say y here to enable FTM alarm support. The FTM alarm provides ++ alarm functions for wakeup system from deep sleep. There is only ++ one FTM can be used in ALARM(FTM 0). +diff --git a/drivers/soc/fsl/ls1/Makefile b/drivers/soc/fsl/ls1/Makefile +new file mode 100644 +index 0000000..6299aa1 +--- /dev/null ++++ b/drivers/soc/fsl/ls1/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o +diff --git a/drivers/soc/fsl/ls1/ftm_alarm.c b/drivers/soc/fsl/ls1/ftm_alarm.c +new file mode 100644 +index 0000000..c42b26b +--- /dev/null ++++ b/drivers/soc/fsl/ls1/ftm_alarm.c +@@ -0,0 +1,274 @@ ++/* ++ * Freescale FlexTimer Module (FTM) Alarm driver. 
++ * ++ * Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version 2 ++ * of the License, or (at your option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define FTM_SC 0x00 ++#define FTM_SC_CLK_SHIFT 3 ++#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT) ++#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT) ++#define FTM_SC_PS_MASK 0x7 ++#define FTM_SC_TOIE BIT(6) ++#define FTM_SC_TOF BIT(7) ++ ++#define FTM_SC_CLKS_FIXED_FREQ 0x02 ++ ++#define FTM_CNT 0x04 ++#define FTM_MOD 0x08 ++#define FTM_CNTIN 0x4C ++ ++#define FIXED_FREQ_CLK 32000 ++#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK) ++#define MAX_COUNT_VAL 0xffff ++ ++static void __iomem *ftm1_base; ++static u32 alarm_freq; ++static bool big_endian; ++ ++static inline u32 ftm_readl(void __iomem *addr) ++{ ++ if (big_endian) ++ return ioread32be(addr); ++ ++ return ioread32(addr); ++} ++ ++static inline void ftm_writel(u32 val, void __iomem *addr) ++{ ++ if (big_endian) ++ iowrite32be(val, addr); ++ else ++ iowrite32(val, addr); ++} ++ ++static inline void ftm_counter_enable(void __iomem *base) ++{ ++ u32 val; ++ ++ /* select and enable counter clock source */ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); ++ val |= (FTM_SC_PS_MASK | FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ)); ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_counter_disable(void __iomem *base) ++{ ++ u32 val; ++ ++ /* disable counter clock source */ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_irq_acknowledge(void __iomem *base) ++{ ++ u32 val; ++ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~FTM_SC_TOF; ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_irq_enable(void __iomem *base) ++{ ++ u32 val; ++ ++ val = ftm_readl(base + FTM_SC); ++ val |= FTM_SC_TOIE; ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_irq_disable(void __iomem *base) ++{ ++ u32 val; ++ ++ val = ftm_readl(base + FTM_SC); ++ val &= ~FTM_SC_TOIE; ++ ftm_writel(val, base + FTM_SC); ++} ++ ++static inline void ftm_reset_counter(void __iomem *base) ++{ ++ /* ++ * The CNT register contains the FTM counter value. ++ * Reset clears the CNT register. Writing any value to COUNT ++ * updates the counter with its initial value, CNTIN. ++ */ ++ ftm_writel(0x00, base + FTM_CNT); ++} ++ ++static u32 time_to_cycle(unsigned long time) ++{ ++ u32 cycle; ++ ++ cycle = time * alarm_freq; ++ if (cycle > MAX_COUNT_VAL) { ++ pr_err("Out of alarm range.\n"); ++ cycle = 0; ++ } ++ ++ return cycle; ++} ++ ++static u32 cycle_to_time(u32 cycle) ++{ ++ return cycle / alarm_freq + 1; ++} ++ ++static void ftm_clean_alarm(void) ++{ ++ ftm_counter_disable(ftm1_base); ++ ++ ftm_writel(0x00, ftm1_base + FTM_CNTIN); ++ ftm_writel(~0UL, ftm1_base + FTM_MOD); ++ ++ ftm_reset_counter(ftm1_base); ++} ++ ++static int ftm_set_alarm(u64 cycle) ++{ ++ ftm_irq_disable(ftm1_base); ++ ++ /* ++ * The counter increments until the value of MOD is reached, ++ * at which point the counter is reloaded with the value of CNTIN. ++ * The TOF (the overflow flag) bit is set when the FTM counter ++ * changes from MOD to CNTIN. So we should using the cycle - 1. 
++ */ ++ ftm_writel(cycle - 1, ftm1_base + FTM_MOD); ++ ++ ftm_counter_enable(ftm1_base); ++ ++ ftm_irq_enable(ftm1_base); ++ ++ return 0; ++} ++ ++static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id) ++{ ++ ftm_irq_acknowledge(ftm1_base); ++ ftm_irq_disable(ftm1_base); ++ ftm_clean_alarm(); ++ ++ return IRQ_HANDLED; ++} ++ ++static ssize_t ftm_alarm_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ u32 count, val; ++ ++ count = ftm_readl(ftm1_base + FTM_MOD); ++ val = ftm_readl(ftm1_base + FTM_CNT); ++ val = (count & MAX_COUNT_VAL) - val; ++ val = cycle_to_time(val); ++ ++ return sprintf(buf, "%u\n", val); ++} ++ ++static ssize_t ftm_alarm_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ u32 cycle; ++ unsigned long time; ++ ++ if (kstrtoul(buf, 0, &time)) ++ return -EINVAL; ++ ++ ftm_clean_alarm(); ++ ++ cycle = time_to_cycle(time); ++ if (!cycle) ++ return -EINVAL; ++ ++ ftm_set_alarm(cycle); ++ ++ return count; ++} ++ ++static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644, ++ ftm_alarm_show, ftm_alarm_store); ++ ++static int ftm_alarm_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct resource *r; ++ int irq; ++ int ret; ++ ++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!r) ++ return -ENODEV; ++ ++ ftm1_base = devm_ioremap_resource(&pdev->dev, r); ++ if (IS_ERR(ftm1_base)) ++ return PTR_ERR(ftm1_base); ++ ++ irq = irq_of_parse_and_map(np, 0); ++ if (irq <= 0) { ++ pr_err("ftm: unable to get IRQ from DT, %d\n", irq); ++ return -EINVAL; ++ } ++ ++ big_endian = of_property_read_bool(np, "big-endian"); ++ ++ ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt, ++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to request irq\n"); ++ return ret; ++ } ++ ++ ret = device_create_file(&pdev->dev, &ftm_alarm_attributes); ++ if (ret) { ++ dev_err(&pdev->dev, "create sysfs fail.\n"); ++ return ret; ++ } ++ ++ alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV; ++ ++ ftm_clean_alarm(); ++ ++ device_init_wakeup(&pdev->dev, true); ++ ++ return ret; ++} ++ ++static const struct of_device_id ftm_alarm_match[] = { ++ { .compatible = "fsl,ftm-alarm", }, ++ { .compatible = "fsl,ftm-timer", }, ++ { }, ++}; ++ ++static struct platform_driver ftm_alarm_driver = { ++ .probe = ftm_alarm_probe, ++ .driver = { ++ .name = "ftm-alarm", ++ .owner = THIS_MODULE, ++ .of_match_table = ftm_alarm_match, ++ }, ++}; ++ ++static int __init ftm_alarm_init(void) ++{ ++ return platform_driver_register(&ftm_alarm_driver); ++} ++device_initcall(ftm_alarm_init); +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +index 27d1a91..cb52ede 100644 +--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +@@ -52,11 +52,6 @@ MODULE_LICENSE("Dual BSD/GPL"); + MODULE_AUTHOR("Freescale Semiconductor, Inc"); + MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); + +-/* Oldest DPAA2 objects version we are compatible with */ +-#define DPAA2_SUPPORTED_DPNI_VERSION 6 +-#define DPAA2_SUPPORTED_DPBP_VERSION 2 +-#define DPAA2_SUPPORTED_DPCON_VERSION 2 +- + static void validate_rx_csum(struct dpaa2_eth_priv *priv, + u32 fd_status, + struct sk_buff *skb) +@@ -261,7 +256,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + priv->buf_layout.private_data_size + + sizeof(struct dpaa2_fas)); + +- *ns = 
DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(*ns); + } +@@ -362,6 +357,25 @@ static int consume_frames(struct dpaa2_eth_channel *ch) + return cleaned; + } + ++/* Configure the egress frame annotation for timestamp update */ ++static void enable_tx_tstamp(struct dpaa2_fd *fd, void *hwa_start) ++{ ++ struct dpaa2_faead *faead; ++ u32 ctrl; ++ u32 frc; ++ ++ /* Mark the egress frame annotation area as valid */ ++ frc = dpaa2_fd_get_frc(fd); ++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); ++ ++ /* enable UPD (update prepanded data) bit in FAEAD field of ++ * hardware frame annotation area ++ */ ++ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; ++ faead = hwa_start + DPAA2_FAEAD_OFFSET; ++ faead->ctrl = cpu_to_le32(ctrl); ++} ++ + /* Create a frame descriptor based on a fragmented skb */ + static int build_sg_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, +@@ -369,6 +383,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, + { + struct device *dev = priv->net_dev->dev.parent; + void *sgt_buf = NULL; ++ void *hwa; + dma_addr_t addr; + int nr_frags = skb_shinfo(skb)->nr_frags; + struct dpaa2_sg_entry *sgt; +@@ -414,7 +429,8 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, + * on TX confirmation. We are clearing FAS (Frame Annotation Status) + * field here. + */ +- memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8); ++ hwa = sgt_buf + priv->buf_layout.private_data_size; ++ memset(hwa, 0, 8); + + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + +@@ -459,6 +475,9 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | + DPAA2_FD_CTRL_PTV1; + ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ++ enable_tx_tstamp(fd, hwa); ++ + return 0; + + dma_map_single_failed: +@@ -479,6 +498,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, + u8 *buffer_start; + struct sk_buff **skbh; + dma_addr_t addr; ++ void *hwa; + + buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - + DPAA2_ETH_TX_BUF_ALIGN, +@@ -487,9 +507,10 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, + /* PTA from egress side is passed as is to the confirmation side so + * we need to clear some fields here in order to find consistent values + * on TX confirmation. We are clearing FAS (Frame Annotation Status) +- * field here. 
++ * field here + */ +- memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); ++ hwa = buffer_start + priv->buf_layout.private_data_size; ++ memset(hwa, 0, 8); + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it +@@ -512,6 +533,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | + DPAA2_FD_CTRL_PTV1; + ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ++ enable_tx_tstamp(fd, hwa); ++ + return 0; + } + +@@ -579,7 +603,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, + ns = (u64 *)((void *)skbh + + priv->buf_layout.private_data_size + + sizeof(struct dpaa2_fas)); +- *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); + shhwtstamps.hwtstamp = ns_to_ktime(*ns); + skb_tstamp_tx(skb, &shhwtstamps); + } +@@ -779,7 +803,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) + /* Allocate buffer visible to WRIOP + skb shared info + + * alignment padding + */ +- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); ++ buf = netdev_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); + if (unlikely(!buf)) + goto err_alloc; + +@@ -973,7 +997,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) + } + + if (cleaned < budget) { +- napi_complete_done(napi, cleaned); ++ napi_complete(napi); + /* Re-enable data available notifications */ + do { + err = dpaa2_io_service_rearm(NULL, &ch->nctx); +@@ -1353,7 +1377,7 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) + * in promisc mode, in order to avoid frame loss while we + * progressively add entries to the table. + * We don't know whether we had been in promisc already, and +- * making an MC call to find it is expensive; so set uc promisc ++ * making an MC call to find out is expensive; so set uc promisc + * nonetheless. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); +@@ -1498,48 +1522,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) + /* Update NAPI statistics */ + ch->stats.cdan++; + +- napi_schedule_irqoff(&ch->napi); +-} +- +-/* Verify that the FLIB API version of various MC objects is supported +- * by our driver +- */ +-static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) +-{ +- char *name = ls_dev->obj_desc.type; +- struct device *dev = &ls_dev->dev; +- u16 supported_version, flib_version; +- +- if (strcmp(name, "dpni") == 0) { +- flib_version = DPNI_VER_MAJOR; +- supported_version = DPAA2_SUPPORTED_DPNI_VERSION; +- } else if (strcmp(name, "dpbp") == 0) { +- flib_version = DPBP_VER_MAJOR; +- supported_version = DPAA2_SUPPORTED_DPBP_VERSION; +- } else if (strcmp(name, "dpcon") == 0) { +- flib_version = DPCON_VER_MAJOR; +- supported_version = DPAA2_SUPPORTED_DPCON_VERSION; +- } else { +- dev_err(dev, "invalid object type (%s)\n", name); +- return -EINVAL; +- } +- +- /* Check that the FLIB-defined version matches the one reported by MC */ +- if (mc_version != flib_version) { +- dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", +- name, mc_version, flib_version); +- return -EINVAL; +- } +- +- /* ... 
and that we actually support it */ +- if (mc_version < supported_version) { +- dev_err(dev, "Unsupported %s FLIB version (%d)\n", +- name, mc_version); +- return -EINVAL; +- } +- dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); +- +- return 0; ++ napi_schedule(&ch->napi); + } + + /* Allocate and configure a DPCON object */ +@@ -1563,16 +1546,18 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) + goto err_open; + } + ++ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_reset() failed\n"); ++ goto err_reset; ++ } ++ + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto err_get_attr; + } + +- err = check_obj_version(dpcon, attrs.version.major); +- if (err) +- goto err_dpcon_ver; +- + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_enable() failed\n"); +@@ -1582,8 +1567,8 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) + return dpcon; + + err_enable: +-err_dpcon_ver: + err_get_attr: ++err_reset: + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); + err_open: + fsl_mc_object_free(dpcon); +@@ -1849,6 +1834,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) + goto err_open; + } + ++ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_reset() failed\n"); ++ goto err_reset; ++ } ++ + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); +@@ -1862,16 +1853,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) + goto err_get_attr; + } + +- err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); +- if (err) +- goto err_dpbp_ver; +- + return 0; + +-err_dpbp_ver: + err_get_attr: + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); + err_enable: ++err_reset: + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); + err_open: + fsl_mc_object_free(dpbp_dev); +@@ -1911,6 +1898,12 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) + ls_dev->mc_io = priv->mc_io; + ls_dev->mc_handle = priv->mc_token; + ++ err = dpni_reset(priv->mc_io, 0, priv->mc_token); ++ if (err) { ++ dev_err(dev, "dpni_reset() failed\n"); ++ goto err_reset; ++ } ++ + /* Map a memory region which will be used by MC to pass us an + * attribute structure + */ +@@ -1940,10 +1933,6 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) + goto err_get_attr; + } + +- err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); +- if (err) +- goto err_dpni_ver; +- + memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); + err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); + if (err) { +@@ -2019,11 +2008,11 @@ err_cls_rule: + err_data_offset: + err_buf_layout: + err_extract: +-err_dpni_ver: + err_get_attr: + err_dma_map: + kfree(dma_mem); + err_alloc: ++err_reset: + dpni_close(priv->mc_io, 0, priv->mc_token); + err_open: + return err; +@@ -2157,6 +2146,131 @@ static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, + } + #endif + ++/* default hash key fields */ ++static struct dpaa2_eth_hash_fields default_hash_fields[] = { ++ { ++ /* L2 header */ ++ .rxnfc_field = RXH_L2DA, ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_DA, ++ .size = 6, ++ }, { ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_SA, ++ .size = 6, ++ }, { ++ /* This is the last ethertype field parsed: ++ * depending on frame format, it can be the MAC ethertype ++ * or the VLAN etype. 
++ */ ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_TYPE, ++ .size = 2, ++ }, { ++ /* VLAN header */ ++ .rxnfc_field = RXH_VLAN, ++ .cls_prot = NET_PROT_VLAN, ++ .cls_field = NH_FLD_VLAN_TCI, ++ .size = 2, ++ }, { ++ /* IP header */ ++ .rxnfc_field = RXH_IP_SRC, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_SRC, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_IP_DST, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_DST, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_L3_PROTO, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_PROTO, ++ .size = 1, ++ }, { ++ /* Using UDP ports, this is functionally equivalent to raw ++ * byte pairs from L4 header. ++ */ ++ .rxnfc_field = RXH_L4_B_0_1, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_SRC, ++ .size = 2, ++ }, { ++ .rxnfc_field = RXH_L4_B_2_3, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_DST, ++ .size = 2, ++ }, ++}; ++ ++/* Set RX hash options */ ++int set_hash(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpkg_profile_cfg cls_cfg; ++ struct dpni_rx_tc_dist_cfg dist_cfg; ++ u8 *dma_mem; ++ int i; ++ int err = 0; ++ ++ memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ ++ for (i = 0; i < priv->num_hash_fields; i++) { ++ struct dpkg_extract *key = ++ &cls_cfg.extracts[cls_cfg.num_extracts]; ++ ++ key->type = DPKG_EXTRACT_FROM_HDR; ++ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot; ++ key->extract.from_hdr.type = DPKG_FULL_FIELD; ++ key->extract.from_hdr.field = priv->hash_fields[i].cls_field; ++ cls_cfg.num_extracts++; ++ ++ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field; ++ } ++ ++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_prepare_key_cfg error %d", err); ++ return err; ++ } ++ ++ memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ ++ /* Prepare for setting the rx dist */ ++ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, ++ DPAA2_CLASSIFIER_DMA_SIZE, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ kfree(dma_mem); ++ return -ENOMEM; ++ } ++ ++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); ++ if (dpaa2_eth_fs_enabled(priv)) { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; ++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; ++ } else { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ } ++ ++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); ++ dma_unmap_single(dev, dist_cfg.key_cfg_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ kfree(dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ + /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, + * frame queues and channels + */ +@@ -2179,15 +2293,22 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) + return err; + } + +- check_fs_support(net_dev); ++ /* Verify classification options and disable hashing and/or ++ * flow steering support in case of invalid configuration values ++ */ ++ check_cls_support(priv); + +- /* have the interface implicitly distribute traffic based on supported +- * header fields ++ /* have the interface implicitly distribute traffic based on ++ * a static hash key + */ + if (dpaa2_eth_hash_enabled(priv)) { +- err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); +- if (err) ++ priv->hash_fields = default_hash_fields; ++ 
priv->num_hash_fields = ARRAY_SIZE(default_hash_fields); ++ err = set_hash(priv); ++ if (err) { ++ dev_err(dev, "Hashing configuration failed\n"); + return err; ++ } + } + + /* Configure handling of error frames */ +@@ -2512,7 +2633,7 @@ static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, + { + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); + +- return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); ++ return cpumask_scnprintf(buf, PAGE_SIZE, &priv->txconf_cpumask); + } + + static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +index 7274fbe..bdcdbd6 100644 +--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +@@ -40,7 +40,6 @@ + #include "../../fsl-mc/include/dpbp-cmd.h" + #include "../../fsl-mc/include/dpcon.h" + #include "../../fsl-mc/include/dpcon-cmd.h" +-#include "../../fsl-mc/include/dpmng.h" + #include "dpni.h" + #include "dpni-cmd.h" + +@@ -54,8 +53,8 @@ + */ + #define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) + +-/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced +- * Max Frame Length (currently 10k). ++/* Maximum acceptable MTU value. It is in direct relation with the hardware ++ * enforced Max Frame Length (currently 10k). + */ + #define DPAA2_ETH_MFL (10 * 1024) + #define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) +@@ -100,8 +99,8 @@ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + DPAA2_ETH_RX_BUF_ALIGN) + +-/* PTP nominal frequency 1MHz */ +-#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 ++/* PTP nominal frequency 1GHz */ ++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 + + /* We are accommodating a skb backpointer and some S/G info + * in the frame's software annotation. 
The hardware +@@ -138,6 +137,18 @@ struct dpaa2_fas { + __le32 status; + } __packed; + ++/* Frame annotation egress action descriptor */ ++#define DPAA2_FAEAD_OFFSET 0x58 ++ ++struct dpaa2_faead { ++ __le32 conf_fqid; ++ __le32 ctrl; ++}; ++ ++#define DPAA2_FAEAD_A2V 0x20000000 ++#define DPAA2_FAEAD_UPDV 0x00001000 ++#define DPAA2_FAEAD_UPD 0x00000010 ++ + /* Error and status bits in the frame annotation status word */ + /* Debug frame, otherwise supposed to be discarded */ + #define DPAA2_FAS_DISC 0x80000000 +@@ -274,6 +285,14 @@ struct dpaa2_eth_cls_rule { + bool in_use; + }; + ++struct dpaa2_eth_hash_fields { ++ u64 rxnfc_field; ++ enum net_prot cls_prot; ++ int cls_field; ++ int offset; ++ int size; ++}; ++ + /* Driver private data */ + struct dpaa2_eth_priv { + struct net_device *net_dev; +@@ -318,8 +337,10 @@ struct dpaa2_eth_priv { + bool do_link_poll; + struct task_struct *poll_thread; + ++ struct dpaa2_eth_hash_fields *hash_fields; ++ u8 num_hash_fields; + /* enabled ethtool hashing bits */ +- u64 rx_hash_fields; ++ u64 rx_flow_hash; + + #ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS + struct dpaa2_debugfs dbg; +@@ -334,25 +355,24 @@ struct dpaa2_eth_priv { + bool ts_rx_en; /* Rx timestamping enabled */ + }; + +-/* default Rx hash options, set during probing */ +-#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ +- | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ +- | RXH_L4_B_2_3) +- + #define dpaa2_eth_hash_enabled(priv) \ + ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) + + #define dpaa2_eth_fs_enabled(priv) \ + ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) + ++#define dpaa2_eth_fs_mask_enabled(priv) \ ++ ((priv)->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT) ++ + #define DPAA2_CLASSIFIER_ENTRY_COUNT 16 + + /* Required by struct dpni_attr::ext_cfg_iova */ + #define DPAA2_EXT_CFG_SIZE 256 + +-extern const struct ethtool_ops dpaa2_ethtool_ops; ++/* size of DMA memory used to pass configuration to classifier, in bytes */ ++#define DPAA2_CLASSIFIER_DMA_SIZE 256 + +-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); ++extern const struct ethtool_ops dpaa2_ethtool_ops; + + static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) + { +@@ -372,6 +392,6 @@ static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) + priv->dpni_attrs.max_senders); + } + +-void check_fs_support(struct net_device *); ++void check_cls_support(struct dpaa2_eth_priv *priv); + + #endif /* __DPAA2_H */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +index fdab07f..1d792cd 100644 +--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +@@ -32,9 +32,6 @@ + #include "dpni.h" /* DPNI_LINK_OPT_* */ + #include "dpaa2-eth.h" + +-/* size of DMA memory used to pass configuration to classifier, in bytes */ +-#define DPAA2_CLASSIFIER_DMA_SIZE 256 +- + /* To be kept in sync with 'enum dpni_counter' */ + char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { + "rx frames", +@@ -89,28 +86,9 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { + static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *drvinfo) + { +- struct mc_version mc_ver; +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- char fw_version[ETHTOOL_FWVERS_LEN]; +- char version[32]; +- int err; +- +- err = mc_get_version(priv->mc_io, 0, &mc_ver); +- if (err) { +- strlcpy(drvinfo->fw_version, "Error retrieving MC version", +- sizeof(drvinfo->fw_version)); +- } else { +- 
scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", +- mc_ver.major, mc_ver.minor, mc_ver.revision); +- strlcpy(drvinfo->fw_version, fw_version, +- sizeof(drvinfo->fw_version)); +- } +- +- scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, +- DPNI_VER_MINOR); +- strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); +- + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); ++ strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); ++ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info)); + } +@@ -152,7 +130,7 @@ static int dpaa2_eth_set_settings(struct net_device *net_dev, + + netdev_dbg(net_dev, "Setting link parameters..."); + +- /* Due to a temporary firmware limitation, the DPNI must be down ++ /* Due to a temporary MC limitation, the DPNI must be down + * in order to be able to change link settings. Taking steps to let + * the user know that. + */ +@@ -211,7 +189,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) + } + } + +-/** Fill in hardware counters, as returned by the MC firmware. ++/** Fill in hardware counters, as returned by MC. + */ + static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + struct ethtool_stats *stats, +@@ -296,203 +274,223 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + #endif + } + +-static const struct dpaa2_eth_hash_fields { +- u64 rxnfc_field; +- enum net_prot cls_prot; +- int cls_field; +- int size; +-} hash_fields[] = { +- { +- /* L2 header */ +- .rxnfc_field = RXH_L2DA, +- .cls_prot = NET_PROT_ETH, +- .cls_field = NH_FLD_ETH_DA, +- .size = 6, +- }, { +- /* VLAN header */ +- .rxnfc_field = RXH_VLAN, +- .cls_prot = NET_PROT_VLAN, +- .cls_field = NH_FLD_VLAN_TCI, +- .size = 2, +- }, { +- /* IP header */ +- .rxnfc_field = RXH_IP_SRC, +- .cls_prot = NET_PROT_IP, +- .cls_field = NH_FLD_IP_SRC, +- .size = 4, +- }, { +- .rxnfc_field = RXH_IP_DST, +- .cls_prot = NET_PROT_IP, +- .cls_field = NH_FLD_IP_DST, +- .size = 4, +- }, { +- .rxnfc_field = RXH_L3_PROTO, +- .cls_prot = NET_PROT_IP, +- .cls_field = NH_FLD_IP_PROTO, +- .size = 1, +- }, { +- /* Using UDP ports, this is functionally equivalent to raw +- * byte pairs from L4 header. 
+- */ +- .rxnfc_field = RXH_L4_B_0_1, +- .cls_prot = NET_PROT_UDP, +- .cls_field = NH_FLD_UDP_PORT_SRC, +- .size = 2, +- }, { +- .rxnfc_field = RXH_L4_B_2_3, +- .cls_prot = NET_PROT_UDP, +- .cls_field = NH_FLD_UDP_PORT_DST, +- .size = 2, +- }, +-}; +- +-static int cls_is_enabled(struct net_device *net_dev, u64 flag) +-{ +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- +- return !!(priv->rx_hash_fields & flag); +-} +- +-static int cls_key_off(struct net_device *net_dev, u64 flag) ++static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) + { + int i, off = 0; + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { +- if (hash_fields[i].rxnfc_field & flag) ++ for (i = 0; i < priv->num_hash_fields; i++) { ++ if (priv->hash_fields[i].cls_prot == prot && ++ priv->hash_fields[i].cls_field == field) + return off; +- if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) +- off += hash_fields[i].size; ++ off += priv->hash_fields[i].size; + } + + return -1; + } + +-static u8 cls_key_size(struct net_device *net_dev) ++static u8 cls_key_size(struct dpaa2_eth_priv *priv) + { + u8 i, size = 0; + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { +- if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) +- continue; +- size += hash_fields[i].size; +- } ++ for (i = 0; i < priv->num_hash_fields; i++) ++ size += priv->hash_fields[i].size; + + return size; + } + +-static u8 cls_max_key_size(struct net_device *net_dev) ++void check_cls_support(struct dpaa2_eth_priv *priv) + { +- u8 i, size = 0; ++ u8 key_size = cls_key_size(priv); ++ struct device *dev = priv->net_dev->dev.parent; ++ ++ if (dpaa2_eth_hash_enabled(priv)) { ++ if (priv->dpni_attrs.max_dist_key_size < key_size) { ++ dev_dbg(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", ++ priv->dpni_attrs.max_dist_key_size, ++ key_size); ++ goto disable_cls; ++ } ++ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) { ++ dev_dbg(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n", ++ DPKG_MAX_NUM_OF_EXTRACTS); ++ goto disable_cls; ++ } ++ } + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) +- size += hash_fields[i].size; ++ if (dpaa2_eth_fs_enabled(priv)) { ++ if (!dpaa2_eth_hash_enabled(priv)) { ++ dev_dbg(dev, "DPNI_OPT_DIST_HASH option missing. Steering is disabled\n"); ++ goto disable_cls; ++ } ++ if (!dpaa2_eth_fs_mask_enabled(priv)) { ++ dev_dbg(dev, "Key masks not supported. Steering is disabled\n"); ++ goto disable_fs; ++ } ++ } + +- return size; ++ return; ++ ++disable_cls: ++ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_HASH; ++disable_fs: ++ priv->dpni_attrs.options &= ~(DPNI_OPT_DIST_FS | ++ DPNI_OPT_FS_MASK_SUPPORT); + } + +-void check_fs_support(struct net_device *net_dev) ++static int prep_l4_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_tcpip4_spec *l4_value, ++ struct ethtool_tcpip4_spec *l4_mask, ++ void *key, void *mask, u8 l4_proto) + { +- u8 key_size = cls_max_key_size(net_dev); +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int offset; + +- if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && +- priv->dpni_attrs.max_dist_key_size < key_size) { +- dev_err(&net_dev->dev, +- "max_dist_key_size = %d, expected %d. 
Steering is disabled\n", +- priv->dpni_attrs.max_dist_key_size, +- key_size); +- priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS; ++ if (l4_mask->tos) { ++ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); ++ return -EOPNOTSUPP; + } +-} + +-/* Set RX hash options +- * flags is a combination of RXH_ bits +- */ +-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +-{ +- struct device *dev = net_dev->dev.parent; +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- struct dpkg_profile_cfg cls_cfg; +- struct dpni_rx_tc_dist_cfg dist_cfg; +- u8 *dma_mem; +- u64 enabled_flags = 0; +- int i; +- int err = 0; ++ if (l4_mask->ip4src) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); ++ *(u32 *)(key + offset) = l4_value->ip4src; ++ *(u32 *)(mask + offset) = l4_mask->ip4src; ++ } + +- if (!dpaa2_eth_hash_enabled(priv)) { +- dev_err(dev, "Hashing support is not enabled\n"); +- return -EOPNOTSUPP; ++ if (l4_mask->ip4dst) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); ++ *(u32 *)(key + offset) = l4_value->ip4dst; ++ *(u32 *)(mask + offset) = l4_mask->ip4dst; + } + +- if (flags & ~DPAA2_RXH_SUPPORTED) { +- /* RXH_DISCARD is not supported */ +- dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n"); +- return -EOPNOTSUPP; ++ if (l4_mask->psrc) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); ++ *(u32 *)(key + offset) = l4_value->psrc; ++ *(u32 *)(mask + offset) = l4_mask->psrc; + } + +- memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ if (l4_mask->pdst) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); ++ *(u32 *)(key + offset) = l4_value->pdst; ++ *(u32 *)(mask + offset) = l4_mask->pdst; ++ } + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { +- struct dpkg_extract *key = +- &cls_cfg.extracts[cls_cfg.num_extracts]; ++ /* Only apply the rule for the user-specified L4 protocol ++ * and if ethertype matches IPv4 ++ */ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); ++ *(u16 *)(key + offset) = htons(ETH_P_IP); ++ *(u16 *)(mask + offset) = 0xFFFF; + +- if (!(flags & hash_fields[i].rxnfc_field)) +- continue; ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); ++ *(u8 *)(key + offset) = l4_proto; ++ *(u8 *)(mask + offset) = 0xFF; + +- if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { +- dev_err(dev, "error adding key extraction rule, too many rules?\n"); +- return -E2BIG; +- } ++ /* TODO: check IP version */ + +- key->type = DPKG_EXTRACT_FROM_HDR; +- key->extract.from_hdr.prot = hash_fields[i].cls_prot; +- key->extract.from_hdr.type = DPKG_FULL_FIELD; +- key->extract.from_hdr.field = hash_fields[i].cls_field; +- cls_cfg.num_extracts++; ++ return 0; ++} ++ ++static int prep_eth_rule(struct dpaa2_eth_priv *priv, ++ struct ethhdr *eth_value, struct ethhdr *eth_mask, ++ void *key, void *mask) ++{ ++ int offset; + +- enabled_flags |= hash_fields[i].rxnfc_field; ++ if (eth_mask->h_proto) { ++ netdev_err(priv->net_dev, "Ethertype is not supported!\n"); ++ return -EOPNOTSUPP; + } + +- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); +- if (!dma_mem) +- return -ENOMEM; ++ if (!is_zero_ether_addr(eth_mask->h_source)) { ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); ++ ether_addr_copy(key + offset, eth_value->h_source); ++ ether_addr_copy(mask + offset, eth_mask->h_source); ++ } + +- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); +- if (err) { +- dev_err(dev, "dpni_prepare_key_cfg error %d", err); +- return err; ++ if (!is_zero_ether_addr(eth_mask->h_dest)) 
{ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); ++ ether_addr_copy(key + offset, eth_value->h_dest); ++ ether_addr_copy(mask + offset, eth_mask->h_dest); + } + +- memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ return 0; ++} + +- /* Prepare for setting the rx dist */ +- dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem, +- DPAA2_CLASSIFIER_DMA_SIZE, +- DMA_TO_DEVICE); +- if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) { +- dev_err(dev, "DMA mapping failed\n"); +- kfree(dma_mem); +- return -ENOMEM; ++static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_usrip4_spec *uip_value, ++ struct ethtool_usrip4_spec *uip_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (uip_mask->tos) ++ return -EOPNOTSUPP; ++ ++ if (uip_mask->ip4src) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); ++ *(u32 *)(key + offset) = uip_value->ip4src; ++ *(u32 *)(mask + offset) = uip_mask->ip4src; + } + +- dist_cfg.dist_size = dpaa2_eth_queue_count(priv); +- if (dpaa2_eth_fs_enabled(priv)) { +- dist_cfg.dist_mode = DPNI_DIST_MODE_FS; +- dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; +- } else { +- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ if (uip_mask->ip4dst) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); ++ *(u32 *)(key + offset) = uip_value->ip4dst; ++ *(u32 *)(mask + offset) = uip_mask->ip4dst; + } + +- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); +- dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, +- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); +- kfree(dma_mem); +- if (err) { +- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); +- return err; ++ if (uip_mask->proto) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); ++ *(u32 *)(key + offset) = uip_value->proto; ++ *(u32 *)(mask + offset) = uip_mask->proto; ++ } ++ if (uip_mask->l4_4_bytes) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); ++ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16; ++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16; ++ ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); ++ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF; ++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF; + } + +- priv->rx_hash_fields = enabled_flags; ++ /* Ethertype must be IP */ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); ++ *(u16 *)(key + offset) = htons(ETH_P_IP); ++ *(u16 *)(mask + offset) = 0xFFFF; ++ ++ return 0; ++} ++ ++static int prep_ext_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_flow_ext *ext_value, ++ struct ethtool_flow_ext *ext_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (ext_mask->vlan_etype) ++ return -EOPNOTSUPP; ++ ++ if (ext_mask->vlan_tci) { ++ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI); ++ *(u16 *)(key + offset) = ext_value->vlan_tci; ++ *(u16 *)(mask + offset) = ext_mask->vlan_tci; ++ } ++ ++ return 0; ++} ++ ++static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_flow_ext *ext_value, ++ struct ethtool_flow_ext *ext_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (!is_zero_ether_addr(ext_mask->h_dest)) { ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); ++ ether_addr_copy(key + offset, ext_value->h_dest); ++ ether_addr_copy(mask + offset, ext_mask->h_dest); ++ } + + return 0; + } +@@ -501,140 +499,56 @@ static int prep_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs, + void *key) + { +- struct 
ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; +- struct ethhdr *eth_h, *eth_m; +- struct ethtool_flow_ext *ext_h, *ext_m; +- const u8 key_size = cls_key_size(net_dev); ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const u8 key_size = cls_key_size(priv); + void *msk = key + key_size; ++ int err; + + memset(key, 0, key_size * 2); + +- /* This code is a major mess, it has to be cleaned up after the +- * classification mask issue is fixed and key format will be made static +- */ +- + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: +- l4ip4_h = &fs->h_u.tcp_ip4_spec; +- l4ip4_m = &fs->m_u.tcp_ip4_spec; +- /* TODO: ethertype to match IPv4 and protocol to match TCP */ +- goto l4ip4; +- ++ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec, ++ &fs->m_u.tcp_ip4_spec, key, msk, ++ IPPROTO_TCP); ++ break; + case UDP_V4_FLOW: +- l4ip4_h = &fs->h_u.udp_ip4_spec; +- l4ip4_m = &fs->m_u.udp_ip4_spec; +- goto l4ip4; +- ++ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec, ++ &fs->m_u.udp_ip4_spec, key, msk, ++ IPPROTO_UDP); ++ break; + case SCTP_V4_FLOW: +- l4ip4_h = &fs->h_u.sctp_ip4_spec; +- l4ip4_m = &fs->m_u.sctp_ip4_spec; +- +-l4ip4: +- if (l4ip4_m->tos) { +- netdev_err(net_dev, +- "ToS is not supported for IPv4 L4\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { +- netdev_err(net_dev, "IP SRC not supported!\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) { +- netdev_err(net_dev, "IP DST not supported!\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { +- netdev_err(net_dev, "PSRC not supported, ignored\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { +- netdev_err(net_dev, "PDST not supported, ignored\n"); +- return -EOPNOTSUPP; +- } +- +- if (cls_is_enabled(net_dev, RXH_IP_SRC)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC)) +- = l4ip4_h->ip4src; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) +- = l4ip4_m->ip4src; +- } +- if (cls_is_enabled(net_dev, RXH_IP_DST)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) +- = l4ip4_h->ip4dst; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) +- = l4ip4_m->ip4dst; +- } +- +- if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) +- = l4ip4_h->psrc; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) +- = l4ip4_m->psrc; +- } +- +- if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) +- = l4ip4_h->pdst; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) +- = l4ip4_m->pdst; +- } ++ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec, ++ &fs->m_u.sctp_ip4_spec, key, msk, ++ IPPROTO_SCTP); + break; +- + case ETHER_FLOW: +- eth_h = &fs->h_u.ether_spec; +- eth_m = &fs->m_u.ether_spec; +- +- if (eth_m->h_proto) { +- netdev_err(net_dev, "Ethertype is not supported!\n"); +- return -EOPNOTSUPP; +- } +- +- if (!is_zero_ether_addr(eth_m->h_source)) { +- netdev_err(net_dev, "ETH SRC is not supported!\n"); +- return -EOPNOTSUPP; +- } +- +- if (cls_is_enabled(net_dev, RXH_L2DA)) { +- ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), +- eth_h->h_dest); +- ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), +- eth_m->h_dest); +- } else { +- if (!is_zero_ether_addr(eth_m->h_dest)) { +- netdev_err(net_dev, +- "ETH DST is not supported!\n"); +- return -EOPNOTSUPP; +- } +- } ++ err = prep_eth_rule(priv, &fs->h_u.ether_spec, ++ &fs->m_u.ether_spec, key, msk); ++ break; ++ 
case IP_USER_FLOW: ++ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec, ++ &fs->m_u.usr_ip4_spec, key, msk); + break; +- + default: +- /* TODO: IP user flow, AH, ESP */ ++ /* TODO: AH, ESP */ + return -EOPNOTSUPP; + } ++ if (err) ++ return err; + + if (fs->flow_type & FLOW_EXT) { +- /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ +- return -EOPNOTSUPP; ++ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); ++ if (err) ++ return err; + } + + if (fs->flow_type & FLOW_MAC_EXT) { +- ext_h = &fs->h_ext; +- ext_m = &fs->m_ext; +- +- if (cls_is_enabled(net_dev, RXH_L2DA)) { +- ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), +- ext_h->h_dest); +- ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), +- ext_m->h_dest); +- } else { +- if (!is_zero_ether_addr(ext_m->h_dest)) { +- netdev_err(net_dev, +- "ETH DST is not supported!\n"); +- return -EOPNOTSUPP; +- } +- } ++ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); ++ if (err) ++ return err; + } ++ + return 0; + } + +@@ -643,6 +557,7 @@ static int do_cls(struct net_device *net_dev, + bool add) + { + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; + const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; + struct dpni_rule_cfg rule_cfg; + void *dma_mem; +@@ -660,7 +575,7 @@ static int do_cls(struct net_device *net_dev, + return -EINVAL; + + memset(&rule_cfg, 0, sizeof(rule_cfg)); +- rule_cfg.key_size = cls_key_size(net_dev); ++ rule_cfg.key_size = cls_key_size(priv); + + /* allocate twice the key size, for the actual key and for mask */ + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); +@@ -671,27 +586,12 @@ static int do_cls(struct net_device *net_dev, + if (err) + goto err_free_mem; + +- rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, ++ rule_cfg.key_iova = dma_map_single(dev, dma_mem, + rule_cfg.key_size * 2, + DMA_TO_DEVICE); + + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; + +- if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { +- int i; +- u8 *mask = dma_mem + rule_cfg.key_size; +- +- /* check that nothing is masked out, otherwise it won't work */ +- for (i = 0; i < rule_cfg.key_size; i++) { +- if (mask[i] == 0xff) +- continue; +- netdev_err(net_dev, "dev does not support masking!\n"); +- err = -EOPNOTSUPP; +- goto err_free_mem; +- } +- rule_cfg.mask_iova = 0; +- } +- + /* No way to control rule order in firmware */ + if (add) + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, +@@ -700,10 +600,10 @@ static int do_cls(struct net_device *net_dev, + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, + &rule_cfg); + +- dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, ++ dma_unmap_single(dev, rule_cfg.key_iova, + rule_cfg.key_size * 2, DMA_TO_DEVICE); + if (err) { +- netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); ++ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); + goto err_free_mem; + } + +@@ -746,40 +646,12 @@ static int del_cls(struct net_device *net_dev, int location) + return 0; + } + +-static void clear_cls(struct net_device *net_dev) +-{ +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- int i, err; +- +- for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) { +- if (!priv->cls_rule[i].in_use) +- continue; +- +- err = del_cls(net_dev, i); +- if (err) +- netdev_warn(net_dev, +- "err trying to delete classification entry %d\n", +- i); +- } +-} +- + static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc) + { + int err 
= 0; + + switch (rxnfc->cmd) { +- case ETHTOOL_SRXFH: +- /* first off clear ALL classification rules, chaging key +- * composition will break them anyway +- */ +- clear_cls(net_dev); +- /* we purposely ignore cmd->flow_type for now, because the +- * classifier only supports a single set of fields for all +- * protocols +- */ +- err = dpaa2_eth_set_hash(net_dev, rxnfc->data); +- break; + case ETHTOOL_SRXCLSRLINS: + err = add_cls(net_dev, &rxnfc->fs); + break; +@@ -804,11 +676,10 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, + + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: +- /* we purposely ignore cmd->flow_type for now, because the +- * classifier only supports a single set of fields for all +- * protocols ++ /* we purposely ignore cmd->flow_type, because the hashing key ++ * is the same (and fixed) for all protocols + */ +- rxnfc->data = priv->rx_hash_fields; ++ rxnfc->data = priv->rx_flow_hash; + break; + + case ETHTOOL_GRXRINGS: +diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c +index 366ad4c..fe16b8b 100644 +--- a/drivers/staging/fsl-dpaa2/mac/mac.c ++++ b/drivers/staging/fsl-dpaa2/mac/mac.c +@@ -120,7 +120,7 @@ static void dpaa2_mac_link_changed(struct net_device *netdev) + phy_print_status(phydev); + } + +- /* We must call into the MC firmware at all times, because we don't know ++ /* We must interrogate MC at all times, because we don't know + * when and whether a potential DPNI may have read the link state. + */ + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, +@@ -532,7 +532,7 @@ static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) + goto err_close; + } + +- dev_info_once(dev, "Using DPMAC API %d.%d\n", ++ dev_warn(dev, "Using DPMAC API %d.%d\n", + priv->attr.version.major, priv->attr.version.minor); + + /* Look up the DPMAC node in the device-tree. 
*/ +diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c +index f8d8cbe..5b6fa1c 100644 +--- a/drivers/staging/fsl-mc/bus/dprc-driver.c ++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c +@@ -1078,7 +1078,7 @@ int __init dprc_driver_init(void) + return fsl_mc_driver_register(&dprc_driver); + } + +-void __exit dprc_driver_exit(void) ++void dprc_driver_exit(void) + { + fsl_mc_driver_unregister(&dprc_driver); + } +diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h +index 1246ca8..58ed441 100644 +--- a/drivers/staging/fsl-mc/include/mc-private.h ++++ b/drivers/staging/fsl-mc/include/mc-private.h +@@ -143,7 +143,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, + + int __init dprc_driver_init(void); + +-void __exit dprc_driver_exit(void); ++void dprc_driver_exit(void); + + int __init fsl_mc_allocator_driver_init(void); + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index f951b75..600a137 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1685,8 +1685,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + cpu_to_le32(EP_STATE_DISABLED)) || + le32_to_cpu(ctrl_ctx->drop_flags) & + xhci_get_endpoint_flag(&ep->desc)) { +- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", +- __func__, ep); ++ /* Do not warn when called after a usb_device_reset */ ++ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) ++ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", ++ __func__, ep); + return 0; + } + +diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h +index 84d971f..f13b12e 100644 +--- a/include/linux/fsl/guts.h ++++ b/include/linux/fsl/guts.h +@@ -29,83 +29,86 @@ + * #ifdefs. 
+ */ + struct ccsr_guts { +- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ +- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ +- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ +- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ +- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ +- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ ++ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ ++ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ ++ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ ++ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ ++ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ ++ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; +- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ ++ u32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u8 res024[0x30 - 0x24]; +- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ ++ u32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; +- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ ++ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u8 res044[0x50 - 0x44]; +- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ ++ u32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u8 res054[0x60 - 0x54]; +- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ +- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ +- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ ++ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ ++ u32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ ++ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u8 res06c[0x70 - 0x6c]; +- __be32 devdisr; /* 0x.0070 - Device Disable Control */ ++ u32 devdisr; /* 0x.0070 - Device Disable Control */ + #define CCSR_GUTS_DEVDISR_TB1 0x00001000 + #define CCSR_GUTS_DEVDISR_TB0 0x00004000 +- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ ++ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; +- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ +- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ +- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ +- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ +- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ +- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ +- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ +- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ +- __be32 autorstsr; /* 0x.009c - Automatic reset status register */ +- __be32 pvr; /* 0x.00a0 - Processor Version Register */ +- __be32 svr; /* 0x.00a4 - System Version Register */ ++ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ ++ u32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ ++ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ ++ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ ++ u32 pmcdr; /* 0x.008c - 4Power management clock disable register */ ++ u32 mcpsumr; /* 0x.0090 - Machine Check Summary 
Register */ ++ u32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ ++ u32 ectrstcr; /* 0x.0098 - Exception reset control register */ ++ u32 autorstsr; /* 0x.009c - Automatic reset status register */ ++ u32 pvr; /* 0x.00a0 - Processor Version Register */ ++ u32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; +- __be32 rstcr; /* 0x.00b0 - Reset Control Register */ ++ u32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; +- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register ++ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register + Called 'elbcvselcr' on 86xx SOCs */ + u8 res0c4[0x100 - 0xc4]; +- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers ++ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; +- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ +- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ ++ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ ++ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x604 - 0x22c]; +- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ ++ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; +- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ ++ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; +- __be32 ircr; /* 0x.0900 - Infrared Control Register */ ++ u32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; +- __be32 dmacr; /* 0x.0908 - DMA Control Register */ ++ u32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; +- __be32 elbccr; /* 0x.0914 - eLBC Control Register */ ++ u32 elbccr; /* 0x.0914 - eLBC Control Register */ + u8 res918[0xb20 - 0x918]; +- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ +- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ +- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ ++ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ ++ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ ++ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u8 resb2c[0xe00 - 0xb2c]; +- __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ ++ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u8 rese04[0xe10 - 0xe04]; +- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ ++ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u8 rese14[0xe20 - 0xe14]; +- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ +- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ ++ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ ++ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u8 rese28[0xf04 - 0xe28]; +- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ +- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ ++ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ ++ u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u8 resf0c[0xf2c - 0xf0c]; +- __be32 itcr; /* 0x.0f2c - Internal transaction control register */ ++ u32 itcr; /* 0x.0f2c - Internal transaction control register */ + u8 resf30[0xf40 - 0xf30]; +- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ +- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ ++ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ ++ u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ + } __attribute__ 
((packed)); + ++#ifdef CONFIG_FSL_GUTS ++extern u32 guts_get_svr(void); ++#endif + + /* Alternate function signal multiplex control */ + #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) +diff --git a/include/linux/fsl/svr.h b/include/linux/fsl/svr.h +new file mode 100644 +index 0000000..8d13836 +--- /dev/null ++++ b/include/linux/fsl/svr.h +@@ -0,0 +1,95 @@ ++/* ++ * MPC85xx cpu type detection ++ * ++ * Copyright 2011-2012 Freescale Semiconductor, Inc. ++ * ++ * This is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++#ifndef FSL_SVR_H ++#define FSL_SVR_H ++ ++#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ ++#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ ++#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ ++ ++/* Some parts define SVR[0:23] as the SOC version */ ++#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ ++ ++#define SVR_8533 0x803400 ++#define SVR_8535 0x803701 ++#define SVR_8536 0x803700 ++#define SVR_8540 0x803000 ++#define SVR_8541 0x807200 ++#define SVR_8543 0x803200 ++#define SVR_8544 0x803401 ++#define SVR_8545 0x803102 ++#define SVR_8547 0x803101 ++#define SVR_8548 0x803100 ++#define SVR_8555 0x807100 ++#define SVR_8560 0x807000 ++#define SVR_8567 0x807501 ++#define SVR_8568 0x807500 ++#define SVR_8569 0x808000 ++#define SVR_8572 0x80E000 ++#define SVR_P1010 0x80F100 ++#define SVR_P1011 0x80E500 ++#define SVR_P1012 0x80E501 ++#define SVR_P1013 0x80E700 ++#define SVR_P1014 0x80F101 ++#define SVR_P1017 0x80F700 ++#define SVR_P1020 0x80E400 ++#define SVR_P1021 0x80E401 ++#define SVR_P1022 0x80E600 ++#define SVR_P1023 0x80F600 ++#define SVR_P1024 0x80E402 ++#define SVR_P1025 0x80E403 ++#define SVR_P2010 0x80E300 ++#define SVR_P2020 0x80E200 ++#define SVR_P2040 0x821000 ++#define SVR_P2041 0x821001 ++#define SVR_P3041 0x821103 ++#define SVR_P4040 0x820100 ++#define SVR_P4080 0x820000 ++#define SVR_P5010 0x822100 ++#define SVR_P5020 0x822000 ++#define SVR_P5021 0X820500 ++#define SVR_P5040 0x820400 ++#define SVR_T4240 0x824000 ++#define SVR_T4120 0x824001 ++#define SVR_T4160 0x824100 ++#define SVR_T4080 0x824102 ++#define SVR_C291 0x850000 ++#define SVR_C292 0x850020 ++#define SVR_C293 0x850030 ++#define SVR_B4860 0X868000 ++#define SVR_G4860 0x868001 ++#define SVR_G4060 0x868003 ++#define SVR_B4440 0x868100 ++#define SVR_G4440 0x868101 ++#define SVR_B4420 0x868102 ++#define SVR_B4220 0x868103 ++#define SVR_T1040 0x852000 ++#define SVR_T1041 0x852001 ++#define SVR_T1042 0x852002 ++#define SVR_T1020 0x852100 ++#define SVR_T1021 0x852101 ++#define SVR_T1022 0x852102 ++#define SVR_T2080 0x853000 ++#define SVR_T2081 0x853100 ++ ++#define SVR_8610 0x80A000 ++#define SVR_8641 0x809000 ++#define SVR_8641D 0x809001 ++ ++#define SVR_9130 0x860001 ++#define SVR_9131 0x860000 ++#define SVR_9132 0x861000 ++#define SVR_9232 0x861400 ++ ++#define SVR_Unknown 0xFFFFFF ++ ++#endif +diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h +index 84d60cb..3f9778c 100644 +--- a/include/linux/fsl_ifc.h ++++ b/include/linux/fsl_ifc.h +@@ -29,7 +29,20 @@ + #include + #include + +-#define FSL_IFC_BANK_COUNT 4 ++/* ++ * The actual number of banks implemented depends on the IFC version ++ * - IFC version 1.0 implements 4 banks. ++ * - IFC version 1.1 onward implements 8 banks. 
++ */ ++#define FSL_IFC_BANK_COUNT 8 ++ ++#define FSL_IFC_VERSION_MASK 0x0F0F0000 ++#define FSL_IFC_VERSION_1_0_0 0x01000000 ++#define FSL_IFC_VERSION_1_1_0 0x01010000 ++#define FSL_IFC_VERSION_2_0_0 0x02000000 ++ ++#define PGOFFSET_64K (64*1024) ++#define PGOFFSET_4K (4*1024) + + /* + * CSPR - Chip Select Property Register +@@ -714,20 +727,26 @@ struct fsl_ifc_nand { + __be32 nand_evter_en; + u32 res17[0x2]; + __be32 nand_evter_intr_en; +- u32 res18[0x2]; ++ __be32 nand_vol_addr_stat; ++ u32 res18; + __be32 nand_erattr0; + __be32 nand_erattr1; + u32 res19[0x10]; + __be32 nand_fsr; +- u32 res20; +- __be32 nand_eccstat[4]; +- u32 res21[0x20]; ++ u32 res20[0x3]; ++ __be32 nand_eccstat[6]; ++ u32 res21[0x1c]; + __be32 nanndcr; + u32 res22[0x2]; + __be32 nand_autoboot_trgr; + u32 res23; + __be32 nand_mdr; +- u32 res24[0x5C]; ++ u32 res24[0x1C]; ++ __be32 nand_dll_lowcfg0; ++ __be32 nand_dll_lowcfg1; ++ u32 res25; ++ __be32 nand_dll_lowstat; ++ u32 res26[0x3c]; + }; + + /* +@@ -762,13 +781,12 @@ struct fsl_ifc_gpcm { + __be32 gpcm_erattr1; + __be32 gpcm_erattr2; + __be32 gpcm_stat; +- u32 res4[0x1F3]; + }; + + /* + * IFC Controller Registers + */ +-struct fsl_ifc_regs { ++struct fsl_ifc_global { + __be32 ifc_rev; + u32 res1[0x2]; + struct { +@@ -776,39 +794,44 @@ struct fsl_ifc_regs { + __be32 cspr; + u32 res2; + } cspr_cs[FSL_IFC_BANK_COUNT]; +- u32 res3[0x19]; ++ u32 res3[0xd]; + struct { + __be32 amask; + u32 res4[0x2]; + } amask_cs[FSL_IFC_BANK_COUNT]; +- u32 res5[0x18]; ++ u32 res5[0xc]; + struct { + __be32 csor; + __be32 csor_ext; + u32 res6; + } csor_cs[FSL_IFC_BANK_COUNT]; +- u32 res7[0x18]; ++ u32 res7[0xc]; + struct { + __be32 ftim[4]; + u32 res8[0x8]; + } ftim_cs[FSL_IFC_BANK_COUNT]; +- u32 res9[0x60]; ++ u32 res9[0x30]; + __be32 rb_stat; +- u32 res10[0x2]; ++ __be32 rb_map; ++ __be32 wb_map; + __be32 ifc_gcr; +- u32 res11[0x2]; ++ u32 res10[0x2]; + __be32 cm_evter_stat; +- u32 res12[0x2]; ++ u32 res11[0x2]; + __be32 cm_evter_en; +- u32 res13[0x2]; ++ u32 res12[0x2]; + __be32 cm_evter_intr_en; +- u32 res14[0x2]; ++ u32 res13[0x2]; + __be32 cm_erattr0; + __be32 cm_erattr1; +- u32 res15[0x2]; ++ u32 res14[0x2]; + __be32 ifc_ccr; + __be32 ifc_csr; +- u32 res16[0x2EB]; ++ __be32 ddr_ccr_low; ++}; ++ ++ ++struct fsl_ifc_runtime { + struct fsl_ifc_nand ifc_nand; + struct fsl_ifc_nor ifc_nor; + struct fsl_ifc_gpcm ifc_gpcm; +@@ -822,17 +845,70 @@ extern int fsl_ifc_find(phys_addr_t addr_base); + struct fsl_ifc_ctrl { + /* device info */ + struct device *dev; +- struct fsl_ifc_regs __iomem *regs; ++ struct fsl_ifc_global __iomem *gregs; ++ struct fsl_ifc_runtime __iomem *rregs; + int irq; + int nand_irq; + spinlock_t lock; + void *nand; ++ int version; ++ int banks; + + u32 nand_stat; + wait_queue_head_t nand_wait; ++ bool little_endian; + }; + + extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + ++static inline u32 ifc_in32(void __iomem *addr) ++{ ++ u32 val; ++ ++ if (fsl_ifc_ctrl_dev->little_endian) ++ val = ioread32(addr); ++ else ++ val = ioread32be(addr); ++ ++ return val; ++} ++ ++static inline u16 ifc_in16(void __iomem *addr) ++{ ++ u16 val; ++ ++ if (fsl_ifc_ctrl_dev->little_endian) ++ val = ioread16(addr); ++ else ++ val = ioread16be(addr); ++ ++ return val; ++} ++ ++static inline u8 ifc_in8(void __iomem *addr) ++{ ++ return ioread8(addr); ++} ++ ++static inline void ifc_out32(u32 val, void __iomem *addr) ++{ ++ if (fsl_ifc_ctrl_dev->little_endian) ++ iowrite32(val, addr); ++ else ++ iowrite32be(val, addr); ++} ++ ++static inline void ifc_out16(u16 val, void __iomem *addr) ++{ ++ if 
(fsl_ifc_ctrl_dev->little_endian) ++ iowrite16(val, addr); ++ else ++ iowrite16be(val, addr); ++} ++ ++static inline void ifc_out8(u8 val, void __iomem *addr) ++{ ++ iowrite8(val, addr); ++} + + #endif /* __ASM_FSL_IFC_H */ +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index 69517a2..cbbe6a2 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -356,6 +356,20 @@ static inline int disable_irq_wake(unsigned int irq) + return irq_set_irq_wake(irq, 0); + } + ++/* ++ * irq_get_irqchip_state/irq_set_irqchip_state specific flags ++ */ ++enum irqchip_irq_state { ++ IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ ++ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ ++ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ ++ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */ ++}; ++ ++extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool *state); ++extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool state); + + #ifdef CONFIG_IRQ_FORCED_THREADING + extern bool force_irqthreads; +diff --git a/include/linux/iommu.h b/include/linux/iommu.h +index 04229cb..7421bdf 100644 +--- a/include/linux/iommu.h ++++ b/include/linux/iommu.h +@@ -30,6 +30,7 @@ + #define IOMMU_WRITE (1 << 1) + #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ + #define IOMMU_NOEXEC (1 << 3) ++#define IOMMU_MMIO (1 << 4) /* Device memory access */ + + struct iommu_ops; + struct iommu_group; +diff --git a/include/linux/irq.h b/include/linux/irq.h +index 9ba173b..4931a8b 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -30,6 +30,7 @@ + struct seq_file; + struct module; + struct msi_msg; ++enum irqchip_irq_state; + + /* + * IRQ line status. +@@ -324,6 +325,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) + * irq_request_resources + * @irq_compose_msi_msg: optional to compose message content for MSI + * @irq_write_msi_msg: optional to write message content for MSI ++ * @irq_get_irqchip_state: return the internal state of an interrupt ++ * @irq_set_irqchip_state: set the internal state of a interrupt + * @flags: chip specific flags + */ + struct irq_chip { +@@ -363,6 +366,9 @@ struct irq_chip { + void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); + void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); + ++ int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); ++ int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); ++ + unsigned long flags; + }; + +@@ -460,6 +466,8 @@ extern void irq_chip_eoi_parent(struct irq_data *data); + extern int irq_chip_set_affinity_parent(struct irq_data *data, + const struct cpumask *dest, + bool force); ++extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); ++extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); + #endif + + /* Handling of unhandled and spurious interrupts: */ +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h +index da1aa15..36caf46 100644 +--- a/include/linux/irqchip/arm-gic-v3.h ++++ b/include/linux/irqchip/arm-gic-v3.h +@@ -270,6 +270,18 @@ + #define ICC_SRE_EL2_SRE (1 << 0) + #define ICC_SRE_EL2_ENABLE (1 << 3) + ++#define ICC_SGI1R_TARGET_LIST_SHIFT 0 ++#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) ++#define ICC_SGI1R_AFFINITY_1_SHIFT 16 ++#define ICC_SGI1R_AFFINITY_1_MASK (0xff << 
ICC_SGI1R_AFFINITY_1_SHIFT) ++#define ICC_SGI1R_SGI_ID_SHIFT 24 ++#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) ++#define ICC_SGI1R_AFFINITY_2_SHIFT 32 ++#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) ++#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 ++#define ICC_SGI1R_AFFINITY_3_SHIFT 48 ++#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) ++ + /* + * System register definitions + */ +diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h +index 13eed92..60b09ed 100644 +--- a/include/linux/irqchip/arm-gic.h ++++ b/include/linux/irqchip/arm-gic.h +@@ -106,6 +106,8 @@ static inline void gic_init(unsigned int nr, int start, + gic_init_bases(nr, start, dist, cpu, 0, NULL); + } + ++int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); ++ + void gic_send_sgi(unsigned int cpu_id, unsigned int irq); + int gic_get_cpu_id(unsigned int cpu); + void gic_migrate_target(unsigned int new_cpu_id); +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h +index ebace05..3c5ca45 100644 +--- a/include/linux/irqdomain.h ++++ b/include/linux/irqdomain.h +@@ -56,6 +56,7 @@ enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_PCI_MSI, + DOMAIN_BUS_PLATFORM_MSI, ++ DOMAIN_BUS_NEXUS, + }; + + /** +diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h +index dba793e..62d966a 100644 +--- a/include/linux/mmc/sdhci.h ++++ b/include/linux/mmc/sdhci.h +@@ -100,6 +100,10 @@ struct sdhci_host { + #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) + /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ + #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) ++/* Controller does not support 64-bit DMA */ ++#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) ++/* Controller broken with using ACMD23 */ ++#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) + + int irq; /* Device IRQ */ + void __iomem *ioaddr; /* Mapped address */ +@@ -130,6 +134,7 @@ struct sdhci_host { + #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ + #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ + #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ ++#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ + + unsigned int version; /* SDHCI spec. version */ + +@@ -155,12 +160,19 @@ struct sdhci_host { + + int sg_count; /* Mapped sg entries */ + +- u8 *adma_desc; /* ADMA descriptor table */ +- u8 *align_buffer; /* Bounce buffer */ ++ void *adma_table; /* ADMA descriptor table */ ++ void *align_buffer; /* Bounce buffer */ ++ ++ size_t adma_table_sz; /* ADMA descriptor table size */ ++ size_t align_buffer_sz; /* Bounce buffer size */ + + dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ + dma_addr_t align_addr; /* Mapped bounce buffer */ + ++ unsigned int desc_sz; /* ADMA descriptor size */ ++ unsigned int align_sz; /* ADMA alignment */ ++ unsigned int align_mask; /* ADMA alignment mask */ ++ + struct tasklet_struct finish_tasklet; /* Tasklet structures */ + + struct timer_list timer; /* Timer for timeouts */ +diff --git a/include/linux/of.h b/include/linux/of.h +index 4a6a489..25111fb 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -57,7 +57,6 @@ struct device_node { + struct device_node *child; + struct device_node *sibling; + struct device_node *next; /* next device of same type */ +- struct device_node *allnext; /* next in list of all nodes */ + struct kobject kobj; + unsigned long _flags; + void *data; +@@ -109,7 +108,7 @@ static inline void of_node_put(struct device_node *node) { } + #ifdef CONFIG_OF + + /* Pointer for first entry in chain of all nodes. */ +-extern struct device_node *of_allnodes; ++extern struct device_node *of_root; + extern struct device_node *of_chosen; + extern struct device_node *of_aliases; + extern struct device_node *of_stdout; +@@ -117,7 +116,7 @@ extern raw_spinlock_t devtree_lock; + + static inline bool of_have_populated_dt(void) + { +- return of_allnodes != NULL; ++ return of_root != NULL; + } + + static inline bool of_node_is_root(const struct device_node *node) +@@ -161,6 +160,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag + clear_bit(flag, &p->_flags); + } + ++extern struct device_node *__of_find_all_nodes(struct device_node *prev); + extern struct device_node *of_find_all_nodes(struct device_node *prev); + + /* +@@ -216,8 +216,9 @@ static inline const char *of_node_full_name(const struct device_node *np) + return np ? np->full_name : ""; + } + +-#define for_each_of_allnodes(dn) \ +- for (dn = of_allnodes; dn; dn = dn->allnext) ++#define for_each_of_allnodes_from(from, dn) \ ++ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) ++#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) + extern struct device_node *of_find_node_by_name(struct device_node *from, + const char *name); + extern struct device_node *of_find_node_by_type(struct device_node *from, +diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h +index c65a18a..7e09244 100644 +--- a/include/linux/of_pdt.h ++++ b/include/linux/of_pdt.h +@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); + /* for building the device tree */ + extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); + +-extern void (*of_pdt_build_more)(struct device_node *dp, +- struct device_node ***nextp); ++extern void (*of_pdt_build_more)(struct device_node *dp); + + #endif /* _LINUX_OF_PDT_H */ +diff --git a/include/linux/pci.h b/include/linux/pci.h +index a99f301..f28c88b 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -562,6 +562,7 @@ static inline int pcibios_err_to_errno(int err) + /* Low-level architecture-dependent routines */ + + struct pci_ops { ++ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); + int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); + int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); + }; +@@ -859,6 +860,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 val); + int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 val); ++ ++int 
pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val); ++int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val); ++int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val); ++int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val); ++ + struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); + + static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) +diff --git a/include/linux/phy.h b/include/linux/phy.h +index d090cfc..eda18a8 100644 +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -700,6 +700,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, + struct phy_c45_device_ids *c45_ids); + struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); + int phy_device_register(struct phy_device *phy); ++void phy_device_remove(struct phy_device *phydev); + int phy_init_hw(struct phy_device *phydev); + int phy_suspend(struct phy_device *phydev); + int phy_resume(struct phy_device *phydev); +diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h +index f2ca1b4..fe5732d 100644 +--- a/include/linux/phy_fixed.h ++++ b/include/linux/phy_fixed.h +@@ -11,7 +11,7 @@ struct fixed_phy_status { + + struct device_node; + +-#ifdef CONFIG_FIXED_PHY ++#if IS_ENABLED(CONFIG_FIXED_PHY) + extern int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status); + extern struct phy_device *fixed_phy_register(unsigned int irq, +@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr); + extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)); ++extern int fixed_phy_update_state(struct phy_device *phydev, ++ const struct fixed_phy_status *status, ++ const struct fixed_phy_status *changed); + #else + static inline int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status) +@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, + { + return -ENODEV; + } ++static inline int fixed_phy_update_state(struct phy_device *phydev, ++ const struct fixed_phy_status *status, ++ const struct fixed_phy_status *changed) ++{ ++ return -ENODEV; ++} + #endif /* CONFIG_FIXED_PHY */ + + #endif /* __PHY_FIXED_H */ +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c +index 63c16d1..55dd2fb 100644 +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -731,7 +731,30 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + if (!handle) { + handle = handle_bad_irq; + } else { +- if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) ++ struct irq_data *irq_data = &desc->irq_data; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ /* ++ * With hierarchical domains we might run into a ++ * situation where the outermost chip is not yet set ++ * up, but the inner chips are there. Instead of ++ * bailing we install the handler, but obviously we ++ * cannot enable/startup the interrupt at this point. ++ */ ++ while (irq_data) { ++ if (irq_data->chip != &no_irq_chip) ++ break; ++ /* ++ * Bail out if the outer chip is not set up ++ * and the interrrupt supposed to be started ++ * right away. 
++ */ ++ if (WARN_ON(is_chained)) ++ goto out; ++ /* Try the parent */ ++ irq_data = irq_data->parent_data; ++ } ++#endif ++ if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) + goto out; + } + +@@ -911,6 +934,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data, + } + + /** ++ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt ++ * @data: Pointer to interrupt specific data ++ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h ++ * ++ * Conditional, as the underlying parent chip might not implement it. ++ */ ++int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) ++{ ++ data = data->parent_data; ++ ++ if (data->chip->irq_set_type) ++ return data->chip->irq_set_type(data, type); ++ ++ return -ENOSYS; ++} ++ ++/** + * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware + * @data: Pointer to interrupt specific data + * +@@ -925,6 +965,22 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data) + + return -ENOSYS; + } ++ ++/** ++ * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt ++ * @data: Pointer to interrupt specific data ++ * @on: Whether to set or reset the wake-up capability of this irq ++ * ++ * Conditional, as the underlying parent chip might not implement it. ++ */ ++int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) ++{ ++ data = data->parent_data; ++ if (data->chip->irq_set_wake) ++ return data->chip->irq_set_wake(data, on); ++ ++ return -ENOSYS; ++} + #endif + + /** +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 8069237..acb401f 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1758,3 +1758,94 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler, + + return retval; + } ++ ++/** ++ * irq_get_irqchip_state - returns the irqchip state of a interrupt. ++ * @irq: Interrupt line that is forwarded to a VM ++ * @which: One of IRQCHIP_STATE_* the caller wants to know about ++ * @state: a pointer to a boolean where the state is to be storeed ++ * ++ * This call snapshots the internal irqchip state of an ++ * interrupt, returning into @state the bit corresponding to ++ * stage @which ++ * ++ * This function should be called with preemption disabled if the ++ * interrupt controller has per-cpu registers. ++ */ ++int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool *state) ++{ ++ struct irq_desc *desc; ++ struct irq_data *data; ++ struct irq_chip *chip; ++ unsigned long flags; ++ int err = -EINVAL; ++ ++ desc = irq_get_desc_buslock(irq, &flags, 0); ++ if (!desc) ++ return err; ++ ++ data = irq_desc_get_irq_data(desc); ++ ++ do { ++ chip = irq_data_get_irq_chip(data); ++ if (chip->irq_get_irqchip_state) ++ break; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ data = data->parent_data; ++#else ++ data = NULL; ++#endif ++ } while (data); ++ ++ if (data) ++ err = chip->irq_get_irqchip_state(data, which, state); ++ ++ irq_put_desc_busunlock(desc, flags); ++ return err; ++} ++ ++/** ++ * irq_set_irqchip_state - set the state of a forwarded interrupt. ++ * @irq: Interrupt line that is forwarded to a VM ++ * @which: State to be restored (one of IRQCHIP_STATE_*) ++ * @val: Value corresponding to @which ++ * ++ * This call sets the internal irqchip state of an interrupt, ++ * depending on the value of @which. ++ * ++ * This function should be called with preemption disabled if the ++ * interrupt controller has per-cpu registers. 
++ */ ++int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool val) ++{ ++ struct irq_desc *desc; ++ struct irq_data *data; ++ struct irq_chip *chip; ++ unsigned long flags; ++ int err = -EINVAL; ++ ++ desc = irq_get_desc_buslock(irq, &flags, 0); ++ if (!desc) ++ return err; ++ ++ data = irq_desc_get_irq_data(desc); ++ ++ do { ++ chip = irq_data_get_irq_chip(data); ++ if (chip->irq_set_irqchip_state) ++ break; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ data = data->parent_data; ++#else ++ data = NULL; ++#endif ++ } while (data); ++ ++ if (data) ++ err = chip->irq_set_irqchip_state(data, which, val); ++ ++ irq_put_desc_busunlock(desc, flags); ++ return err; ++} +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +index 2495ed0..54433c2 100644 +--- a/kernel/irq/msi.c ++++ b/kernel/irq/msi.c +@@ -106,8 +106,10 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq = ops->get_hwirq(info, arg); + int i, ret; + ++#if 0 + if (irq_find_mapping(domain, hwirq) > 0) + return -EEXIST; ++#endif + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); + if (ret < 0) +@@ -327,8 +329,15 @@ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) + struct msi_desc *desc; + + for_each_msi_entry(desc, dev) { +- irq_domain_free_irqs(desc->irq, desc->nvec_used); +- desc->irq = 0; ++ /* ++ * We might have failed to allocate an MSI early ++ * enough that there is no IRQ associated to this ++ * entry. If that's the case, don't do anything. ++ */ ++ if (desc->irq) { ++ irq_domain_free_irqs(desc->irq, desc->nvec_used); ++ desc->irq = 0; ++ } + } + } + +diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c +index fa756d0..ad57f0c 100644 +--- a/sound/soc/fsl/mpc8610_hpcd.c ++++ b/sound/soc/fsl/mpc8610_hpcd.c +@@ -12,11 +12,11 @@ + + #include + #include ++#include + #include + #include + #include + #include +-#include + + #include "fsl_dma.h" + #include "fsl_ssi.h" +diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c +index f75c3cf..64a0bb6 100644 +--- a/sound/soc/fsl/p1022_ds.c ++++ b/sound/soc/fsl/p1022_ds.c +@@ -11,12 +11,12 @@ + */ + + #include ++#include + #include + #include + #include + #include + #include +-#include + + #include "fsl_dma.h" + #include "fsl_ssi.h" +diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c +index 9d89bb0..4ce4aff 100644 +--- a/sound/soc/fsl/p1022_rdk.c ++++ b/sound/soc/fsl/p1022_rdk.c +@@ -18,12 +18,12 @@ + */ + + #include ++#include + #include + #include + #include + #include + #include +-#include + + #include "fsl_dma.h" + #include "fsl_ssi.h" +-- +2.1.0.27.g96db324 + diff --git a/packages/base/any/kernels/3.18.25/patches/series.arm64 b/packages/base/any/kernels/3.18.25/patches/series.arm64 index 0983a8d1..f32ec8fa 100644 --- a/packages/base/any/kernels/3.18.25/patches/series.arm64 +++ b/packages/base/any/kernels/3.18.25/patches/series.arm64 @@ -2,3 +2,5 @@ aufs.patch driver-support-intel-igb-bcm54616-phy.patch add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch add-nxp-arm64-ls2088ardb-device-tree.patch +add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch +backport-some-kernel-patches-based-on-3.18.25.patch From b30567d4106392f6e2d9452cdb4fd44331a57f90 Mon Sep 17 00:00:00 2001 From: Shengzhou Liu Date: Mon, 26 Sep 2016 22:27:02 +0800 Subject: [PATCH 197/255] Update arm64-all.config to support NXP DPAA2.0 networking and misc peripheral. 
--- .../configs/arm64-all/arm64-all.config | 67 +++++++++++++++++-- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config b/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config index bfee8e37..5dd10453 100644 --- a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config +++ b/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config @@ -311,6 +311,7 @@ CONFIG_PCI_MSI_IRQ_DOMAIN=y # PCI host controller drivers # CONFIG_PCIE_DW=y +# CONFIG_PCI_HOST_GENERIC is not set CONFIG_PCI_XGENE=y CONFIG_PCI_XGENE_MSI=y CONFIG_PCI_LAYERSCAPE=y @@ -823,6 +824,7 @@ CONFIG_MTD_NAND_IDS=y # CONFIG_MTD_NAND_CAFE is not set # CONFIG_MTD_NAND_NANDSIM is not set # CONFIG_MTD_NAND_PLATFORM is not set +CONFIG_MTD_NAND_FSL_IFC=y # CONFIG_MTD_ONENAND is not set # @@ -1169,6 +1171,9 @@ CONFIG_NET_VENDOR_EMULEX=y CONFIG_NET_VENDOR_EXAR=y # CONFIG_S2IO is not set # CONFIG_VXGE is not set +CONFIG_NET_VENDOR_FREESCALE=y +# CONFIG_FSL_PQ_MDIO is not set +CONFIG_FSL_XGMAC_MDIO=y CONFIG_NET_VENDOR_HP=y # CONFIG_HP100 is not set CONFIG_NET_VENDOR_INTEL=y @@ -1268,6 +1273,7 @@ CONFIG_PHYLIB=y # # MII PHY device drivers # +CONFIG_AQUANTIA_PHY=y # CONFIG_AT803X_PHY is not set # CONFIG_AMD_PHY is not set # CONFIG_AMD_XGBE_PHY is not set @@ -1277,6 +1283,7 @@ CONFIG_PHYLIB=y # CONFIG_LXT_PHY is not set # CONFIG_CICADA_PHY is not set CONFIG_VITESSE_PHY=y +# CONFIG_TERANETICS_PHY is not set CONFIG_SMSC_PHY=y CONFIG_BROADCOM_PHY=y # CONFIG_BCM7XXX_PHY is not set @@ -1292,6 +1299,7 @@ CONFIG_FIXED_PHY=y CONFIG_MDIO_BUS_MUX=y # CONFIG_MDIO_BUS_MUX_GPIO is not set CONFIG_MDIO_BUS_MUX_MMIOREG=y +# CONFIG_FSL_10GBASE_KR is not set # CONFIG_MDIO_BCM_UNIMAC is not set # CONFIG_MICREL_KS8995MA is not set # CONFIG_PPP is not set @@ -1458,7 +1466,10 @@ CONFIG_SERIAL_OF_PLATFORM=y CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=y # CONFIG_IPMI_HANDLER is not set -# CONFIG_HW_RANDOM is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +# CONFIG_HW_RANDOM_VIRTIO is not set +CONFIG_HW_RANDOM_XGENE=y # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set @@ -1517,6 +1528,7 @@ CONFIG_I2C_HELPER_AUTO=y # CONFIG_I2C_DESIGNWARE_PLATFORM is not set # CONFIG_I2C_DESIGNWARE_PCI is not set # CONFIG_I2C_GPIO is not set +CONFIG_I2C_IMX=y # CONFIG_I2C_NOMADIK is not set # CONFIG_I2C_OCORES is not set # CONFIG_I2C_PCA_PLATFORM is not set @@ -1682,13 +1694,13 @@ CONFIG_POWER_SUPPLY=y # CONFIG_CHARGER_BQ24735 is not set # CONFIG_CHARGER_SMB347 is not set CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_LAYERSCAPE=y # CONFIG_POWER_RESET_GPIO is not set # CONFIG_POWER_RESET_GPIO_RESTART is not set # CONFIG_POWER_RESET_LTC2952 is not set CONFIG_POWER_RESET_VEXPRESS=y # CONFIG_POWER_RESET_XGENE is not set # CONFIG_POWER_RESET_SYSCON is not set +CONFIG_POWER_RESET_LAYERSCAPE=y # CONFIG_POWER_AVS is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set @@ -2159,9 +2171,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y # CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_IO_ACCESSORS=y # CONFIG_MMC_SDHCI_PCI is not set CONFIG_MMC_SDHCI_PLTFM=y # CONFIG_MMC_SDHCI_OF_ARASAN is not set +CONFIG_MMC_SDHCI_OF_ESDHC=y # CONFIG_MMC_SDHCI_PXAV3 is not set # CONFIG_MMC_SDHCI_PXAV2 is not set # CONFIG_MMC_TIFM_SD is not set @@ -2197,7 +2211,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_DS1307 is not set # CONFIG_RTC_DRV_DS1374 is not set # CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_DS3232 is not set +CONFIG_RTC_DRV_DS3232=y # CONFIG_RTC_DRV_HYM8563 is 
not set # CONFIG_RTC_DRV_MAX6900 is not set # CONFIG_RTC_DRV_RS5C372 is not set @@ -2295,7 +2309,7 @@ CONFIG_VIRTIO=y # # Virtio drivers # -# CONFIG_VIRTIO_PCI is not set +CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_MMIO=y # CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set @@ -2303,12 +2317,50 @@ CONFIG_VIRTIO_MMIO=y # # Microsoft Hyper-V guest support # -# CONFIG_STAGING is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_XGI is not set +# CONFIG_BCM_WIMAX is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_USB_WPAN_HCD is not set +# CONFIG_WIMAX_GDM72XX is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_MTD_SPINAND_MT29F is not set +# CONFIG_LUSTRE_FS is not set +# CONFIG_DGNC is not set +# CONFIG_DGAP is not set +# CONFIG_GS_FPGABOOT is not set +CONFIG_FSL_MC_BUS=y +CONFIG_FSL_MC_RESTOOL=y +CONFIG_FSL_MC_DPIO=y +# CONFIG_FSL_QBMAN_DEBUG is not set +CONFIG_FSL_DPAA2=y +CONFIG_FSL_DPAA2_ETH=y +# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set +CONFIG_FSL_DPAA2_MAC=y +# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set # # SOC (System On Chip) specific Drivers # # CONFIG_SOC_TI is not set +CONFIG_FSL_SOC_DRIVERS=y +CONFIG_FSL_GUTS=y +CONFIG_LS_SOC_DRIVERS=y CONFIG_CLKDEV_LOOKUP=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y @@ -2321,6 +2373,7 @@ CONFIG_CLK_SP810=y CONFIG_CLK_VEXPRESS_OSC=y # CONFIG_COMMON_CLK_SI5351 is not set # CONFIG_COMMON_CLK_SI570 is not set +CONFIG_CLK_QORIQ=y CONFIG_COMMON_CLK_XGENE=y # CONFIG_COMMON_CLK_PXA is not set # CONFIG_COMMON_CLK_QCOM is not set @@ -2370,15 +2423,17 @@ CONFIG_ARM_SMMU=y # CONFIG_PM_DEVFREQ is not set # CONFIG_EXTCON is not set CONFIG_MEMORY=y +CONFIG_FSL_IFC=y # CONFIG_IIO is not set # CONFIG_VME_BUS is not set # CONFIG_PWM is not set CONFIG_IRQCHIP=y CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_V2M=y CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y # CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set +CONFIG_RESET_CONTROLLER=y # CONFIG_FMC is not set # From a3ce59259466a547229afdfefb053141103f78a9 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 15:03:41 +0000 Subject: [PATCH 198/255] Revert "Platform support patch for the Dell S6000." This reverts commit 40ebb1080b96f91292cd877f01e802ed8a5716fd. --- ...rivers-platform-x86-dell-s6000-s1220.patch | 157 ------------------ 1 file changed, 157 deletions(-) delete mode 100644 packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch diff --git a/packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch b/packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch deleted file mode 100644 index 1fe1d928..00000000 --- a/packages/base/any/kernels/3.16-lts/patches/drivers-platform-x86-dell-s6000-s1220.patch +++ /dev/null @@ -1,157 +0,0 @@ -diff -urpN a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig ---- a/drivers/platform/x86/Kconfig 2016-11-20 01:17:41.000000000 +0000 -+++ b/drivers/platform/x86/Kconfig 2016-12-22 20:28:22.048502394 +0000 -@@ -838,4 +838,10 @@ config PVPANIC - a paravirtualized device provided by QEMU; it lets a virtual machine - (guest) communicate panic events to the host. 
- -+config DELL_S6000_S1220 -+ tristate "Platform Driver for the DELL S6000" -+ ---help--- -+ Support the Dell S6000. -+ -+ - endif # X86_PLATFORM_DEVICES -diff -urpN a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile ---- a/drivers/platform/x86/Makefile 2016-11-20 01:17:41.000000000 +0000 -+++ b/drivers/platform/x86/Makefile 2016-12-22 20:29:50.024504303 +0000 -@@ -57,3 +57,4 @@ obj-$(CONFIG_INTEL_SMARTCONNECT) += inte - - obj-$(CONFIG_PVPANIC) += pvpanic.o - obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o -+obj-$(CONFIG_DELL_S6000_S1220) += dell_s6000_s1220.o -diff -urpN a/drivers/platform/x86/dell_s6000_s1220.c b/drivers/platform/x86/dell_s6000_s1220.c ---- a/drivers/platform/x86/dell_s6000_s1220.c 1970-01-01 00:00:00.000000000 +0000 -+++ b/drivers/platform/x86/dell_s6000_s1220.c 2016-12-22 20:26:50.728500412 +0000 -@@ -0,0 +1,131 @@ -+/** -+ * Dell S6000 Platform Support. -+ */ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+ -+/************************************************************* -+ * -+ * I2C Bus 0 on the S6000 is muxed via gpio1 and gpio2. -+ * -+ ************************************************************/ -+static const unsigned s6000_gpiomux_gpios[] = { -+ 1, 2 -+}; -+ -+static const unsigned s6000_gpiomux_values[] = { -+ 0, 1, 2, 3 -+}; -+ -+static struct i2c_mux_gpio_platform_data s6000_i2cmux_data = { -+ /* -+ * i2c Bus 0 -+ */ -+ .parent = 0, -+ -+ /* -+ * Start the bus numbers at 10. The first digit -+ * will represent the different bus numbers based -+ * the gpio selector (00, 01, 10, 11): -+ * -+ * i2c-10 --> i2c-0, gpios = 00 -+ * i2c-11 --> i2c-0, gpios = 01 -+ * i2c-12 --> i2c-0, gpios = 10 -+ * i2c-13 --> i2c-0, gpios = 11 -+ */ -+ .base_nr = 10, -+ -+ .values = s6000_gpiomux_values, -+ .n_values = ARRAY_SIZE(s6000_gpiomux_values), -+ .gpios = s6000_gpiomux_gpios, -+ .n_gpios = ARRAY_SIZE(s6000_gpiomux_gpios), -+ .idle = 0, -+}; -+ -+static struct platform_device s6000_i2cmux = { -+ .name = "i2c-mux-gpio", -+ .id = 12, -+ .dev = { -+ .platform_data = &s6000_i2cmux_data, -+ }, -+}; -+ -+/************************************************************* -+ * -+ * Sensors on i2c-11 (See mux data above). -+ * -+ ************************************************************/ -+static struct i2c_board_info s6000_i2c_11_board_info[] = { -+ { I2C_BOARD_INFO("lm75", 0x4c) }, -+ { I2C_BOARD_INFO("lm75", 0x4d) }, -+ { I2C_BOARD_INFO("lm75", 0x4e) }, -+ { I2C_BOARD_INFO("ltc4215", 0x42) }, -+ { I2C_BOARD_INFO("ltc4215", 0x40) }, -+ { I2C_BOARD_INFO("max6620", 0x29) }, -+ { I2C_BOARD_INFO("max6620", 0x2A) }, -+ { I2C_BOARD_INFO("24c02", 0x51) }, -+ { I2C_BOARD_INFO("24c02", 0x52) }, -+ { I2C_BOARD_INFO("24c02", 0x53) }, -+}; -+ -+static int __init dell_s6000_s1220_init(void) -+{ -+ int i; -+ int rv = 0; -+ char const *vendor, *product; -+ struct i2c_adapter * i2ca; -+ -+ vendor = dmi_get_system_info(DMI_SYS_VENDOR); -+ product = dmi_get_system_info(DMI_PRODUCT_NAME); -+ -+ if(strcmp(vendor, "Dell Inc") || -+ (strcmp(product, "S6000 (SI)") && strcmp(product, "S6000-ON") && -+ strcmp(product, "S6000-ON (SI)"))) { -+ /* Not the S6000 */ -+ return -ENODEV; -+ } -+ -+ /** -+ * Register the GPIO mux for bus 0. 
-+ */ -+ rv = platform_device_register(&s6000_i2cmux); -+ if(rv < 0) { -+ pr_err("%s: platform_device_register() failed: %d", __FUNCTION__, rv); -+ return rv; -+ } -+ -+ /** -+ * Register I2C devices on new buses -+ */ -+ i2ca = i2c_get_adapter(11); -+ for(i = 0; i < ARRAY_SIZE(s6000_i2c_11_board_info); i++) { -+ if(i2c_new_device(i2ca, s6000_i2c_11_board_info+i) == NULL) { -+ pr_err("%s: i2c_new_device for bus 11:0x%x failed.", -+ __FUNCTION__, s6000_i2c_11_board_info[i].addr); -+ } -+ } -+ -+ return 0; -+ -+} -+ -+static void __exit dell_s6000_s1220_cleanup(void) -+{ -+ platform_device_unregister(&s6000_i2cmux); -+} -+ -+module_init(dell_s6000_s1220_init); -+module_exit(dell_s6000_s1220_cleanup); -+ -+MODULE_AUTHOR("Big Switch Networks (support@bigswitch.com)"); -+MODULE_VERSION("1.0"); -+MODULE_DESCRIPTION("Dell S6000"); -+MODULE_LICENSE("GPL"); From 05363d9ba23c5aeaa3f444ef0776e75b6b27f032 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 25 Dec 2016 13:19:17 -0800 Subject: [PATCH 199/255] - New utility onlp_file_find() Finds the requested file starting at the given root directory. - Support implicit directory searching for filenames If a filename passed to any of the file APIs contains an asterisk then it is considered a search request rather than an absolute path. Preceding the asterisk is the root directory and following the asterisk is the filename to search. --- .../src/onlplib/module/inc/onlplib/file.h | 5 +- .../any/onlp/src/onlplib/module/src/file.c | 55 +++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/file.h b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/file.h index ff1a6ca9..250047c3 100644 --- a/packages/base/any/onlp/src/onlplib/module/inc/onlplib/file.h +++ b/packages/base/any/onlp/src/onlplib/module/inc/onlplib/file.h @@ -141,6 +141,9 @@ int onlp_file_open(int flags, int log, const char* fmt, ...); int onlp_file_vopen(int flags, int log, const char* fmt, va_list vargs); - +/** + * @brief Search a directory tree for the given file. + */ +int onlp_file_find(char* root, char* fname, char** rpath); #endif /* __ONLPLIB_FILE_H__ */ diff --git a/packages/base/any/onlp/src/onlplib/module/src/file.c b/packages/base/any/onlp/src/onlplib/module/src/file.c index d0adce8e..1380c2f4 100644 --- a/packages/base/any/onlp/src/onlplib/module/src/file.c +++ b/packages/base/any/onlp/src/onlplib/module/src/file.c @@ -23,6 +23,7 @@ * ***********************************************************/ #include +#include "onlplib_log.h" #include #include #include @@ -74,8 +75,25 @@ vopen__(char** dst, int flags, const char* fmt, va_list vargs) int fd; struct stat sb; char fname[PATH_MAX]; + char* asterisk; ONLPLIB_VSNPRINTF(fname, sizeof(fname)-1, fmt, vargs); + + /** + * An asterisk in the filename separates a search root + * directory from a filename. 
+ */ + if( (asterisk = strchr(fname, '*')) ) { + char* root = fname; + char* rpath = NULL; + *asterisk = 0; + if(onlp_file_find(root, asterisk+1, &rpath) < 0) { + return ONLP_STATUS_E_MISSING; + } + strcpy(fname, rpath); + aim_free(rpath); + } + if(dst) { *dst = aim_strdup(fname); } @@ -282,3 +300,40 @@ onlp_file_vopen(int flags, int log, const char* fmt, va_list vargs) aim_free(fname); return rv; } + +#include +#include +#include +#include + +int +onlp_file_find(char* root, char* fname, char** rpath) +{ + FTS *fs; + FTSENT *ent; + char* argv[] = { NULL, NULL }; + argv[0] = root; + + if ((fs = fts_open(argv, FTS_PHYSICAL | FTS_NOCHDIR | FTS_COMFOLLOW, + NULL)) == NULL) { + AIM_LOG_ERROR("fts_open(%s): %{errno}", argv[0], errno); + return ONLP_STATUS_E_INTERNAL; + } + + while ((ent = fts_read(fs)) != NULL) { + switch (ent->fts_info) + { + case FTS_F: + { + if(!strcmp(fname, ent->fts_name)) { + *rpath = realpath(ent->fts_path, NULL); + fts_close(fs); + return ONLP_STATUS_OK; + } + } + break; + } + } + fts_close(fs); + return ONLP_STATUS_E_MISSING; +} From 305cd0b696da1c5f009bb7ebd5ce89db0db354d2 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 25 Dec 2016 21:34:35 +0000 Subject: [PATCH 200/255] - Use the new search syntax for locating the device files Allows compatibility between kernels with different sysfs paths. --- .../onlp/builds/src/module/src/thermali.c | 63 ++++--------------- 1 file changed, 13 insertions(+), 50 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/thermali.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/thermali.c index 81fc220f..425f348e 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/onlp/builds/src/module/src/thermali.c @@ -29,10 +29,6 @@ #include #include -#define prefix_path "/sys/bus/i2c/devices/" -#define filename "temp1_input" -#define LOCAL_DEBUG 0 - #define VALIDATE(_id) \ do { \ if(!ONLP_OID_IS_THERMAL(_id)) { \ @@ -41,22 +37,7 @@ } while(0) -#define OPEN_READ_FILE(fd,fullpath,data,nbytes,len) \ - if (LOCAL_DEBUG) \ - printf("[Debug][%s][%d][openfile: %s]\n", __FUNCTION__, __LINE__, fullpath); \ - if ((fd = open(fullpath, O_RDONLY)) == -1) \ - return ONLP_STATUS_E_INTERNAL; \ - if ((len = read(fd, r_data, nbytes)) <= 0){ \ - close(fd); \ - return ONLP_STATUS_E_INTERNAL;} \ - if (LOCAL_DEBUG) \ - printf("[Debug][%s][%d][read data: %s]\n", __FUNCTION__, __LINE__, r_data); \ - if (close(fd) == -1) \ - return ONLP_STATUS_E_INTERNAL - - -enum onlp_thermal_id -{ +enum onlp_thermal_id { THERMAL_RESERVED = 0, THERMAL_CPU_CORE, THERMAL_1_ON_MAIN_BROAD, @@ -66,23 +47,22 @@ enum onlp_thermal_id THERMAL_1_ON_PSU2, }; -static char* last_path[] = /* must map with onlp_thermal_id */ -{ +static char* devfiles[] = { /* must map with onlp_thermal_id */ "reserved", NULL, /* CPU_CORE files */ - "61-0048/", - "62-0049/", - "63-004a/", - "57-003c/psu_", - "58-003f/psu_", + "/sys/bus/i2c/devices/61-0048*temp1_input", + "/sys/bus/i2c/devices/62-0049*temp1_input", + "/sys/bus/i2c/devices/63-004a*temp1_input", + "/sys/bus/i2c/devices/57-003c*psu_temp1_input", + "/sys/bus/i2c/devices/58-003f*psu_temp1_input", }; static char* cpu_coretemp_files[] = { - "/sys/devices/platform/coretemp.0/temp2_input", - "/sys/devices/platform/coretemp.0/temp3_input", - "/sys/devices/platform/coretemp.0/temp4_input", - "/sys/devices/platform/coretemp.0/temp5_input", + 
"/sys/devices/platform/coretemp.0*temp2_input", + "/sys/devices/platform/coretemp.0*temp3_input", + "/sys/devices/platform/coretemp.0*temp4_input", + "/sys/devices/platform/coretemp.0*temp5_input", NULL, }; @@ -141,16 +121,11 @@ onlp_thermali_init(void) int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { - int fd, len, nbytes = 10, temp_base=1, local_id; - char r_data[10] = {0}; - char fullpath[50] = {0}; + int local_id; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - if (LOCAL_DEBUG) - printf("\n[Debug][%s][%d][local_id: %d]", __FUNCTION__, __LINE__, local_id); - /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; @@ -159,17 +134,5 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) return rv; } - /* get fullpath */ - sprintf(fullpath, "%s%s%s", prefix_path, last_path[local_id], filename); - - OPEN_READ_FILE(fd,fullpath,r_data,nbytes,len); - - info->mcelsius = atoi(r_data)/temp_base; - - if (LOCAL_DEBUG) - printf("\n[Debug][%s][%d][save data: %d]\n", __FUNCTION__, __LINE__, info->mcelsius); - - return ONLP_STATUS_OK; + return onlp_file_read_int(&info->mcelsius, devfiles[local_id]); } - - From 7428d3505c29c381bd6988cf7879e6b0f38ec146 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 25 Dec 2016 22:11:40 +0000 Subject: [PATCH 201/255] - Fix timeout processing. --- .../src/python/onl/mounts/__init__.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py b/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py index 7f33e0c3..3c64c0eb 100755 --- a/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/mounts/__init__.py @@ -137,25 +137,27 @@ class OnlMountManager(object): self.missing = None def init(self, timeout=5): + for(k, v) in self.mdata['mounts'].iteritems(): # # Get the partition device for the given label. # The timeout logic is here to handle waiting for the # block devices to arrive at boot. # - while timeout >= 0: + t = timeout + while t >= 0: try: v['device'] = subprocess.check_output("blkid -L %s" % k, shell=True).strip() break except subprocess.CalledProcessError: self.logger.debug("Block label %s does not yet exist..." % k) time.sleep(1) - timeout -= 1 + t -= 1 - if 'device' not in v: - self.logger.error("Timeout waiting for block label %s after %d seconds." % (k, timeout)) - self.missing = k - return False + if 'device' not in v: + self.logger.error("Timeout waiting for block label %s after %d seconds." % (k, timeout)) + self.missing = k + return False # # Make the mount point for future use. From b5b429af3bc03607d50d1888f16288fd76254b27 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 25 Dec 2016 22:12:12 +0000 Subject: [PATCH 202/255] Convenience tool for manual kernel replacement. 
--- packages/base/all/vendor-config-onl/src/bin/onlkernel | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100755 packages/base/all/vendor-config-onl/src/bin/onlkernel diff --git a/packages/base/all/vendor-config-onl/src/bin/onlkernel b/packages/base/all/vendor-config-onl/src/bin/onlkernel new file mode 100755 index 00000000..051a1729 --- /dev/null +++ b/packages/base/all/vendor-config-onl/src/bin/onlkernel @@ -0,0 +1,11 @@ +#!/bin/sh +############################################################ +if [ -z "$1" ]; then + echo "usage: $0 " + exit 1 +fi + +dir=`mktemp -d` +(cd $dir && wget $1) +onlfs rw boot mv $dir/* /mnt/onl/boot +rmdir $dir From da96c861d4594a567c9a3a71daa4c62aebbb9b90 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 25 Dec 2016 23:16:19 +0000 Subject: [PATCH 203/255] - Use the new search syntax for locating the device files Allows compatibility between kernels with different sysfs paths. --- .../onlp/builds/src/module/src/thermali.c | 80 +++++-------------- 1 file changed, 20 insertions(+), 60 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/onlp/builds/src/module/src/thermali.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/onlp/builds/src/module/src/thermali.c index 19abd796..98d9a816 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/onlp/builds/src/module/src/thermali.c @@ -30,10 +30,6 @@ #include #include "platform_lib.h" -#define prefix_path "/sys/bus/i2c/devices/" -#define filename "temp1_input" -#define LOCAL_DEBUG 0 - #define VALIDATE(_id) \ do { \ if(!ONLP_OID_IS_THERMAL(_id)) { \ @@ -41,19 +37,14 @@ } \ } while(0) -#define OPEN_READ_FILE(fd,fullpath,data,nbytes,len) \ - DEBUG_PRINT("[Debug][%s][%d][openfile: %s]\n", __FUNCTION__, __LINE__, fullpath); \ - if ((fd = open(fullpath, O_RDONLY)) == -1) \ - return ONLP_STATUS_E_INTERNAL; \ - if ((len = read(fd, r_data, nbytes)) <= 0){ \ - close(fd); \ - return ONLP_STATUS_E_INTERNAL;} \ - DEBUG_PRINT("[Debug][%s][%d][read data: %s]\n", __FUNCTION__, __LINE__, r_data); \ - if (close(fd) == -1) \ - return ONLP_STATUS_E_INTERNAL - -enum onlp_thermal_id +int +onlp_thermali_init(void) { + return ONLP_STATUS_OK; +} + + +enum onlp_thermal_id { THERMAL_RESERVED = 0, THERMAL_CPU_CORE, THERMAL_1_ON_MAIN_BROAD, @@ -64,24 +55,23 @@ enum onlp_thermal_id THERMAL_1_ON_PSU2, }; -static char* last_path[] = /* must map with onlp_thermal_id */ -{ +static char* devfiles[] = { /* must map with onlp_thermal_id */ "reserved", NULL, /* CPU Core */ - "38-0048/temp1_input", - "39-0049/temp1_input", - "40-004a/temp1_input", - "41-004b/temp1_input", - "35-003c/psu_temp1_input", - "36-003f/psu_temp1_input", + "/sys/bus/i2c/devices/38-0048*temp1_input", + "/sys/bus/i2c/devices/39-0049*temp1_input", + "/sys/bus/i2c/devices/40-004a*temp1_input", + "/sys/bus/i2c/devices/41-004b*temp1_input", + "/sys/bus/i2c/devices/35-003c*psu_temp1_input", + "/sys/bus/i2c/devices/36-003f*psu_temp1_input", }; static char* cpu_coretemp_files[] = { - "/sys/devices/platform/coretemp.0/temp2_input", - "/sys/devices/platform/coretemp.0/temp3_input", - "/sys/devices/platform/coretemp.0/temp4_input", - "/sys/devices/platform/coretemp.0/temp5_input", + "/sys/devices/platform/coretemp.0*temp2_input", + "/sys/devices/platform/coretemp.0*temp3_input", + "/sys/devices/platform/coretemp.0*temp4_input", + "/sys/devices/platform/coretemp.0*temp5_input", NULL, }; @@ -118,35 +108,13 @@ static 
onlp_thermal_info_t linfo[] = { } }; -/* - * This will be called to intiialize the thermali subsystem. - */ -int -onlp_thermali_init(void) -{ - return ONLP_STATUS_OK; -} - -/* - * Retrieve the information structure for the given thermal OID. - * - * If the OID is invalid, return ONLP_E_STATUS_INVALID. - * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. - * Otherwise, return ONLP_STATUS_OK with the OID's information. - * - * Note -- it is expected that you fill out the information - * structure even if the sensor described by the OID is not present. - */ int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { - int fd, len, nbytes = 10, temp_base=1, local_id; - char r_data[10] = {0}; - char fullpath[50] = {0}; + int local_id; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - DEBUG_PRINT("\n[Debug][%s][%d][local_id: %d]", __FUNCTION__, __LINE__, local_id); /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; @@ -156,13 +124,5 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) return rv; } - /* get fullpath */ - sprintf(fullpath, "%s%s", prefix_path, last_path[local_id]); - - OPEN_READ_FILE(fd, fullpath, r_data, nbytes, len); - info->mcelsius = atoi(r_data) / temp_base; - DEBUG_PRINT("\n[Debug][%s][%d][save data: %d]\n", __FUNCTION__, __LINE__, info->mcelsius); - - return ONLP_STATUS_OK; + return onlp_file_read_int(&info->mcelsius, devfiles[local_id]); } - From 8650f0b7b325544a4847eea0a13868b63e38511f Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 26 Dec 2016 00:04:29 +0000 Subject: [PATCH 204/255] - Use the new search syntax for locating the device files Allows compatibility between kernels with different sysfs paths. --- .../onlp/builds/src/module/src/thermali.c | 62 +++++-------------- 1 file changed, 14 insertions(+), 48 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/thermali.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/thermali.c index b5fc49c2..6ab6f5a5 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/onlp/builds/src/module/src/thermali.c @@ -29,9 +29,6 @@ #include #include -#define prefix_path "/sys/bus/i2c/devices/" -#define filename "temp1_input" -#define LOCAL_DEBUG 0 #define VALIDATE(_id) \ do { \ @@ -41,22 +38,7 @@ } while(0) -#define OPEN_READ_FILE(fd,fullpath,data,nbytes,len) \ - if (LOCAL_DEBUG) \ - printf("[Debug][%s][%d][openfile: %s]\n", __FUNCTION__, __LINE__, fullpath); \ - if ((fd = open(fullpath, O_RDONLY)) == -1) \ - return ONLP_STATUS_E_INTERNAL; \ - if ((len = read(fd, r_data, nbytes)) <= 0){ \ - close(fd); \ - return ONLP_STATUS_E_INTERNAL;} \ - if (LOCAL_DEBUG) \ - printf("[Debug][%s][%d][read data: %s]\n", __FUNCTION__, __LINE__, r_data); \ - if (close(fd) == -1) \ - return ONLP_STATUS_E_INTERNAL - - -enum onlp_thermal_id -{ +enum onlp_thermal_id { THERMAL_RESERVED = 0, THERMAL_CPU_CORE, THERMAL_1_ON_MAIN_BROAD, @@ -66,23 +48,22 @@ enum onlp_thermal_id THERMAL_1_ON_PSU2, }; -static char* last_path[] = /* must map with onlp_thermal_id */ -{ +static char* devfiles[] = { /* must map with onlp_thermal_id */ "reserved", NULL, /* CPU_CORE files */ - "61-0048/", - "62-0049/", - "63-004a/", - "57-003c/psu_", - "58-003f/psu_", + "/sys/bus/i2c/devices/61-0048*temp1_input", + "/sys/bus/i2c/devices/62-0049*temp1_input", + "/sys/bus/i2c/devices/63-004a*temp1_input", + 
"/sys/bus/i2c/devices/57-003c*psu_temp1_input", + "/sys/bus/i2c/devices/58-003f*psu_temp1_input", }; static char* cpu_coretemp_files[] = { - "/sys/devices/platform/coretemp.0/temp2_input", - "/sys/devices/platform/coretemp.0/temp3_input", - "/sys/devices/platform/coretemp.0/temp4_input", - "/sys/devices/platform/coretemp.0/temp5_input", + "/sys/devices/platform/coretemp.0*temp2_input", + "/sys/devices/platform/coretemp.0*temp3_input", + "/sys/devices/platform/coretemp.0*temp4_input", + "/sys/devices/platform/coretemp.0*temp5_input", NULL, }; @@ -112,7 +93,7 @@ static onlp_thermal_info_t linfo[] = { { { ONLP_THERMAL_ID_CREATE(THERMAL_1_ON_PSU2), "PSU-2 Thermal Sensor 1", ONLP_PSU_ID_CREATE(2)}, ONLP_THERMAL_STATUS_PRESENT, ONLP_THERMAL_CAPS_ALL, 0, ONLP_THERMAL_THRESHOLD_INIT_DEFAULTS - }, + } }; @@ -141,16 +122,11 @@ onlp_thermali_init(void) int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { - int fd, len, nbytes = 10, temp_base=1, local_id; - char r_data[10] = {0}; - char fullpath[50] = {0}; + int local_id; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - if (LOCAL_DEBUG) - printf("\n[Debug][%s][%d][local_id: %d]", __FUNCTION__, __LINE__, local_id); - /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; @@ -159,15 +135,5 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) return rv; } - /* get fullpath */ - sprintf(fullpath, "%s%s%s", prefix_path, last_path[local_id], filename); - - OPEN_READ_FILE(fd,fullpath,r_data,nbytes,len); - - info->mcelsius = atoi(r_data)/temp_base; - - if (LOCAL_DEBUG) - printf("\n[Debug][%s][%d][save data: %d]\n", __FUNCTION__, __LINE__, info->mcelsius); - - return ONLP_STATUS_OK; + return onlp_file_read_int(&info->mcelsius, devfiles[local_id]); } From 688a3705d7d20ad877122b72e0645bffa6ea653b Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 19:56:45 +0000 Subject: [PATCH 205/255] Moved to kernel module. --- packages/base/any/kernels/3.16-lts/patches/series | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/base/any/kernels/3.16-lts/patches/series b/packages/base/any/kernels/3.16-lts/patches/series index e72aafa3..f0408a45 100644 --- a/packages/base/any/kernels/3.16-lts/patches/series +++ b/packages/base/any/kernels/3.16-lts/patches/series @@ -14,5 +14,4 @@ driver-arista-piix4-mux-patch.patch driver-igb-version-5.3.54.patch driver-support-intel-igb-bcm5461X-phy.patch driver-i2c-bus-intel-ismt-enable-param.patch -drivers-platform-x86-dell-s6000-s1220.patch drivers-i2c-busses-i2c-isch-probe-param.patch From bb37da9ad919302bb948c40370f53f722ae7ebb6 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 20:57:59 +0000 Subject: [PATCH 206/255] New method to support platform-specific kernel modules. 
--- .../src/python/onl/platform/base.py | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index e98f56b4..ab293919 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -190,6 +190,26 @@ class OnlPlatformBase(object): def baseconfig(self): return True + def insmod(self, module, required=True): + kv = os.uname()[2] + + # Search paths in this order: + locations = [ self.PLATFORM, + '-'.join(self.PLATFORM.split('-')[:-1]), + ".", + ] + for l in locations: + path = os.path.join("/lib/modules/%s/%s/%s" % (kv, l, module)) + print "searching: %s" % path + if os.path.exists(path): + subprocess.check_call("insmod %s" % path, shell=True) + return True + + if required: + raise RuntimeError("kernel module %s could not be found." % path) + else: + return False + def onie_machine_get(self): mc = self.basedir_onl("etc/onie/machine.json") if not os.path.exists(mc): From d3a1f1546d96ba2fd0d9b56b7606d1aa71d04190 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 20:58:20 +0000 Subject: [PATCH 207/255] Allow comma-separated package lists in prerequisites. Makes some templating easier. --- tools/onlpm.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/onlpm.py b/tools/onlpm.py index 3ce1cb32..e48235af 100755 --- a/tools/onlpm.py +++ b/tools/onlpm.py @@ -480,7 +480,10 @@ class OnlPackageGroup(object): return True def prerequisite_packages(self): - return list(onlu.sflatten(self._pkgs.get('prerequisites', {}).get('packages', []))) + rv = [] + for e in list(onlu.sflatten(self._pkgs.get('prerequisites', {}).get('packages', []))): + rv += e.split(',') + return rv def prerequisite_submodules(self): return self._pkgs.get('prerequisites', {}).get('submodules', []) From 216e4f0eed1d418fc179cca0794d5b82df8d6170 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 20:59:27 +0000 Subject: [PATCH 208/255] Platform Kernel Module Support Per-platform kernel modules can now be built out of tree against multiple kernel mbuild packages. 
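
A sketch of how this can be driven (the platform name and module directory
are hypothetical; the kernel package name is the x86_64 kernel package used
elsewhere in this series):

    # hypothetical out-of-tree module build via make/kmodule.mk
    make -f $ONL/make/kmodule.mk modules \
         KERNELS="onl-kernel-3.16-lts-x86-64-all" \
         KMODULES="modules/example-driver" \
         PLATFORM=x86-64-example-r0

kmodbuild.sh resolves each kernel package to its mbuild tree with
onlpm --find-dir, builds every module directory against it, and installs the
results under lib/modules/<kernel release>/<platform>/ in the current
directory.
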
--- make/kmodule.mk | 14 ++++++++++++++ tools/scripts/kmodbuild.sh | 21 +++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 make/kmodule.mk create mode 100755 tools/scripts/kmodbuild.sh diff --git a/make/kmodule.mk b/make/kmodule.mk new file mode 100644 index 00000000..84a7bfbb --- /dev/null +++ b/make/kmodule.mk @@ -0,0 +1,14 @@ +ifndef KERNELS +$(error $$KERNELS must be set) +endif + +ifndef KMODULES +$(error $$KMODULES must be set) +endif + +ifndef PLATFORM +$(error $$PLATFORM must be set) +endif + +modules: + $(ONL)/tools/scripts/kmodbuild.sh "$(KERNELS)" "$(KMODULES)" $(PLATFORM) diff --git a/tools/scripts/kmodbuild.sh b/tools/scripts/kmodbuild.sh new file mode 100755 index 00000000..36d801cd --- /dev/null +++ b/tools/scripts/kmodbuild.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +# +# kmodbuild.sh kernel-packages module-directories platform-name +# + +function build_module +{ + KERNEL=`onlpm --find-dir $1 mbuilds` + BUILD_DIR=`mktemp -d` + cp -R $2/* "$BUILD_DIR" + make -C $KERNEL M=$BUILD_DIR modules + make -C $KERNEL M=$BUILD_DIR INSTALL_MOD_PATH=`pwd` INSTALL_MOD_DIR="$3" modules_install +} + +for kernel in $1; do + for module in $2; do + build_module $kernel $module $3 + done +done From f2c0427c38d3f49e8edd4631ff36b9dc6f349625 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 21:00:35 +0000 Subject: [PATCH 209/255] Platform kernel module package template. --- .../base/any/templates/platform-modules.yml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 packages/base/any/templates/platform-modules.yml diff --git a/packages/base/any/templates/platform-modules.yml b/packages/base/any/templates/platform-modules.yml new file mode 100644 index 00000000..a0587d5e --- /dev/null +++ b/packages/base/any/templates/platform-modules.yml @@ -0,0 +1,21 @@ +############################################################ +# +# PKG template for all platform module packages +# +############################################################ +prerequisites: + packages: [ $KERNELS ] + +packages: + - name: onl-platform-modules-${PLATFORM} + version: 1.0.0 + arch: $ARCH + copyright: Copyright 2013, 2014, 2015 Big Switch Networks + maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com + summary: ONL Platform Modules Package for the ${PLATFORM} + + files: + builds/lib: /lib + + changelog: Changes From b38d28c9669d37bd3052f9beaf72f6abe0846401 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 21:00:59 +0000 Subject: [PATCH 210/255] New platform-config template which provides a kernel module package dependency. --- .../platform-config-with-modules.yml | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 packages/base/any/templates/platform-config-with-modules.yml diff --git a/packages/base/any/templates/platform-config-with-modules.yml b/packages/base/any/templates/platform-config-with-modules.yml new file mode 100644 index 00000000..35169b0b --- /dev/null +++ b/packages/base/any/templates/platform-config-with-modules.yml @@ -0,0 +1,23 @@ +############################################################ +# +# PKG template for all platform-config packages. 
+# +############################################################ +prerequisites: + packages: [ "onl-vendor-config-${VENDOR}:all" ] + +packages: + - name: onl-platform-config-${PLATFORM} + depends: onl-vendor-config-${VENDOR},$MODULES + version: 1.0.0 + arch: $ARCH + copyright: Copyright 2013, 2014, 2015 Big Switch Networks + maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com + summary: ONL Platform Configuration Package for the ${PLATFORM} + + files: + src/lib: /lib/platform-config/${PLATFORM}/onl + src/python : ${PY_INSTALL}/onl/platform/ + + changelog: Changes From ded4a622fda9b46503f4867a614f2a78efabe661 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 21:02:00 +0000 Subject: [PATCH 211/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 4ac99ea3..bfab32c4 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 4ac99ea3ba7233b620c8ac269a59a9bd75d3bba0 +Subproject commit bfab32c494d48248420e0ea059c6c161a733fd7d From 925fca400834780e8c7cdd16df014fccd2901338 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 21:26:05 +0000 Subject: [PATCH 212/255] Add module package as a build dependency. --- packages/base/any/templates/platform-config-with-modules.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/any/templates/platform-config-with-modules.yml b/packages/base/any/templates/platform-config-with-modules.yml index 35169b0b..0040aecc 100644 --- a/packages/base/any/templates/platform-config-with-modules.yml +++ b/packages/base/any/templates/platform-config-with-modules.yml @@ -4,7 +4,7 @@ # ############################################################ prerequisites: - packages: [ "onl-vendor-config-${VENDOR}:all" ] + packages: [ "onl-vendor-config-${VENDOR}:all", "$MODULES" ] packages: - name: onl-platform-config-${PLATFORM} From c6c44fe214064a5aac81dc7e41d605d238974913 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 21:41:01 +0000 Subject: [PATCH 213/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index bfab32c4..7a14f91a 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit bfab32c494d48248420e0ea059c6c161a733fd7d +Subproject commit 7a14f91a850545429f75f0069332a56cbef65ea4 From 9150307d4d060dfc2622bd17c968d7462f42c1ee Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sat, 31 Dec 2016 14:22:32 -0800 Subject: [PATCH 214/255] Latest --- packages/platforms-closed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/platforms-closed b/packages/platforms-closed index 7a14f91a..0c85b120 160000 --- a/packages/platforms-closed +++ b/packages/platforms-closed @@ -1 +1 @@ -Subproject commit 7a14f91a850545429f75f0069332a56cbef65ea4 +Subproject commit 0c85b1202684cdaca8e9362dfc50f20c01dc8089 From f6f9cdc99a57dc8b08a4c8f426d17c02d205878c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:02:19 +0000 Subject: [PATCH 215/255] - Allow building individual source files - Allow direct specification of the kernel build tree. 
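
For illustration, both new forms, with hypothetical paths and names:

    # single source file; the kernel is still resolved through onlpm
    tools/scripts/kmodbuild.sh "onl-kernel-3.16-lts-x86-64-all" "modules/example-driver.c" x86-64-example-r0

    # explicit, already-unpacked kernel build tree given as a directory
    tools/scripts/kmodbuild.sh "/path/to/linux-3.16-mbuild" "modules" x86-64-example-r0

A bare *.c argument is copied into a scratch directory together with a
generated one-line Kbuild (obj-m := example-driver.o), while a first argument
that names an existing directory is used as the kernel build tree directly
instead of being looked up with onlpm --find-dir.
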
--- tools/scripts/kmodbuild.sh | 44 +++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/tools/scripts/kmodbuild.sh b/tools/scripts/kmodbuild.sh index 36d801cd..0e5890ec 100755 --- a/tools/scripts/kmodbuild.sh +++ b/tools/scripts/kmodbuild.sh @@ -5,17 +5,51 @@ set -e # kmodbuild.sh kernel-packages module-directories platform-name # -function build_module +# +# build +# +function build { - KERNEL=`onlpm --find-dir $1 mbuilds` - BUILD_DIR=`mktemp -d` - cp -R $2/* "$BUILD_DIR" + if [ -d $1 ]; then + KERNEL=$1 + else + KERNEL=`onlpm --find-dir $1 mbuilds` + fi + BUILD_DIR=$2 + INSTALL_DIR=$3 make -C $KERNEL M=$BUILD_DIR modules make -C $KERNEL M=$BUILD_DIR INSTALL_MOD_PATH=`pwd` INSTALL_MOD_DIR="$3" modules_install } +# +# build_directory +# +function build_directory +{ + BUILD_DIR=`mktemp -d` + cp -R $2/* "$BUILD_DIR" + build $1 $BUILD_DIR $3 +} + +# +# build_source +# +function build_source +{ + BUILD_DIR=`mktemp -d` + cp $2 $BUILD_DIR + src=$(basename $2) + obj=${src%.c}.o + echo "obj-m := $obj" >> $BUILD_DIR/Kbuild + build $1 $BUILD_DIR $3 +} + for kernel in $1; do for module in $2; do - build_module $kernel $module $3 + if [ -d $module ]; then + build_directory $kernel $module $3 + else + build_source $kernel $module $3 + fi done done From fb2009914b8706e3bf656319794a4e9a06f249e6 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:04:42 +0000 Subject: [PATCH 216/255] Common kernel modules. These will be built and installed automatically for each kernel. --- .../base/any/kernels/modules/cpr_4011_4mxx.c | 397 ++++++++++++++++++ 1 file changed, 397 insertions(+) create mode 100644 packages/base/any/kernels/modules/cpr_4011_4mxx.c diff --git a/packages/base/any/kernels/modules/cpr_4011_4mxx.c b/packages/base/any/kernels/modules/cpr_4011_4mxx.c new file mode 100644 index 00000000..c14c733c --- /dev/null +++ b/packages/base/any/kernels/modules/cpr_4011_4mxx.c @@ -0,0 +1,397 @@ +/* + * An hwmon driver for the CPR-4011-4Mxx Redundant Power Module + * + * Copyright (C) Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_FAN_DUTY_CYCLE 100 + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x3c, 0x3d, 0x3e, 0x3f, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct cpr_4011_4mxx_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 vout_mode; /* Register value */ + u16 v_in; /* Register value */ + u16 v_out; /* Register value */ + u16 i_in; /* Register value */ + u16 i_out; /* Register value */ + u16 p_in; /* Register value */ + u16 p_out; /* Register value */ + u16 temp_input[2]; /* Register value */ + u8 fan_fault; /* Register value */ + u16 fan_duty_cycle[2]; /* Register value */ + u16 fan_speed[2]; /* Register value */ +}; + +static ssize_t show_linear(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_vout(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, const char *buf, size_t count); +static int cpr_4011_4mxx_write_word(struct i2c_client *client, u8 reg, u16 value); +static struct cpr_4011_4mxx_data *cpr_4011_4mxx_update_device(struct device *dev); + +enum cpr_4011_4mxx_sysfs_attributes { + PSU_V_IN, + PSU_V_OUT, + PSU_I_IN, + PSU_I_OUT, + PSU_P_IN, + PSU_P_OUT, + PSU_TEMP1_INPUT, + PSU_FAN1_FAULT, + PSU_FAN1_DUTY_CYCLE, + PSU_FAN1_SPEED, +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_v_in, S_IRUGO, show_linear, NULL, PSU_V_IN); +static SENSOR_DEVICE_ATTR(psu_v_out, S_IRUGO, show_vout, NULL, PSU_V_OUT); +static SENSOR_DEVICE_ATTR(psu_i_in, S_IRUGO, show_linear, NULL, PSU_I_IN); +static SENSOR_DEVICE_ATTR(psu_i_out, S_IRUGO, show_linear, NULL, PSU_I_OUT); +static SENSOR_DEVICE_ATTR(psu_p_in, S_IRUGO, show_linear, NULL, PSU_P_IN); +static SENSOR_DEVICE_ATTR(psu_p_out, S_IRUGO, show_linear, NULL, PSU_P_OUT); +static SENSOR_DEVICE_ATTR(psu_temp1_input, S_IRUGO, show_linear, NULL, PSU_TEMP1_INPUT); +static SENSOR_DEVICE_ATTR(psu_fan1_fault, S_IRUGO, show_fan_fault, NULL, PSU_FAN1_FAULT); +static SENSOR_DEVICE_ATTR(psu_fan1_duty_cycle_percentage, S_IWUSR | S_IRUGO, show_linear, set_fan_duty_cycle, PSU_FAN1_DUTY_CYCLE); +static SENSOR_DEVICE_ATTR(psu_fan1_speed_rpm, S_IRUGO, show_linear, NULL, PSU_FAN1_SPEED); + +static struct attribute *cpr_4011_4mxx_attributes[] = { + &sensor_dev_attr_psu_v_in.dev_attr.attr, + &sensor_dev_attr_psu_v_out.dev_attr.attr, + &sensor_dev_attr_psu_i_in.dev_attr.attr, + &sensor_dev_attr_psu_i_out.dev_attr.attr, + &sensor_dev_attr_psu_p_in.dev_attr.attr, + &sensor_dev_attr_psu_p_out.dev_attr.attr, + &sensor_dev_attr_psu_temp1_input.dev_attr.attr, + &sensor_dev_attr_psu_fan1_fault.dev_attr.attr, + &sensor_dev_attr_psu_fan1_duty_cycle_percentage.dev_attr.attr, + &sensor_dev_attr_psu_fan1_speed_rpm.dev_attr.attr, + NULL +}; + +static int two_complement_to_int(u16 data, u8 valid_bit, int mask) +{ + u16 valid_data = data & mask; + bool is_negative = valid_data >> (valid_bit - 1); + + return is_negative ? 
(-(((~valid_data) & mask) + 1)) : valid_data; +} + +static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpr_4011_4mxx_data *data = i2c_get_clientdata(client); + int nr = (attr->index == PSU_FAN1_DUTY_CYCLE) ? 0 : 1; + long speed; + int error; + + error = kstrtol(buf, 10, &speed); + if (error) + return error; + + if (speed < 0 || speed > MAX_FAN_DUTY_CYCLE) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->fan_duty_cycle[nr] = speed; + cpr_4011_4mxx_write_word(client, 0x3B + nr, data->fan_duty_cycle[nr]); + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t show_linear(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct cpr_4011_4mxx_data *data = cpr_4011_4mxx_update_device(dev); + + u16 value = 0; + int exponent, mantissa; + int multiplier = 1000; + + switch (attr->index) { + case PSU_V_IN: + value = data->v_in; + break; + case PSU_I_IN: + value = data->i_in; + break; + case PSU_I_OUT: + value = data->i_out; + break; + case PSU_P_IN: + value = data->p_in; + break; + case PSU_P_OUT: + value = data->p_out; + break; + case PSU_TEMP1_INPUT: + value = data->temp_input[0]; + break; + case PSU_FAN1_DUTY_CYCLE: + multiplier = 1; + value = data->fan_duty_cycle[0]; + break; + case PSU_FAN1_SPEED: + multiplier = 1; + value = data->fan_speed[0]; + break; + default: + break; + } + + exponent = two_complement_to_int(value >> 11, 5, 0x1f); + mantissa = two_complement_to_int(value & 0x7ff, 11, 0x7ff); + + return (exponent >= 0) ? sprintf(buf, "%d\n", (mantissa << exponent) * multiplier) : + sprintf(buf, "%d\n", (mantissa * multiplier) / (1 << -exponent)); +} + +static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct cpr_4011_4mxx_data *data = cpr_4011_4mxx_update_device(dev); + + u8 shift = (attr->index == PSU_FAN1_FAULT) ? 7 : 6; + + return sprintf(buf, "%d\n", data->fan_fault >> shift); +} + +static ssize_t show_vout(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct cpr_4011_4mxx_data *data = cpr_4011_4mxx_update_device(dev); + int exponent, mantissa; + int multiplier = 1000; + + exponent = two_complement_to_int(data->vout_mode, 5, 0x1f); + mantissa = data->v_out; + + return (exponent > 0) ? 
sprintf(buf, "%d\n", (mantissa << exponent) * multiplier) : + sprintf(buf, "%d\n", (mantissa * multiplier) / (1 << -exponent)); +} + +static const struct attribute_group cpr_4011_4mxx_group = { + .attrs = cpr_4011_4mxx_attributes, +}; + +static int cpr_4011_4mxx_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct cpr_4011_4mxx_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct cpr_4011_4mxx_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &cpr_4011_4mxx_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &cpr_4011_4mxx_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int cpr_4011_4mxx_remove(struct i2c_client *client) +{ + struct cpr_4011_4mxx_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &cpr_4011_4mxx_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id cpr_4011_4mxx_id[] = { + { "cpr_4011_4mxx", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, cpr_4011_4mxx_id); + +static struct i2c_driver cpr_4011_4mxx_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "cpr_4011_4mxx", + }, + .probe = cpr_4011_4mxx_probe, + .remove = cpr_4011_4mxx_remove, + .id_table = cpr_4011_4mxx_id, + .address_list = normal_i2c, +}; + +static int cpr_4011_4mxx_read_byte(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int cpr_4011_4mxx_read_word(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_word_data(client, reg); +} + +static int cpr_4011_4mxx_write_word(struct i2c_client *client, u8 reg, u16 value) +{ + return i2c_smbus_write_word_data(client, reg, value); +} + +struct reg_data_byte { + u8 reg; + u8 *value; +}; + +struct reg_data_word { + u8 reg; + u16 *value; +}; + +static struct cpr_4011_4mxx_data *cpr_4011_4mxx_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cpr_4011_4mxx_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int i, status; + struct reg_data_byte regs_byte[] = { {0x20, &data->vout_mode}, + {0x81, &data->fan_fault}}; + struct reg_data_word regs_word[] = { {0x88, &data->v_in}, + {0x8b, &data->v_out}, + {0x89, &data->i_in}, + {0x8c, &data->i_out}, + {0x96, &data->p_out}, + {0x97, &data->p_in}, + {0x8d, &(data->temp_input[0])}, + {0x8e, &(data->temp_input[1])}, + {0x3b, &(data->fan_duty_cycle[0])}, + {0x3c, &(data->fan_duty_cycle[1])}, + {0x90, &(data->fan_speed[0])}, + {0x91, &(data->fan_speed[1])}}; + + dev_dbg(&client->dev, "Starting cpr_4011_4mxx update\n"); + + /* Read byte data */ + for (i = 0; i < ARRAY_SIZE(regs_byte); i++) { + status = cpr_4011_4mxx_read_byte(client, regs_byte[i].reg); + + if (status < 0) { + dev_dbg(&client->dev, 
"reg %d, err %d\n", + regs_byte[i].reg, status); + } + else { + *(regs_byte[i].value) = status; + } + } + + /* Read word data */ + for (i = 0; i < ARRAY_SIZE(regs_word); i++) { + status = cpr_4011_4mxx_read_word(client, regs_word[i].reg); + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", + regs_word[i].reg, status); + } + else { + *(regs_word[i].value) = status; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init cpr_4011_4mxx_init(void) +{ + return i2c_add_driver(&cpr_4011_4mxx_driver); +} + +static void __exit cpr_4011_4mxx_exit(void) +{ + i2c_del_driver(&cpr_4011_4mxx_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("CPR_4011_4MXX driver"); +MODULE_LICENSE("GPL"); + +module_init(cpr_4011_4mxx_init); +module_exit(cpr_4011_4mxx_exit); From 31b6766dc964f7a3d84299d0b0b4c8413c9e8871 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:09:44 +0000 Subject: [PATCH 217/255] Improve module search. --- .../src/python/onl/platform/base.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index ab293919..fd442256 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -192,21 +192,24 @@ class OnlPlatformBase(object): def insmod(self, module, required=True): kv = os.uname()[2] + searched = [] # Search paths in this order: locations = [ self.PLATFORM, '-'.join(self.PLATFORM.split('-')[:-1]), + 'onl', ".", ] for l in locations: - path = os.path.join("/lib/modules/%s/%s/%s" % (kv, l, module)) - print "searching: %s" % path - if os.path.exists(path): - subprocess.check_call("insmod %s" % path, shell=True) - return True + for e in [ ".ko", "" ]: + path = "/lib/modules/%s/%s/%s%s" % (kv, l, module, e) + searched.append(path) + if os.path.exists(path): + subprocess.check_call("insmod %s" % path, shell=True) + return True if required: - raise RuntimeError("kernel module %s could not be found." % path) + raise RuntimeError("kernel module %s could not be found. Searched: %s" % (module, searched)) else: return False From 5c337ad789ad496037e00558e50ae838b6ec5349 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:10:13 +0000 Subject: [PATCH 218/255] - Build common kernel modules - New module package --- .../kernels/kernel-3.16-lts-x86-64-all/PKG.yml | 15 +++++++++++++-- .../kernel-3.16-lts-x86-64-all/builds/.gitignore | 1 + .../kernel-3.16-lts-x86-64-all/builds/Makefile | 1 + 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml index 47496c0e..4165c4c6 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/PKG.yml @@ -1,3 +1,5 @@ +variables: + basename: onl-kernel-3.16-lts-x86-64-all common: arch: amd64 @@ -7,12 +9,21 @@ common: support: opennetworklinux@googlegroups.com packages: - - name: onl-kernel-3.16-lts-x86-64-all + - name: $basename version: 1.0.0 - summary: Open Network Linux Kernel 3.16 LTS for X86_64 Platforms. + summary: Open Network Linux 3.16 LTS Kernel for X86_64 Platforms. 
files: builds/kernel-3.16* : $$PKG_INSTALL/ builds/linux-*mbuild : $$PKG_INSTALL/mbuilds changelog: Change changes changes., + + - name: $basename-modules + version: 1.0.0 + summary: Open Network Linux 3.16 LTS Kernel Modules for X86_64 Platforms + + files: + builds/lib: /lib + + changelog: Change changes changes., diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore index ef51fa80..73d2c193 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/.gitignore @@ -1,2 +1,3 @@ linux-* kernel-* +lib diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile index e198a046..f9d1bdc8 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile @@ -14,6 +14,7 @@ include $(ONL)/make/config.mk kernel: $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) + $(ONL)/tools/scripts/kmodbuild.sh $(wildcard *-mbuild) $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl clean: rm -rf linux-3.16* kernel-3.16* From 001af039770e340434ae0595d56f7e3a27a980fa Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:10:49 +0000 Subject: [PATCH 219/255] Add 3.16 module build. This dependency needs to be fixed. --- builds/any/rootfs/jessie/common/amd64-base-packages.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/builds/any/rootfs/jessie/common/amd64-base-packages.yml b/builds/any/rootfs/jessie/common/amd64-base-packages.yml index b6b63b94..a07863f3 100644 --- a/builds/any/rootfs/jessie/common/amd64-base-packages.yml +++ b/builds/any/rootfs/jessie/common/amd64-base-packages.yml @@ -10,5 +10,4 @@ - onl-upgrade - hw-management - sx-kernel - - +- onl-kernel-3.16-lts-x86-64-all-modules From 016fda218667ce484f81bbb14df89ce0de1fc4e3 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:16:36 +0000 Subject: [PATCH 220/255] - AS5712 Kernel Modules - Upgrade to 3.16 LTS --- .../x86-64-accton-as5712-54x/modules/Makefile | 1 + .../x86-64-accton-as5712-54x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 4 + .../builds/x86-64-accton-as5712-54x-cpld.c | 467 ++++++++++++ .../builds/x86-64-accton-as5712-54x-fan.c | 442 ++++++++++++ .../builds/x86-64-accton-as5712-54x-leds.c | 597 ++++++++++++++++ .../builds/x86-64-accton-as5712-54x-psu.c | 293 ++++++++ .../builds/x86-64-accton-as5712-54x-sfp.c | 672 ++++++++++++++++++ .../platform-config/r0/PKG.yml | 2 +- .../src/lib/x86-64-accton-as5712-54x-r0.yml | 2 +- .../x86_64_accton_as5712_54x_r0/__init__.py | 4 + 12 files changed, 2484 insertions(+), 2 deletions(-) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c create mode 100644 
packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/PKG.yml new file mode 100644 index 00000000..50a0ba44 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as5712-54x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile new file mode 100644 index 00000000..a61eaca9 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile @@ -0,0 +1,4 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as5712-54x +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c new file mode 100644 index 00000000..a947fca1 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c @@ -0,0 +1,467 @@ +/* + * I2C multiplexer + * + * Copyright (C) Brandon Chuang + * + * This module supports the accton cpld that hold the channel select + * mechanism for other i2c slave devices, such as SFP. + * This includes the: + * Accton as5712_54x CPLD1/CPLD2/CPLD3 + * + * Based on: + * pca954x.c from Kumar Gala + * Copyright (C) 2006 + * + * Based on: + * pca954x.c from Ken Harrenstien + * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) + * + * Based on: + * i2c-virtual_cb.c from Brian Kuschak + * and + * pca9540.c from Jean Delvare . + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static struct dmi_system_id as5712_dmi_table[] = { + { + .ident = "Accton AS5712", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), + }, + }, + { + .ident = "Accton AS5712", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), + }, + }, +}; + +int platform_accton_as5712_54x(void) +{ + return dmi_check_system(as5712_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as5712_54x); + +#define NUM_OF_CPLD1_CHANS 0x0 +#define NUM_OF_CPLD2_CHANS 0x18 +#define NUM_OF_CPLD3_CHANS 0x1E +#define CPLD_CHANNEL_SELECT_REG 0x2 +#define CPLD_DESELECT_CHANNEL 0xFF + +#if 0 +#define NUM_OF_ALL_CPLD_CHANS (NUM_OF_CPLD2_CHANS + NUM_OF_CPLD3_CHANS) +#endif + +#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +enum cpld_mux_type { + as5712_54x_cpld2, + as5712_54x_cpld3, + as5712_54x_cpld1 +}; + +struct accton_i2c_cpld_mux { + enum cpld_mux_type type; + struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; + u8 last_chan; /* last register value */ +}; + +#if 0 +/* The mapping table between mux index and adapter index + array index : the mux index + the content : adapter index + */ +static int mux_adap_map[NUM_OF_ALL_CPLD_CHANS]; +#endif + +struct chip_desc { + u8 nchans; + u8 deselectChan; +}; + +/* Provide specs for the PCA954x types we know about */ +static const struct chip_desc chips[] = { + [as5712_54x_cpld1] = { + .nchans = NUM_OF_CPLD1_CHANS, + .deselectChan = CPLD_DESELECT_CHANNEL, + }, + [as5712_54x_cpld2] = { + .nchans = NUM_OF_CPLD2_CHANS, + .deselectChan = CPLD_DESELECT_CHANNEL, + }, + [as5712_54x_cpld3] = { + .nchans = NUM_OF_CPLD3_CHANS, + .deselectChan = CPLD_DESELECT_CHANNEL, + } +}; + +static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { + { "as5712_54x_cpld1", as5712_54x_cpld1 }, + { "as5712_54x_cpld2", as5712_54x_cpld2 }, + { "as5712_54x_cpld3", as5712_54x_cpld3 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); + +/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() + for this as they will try to lock adapter a second time */ +static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, + struct i2c_client *client, u8 val) +{ +#if 0 + int ret = -ENODEV; + + //if (adap->algo->master_xfer) { + if (0) + struct i2c_msg msg; + char buf[2]; + + msg.addr = client->addr; + msg.flags = 0; + msg.len = 2; + buf[0] = 0x2; + buf[1] = val; + msg.buf = buf; + ret = adap->algo->master_xfer(adap, &msg, 1); + } + else { + union i2c_smbus_data data; + ret = adap->algo->smbus_xfer(adap, client->addr, + client->flags, + I2C_SMBUS_WRITE, + 0x2, I2C_SMBUS_BYTE, &data); + } + + return ret; +#else + unsigned long orig_jiffies; + unsigned short flags; + union i2c_smbus_data data; + int try; + s32 res = -EIO; + + data.byte = val; + flags = client->flags; + flags &= I2C_M_TEN | I2C_CLIENT_PEC; + + if (adap->algo->smbus_xfer) { + /* Retry automatically on arbitration loss */ + orig_jiffies = jiffies; + for (res = 0, try = 0; try <= adap->retries; try++) { + res = adap->algo->smbus_xfer(adap, client->addr, flags, + I2C_SMBUS_WRITE, CPLD_CHANNEL_SELECT_REG, + I2C_SMBUS_BYTE_DATA, &data); + if (res != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + adap->timeout)) + break; + } + } + + return res; +#endif +} + +static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + u8 regval; + int ret = 0; + regval = chan; + + /* Only select the channel if its different from the last channel */ + if (data->last_chan != regval) { + ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); + data->last_chan = regval; + } + + return ret; +} + +static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + + /* Deselect active channel */ + data->last_chan = chips[data->type].deselectChan; + + return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); +} + +static void accton_i2c_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void accton_i2c_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) +{ + u8 reg = 0x1; + struct i2c_client *client; + int len; + + client = to_i2c_client(dev); + len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); + + return len; +} + +static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); + +/* + * I2C init/probing/exit functions + */ +static int accton_i2c_cpld_mux_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); + int chan=0; + struct 
accton_i2c_cpld_mux *data; + int ret = -ENODEV; + + if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) + goto err; + + data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err; + } + + i2c_set_clientdata(client, data); + +#if 0 + /* Write the mux register at addr to verify + * that the mux is in fact present. + */ + if (i2c_smbus_write_byte(client, 0) < 0) { + dev_warn(&client->dev, "probe failed\n"); + goto exit_free; + } +#endif + + data->type = id->driver_data; + + if (data->type == as5712_54x_cpld2 || data->type == as5712_54x_cpld3) { + data->last_chan = chips[data->type].deselectChan; /* force the first selection */ + + /* Now create an adapter for each channel */ + for (chan = 0; chan < chips[data->type].nchans; chan++) { +#if 0 + int idx; +#endif + data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, + I2C_CLASS_HWMON | I2C_CLASS_SPD, + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); + + if (data->virt_adaps[chan] == NULL) { + ret = -ENODEV; + dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); + goto virt_reg_failed; + } + +#if 0 + idx = (data->type - as5712_54x_cpld2) * NUM_OF_CPLD2_CHANS + chan; + mux_adap_map[idx] = data->virt_adaps[chan]->nr; +#endif + } + + dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", + chan, client->name); + } + + accton_i2c_cpld_add_client(client); + + ret = sysfs_create_file(&client->dev.kobj, &ver.attr); + if (ret) + goto virt_reg_failed; + + return 0; + +virt_reg_failed: + for (chan--; chan >= 0; chan--) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + } + + kfree(data); +err: + return ret; +} + +static int accton_i2c_cpld_mux_remove(struct i2c_client *client) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + const struct chip_desc *chip = &chips[data->type]; + int chan; + + sysfs_remove_file(&client->dev.kobj, &ver.attr); + + for (chan = 0; chan < chip->nchans; ++chan) { + if (data->virt_adaps[chan]) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + data->virt_adaps[chan] = NULL; + } + } + + kfree(data); + accton_i2c_cpld_remove_client(client); + + return 0; +} + +int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_read_byte_data(cpld_node->client, reg); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as5712_54x_i2c_cpld_read); + +int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as5712_54x_i2c_cpld_write); + +#if 0 +int accton_i2c_cpld_mux_get_index(int adap_index) +{ + int i; + + for (i = 0; i < NUM_OF_ALL_CPLD_CHANS; i++) { + if (mux_adap_map[i] == adap_index) { + return i; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL(accton_i2c_cpld_mux_get_index); 
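/*
 * Illustrative sketch (not part of this patch): the fan, LED, PSU and SFP
 * drivers added below consume the two exported helpers by CPLD i2c address.
 * Reading the CPLD version register (0x1) on the CPLD at address 0x60, for
 * example, would look like the following; the call site itself is
 * hypothetical:
 *
 *   int ver = as5712_54x_i2c_cpld_read(0x60, 0x1);
 *   if (ver < 0)
 *       pr_warn("as5712 cpld not reachable (%d)\n", ver);
 *   else
 *       pr_info("as5712 cpld version: %d\n", ver);
 *
 * Writes go through as5712_54x_i2c_cpld_write(cpld_addr, reg, value) in the
 * same way.
 */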
+#endif + +static struct i2c_driver accton_i2c_cpld_mux_driver = { + .driver = { + .name = "as5712_54x_cpld", + .owner = THIS_MODULE, + }, + .probe = accton_i2c_cpld_mux_probe, + .remove = accton_i2c_cpld_mux_remove, + .id_table = accton_i2c_cpld_mux_id, +}; + +static int __init accton_i2c_cpld_mux_init(void) +{ + mutex_init(&list_lock); + return i2c_add_driver(&accton_i2c_cpld_mux_driver); +} + +static void __exit accton_i2c_cpld_mux_exit(void) +{ + i2c_del_driver(&accton_i2c_cpld_mux_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_i2c_cpld_mux_init); +module_exit(accton_i2c_cpld_mux_exit); + + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-fan.c new file mode 100644 index 00000000..d6ffe7b6 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-fan.c @@ -0,0 +1,442 @@ +/* + * A hwmon driver for the Accton as5710 54x fan contrl + * + * Copyright (C) 2013 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAN_MAX_NUMBER 5 +#define FAN_SPEED_CPLD_TO_RPM_STEP 150 +#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 +#define FAN_DUTY_CYCLE_MIN 0 /* 10% ??*/ +#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ + +#define CPLD_REG_FAN_STATUS_OFFSET 0xC +#define CPLD_REG_FANR_STATUS_OFFSET 0x1F +#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E + +#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 +#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 +#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 +#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 +#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 + +#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 +#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 +#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A +#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B +#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C + +#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD + +#define CPLD_FAN1_INFO_BIT_MASK 0x1 +#define CPLD_FAN2_INFO_BIT_MASK 0x2 +#define CPLD_FAN3_INFO_BIT_MASK 0x4 +#define CPLD_FAN4_INFO_BIT_MASK 0x8 +#define CPLD_FAN5_INFO_BIT_MASK 0x10 + +#define PROJECT_NAME + +#define LOCAL_DEBUG 0 + +static struct accton_as5712_54x_fan *fan_data = NULL; + +struct accton_as5712_54x_fan { + struct platform_device *pdev; + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ + u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ + u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ + u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ + u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ + u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ +}; + +/*******************/ +#define MAKE_FAN_MASK_OR_REG(name,type) \ + CPLD_FAN##type##1_##name, \ + CPLD_FAN##type##2_##name, \ + CPLD_FAN##type##3_##name, \ + CPLD_FAN##type##4_##name, \ + CPLD_FAN##type##5_##name, + +/* fan related data + */ +static const u8 fan_info_mask[] = { + MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) +}; + +static const u8 fan_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) +}; + +static const u8 fanr_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) +}; + +/*******************/ +#define DEF_FAN_SET(id) \ + FAN##id##_FAULT, \ + FAN##id##_SPEED, \ + FAN##id##_DUTY_CYCLE, \ + FAN##id##_DIRECTION, \ + FANR##id##_FAULT, \ + FANR##id##_SPEED, + +enum sysfs_fan_attributes { + DEF_FAN_SET(1) + DEF_FAN_SET(2) + DEF_FAN_SET(3) + DEF_FAN_SET(4) + DEF_FAN_SET(5) +}; +/*******************/ +static void accton_as5712_54x_fan_update_device(struct device *dev); +static int accton_as5712_54x_fan_read_value(u8 reg); +static int accton_as5712_54x_fan_write_value(u8 reg, u8 value); + +static ssize_t fan_set_duty_cycle(struct device *dev, + struct device_attribute *da,const char *buf, size_t count); +static ssize_t fan_show_value(struct device *dev, + struct device_attribute *da, char *buf); + +extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + + +/*******************/ +#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ + static 
SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ + fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); + +#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) + +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) +/*******************/ + +#define _MAKE_FAN_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ + &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, + +#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) + +static struct attribute *accton_as5712_54x_fan_attributes[] = { + /* fan related attributes */ + MAKE_FAN_ATTR(PROJECT_NAME,1) + MAKE_FAN_ATTR(PROJECT_NAME,2) + MAKE_FAN_ATTR(PROJECT_NAME,3) + MAKE_FAN_ATTR(PROJECT_NAME,4) + MAKE_FAN_ATTR(PROJECT_NAME,5) + NULL +}; +/*******************/ + +/* fan related functions + */ +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + ssize_t ret = 0; + int data_index, type_index; + + accton_as5712_54x_fan_update_device(dev); + + if (fan_data->valid == 0) { + return ret; + } + + type_index = attr->index%FAN2_FAULT; + data_index = attr->index/FAN2_FAULT; + + switch (type_index) { + case FAN1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DUTY_CYCLE: + ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DIRECTION: + ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + default: + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); + break; + } + + return ret; +} +/*******************/ +static ssize_t fan_set_duty_cycle(struct 
device *dev, struct device_attribute *da, + const char *buf, size_t count) { + + int error, value; + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) + return -EINVAL; + + accton_as5712_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); + + fan_data->valid = 0; + + return count; +} + +static const struct attribute_group accton_as5712_54x_fan_group = { + .attrs = accton_as5712_54x_fan_attributes, +}; + +static int accton_as5712_54x_fan_read_value(u8 reg) +{ + return as5712_54x_i2c_cpld_read(0x60, reg); +} + +static int accton_as5712_54x_fan_write_value(u8 reg, u8 value) +{ + return as5712_54x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5712_54x_fan_update_device(struct device *dev) +{ + int speed, r_speed, fault, r_fault, ctrl_speed, direction; + int i; + + mutex_lock(&fan_data->update_lock); + + if (LOCAL_DEBUG) + printk ("Starting accton_as5712_54x_fan update \n"); + + if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { + /* do nothing */ + goto _exit; + } + + fan_data->valid = 0; + + if (LOCAL_DEBUG) + printk ("Starting accton_as5712_54x_fan update 2 \n"); + + fault = accton_as5712_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); + r_fault = accton_as5712_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); + direction = accton_as5712_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); + ctrl_speed = accton_as5712_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); + + if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); + + for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; + if (LOCAL_DEBUG) + printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); + + fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; + fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; + fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; + + /* fan speed + */ + speed = accton_as5712_54x_fan_read_value(fan_speed_reg[i]); + r_speed = accton_as5712_54x_fan_read_value(fanr_speed_reg[i]); + if ( (speed < 0) || (r_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); + + fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; + fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; + } + + /* finish to update */ + fan_data->last_updated = jiffies; + fan_data->valid = 1; + +_exit: + mutex_unlock(&fan_data->update_lock); +} + +static int accton_as5712_54x_fan_probe(struct platform_device *pdev) +{ + int status = -1; + + /* Register sysfs hooks */ + status = sysfs_create_group(&pdev->dev.kobj, &accton_as5712_54x_fan_group); + if (status) { + goto exit; + + } + + fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(fan_data->hwmon_dev)) { + status = PTR_ERR(fan_data->hwmon_dev); + goto exit_remove; + } + + dev_info(&pdev->dev, "accton_as5712_54x_fan\n"); + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &accton_as5712_54x_fan_group); +exit: + return status; +} + +static int accton_as5712_54x_fan_remove(struct platform_device *pdev) +{ + 
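/*
 * Aside (illustrative, not part of this patch): the update/show paths above
 * use two fixed scale factors. A raw tachometer count read from
 * CPLD_FANx_REG_SPEED_OFFSET is multiplied by FAN_SPEED_CPLD_TO_RPM_STEP
 * (150), and a duty-cycle percentage is divided by
 * FAN_SPEED_PRECENT_TO_CPLD_STEP (5) before it is written to register 0xD:
 *
 *   rpm     = raw_count * 150;   e.g. 100 -> 15000 RPM
 *   pwm_reg = percent / 5;       e.g. 50% -> 10
 *
 * fan_show_value() decodes its sysfs index into (field, fan) as
 * type_index = attr->index % FAN2_FAULT and
 * data_index = attr->index / FAN2_FAULT, since each fan contributes
 * FAN2_FAULT (= 6) attributes to the enum.
 */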
hwmon_device_unregister(fan_data->hwmon_dev); + sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5712_54x_fan_group); + + return 0; +} + +#define DRVNAME "as5712_54x_fan" + +static struct platform_driver accton_as5712_54x_fan_driver = { + .probe = accton_as5712_54x_fan_probe, + .remove = accton_as5712_54x_fan_remove, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5712_54x_fan_init(void) +{ + int ret; + + extern int platform_accton_as5712_54x(void); + if(!platform_accton_as5712_54x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as5712_54x_fan_driver); + if (ret < 0) { + goto exit; + } + + fan_data = kzalloc(sizeof(struct accton_as5712_54x_fan), GFP_KERNEL); + if (!fan_data) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5712_54x_fan_driver); + goto exit; + } + + mutex_init(&fan_data->update_lock); + fan_data->valid = 0; + + fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(fan_data->pdev)) { + ret = PTR_ERR(fan_data->pdev); + platform_driver_unregister(&accton_as5712_54x_fan_driver); + kfree(fan_data); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5712_54x_fan_exit(void) +{ + platform_device_unregister(fan_data->pdev); + platform_driver_unregister(&accton_as5712_54x_fan_driver); + kfree(fan_data); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5712_54x_fan driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_as5712_54x_fan_init); +module_exit(accton_as5712_54x_fan_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-leds.c new file mode 100644 index 00000000..cf8868e5 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-leds.c @@ -0,0 +1,597 @@ +/* + * A LED driver for the accton_as5712_54x_led + * + * Copyright (C) 2013 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include + +extern int as5712_54x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "as5712_54x_led" + +struct accton_as5712_54x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED + 1 = PSU1/PSU2 LED + 2 = FAN1-4 LED + 3 = FAN5-6 LED */ +}; + +static struct accton_as5712_54x_led_data *ledctl = NULL; + +/* LED related data + */ +#define LED_TYPE_PSU1_REG_MASK 0x03 +#define LED_MODE_PSU1_GREEN_MASK 0x02 +#define LED_MODE_PSU1_AMBER_MASK 0x01 +#define LED_MODE_PSU1_OFF_MASK 0x03 +#define LED_MODE_PSU1_AUTO_MASK 0x00 + +#define LED_TYPE_PSU2_REG_MASK 0x0C +#define LED_MODE_PSU2_GREEN_MASK 0x08 +#define LED_MODE_PSU2_AMBER_MASK 0x04 +#define LED_MODE_PSU2_OFF_MASK 0x0C +#define LED_MODE_PSU2_AUTO_MASK 0x00 + +#define LED_TYPE_DIAG_REG_MASK 0x0C +#define LED_MODE_DIAG_GREEN_MASK 0x08 +#define LED_MODE_DIAG_AMBER_MASK 0x04 +#define LED_MODE_DIAG_OFF_MASK 0x0C + +#define LED_TYPE_FAN_REG_MASK 0x03 +#define LED_MODE_FAN_GREEN_MASK 0x02 +#define LED_MODE_FAN_AMBER_MASK 0x01 +#define LED_MODE_FAN_OFF_MASK 0x03 +#define LED_MODE_FAN_AUTO_MASK 0x00 + +#define LED_TYPE_FAN1_REG_MASK 0x03 +#define LED_TYPE_FAN2_REG_MASK 0x0C +#define LED_TYPE_FAN3_REG_MASK 0x30 +#define LED_TYPE_FAN4_REG_MASK 0xC0 +#define LED_TYPE_FAN5_REG_MASK 0x03 +#define LED_TYPE_FAN6_REG_MASK 0x0C + +#define LED_MODE_FANX_GREEN_MASK 0x01 +#define LED_MODE_FANX_RED_MASK 0x02 +#define LED_MODE_FANX_OFF_MASK 0x00 + +#define LED_TYPE_LOC_REG_MASK 0x30 +#define LED_MODE_LOC_ON_MASK 0x00 +#define LED_MODE_LOC_OFF_MASK 0x10 +#define LED_MODE_LOC_BLINK_MASK 0x20 + +static const u8 led_reg[] = { + 0xA, /* LOC/DIAG/FAN LED*/ + 0xB, /* PSU1/PSU2 LED */ + 0x16, /* FAN1-4 LED */ + 0x17, /* FAN4-6 LED */ +}; + +enum led_type { + LED_TYPE_PSU1, + LED_TYPE_PSU2, + LED_TYPE_DIAG, + LED_TYPE_FAN, + LED_TYPE_FAN1, + LED_TYPE_FAN2, + LED_TYPE_FAN3, + LED_TYPE_FAN4, + LED_TYPE_FAN5, + LED_TYPE_LOC +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_AMBER, + LED_MODE_RED, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER_BLINK, + LED_MODE_RED_BLINK, + LED_MODE_AUTO, +}; + +struct led_type_mode { + enum led_type type; + int type_mask; + enum led_light_mode mode; + int mode_mask; +}; + +static struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, 
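/*
 * Aside (illustrative, not part of this patch): each row pairs the per-LED
 * field mask (type_mask) with the bit pattern for one colour/mode
 * (mode_mask). led_light_mode_to_reg_val() further below keeps the other
 * fields of the register intact and substitutes only the addressed one:
 *
 *   new_reg = mode_mask | (old_reg & ~type_mask);
 *
 *   e.g. setting PSU1 to green in register 0xB with PSU2 currently off:
 *        old_reg = 0x0C, type_mask = 0x03, mode_mask = 0x02
 *        new_reg = 0x02 | (0x0C & ~0x03) = 0x0E
 */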
+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} +}; + + +struct fanx_info_s { + u8 cname; /* device name */ + enum led_type type; + u8 reg_id; /* map to led_reg & reg_val */ +}; + +static struct fanx_info_s fanx_info[] = { + {'1', LED_TYPE_FAN1, 2}, + {'2', LED_TYPE_FAN2, 2}, + {'3', LED_TYPE_FAN3, 2}, + {'4', LED_TYPE_FAN4, 2}, + {'5', LED_TYPE_FAN5, 3} +}; + +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if ((led_type_mode_data[i].type_mask & reg_val) == + led_type_mode_data[i].mode_mask) + { + return led_type_mode_data[i].mode; + } + } + + return 0; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + reg_val = led_type_mode_data[i].mode_mask | + (reg_val & (~led_type_mode_data[i].type_mask)); + } + + return reg_val; +} + +static int accton_as5712_54x_led_read_value(u8 reg) +{ + return as5712_54x_i2c_cpld_read(0x60, reg); +} + +static int accton_as5712_54x_led_write_value(u8 reg, u8 value) +{ + return as5712_54x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5712_54x_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ 
/ 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as5712_54x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as5712_54x_led_read_value(led_reg[i]); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5712_54x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as5712_54x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as5712_54x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5712_54x_led_psu_1_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); +} + +static enum led_brightness accton_as5712_54x_led_psu_1_get(struct led_classdev *cdev) +{ + accton_as5712_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); +} + +static void accton_as5712_54x_led_psu_2_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); +} + +static enum led_brightness accton_as5712_54x_led_psu_2_get(struct led_classdev *cdev) +{ + accton_as5712_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); +} + +static void accton_as5712_54x_led_fan_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); +} + +static enum led_brightness accton_as5712_54x_led_fan_get(struct led_classdev *cdev) +{ + accton_as5712_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); +} + + +static void accton_as5712_54x_led_fanx_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (led_cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); + return; + } + } +} + + +static enum led_brightness accton_as5712_54x_led_fanx_get(struct led_classdev *cdev) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as5712_54x_led_update(); + return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); + } + } + + + return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); +} + + +static void accton_as5712_54x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5712_54x_led_set(led_cdev, led_light_mode, 
led_reg[0], LED_TYPE_DIAG); +} + +static enum led_brightness accton_as5712_54x_led_diag_get(struct led_classdev *cdev) +{ + accton_as5712_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as5712_54x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); +} + +static enum led_brightness accton_as5712_54x_led_loc_get(struct led_classdev *cdev) +{ + accton_as5712_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static struct led_classdev accton_as5712_54x_leds[] = { + [LED_TYPE_PSU1] = { + .name = "accton_as5712_54x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_psu_1_set, + .brightness_get = accton_as5712_54x_led_psu_1_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as5712_54x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_psu_2_set, + .brightness_get = accton_as5712_54x_led_psu_2_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN] = { + .name = "accton_as5712_54x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_fan_set, + .brightness_get = accton_as5712_54x_led_fan_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN1] = { + .name = "accton_as5712_54x_led::fan1", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_fanx_set, + .brightness_get = accton_as5712_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN2] = { + .name = "accton_as5712_54x_led::fan2", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_fanx_set, + .brightness_get = accton_as5712_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN3] = { + .name = "accton_as5712_54x_led::fan3", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_fanx_set, + .brightness_get = accton_as5712_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN4] = { + .name = "accton_as5712_54x_led::fan4", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_fanx_set, + .brightness_get = accton_as5712_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN5] = { + .name = "accton_as5712_54x_led::fan5", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_fanx_set, + .brightness_get = accton_as5712_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_DIAG] = { + .name = "accton_as5712_54x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_diag_set, + .brightness_get = accton_as5712_54x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_LOC] = { + .name = "accton_as5712_54x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as5712_54x_led_loc_set, + .brightness_get = accton_as5712_54x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as5712_54x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < 
ARRAY_SIZE(accton_as5712_54x_leds); i++) { + led_classdev_suspend(&accton_as5712_54x_leds[i]); + } + + return 0; +} + +static int accton_as5712_54x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { + led_classdev_resume(&accton_as5712_54x_leds[i]); + } + + return 0; +} + +static int accton_as5712_54x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as5712_54x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as5712_54x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as5712_54x_leds[i]); + } + } + + return ret; +} + +static int accton_as5712_54x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { + led_classdev_unregister(&accton_as5712_54x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as5712_54x_led_driver = { + .probe = accton_as5712_54x_led_probe, + .remove = accton_as5712_54x_led_remove, + .suspend = accton_as5712_54x_led_suspend, + .resume = accton_as5712_54x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5712_54x_led_init(void) +{ + int ret; + + extern int platform_accton_as5712_54x(void); + if(!platform_accton_as5712_54x()) { + return -ENODEV; + } + ret = platform_driver_register(&accton_as5712_54x_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as5712_54x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5712_54x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as5712_54x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5712_54x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as5712_54x_led_driver); + kfree(ledctl); +} + +module_init(accton_as5712_54x_led_init); +module_exit(accton_as5712_54x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5712_54x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-psu.c new file mode 100644 index 00000000..1e53140a --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-psu.c @@ -0,0 +1,293 @@ +/* + * An hwmon driver for accton as5712_54x Power Module + * + * Copyright (C) Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as5712_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5712_54x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[14]; /* Model name, read from eeprom */ +}; + +static struct as5712_54x_psu_data *as5712_54x_psu_update_device(struct device *dev); + +enum as5712_54x_psu_sysfs_attributes { + PSU_INDEX, + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as5712_54x_psu_attributes[] = { + &sensor_dev_attr_psu_index.dev_attr.attr, + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_index(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5712_54x_psu_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->index); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5712_54x_psu_data *data = as5712_54x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((data->index - 1) * 4) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as5712_54x_psu_data *data = as5712_54x_psu_update_device(dev); + + return sprintf(buf, "%s", data->model_name); +} + +static const struct attribute_group as5712_54x_psu_group = { + .attrs = as5712_54x_psu_attributes, +}; + +static int as5712_54x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5712_54x_psu_data *data; + 
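/*
 * Aside (illustrative, not part of this patch): show_status() above decodes
 * the CPLD status byte (read from register 0x2 of the CPLD at 0x60 in
 * as5712_54x_psu_update_device() below) as one nibble per PSU. Bit 0 of the
 * nibble reads 1 when the PSU is absent (hence the inversion) and bit 1
 * reads 1 when power is good:
 *
 *   present    = !((status >> ((index - 1) * 4)) & 0x1);
 *   power_good =  (status >> ((index - 1) * 4 + 1)) & 0x1;
 *
 *   e.g. status = 0x02 -> PSU1 present with power good,
 *        PSU2 present but without power good.
 */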
int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5712_54x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as5712_54x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x38 || client->addr == 0x50) { + data->index = 1; + } + else if (client->addr == 0x3b || client->addr == 0x53) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5712_54x_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5712_54x_psu_remove(struct i2c_client *client) +{ + struct as5712_54x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5712_54x_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as5712_54x_psu_id[] = { + { "as5712_54x_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as5712_54x_psu_id); + +static struct i2c_driver as5712_54x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5712_54x_psu", + }, + .probe = as5712_54x_psu_probe, + .remove = as5712_54x_psu_remove, + .id_table = as5712_54x_psu_id, + .address_list = normal_i2c, +}; + +static int as5712_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as5712_54x_psu_data *as5712_54x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5712_54x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + + dev_dbg(&client->dev, "Starting as5712_54x update\n"); + + /* Read model name */ + if (client->addr == 0x38 || client->addr == 0x3b) { + /* AC power */ + status = as5712_54x_psu_read_block(client, 0x26, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + else { + /* DC power */ + status = as5712_54x_psu_read_block(client, 0x50, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + + /* Read psu status */ + status = as5712_54x_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as5712_54x_psu_init(void) +{ + extern int platform_accton_as5712_54x(void); + if(!platform_accton_as5712_54x()) { + return -ENODEV; + } + 
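/*
 * Aside (illustrative, not part of this patch): the model-name read in
 * as5712_54x_psu_update_device() above is an i2c block read bounded by the
 * backing buffer, with the last byte reserved for the terminator:
 *
 *   char model_name[14];
 *   status = as5712_54x_psu_read_block(client, 0x26, model_name,
 *                                      ARRAY_SIZE(model_name) - 1);
 *   if (status >= 0)
 *       model_name[ARRAY_SIZE(model_name) - 1] = '\0';
 *
 * Per the code above, command offset 0x26 is used for the AC supplies at
 * addresses 0x38/0x3b, while the supplies at 0x50/0x53 take the DC path and
 * use offset 0x50 instead.
 */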
return i2c_add_driver(&as5712_54x_psu_driver); +} + +static void __exit as5712_54x_psu_exit(void) +{ + i2c_del_driver(&as5712_54x_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5712_54x_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as5712_54x_psu_init); +module_exit(as5712_54x_psu_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-sfp.c new file mode 100644 index 00000000..1c2a93c1 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-sfp.c @@ -0,0 +1,672 @@ +/* + * An hwmon driver for accton as5712_54x sfp + * + * Copyright (C) Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NUM_OF_SFP_PORT 54 +#define BIT_INDEX(i) (1ULL << (i)) + +#if 0 +static ssize_t show_status(struct device *dev, struct device_attribute *da,char *buf); +static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +static int as5712_54x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); +#endif + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5712_54x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u64 status[4]; /* bit0:port0, bit1:port1 and so on */ + /* index 0 => is_present + 1 => tx_fail + 2 => tx_disable + 3 => rx_loss */ +}; + +/* The table maps active port to cpld port. + * Array index 0 is for active port 1, + * index 1 for active port 2, and so on. + * The array content implies cpld port index. 
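 * For the SFP entries the front port is simply the cpld index + 1; the
 * QSFP entries are interleaved: cpld indices 48..53 map to front ports
 * 49, 52, 50, 53, 51, 54 (the last row of the table below, matching the
 * declaration order of enum port_numbers later in this file).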
+ */ +static const u8 cpld_to_front_port_table[] = +{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 52, 50, 53, 51, 54}; + +#define CPLD_PORT_TO_FRONT_PORT(port) (cpld_to_front_port_table[port]) + +static struct as5712_54x_sfp_data *as5712_54x_sfp_update_device(struct device *dev, int update_eeprom); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +enum as5712_54x_sfp_sysfs_attributes { + SFP_IS_PRESENT, + SFP_TX_FAULT, + SFP_TX_DISABLE, + SFP_RX_LOSS, + SFP_PORT_NUMBER, + SFP_EEPROM, + SFP_RX_LOS_ALL, + SFP_IS_PRESENT_ALL, +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_status, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, show_status, NULL, SFP_TX_FAULT); +static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, show_status, set_tx_disable, SFP_TX_DISABLE); +static SENSOR_DEVICE_ATTR(sfp_rx_loss, S_IRUGO, show_status,NULL, SFP_RX_LOSS); +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); +static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, show_status,NULL, SFP_RX_LOS_ALL); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_status,NULL, SFP_IS_PRESENT_ALL); + +static struct attribute *as5712_54x_sfp_attributes[] = { + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, + &sensor_dev_attr_sfp_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los_all.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5712_54x_sfp_data *data; + u8 val; + int values[7]; + + /* Error-check the CPLD read results. */ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ + do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ + } while(0) + + if(attr->index == SFP_RX_LOS_ALL) { + /* + * Report the RX_LOS status for all ports. + * This does not depend on the currently active SFP selector. 
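 * Register map used here and in as5712_54x_sfp_update_device() (derived
 * from the reads in this file): on each port CPLD (0x61 covers ports
 * 1-24, 0x62 covers ports 25-48) registers 0x6-0x8 hold present,
 * 0x9-0xB tx_fault, 0xC-0xE tx_disable and 0xF-0x11 rx_los, one bit per
 * port, eight ports per register; QSFP present for ports 49-54 is read
 * from 0x62 register 0x14.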
+ */ + + /* RX_LOS Ports 1-8 */ + VALIDATED_READ(buf, values[0], as5712_54x_i2c_cpld_read(0x61, 0x0F), 0); + /* RX_LOS Ports 9-16 */ + VALIDATED_READ(buf, values[1], as5712_54x_i2c_cpld_read(0x61, 0x10), 0); + /* RX_LOS Ports 17-24 */ + VALIDATED_READ(buf, values[2], as5712_54x_i2c_cpld_read(0x61, 0x11), 0); + /* RX_LOS Ports 25-32 */ + VALIDATED_READ(buf, values[3], as5712_54x_i2c_cpld_read(0x62, 0x0F), 0); + /* RX_LOS Ports 33-40 */ + VALIDATED_READ(buf, values[4], as5712_54x_i2c_cpld_read(0x62, 0x10), 0); + /* RX_LOS Ports 41-48 */ + VALIDATED_READ(buf, values[5], as5712_54x_i2c_cpld_read(0x62, 0x11), 0); + + /** Return values 1 -> 48 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], + values[3], values[4], values[5]); + } + + if(attr->index == SFP_IS_PRESENT_ALL) { + /* + * Report the SFP_PRESENCE status for all ports. + * This does not depend on the currently active SFP selector. + */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], as5712_54x_i2c_cpld_read(0x61, 0x6), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], as5712_54x_i2c_cpld_read(0x61, 0x7), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], as5712_54x_i2c_cpld_read(0x61, 0x8), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], as5712_54x_i2c_cpld_read(0x62, 0x6), 1); + /* SFP_PRESENT Ports 33-40 */ + VALIDATED_READ(buf, values[4], as5712_54x_i2c_cpld_read(0x62, 0x7), 1); + /* SFP_PRESENT Ports 41-48 */ + VALIDATED_READ(buf, values[5], as5712_54x_i2c_cpld_read(0x62, 0x8), 1); + /* QSFP_PRESENT Ports 49-54 */ + VALIDATED_READ(buf, values[6], as5712_54x_i2c_cpld_read(0x62, 0x14), 1); + + /* Return values 1 -> 54 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], + values[3], values[4], values[5], + values[6] & 0x3F); + } + /* + * The remaining attributes are gathered on a per-selected-sfp basis. + */ + data = as5712_54x_sfp_update_device(dev, 0); + if (attr->index == SFP_IS_PRESENT) { + val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 0 : 1; + } + else { + val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 
1 : 0; + } + + return sprintf(buf, "%d", val); +} + +static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); + unsigned short cpld_addr = 0; + u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; + long disable; + int error; + + /* Tx disable is not supported for QSFP ports(49-54) */ + if (data->port >= 48) { + return -EINVAL; + } + + error = kstrtol(buf, 10, &disable); + if (error) { + return error; + } + + mutex_lock(&data->update_lock); + + if(data->port < 24) { + cpld_addr = 0x61; + cpld_reg = 0xC + data->port / 8; + cpld_bit = 1 << (data->port % 8); + } + else { + cpld_addr = 0x62; + cpld_reg = 0xC + (data->port - 24) / 8; + cpld_bit = 1 << (data->port % 8); + } + + cpld_val = as5712_54x_i2c_cpld_read(cpld_addr, cpld_reg); + + /* Update tx_disable status */ + if (disable) { + data->status[SFP_TX_DISABLE] |= BIT_INDEX(data->port); + cpld_val |= cpld_bit; + } + else { + data->status[SFP_TX_DISABLE] &= ~BIT_INDEX(data->port); + cpld_val &= ~cpld_bit; + } + + as5712_54x_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); + + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as5712_54x_sfp_data *data = as5712_54x_sfp_update_device(dev, 1); + + if (!data->valid) { + return 0; + } + + if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) != 0) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as5712_54x_sfp_group = { + .attrs = as5712_54x_sfp_attributes, +}; + +static int as5712_54x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5712_54x_sfp_data *data; + int status; + + extern int platform_accton_as5712_54x(void); + if(!platform_accton_as5712_54x()) { + return -ENODEV; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5712_54x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as5712_54x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5712_54x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5712_54x_sfp_remove(struct i2c_client *client) +{ + struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5712_54x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as5712_54x_sfp1, as5712_54x_sfp2, as5712_54x_sfp3, as5712_54x_sfp4, +as5712_54x_sfp5, as5712_54x_sfp6, as5712_54x_sfp7, as5712_54x_sfp8, +as5712_54x_sfp9, as5712_54x_sfp10, as5712_54x_sfp11,as5712_54x_sfp12, +as5712_54x_sfp13, as5712_54x_sfp14, as5712_54x_sfp15,as5712_54x_sfp16, +as5712_54x_sfp17, as5712_54x_sfp18, 
as5712_54x_sfp19,as5712_54x_sfp20, +as5712_54x_sfp21, as5712_54x_sfp22, as5712_54x_sfp23,as5712_54x_sfp24, +as5712_54x_sfp25, as5712_54x_sfp26, as5712_54x_sfp27,as5712_54x_sfp28, +as5712_54x_sfp29, as5712_54x_sfp30, as5712_54x_sfp31,as5712_54x_sfp32, +as5712_54x_sfp33, as5712_54x_sfp34, as5712_54x_sfp35,as5712_54x_sfp36, +as5712_54x_sfp37, as5712_54x_sfp38, as5712_54x_sfp39,as5712_54x_sfp40, +as5712_54x_sfp41, as5712_54x_sfp42, as5712_54x_sfp43,as5712_54x_sfp44, +as5712_54x_sfp45, as5712_54x_sfp46, as5712_54x_sfp47,as5712_54x_sfp48, +as5712_54x_sfp49, as5712_54x_sfp52, as5712_54x_sfp50,as5712_54x_sfp53, +as5712_54x_sfp51, as5712_54x_sfp54 +}; + +static const struct i2c_device_id as5712_54x_sfp_id[] = { +{ "as5712_54x_sfp1", as5712_54x_sfp1 }, { "as5712_54x_sfp2", as5712_54x_sfp2 }, +{ "as5712_54x_sfp3", as5712_54x_sfp3 }, { "as5712_54x_sfp4", as5712_54x_sfp4 }, +{ "as5712_54x_sfp5", as5712_54x_sfp5 }, { "as5712_54x_sfp6", as5712_54x_sfp6 }, +{ "as5712_54x_sfp7", as5712_54x_sfp7 }, { "as5712_54x_sfp8", as5712_54x_sfp8 }, +{ "as5712_54x_sfp9", as5712_54x_sfp9 }, { "as5712_54x_sfp10", as5712_54x_sfp10 }, +{ "as5712_54x_sfp11", as5712_54x_sfp11 }, { "as5712_54x_sfp12", as5712_54x_sfp12 }, +{ "as5712_54x_sfp13", as5712_54x_sfp13 }, { "as5712_54x_sfp14", as5712_54x_sfp14 }, +{ "as5712_54x_sfp15", as5712_54x_sfp15 }, { "as5712_54x_sfp16", as5712_54x_sfp16 }, +{ "as5712_54x_sfp17", as5712_54x_sfp17 }, { "as5712_54x_sfp18", as5712_54x_sfp18 }, +{ "as5712_54x_sfp19", as5712_54x_sfp19 }, { "as5712_54x_sfp20", as5712_54x_sfp20 }, +{ "as5712_54x_sfp21", as5712_54x_sfp21 }, { "as5712_54x_sfp22", as5712_54x_sfp22 }, +{ "as5712_54x_sfp23", as5712_54x_sfp23 }, { "as5712_54x_sfp24", as5712_54x_sfp24 }, +{ "as5712_54x_sfp25", as5712_54x_sfp25 }, { "as5712_54x_sfp26", as5712_54x_sfp26 }, +{ "as5712_54x_sfp27", as5712_54x_sfp27 }, { "as5712_54x_sfp28", as5712_54x_sfp28 }, +{ "as5712_54x_sfp29", as5712_54x_sfp29 }, { "as5712_54x_sfp30", as5712_54x_sfp30 }, +{ "as5712_54x_sfp31", as5712_54x_sfp31 }, { "as5712_54x_sfp32", as5712_54x_sfp32 }, +{ "as5712_54x_sfp33", as5712_54x_sfp33 }, { "as5712_54x_sfp34", as5712_54x_sfp34 }, +{ "as5712_54x_sfp35", as5712_54x_sfp35 }, { "as5712_54x_sfp36", as5712_54x_sfp36 }, +{ "as5712_54x_sfp37", as5712_54x_sfp37 }, { "as5712_54x_sfp38", as5712_54x_sfp38 }, +{ "as5712_54x_sfp39", as5712_54x_sfp39 }, { "as5712_54x_sfp40", as5712_54x_sfp40 }, +{ "as5712_54x_sfp41", as5712_54x_sfp41 }, { "as5712_54x_sfp42", as5712_54x_sfp42 }, +{ "as5712_54x_sfp43", as5712_54x_sfp43 }, { "as5712_54x_sfp44", as5712_54x_sfp44 }, +{ "as5712_54x_sfp45", as5712_54x_sfp45 }, { "as5712_54x_sfp46", as5712_54x_sfp46 }, +{ "as5712_54x_sfp47", as5712_54x_sfp47 }, { "as5712_54x_sfp48", as5712_54x_sfp48 }, +{ "as5712_54x_sfp49", as5712_54x_sfp49 }, { "as5712_54x_sfp50", as5712_54x_sfp50 }, +{ "as5712_54x_sfp51", as5712_54x_sfp51 }, { "as5712_54x_sfp52", as5712_54x_sfp52 }, +{ "as5712_54x_sfp53", as5712_54x_sfp53 }, { "as5712_54x_sfp54", as5712_54x_sfp54 }, + +{} +}; +MODULE_DEVICE_TABLE(i2c, as5712_54x_sfp_id); + +static struct i2c_driver as5712_54x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5712_54x_sfp", + }, + .probe = as5712_54x_sfp_probe, + .remove = as5712_54x_sfp_remove, + .id_table = as5712_54x_sfp_id, + .address_list = normal_i2c, +}; + +static int as5712_54x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) +{ + int result = i2c_smbus_read_byte_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, 
command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u8)result; + result = 0; + +abort: + return result; +} + +#define ALWAYS_UPDATE_DEVICE 1 + +static struct as5712_54x_sfp_data *as5712_54x_sfp_update_device(struct device *dev, int update_eeprom) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (ALWAYS_UPDATE_DEVICE || time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0, j = 0; + + data->valid = 0; + //dev_dbg(&client->dev, "Starting as5712_54x sfp status update\n"); + memset(data->status, 0, sizeof(data->status)); + + /* Read status of port 1~48(SFP port) */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 12; j++) { + status = as5712_54x_i2c_cpld_read(0x61+i, 0x6+j); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + goto exit; + } + + data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); + } + } + + /* + * Bring QSFPs out of reset, + * This is a temporary fix until the QSFP+_MOD_RST register + * can be exposed through the driver. + */ + as5712_54x_i2c_cpld_write(0x62, 0x15, 0x3F); + + /* Read present status of port 49-54(QSFP port) */ + status = as5712_54x_i2c_cpld_read(0x62, 0x14); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + } + else { + data->status[SFP_IS_PRESENT] |= (u64)status << 48; + } + + if (update_eeprom) { + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) == 0) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom); i++) { + status = as5712_54x_sfp_read_byte(client, i, data->eeprom + i); + + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", + CPLD_PORT_TO_FRONT_PORT(data->port)); + goto exit; + } + } + } + } + + data->valid = 1; + data->last_updated = jiffies; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +module_i2c_driver(as5712_54x_sfp_driver); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5712_54x_sfp driver"); +MODULE_LICENSE("GPL"); + +#if 0 + int i = 0, j = 0; + + data->valid = 0; + //dev_dbg(&client->dev, "Starting as5712_54x sfp update\n"); + memset(data->status, 0, sizeof(data->status)); + + /* Read status of port 1~48(SFP port) */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 12; j++) { + status = as5712_54x_i2c_cpld_read(0x61+i, 0x6+j); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + continue; + } + + data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); + } + } + + /* Read present status of port 49-54(QSFP port) */ + status = as5712_54x_i2c_cpld_read(0x62, 0x14); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + } + else { + data->status[SFP_IS_PRESENT] |= (u64)status << 48; + } +#endif + +/* Reserver to prevent from CPLD port mapping is changed + */ +#if 0 +BIT_INDEX(port_present_index[data->port]) +/* The bit index of is_present field read from CPLD + * Array index 0 is for as5712_54x_sfp1, + * index 1 is for as5712_54x_sfp2, and so on. 
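 * (Note: this block is compiled out by the surrounding #if 0; it is kept
 * only as a reference in case the CPLD is_present bit ordering changes,
 * as the comment above the #if 0 indicates.)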
+ */ +static const int port_present_index[] = { + 4, 5, 6, 7, 9, 8, 11, 10, + 0, 1, 2, 3, 12, 13, 14, 15, +16, 17, 18, 19, 28, 29, 30, 31, +20, 21, 22, 23, 24, 25, 26, 27 +}; +#endif + +#if 0 +static struct as5712_54x_sfp_data *as5712_54x_sfp_update_status(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); + int status = -1; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->status_last_updated + HZ + HZ / 2) + || !data->status_valid) { + int status = -1; + int i = 0, j = 0; + + data->status_valid = 0; + //dev_dbg(&client->dev, "Starting as5712_54x sfp status update\n"); + memset(data->status, 0, sizeof(data->status)); + + /* Read status of port 1~48(SFP port) */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 12; j++) { + status = as5712_54x_i2c_cpld_read(0x61+i, 0x6+j); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + goto exit; + } + + data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); + } + } + + /* + * Bring QSFPs out of reset, + * This is a temporary fix until the QSFP+_MOD_RST register + * can be exposed through the driver. + */ + as5712_54x_i2c_cpld_write(0x62, 0x15, 0x3F); + + /* Read present status of port 49-54(QSFP port) */ + status = as5712_54x_i2c_cpld_read(0x62, 0x14); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + } + else { + data->status[SFP_IS_PRESENT] |= (u64)status << 48; + } + + data->status_valid = 1; + data->status_last_updated = jiffies; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +static struct as5712_54x_sfp_data *as5712_54x_sfp_update_eeprom(struct device *dev) +{ + struct as5712_54x_sfp_data *data = NULL; + + data = as5712_54x_sfp_update_status(dev); + + if (data == NULL || data->status_valid == 0) { + data->eeprom_valid = 0; + return data; + } + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->eeprom_last_updated + HZ + HZ / 2) + || !data->eeprom_valid) { + int status = -1; + int i = 0; + + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) == 0) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { + status = as5712_54x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, + data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), + I2C_SMBUS_BLOCK_MAX); + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", + CPLD_PORT_TO_FRONT_PORT(data->port)); + goto exit; + } + } + } + + data->eeprom_last_updated = jiffies; + data->eeprom_valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} +#endif diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/PKG.yml index cc049fb9..f618c7cc 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5712-54x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5712-54x-r0 MODULES=onl-platform-modules-x86-64-accton-as5712-54x:amd64 diff --git 
a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml index fa4eea34..e03fe786 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as5712-54x-r0: --stop=1 kernel: - <<: *kernel-3-2 + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py index 0fc60af9..6e9d0aa0 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py @@ -9,6 +9,10 @@ class OnlPlatform_x86_64_accton_as5712_54x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".5712.54" def baseconfig(self): + self.insmod('cpr_4011_4mxx', required=False) + for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: + self.insmod("x86-64-accton-as5712-54x-%s.ko" % m, required=False) + ########### initialize I2C bus 0 ########### # initialize CPLDs self.new_i2c_devices( From 2afe7d78c5df4ae2593bbbe2f4f9065487839682 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:36:59 +0000 Subject: [PATCH 221/255] System, Loader and Kernel upgrades are now required. --- .../vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml index a9519949..623a059c 100644 --- a/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml +++ b/packages/base/all/vendor-config-onl/src/etc/onl/sysconfig/00-defaults.yml @@ -23,7 +23,7 @@ upgrade: dir: /lib/platform-config/current/onl/upgrade/onie system: - auto: advisory + auto: force firmware: auto: advisory @@ -31,7 +31,7 @@ upgrade: dir: /lib/platform-config/current/onl/upgrade/firmware loader: - auto: advisory + auto: force versions: /etc/onl/loader/versions.json package: dir: /etc/onl/upgrade/$PARCH From 3678d85b9b5240f7f62c0f4bfa018c0bf7420687 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:37:29 +0000 Subject: [PATCH 222/255] The Loader and Kernel upgrade stage must now be performed prior to platform baseconfig. This avoids the possibility of inserting incompatible kernel modules built for the new kernel into the old one. 
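Illustrative sketch (not part of the patch series): platform baseconfig now
insmods modules built against the 3.16 kernel (see the as5712-54x __init__.py
hunk above), so the loader/kernel upgrade stage moved here must run before
those modules are inserted; otherwise modules built for the new kernel could
be insmod'ed into the still-running old kernel and fail the vermagic check.
A minimal stand-alone illustration of that constraint, using the real
modinfo(8) tool but a hypothetical helper name:

    import platform
    import subprocess

    def module_matches_running_kernel(ko_path):
        """Return True if ko_path was built for the currently booted kernel."""
        # "modinfo -F vermagic foo.ko" prints e.g. "3.16.39 SMP mod_unload ..."
        vermagic = subprocess.check_output(["modinfo", "-F", "vermagic", ko_path])
        built_for = vermagic.decode().split()[0]
        return built_for == platform.release()

    # Example: skip platform modules until the kernel upgrade has taken effect.
    # if module_matches_running_kernel("x86-64-accton-as5712-54x-cpld.ko"):
    #     subprocess.check_call(["insmod", "x86-64-accton-as5712-54x-cpld.ko"])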
--- .../src/boot.d/{63.upgrade-loader => 15.upgrade-loader} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packages/base/all/vendor-config-onl/src/boot.d/{63.upgrade-loader => 15.upgrade-loader} (100%) diff --git a/packages/base/all/vendor-config-onl/src/boot.d/63.upgrade-loader b/packages/base/all/vendor-config-onl/src/boot.d/15.upgrade-loader similarity index 100% rename from packages/base/all/vendor-config-onl/src/boot.d/63.upgrade-loader rename to packages/base/all/vendor-config-onl/src/boot.d/15.upgrade-loader From c91cbc5968292fc3e3bae33207cc79d4d70eaf01 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 18:51:25 +0000 Subject: [PATCH 223/255] Use absolute path. --- .../amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile index f9d1bdc8..60d9fa3b 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile @@ -14,7 +14,7 @@ include $(ONL)/make/config.mk kernel: $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) - $(ONL)/tools/scripts/kmodbuild.sh $(wildcard *-mbuild) $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl + $(ONL)/tools/scripts/kmodbuild.sh $(wildcard $(THIS_DIR)/*mbuild) $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl clean: rm -rf linux-3.16* kernel-3.16* From fbda6574a902e7b8b08c85f337fbca4ceef4dd8b Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 19:03:45 +0000 Subject: [PATCH 224/255] The wildcard expansion is happening at the wrong time. Fix this later. --- .../amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile index 60d9fa3b..3ea84a98 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile @@ -14,7 +14,7 @@ include $(ONL)/make/config.mk kernel: $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) - $(ONL)/tools/scripts/kmodbuild.sh $(wildcard $(THIS_DIR)/*mbuild) $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl + $(ONL)/tools/scripts/kmodbuild.sh linux-3.16.39-mbuild $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl clean: rm -rf linux-3.16* kernel-3.16* From 13e20e596c131d943666c7ac728d5caeac06ee72 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 19:39:04 +0000 Subject: [PATCH 225/255] ARCH check. 
--- make/kmodule.mk | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/make/kmodule.mk b/make/kmodule.mk index 84a7bfbb..2347925c 100644 --- a/make/kmodule.mk +++ b/make/kmodule.mk @@ -10,5 +10,10 @@ ifndef PLATFORM $(error $$PLATFORM must be set) endif +ifndef ARCH +$(error $$ARCH must be set) +endif + + modules: - $(ONL)/tools/scripts/kmodbuild.sh "$(KERNELS)" "$(KMODULES)" $(PLATFORM) + ARCH=$(ARCH) $(ONL)/tools/scripts/kmodbuild.sh "$(KERNELS)" "$(KMODULES)" $(PLATFORM) From 918485eabc2f08d899455c666aee2094187a8a0c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 19:39:12 +0000 Subject: [PATCH 226/255] Set ARCH properly. --- .../x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile index a61eaca9..9de6dc2f 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile @@ -1,4 +1,5 @@ KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as5712-54x +ARCH := x86_64 include $(ONL)/make/kmodule.mk From 78f76de258c5971e7a460a32bdbb4f2e30b55e79 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 22:48:35 +0000 Subject: [PATCH 227/255] - AS6712 Kernel Modules - Upgrade to 3.16 LTS --- .../x86-64-accton-as6712-32x/modules/Makefile | 1 + .../x86-64-accton-as6712-32x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as6712-32x-cpld.c | 428 ++++++++++++ .../builds/x86-64-accton-as6712-32x-fan.c | 434 ++++++++++++ .../builds/x86-64-accton-as6712-32x-leds.c | 617 ++++++++++++++++++ .../builds/x86-64-accton-as6712-32x-psu.c | 304 +++++++++ .../builds/x86-64-accton-as6712-32x-sfp.c | 377 +++++++++++ .../platform-config/r0/PKG.yml | 2 +- .../src/lib/x86-64-accton-as6712-32x-r0.yml | 4 +- .../x86_64_accton_as6712_32x_r0/__init__.py | 3 + 12 files changed, 2174 insertions(+), 3 deletions(-) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/Makefile @@ -0,0 +1 @@ 
+include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/PKG.yml new file mode 100644 index 00000000..7b9a60c4 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as6712-32x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile new file mode 100644 index 00000000..7a8c22ac --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as6712-32x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c new file mode 100644 index 00000000..e37d06bc --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c @@ -0,0 +1,428 @@ +/* + * I2C multiplexer + * + * Copyright (C) 2014 Accton Technology Corporation. + * + * This module supports the accton cpld that hold the channel select + * mechanism for other i2c slave devices, such as SFP. + * This includes the: + * Accton as6712_32x CPLD1/CPLD2/CPLD3 + * + * Based on: + * pca954x.c from Kumar Gala + * Copyright (C) 2006 + * + * Based on: + * pca954x.c from Ken Harrenstien + * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) + * + * Based on: + * i2c-virtual_cb.c from Brian Kuschak + * and + * pca9540.c from Jean Delvare . + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static struct dmi_system_id as6712_dmi_table[] = { + { + .ident = "Accton AS6712", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), + }, + }, + { + .ident = "Accton AS6712", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), + }, + }, +}; + +int platform_accton_as6712_32x(void) +{ + return dmi_check_system(as6712_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as6712_32x); + +#define NUM_OF_CPLD1_CHANS 0x0 +#define NUM_OF_CPLD2_CHANS 0x10 +#define NUM_OF_CPLD3_CHANS 0x10 +#define NUM_OF_ALL_CPLD_CHANS (NUM_OF_CPLD2_CHANS + NUM_OF_CPLD3_CHANS) +#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +enum cpld_mux_type { + as6712_32x_cpld2, + as6712_32x_cpld3, + as6712_32x_cpld1 +}; + +struct accton_i2c_cpld_mux { + enum cpld_mux_type type; + struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; + u8 last_chan; /* last register value */ +}; + +struct chip_desc { + u8 nchans; + u8 deselectChan; +}; + +/* Provide specs for the PCA954x types we know about */ +static const struct chip_desc chips[] = { + [as6712_32x_cpld1] = { + .nchans = NUM_OF_CPLD1_CHANS, + .deselectChan = NUM_OF_CPLD1_CHANS, + }, + [as6712_32x_cpld2] = { + .nchans = NUM_OF_CPLD2_CHANS, + .deselectChan = NUM_OF_CPLD2_CHANS, + }, + [as6712_32x_cpld3] = { + .nchans = NUM_OF_CPLD3_CHANS, + .deselectChan = NUM_OF_CPLD3_CHANS, + } +}; + +static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { + { "as6712_32x_cpld1", as6712_32x_cpld1 }, + { "as6712_32x_cpld2", as6712_32x_cpld2 }, + { "as6712_32x_cpld3", as6712_32x_cpld3 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); + +/* Write to mux register. 
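   (The select/deselect callbacks below are invoked by the i2c mux core
   with the parent adapter already locked, which is why this helper goes
   through adap->algo->smbus_xfer directly.)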
Don't use i2c_transfer()/i2c_smbus_xfer() + for this as they will try to lock adapter a second time */ +static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, + struct i2c_client *client, u8 val) +{ +#if 0 + int ret = -ENODEV; + + //if (adap->algo->master_xfer) { + if (0) + struct i2c_msg msg; + char buf[2]; + + msg.addr = client->addr; + msg.flags = 0; + msg.len = 2; + buf[0] = 0x2; + buf[1] = val; + msg.buf = buf; + ret = adap->algo->master_xfer(adap, &msg, 1); + } + else { + union i2c_smbus_data data; + ret = adap->algo->smbus_xfer(adap, client->addr, + client->flags, + I2C_SMBUS_WRITE, + 0x2, I2C_SMBUS_BYTE, &data); + } + + return ret; +#else + unsigned long orig_jiffies; + unsigned short flags; + union i2c_smbus_data data; + int try; + s32 res = -EIO; + + data.byte = val; + flags = client->flags; + flags &= I2C_M_TEN | I2C_CLIENT_PEC; + + if (adap->algo->smbus_xfer) { + /* Retry automatically on arbitration loss */ + orig_jiffies = jiffies; + for (res = 0, try = 0; try <= adap->retries; try++) { + res = adap->algo->smbus_xfer(adap, client->addr, flags, + I2C_SMBUS_WRITE, 0x2, + I2C_SMBUS_BYTE_DATA, &data); + if (res != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + adap->timeout)) + break; + } + } + + return res; +#endif +} + +static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + u8 regval; + int ret = 0; + regval = chan; + + /* Only select the channel if its different from the last channel */ + if (data->last_chan != regval) { + ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); + data->last_chan = regval; + } + + return ret; +} + +static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + + /* Deselect active channel */ + data->last_chan = chips[data->type].deselectChan; + + return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); +} + +static void accton_i2c_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void accton_i2c_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) +{ + u8 reg = 0x1; + struct i2c_client *client; + int len; + + client = to_i2c_client(dev); + len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); + + return len; +} + +static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); + +/* + * I2C init/probing/exit functions + */ +static int accton_i2c_cpld_mux_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); + int chan=0; + struct accton_i2c_cpld_mux 
*data; + int ret = -ENODEV; + + if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) + goto err; + + data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err; + } + + i2c_set_clientdata(client, data); + +#if 0 + /* Write the mux register at addr to verify + * that the mux is in fact present. + */ + if (i2c_smbus_write_byte(client, 0) < 0) { + dev_warn(&client->dev, "probe failed\n"); + goto exit_free; + } +#endif + + data->type = id->driver_data; + + if (data->type == as6712_32x_cpld2 || data->type == as6712_32x_cpld3) { + data->last_chan = chips[data->type].deselectChan; /* force the first selection */ + + /* Now create an adapter for each channel */ + for (chan = 0; chan < chips[data->type].nchans; chan++) { + data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, + I2C_CLASS_HWMON | I2C_CLASS_SPD, + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); + + if (data->virt_adaps[chan] == NULL) { + ret = -ENODEV; + dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); + goto virt_reg_failed; + } + } + + dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", + chan, client->name); + } + + accton_i2c_cpld_add_client(client); + + ret = sysfs_create_file(&client->dev.kobj, &ver.attr); + if (ret) + goto virt_reg_failed; + + return 0; + +virt_reg_failed: + for (chan--; chan >= 0; chan--) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + } + kfree(data); +err: + return ret; +} + +static int accton_i2c_cpld_mux_remove(struct i2c_client *client) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + const struct chip_desc *chip = &chips[data->type]; + int chan; + + sysfs_remove_file(&client->dev.kobj, &ver.attr); + + for (chan = 0; chan < chip->nchans; ++chan) { + if (data->virt_adaps[chan]) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + data->virt_adaps[chan] = NULL; + } + } + + kfree(data); + accton_i2c_cpld_remove_client(client); + + return 0; +} + +int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_read_byte_data(cpld_node->client, reg); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as6712_32x_i2c_cpld_read); + +int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as6712_32x_i2c_cpld_write); + +static struct i2c_driver accton_i2c_cpld_mux_driver = { + .driver = { + .name = "as6712_32x_cpld", + .owner = THIS_MODULE, + }, + .probe = accton_i2c_cpld_mux_probe, + .remove = accton_i2c_cpld_mux_remove, + .id_table = accton_i2c_cpld_mux_id, +}; + +static int __init accton_i2c_cpld_mux_init(void) +{ + mutex_init(&list_lock); + return i2c_add_driver(&accton_i2c_cpld_mux_driver); +} + +static void __exit 
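/*
 * as6712_32x_i2c_cpld_read()/_write() above are the access path used by
 * the companion platform drivers in this patch (the fan and LED drivers
 * declare them extern): they look up the CPLD client registered at the
 * given I2C address in cpld_client_list and issue a plain SMBus
 * byte-data transfer.
 */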
accton_i2c_cpld_mux_exit(void) +{ + i2c_del_driver(&accton_i2c_cpld_mux_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_i2c_cpld_mux_init); +module_exit(accton_i2c_cpld_mux_exit); + + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-fan.c new file mode 100644 index 00000000..b04aec92 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-fan.c @@ -0,0 +1,434 @@ +/* + * A hwmon driver for the Accton as6712 32x fan contrl + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAN_MAX_NUMBER 5 +#define FAN_SPEED_CPLD_TO_RPM_STEP 150 +#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 +#define FAN_DUTY_CYCLE_MIN 0 /* 10% ??*/ +#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ + +#define CPLD_REG_FAN_STATUS_OFFSET 0xC +#define CPLD_REG_FANR_STATUS_OFFSET 0x17 +#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E + +#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 +#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 +#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 +#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 +#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 + +#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 +#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 +#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A +#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B +#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C + +#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD + +#define CPLD_FAN1_INFO_BIT_MASK 0x1 +#define CPLD_FAN2_INFO_BIT_MASK 0x2 +#define CPLD_FAN3_INFO_BIT_MASK 0x4 +#define CPLD_FAN4_INFO_BIT_MASK 0x8 +#define CPLD_FAN5_INFO_BIT_MASK 0x10 + +#define PROJECT_NAME + +#define DEBUG_MODE 0 + +#if (DEBUG_MODE == 1) + #define DEBUG_PRINT(format, ...) printk(format, __VA_ARGS__) +#else + #define DEBUG_PRINT(format, ...) 
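/*
 * Fan CPLD interface used below (all on CPLD address 0x60, per the
 * defines above): the per-fan speed registers 0x10-0x14 and the FANR
 * registers 0x18-0x1C (the second fan of each tray) are scaled by
 * FAN_SPEED_CPLD_TO_RPM_STEP (150) to get RPM, and the PWM register 0xD
 * takes the duty-cycle percentage divided by
 * FAN_SPEED_PRECENT_TO_CPLD_STEP (5).
 */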
+#endif + +static struct accton_as6712_32x_fan *fan_data = NULL; + +struct accton_as6712_32x_fan { + struct platform_device *pdev; + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ + u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ + u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ + u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ + u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ + u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ +}; + +/*******************/ +#define MAKE_FAN_MASK_OR_REG(name,type) \ + CPLD_FAN##type##1_##name, \ + CPLD_FAN##type##2_##name, \ + CPLD_FAN##type##3_##name, \ + CPLD_FAN##type##4_##name, \ + CPLD_FAN##type##5_##name, + +/* fan related data + */ +static const u8 fan_info_mask[] = { + MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) +}; + +static const u8 fan_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) +}; + +static const u8 fanr_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) +}; + +/*******************/ +#define DEF_FAN_SET(id) \ + FAN##id##_FAULT, \ + FAN##id##_SPEED, \ + FAN##id##_DUTY_CYCLE, \ + FAN##id##_DIRECTION, \ + FANR##id##_FAULT, \ + FANR##id##_SPEED, + +enum sysfs_fan_attributes { + DEF_FAN_SET(1) + DEF_FAN_SET(2) + DEF_FAN_SET(3) + DEF_FAN_SET(4) + DEF_FAN_SET(5) +}; +/*******************/ +static void accton_as6712_32x_fan_update_device(struct device *dev); +static int accton_as6712_32x_fan_read_value(u8 reg); +static int accton_as6712_32x_fan_write_value(u8 reg, u8 value); + +static ssize_t fan_set_duty_cycle(struct device *dev, + struct device_attribute *da,const char *buf, size_t count); +static ssize_t fan_show_value(struct device *dev, + struct device_attribute *da, char *buf); + +extern int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + + +/*******************/ +#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ + fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); + +#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) + +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) +/*******************/ + +#define _MAKE_FAN_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ + &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ + 
&sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, + +#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) + +static struct attribute *accton_as6712_32x_fan_attributes[] = { + /* fan related attributes */ + MAKE_FAN_ATTR(PROJECT_NAME,1) + MAKE_FAN_ATTR(PROJECT_NAME,2) + MAKE_FAN_ATTR(PROJECT_NAME,3) + MAKE_FAN_ATTR(PROJECT_NAME,4) + MAKE_FAN_ATTR(PROJECT_NAME,5) + NULL +}; +/*******************/ + +/* fan related functions + */ +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + ssize_t ret = 0; + int data_index, type_index; + + accton_as6712_32x_fan_update_device(dev); + + if (fan_data->valid == 0) { + return ret; + } + + type_index = attr->index%FAN2_FAULT; + data_index = attr->index/FAN2_FAULT; + + switch (type_index) { + case FAN1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->status[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DUTY_CYCLE: + ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DIRECTION: + ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + default: + DEBUG_PRINT("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); + break; + } + + return ret; +} +/*******************/ +static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) { + + int error, value; + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) + return -EINVAL; + + accton_as6712_32x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); + + fan_data->valid = 0; + + return count; +} + +static const struct attribute_group accton_as6712_32x_fan_group = { + .attrs = accton_as6712_32x_fan_attributes, +}; + +static int accton_as6712_32x_fan_read_value(u8 reg) +{ + return as6712_32x_i2c_cpld_read(0x60, reg); +} + +static int accton_as6712_32x_fan_write_value(u8 reg, u8 value) +{ + return as6712_32x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as6712_32x_fan_update_device(struct device *dev) +{ + int speed, r_speed, fault, r_fault, direction, ctrl_speed; + int i; + + mutex_lock(&fan_data->update_lock); + + DEBUG_PRINT("Starting accton_as6712_32x_fan update \n"); + + if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { + /* do nothing */ + goto _exit; + } + + fan_data->valid = 0; + + DEBUG_PRINT("Starting accton_as6712_32x_fan update 2 \n"); + + fault = 
accton_as6712_32x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); + r_fault = accton_as6712_32x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); + direction = accton_as6712_32x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); + ctrl_speed = accton_as6712_32x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); + + if ( (fault < 0) || (r_fault < 0) || (ctrl_speed < 0) ) + { + DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + DEBUG_PRINT("[fan:] fault:%d, r_fault=%d, ctrl_speed=%d \n",fault, r_fault, ctrl_speed); + + for (i = 0; i < FAN_MAX_NUMBER; i++) + { + /* Update fan data + */ + + /* fan fault + * 0: normal, 1:abnormal + * Each FAN-tray module has two fans. + */ + fan_data->status[i] = (fault & fan_info_mask[i]) >> i; + DEBUG_PRINT("[fan%d:] fail=%d \n",i, fan_data->status[i]); + + fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; + fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; + fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; + + /* fan speed + */ + speed = accton_as6712_32x_fan_read_value(fan_speed_reg[i]); + r_speed = accton_as6712_32x_fan_read_value(fanr_speed_reg[i]); + if ( (speed < 0) || (r_speed < 0) ) + { + DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + DEBUG_PRINT("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); + + fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; + fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; + } + + /* finish to update */ + fan_data->last_updated = jiffies; + fan_data->valid = 1; + +_exit: + mutex_unlock(&fan_data->update_lock); +} + +static int accton_as6712_32x_fan_probe(struct platform_device *pdev) +{ + int status = -1; + + /* Register sysfs hooks */ + status = sysfs_create_group(&pdev->dev.kobj, &accton_as6712_32x_fan_group); + if (status) { + goto exit; + + } + + fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(fan_data->hwmon_dev)) { + status = PTR_ERR(fan_data->hwmon_dev); + goto exit_remove; + } + + dev_info(&pdev->dev, "accton_as6712_32x_fan\n"); + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &accton_as6712_32x_fan_group); +exit: + return status; +} + +static int accton_as6712_32x_fan_remove(struct platform_device *pdev) +{ + hwmon_device_unregister(fan_data->hwmon_dev); + sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as6712_32x_fan_group); + + return 0; +} + +#define DRVNAME "as6712_32x_fan" + +static struct platform_driver accton_as6712_32x_fan_driver = { + .probe = accton_as6712_32x_fan_probe, + .remove = accton_as6712_32x_fan_remove, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as6712_32x_fan_init(void) +{ + int ret; + + extern int platform_accton_as6712_32x(void); + if(!platform_accton_as6712_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as6712_32x_fan_driver); + if (ret < 0) { + goto exit; + } + + fan_data = kzalloc(sizeof(struct accton_as6712_32x_fan), GFP_KERNEL); + if (!fan_data) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as6712_32x_fan_driver); + goto exit; + } + + mutex_init(&fan_data->update_lock); + fan_data->valid = 0; + + fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(fan_data->pdev)) { + ret = PTR_ERR(fan_data->pdev); + platform_driver_unregister(&accton_as6712_32x_fan_driver); + kfree(fan_data); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as6712_32x_fan_exit(void) +{ + 
platform_device_unregister(fan_data->pdev); + platform_driver_unregister(&accton_as6712_32x_fan_driver); + kfree(fan_data); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as6712_32x_fan driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_as6712_32x_fan_init); +module_exit(accton_as6712_32x_fan_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-leds.c new file mode 100644 index 00000000..ded3baca --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-leds.c @@ -0,0 +1,617 @@ +/* + * A LED driver for the accton_as6712_32x_led + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include + +extern int as6712_32x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "as6712_32x_led" + +struct accton_as6712_32x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED + 1 = PSU1/PSU2 LED + 2 = FAN1-4 LED + 3 = FAN5-6 LED */ +}; + +static struct accton_as6712_32x_led_data *ledctl = NULL; + +/* LED related data + */ +#define LED_TYPE_PSU1_REG_MASK 0x03 +#define LED_MODE_PSU1_GREEN_MASK 0x02 +#define LED_MODE_PSU1_AMBER_MASK 0x01 +#define LED_MODE_PSU1_OFF_MASK 0x03 +#define LED_MODE_PSU1_AUTO_MASK 0x00 + +#define LED_TYPE_PSU2_REG_MASK 0x0C +#define LED_MODE_PSU2_GREEN_MASK 0x08 +#define LED_MODE_PSU2_AMBER_MASK 0x04 +#define LED_MODE_PSU2_OFF_MASK 0x0C +#define LED_MODE_PSU2_AUTO_MASK 0x00 + +#define LED_TYPE_DIAG_REG_MASK 0x0C +#define LED_MODE_DIAG_GREEN_MASK 0x08 +#define LED_MODE_DIAG_AMBER_MASK 0x04 +#define LED_MODE_DIAG_OFF_MASK 0x0C +#define LED_MODE_DIAG_BLINK_MASK 0x48 + +#define LED_TYPE_FAN_REG_MASK 0x03 +#define LED_MODE_FAN_GREEN_MASK 0x02 +#define LED_MODE_FAN_AMBER_MASK 0x01 +#define LED_MODE_FAN_OFF_MASK 0x03 +#define LED_MODE_FAN_AUTO_MASK 0x00 + +#define LED_TYPE_FAN1_REG_MASK 0x03 +#define LED_TYPE_FAN2_REG_MASK 0xC0 +#define LED_TYPE_FAN3_REG_MASK 0x30 +#define LED_TYPE_FAN4_REG_MASK 0x0C +#define LED_TYPE_FAN5_REG_MASK 0x03 + +#define LED_MODE_FANX_GREEN_MASK 
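Both the fan module above and the LED module that follows use the same init ordering: register the platform driver, allocate the private data, then register a simple platform device, undoing the earlier steps on any failure. The drivers unwind inline; the goto form below is just a compact sketch of the same ordering, with hypothetical names (example_ctx, example_driver).

    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    static struct example_ctx { struct platform_device *pdev; } *ctx;

    static struct platform_driver example_driver = {
        .driver = { .name = "example" },
    };

    static int __init example_init(void)
    {
        int ret;

        ret = platform_driver_register(&example_driver);
        if (ret < 0)
            return ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
            ret = -ENOMEM;
            goto err_driver;
        }

        ctx->pdev = platform_device_register_simple("example", -1, NULL, 0);
        if (IS_ERR(ctx->pdev)) {
            ret = PTR_ERR(ctx->pdev);
            goto err_free;
        }
        return 0;

    err_free:
        kfree(ctx);
    err_driver:
        platform_driver_unregister(&example_driver);
        return ret;
    }

Module exit then tears the three pieces down in the opposite order, exactly as the exit function above does: unregister the device, unregister the driver, free the data.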
0x01 +#define LED_MODE_FANX_RED_MASK 0x02 +#define LED_MODE_FANX_OFF_MASK 0x00 + +#define LED_TYPE_LOC_REG_MASK 0x30 +#define LED_MODE_LOC_ON_MASK 0x00 +#define LED_MODE_LOC_OFF_MASK 0x10 +#define LED_MODE_LOC_BLINK_MASK 0x20 + +static const u8 led_reg[] = { + 0xA, /* LOC/DIAG/FAN LED*/ + 0xB, /* PSU1/PSU2 LED */ + 0xE, /* FAN2-5 LED */ + 0xF, /* FAN1 LED */ +}; + +enum led_type { + LED_TYPE_PSU1, + LED_TYPE_PSU2, + LED_TYPE_DIAG, + LED_TYPE_FAN, + LED_TYPE_FAN1, + LED_TYPE_FAN2, + LED_TYPE_FAN3, + LED_TYPE_FAN4, + LED_TYPE_FAN5, + LED_TYPE_LOC +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_AMBER, + LED_MODE_RED, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER_BLINK, + LED_MODE_RED_BLINK, + LED_MODE_AUTO, +}; + +struct led_type_mode { + enum led_type type; + int type_mask; + enum led_light_mode mode; + int mode_mask; +}; + +struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN_BLINK, LED_MODE_DIAG_BLINK_MASK}, +{LED_TYPE_LOC, 
LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} +}; + + +struct fanx_info_s { + u8 cname; /* device name */ + enum led_type type; + u8 reg_id; /* map to led_reg & reg_val */ +}; + +static struct fanx_info_s fanx_info[] = { + {'1', LED_TYPE_FAN1, 3}, + {'2', LED_TYPE_FAN2, 2}, + {'3', LED_TYPE_FAN3, 2}, + {'4', LED_TYPE_FAN4, 2}, + {'5', LED_TYPE_FAN5, 2}, +}; + +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if (type == LED_TYPE_DIAG) + { /* special case : bit 6 - meaning blinking */ + if (0x40 & reg_val) + return LED_MODE_GREEN_BLINK; + } + if ((led_type_mode_data[i].type_mask & reg_val) == + led_type_mode_data[i].mode_mask) + { + return led_type_mode_data[i].mode; + } + } + + return 0; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + if (type == LED_TYPE_DIAG) + { + if (mode == LED_MODE_GREEN_BLINK) + { /* special case : bit 6 - meaning blinking */ + reg_val = 0x48 | (reg_val & ~0x4C); + break; + } + else + { /* for diag led, other case must cancel bit 6 first */ + reg_val = reg_val & ~0x40; + } + } + reg_val = led_type_mode_data[i].mode_mask | + (reg_val & (~led_type_mode_data[i].type_mask)); + break; + } + + return reg_val; +} + +static int accton_as6712_32x_led_read_value(u8 reg) +{ + return as6712_32x_i2c_cpld_read(0x60, reg); +} + +static int accton_as6712_32x_led_write_value(u8 reg, u8 value) +{ + return as6712_32x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as6712_32x_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as6712_32x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as6712_32x_led_read_value(led_reg[i]); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as6712_32x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as6712_32x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as6712_32x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as6712_32x_led_psu_1_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); +} + +static enum led_brightness accton_as6712_32x_led_psu_1_get(struct led_classdev *cdev) +{ + 
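The led_type_mode_data[] table above reduces every colour change to one read-modify-write on the shared LED register: clear the bits selected by the type mask, then OR in the mode mask. A small sketch of one case, using the PSU1 masks defined earlier in this file:

    #include <linux/types.h>

    /* Mirror of led_light_mode_to_reg_val() for a single case: set the
     * PSU1 field of the shared register to "green". */
    static u8 set_psu1_green(u8 reg_val)
    {
        reg_val &= ~LED_TYPE_PSU1_REG_MASK;   /* clear the PSU1 field (bits 0-1) */
        reg_val |= LED_MODE_PSU1_GREEN_MASK;  /* 0x02 == green */
        return reg_val;
    }

The DIAG LED is the one exception, since bit 6 selects blinking and has to be cleared explicitly for the non-blinking modes, as the helper above shows.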
accton_as6712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); +} + +static void accton_as6712_32x_led_psu_2_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); +} + +static enum led_brightness accton_as6712_32x_led_psu_2_get(struct led_classdev *cdev) +{ + accton_as6712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); +} + +static void accton_as6712_32x_led_fan_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); +} + +static enum led_brightness accton_as6712_32x_led_fan_get(struct led_classdev *cdev) +{ + accton_as6712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); +} + + +static void accton_as6712_32x_led_fanx_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (led_cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); + return; + } + } +} + + +static enum led_brightness accton_as6712_32x_led_fanx_get(struct led_classdev *cdev) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as6712_32x_led_update(); + return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); + } + } + + + return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); +} + + +static void accton_as6712_32x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); +} + +static enum led_brightness accton_as6712_32x_led_diag_get(struct led_classdev *cdev) +{ + accton_as6712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as6712_32x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); +} + +static enum led_brightness accton_as6712_32x_led_loc_get(struct led_classdev *cdev) +{ + accton_as6712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static struct led_classdev accton_as6712_32x_leds[] = { + [LED_TYPE_PSU1] = { + .name = "accton_as6712_32x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_psu_1_set, + .brightness_get = accton_as6712_32x_led_psu_1_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as6712_32x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_psu_2_set, + .brightness_get = accton_as6712_32x_led_psu_2_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN] = { + .name = "accton_as6712_32x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_fan_set, + .brightness_get = accton_as6712_32x_led_fan_get, + .flags = 
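The fan1 through fan5 LEDs share one pair of set/get handlers; those handlers work out which tray they were invoked for from the last character of the classdev name and then look up the register index in fanx_info[]. A sketch of that lookup (the handlers above open-code it in their loops):

    #include <linux/kernel.h>   /* ARRAY_SIZE */
    #include <linux/string.h>

    static const struct fanx_info_s *fanx_lookup(const char *cdev_name)
    {
        size_t n = strlen(cdev_name);
        int i;

        for (i = 0; i < ARRAY_SIZE(fanx_info); i++) {
            /* names end in the tray number: "...::fan1" .. "...::fan5" */
            if (n && cdev_name[n - 1] == fanx_info[i].cname)
                return &fanx_info[i];
        }
        return NULL;   /* not a per-tray fan LED */
    }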
LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN1] = { + .name = "accton_as6712_32x_led::fan1", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_fanx_set, + .brightness_get = accton_as6712_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN2] = { + .name = "accton_as6712_32x_led::fan2", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_fanx_set, + .brightness_get = accton_as6712_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN3] = { + .name = "accton_as6712_32x_led::fan3", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_fanx_set, + .brightness_get = accton_as6712_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN4] = { + .name = "accton_as6712_32x_led::fan4", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_fanx_set, + .brightness_get = accton_as6712_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN5] = { + .name = "accton_as6712_32x_led::fan5", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_fanx_set, + .brightness_get = accton_as6712_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_DIAG] = { + .name = "accton_as6712_32x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_diag_set, + .brightness_get = accton_as6712_32x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_LOC] = { + .name = "accton_as6712_32x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as6712_32x_led_loc_set, + .brightness_get = accton_as6712_32x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as6712_32x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { + led_classdev_suspend(&accton_as6712_32x_leds[i]); + } + + return 0; +} + +static int accton_as6712_32x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { + led_classdev_resume(&accton_as6712_32x_leds[i]); + } + + return 0; +} + +static int accton_as6712_32x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as6712_32x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as6712_32x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as6712_32x_leds[i]); + } + } + + return ret; +} + +static int accton_as6712_32x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { + led_classdev_unregister(&accton_as6712_32x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as6712_32x_led_driver = { + .probe = accton_as6712_32x_led_probe, + .remove = accton_as6712_32x_led_remove, + .suspend = accton_as6712_32x_led_suspend, + .resume = accton_as6712_32x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init 
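When led_classdev_register() fails partway through the array in the probe routine above, only the classdevs that were actually registered need to be torn down. A minimal sketch of that partial unwind, with the inner loop indexed by j over the first i entries:

    #include <linux/leds.h>

    static int register_all_leds(struct device *parent,
                                 struct led_classdev *leds, int count)
    {
        int i, ret = 0;

        for (i = 0; i < count; i++) {
            ret = led_classdev_register(parent, &leds[i]);
            if (ret < 0)
                break;
        }

        if (ret < 0) {
            int j;

            for (j = 0; j < i; j++)          /* undo only what succeeded */
                led_classdev_unregister(&leds[j]);
        }
        return ret;
    }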
accton_as6712_32x_led_init(void) +{ + int ret; + + extern int platform_accton_as6712_32x(void); + if(!platform_accton_as6712_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as6712_32x_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as6712_32x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as6712_32x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as6712_32x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as6712_32x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as6712_32x_led_driver); + kfree(ledctl); +} + +module_init(accton_as6712_32x_led_init); +module_exit(accton_as6712_32x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as6712_32x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-psu.c new file mode 100644 index 00000000..bae3afe0 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-psu.c @@ -0,0 +1,304 @@ +/* + * An hwmon driver for accton as6712_32x Power Module + * + * Copyright (C) 2014 Accton Technology Corporation. + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as6712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as6712_32x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[14]; /* Model name, read from eeprom */ +}; + +static struct as6712_32x_psu_data *as6712_32x_psu_update_device(struct device *dev); + +enum as6712_32x_psu_sysfs_attributes { + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as6712_32x_psu_attributes[] = { + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as6712_32x_psu_data *data = as6712_32x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((data->index-1)*4) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = data->status >> ((data->index-1)*4 + 1) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as6712_32x_psu_data *data = as6712_32x_psu_update_device(dev); + + return sprintf(buf, "%s\n", data->model_name); +} + +static const struct attribute_group as6712_32x_psu_group = { + .attrs = as6712_32x_psu_attributes, +}; + +static int as6712_32x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as6712_32x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as6712_32x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as6712_32x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x50 || client->addr == 0x38) { + data->index = 1; + } + else if (client->addr == 0x53 || client->addr == 0x3b) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), 
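The PSU status byte decoded by show_status() above packs both supplies into one register: PSU1 uses the low nibble and PSU2 the high nibble, with the present bit active-low and the power-good bit active-high. A small sketch of that decode:

    #include <linux/types.h>

    static void decode_psu_status(u8 status, int psu_index /* 1 or 2 */,
                                  int *present, int *power_good)
    {
        int shift = (psu_index - 1) * 4;

        *present    = !((status >> shift) & 0x1);     /* CPLD reports 0 == present */
        *power_good =  (status >> (shift + 1)) & 0x1;
    }

The index itself is derived from the I2C address at probe time, as shown above: 0x50/0x38 map to PSU 1 and 0x53/0x3b to PSU 2.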
client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as6712_32x_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as6712_32x_psu_remove(struct i2c_client *client) +{ + struct as6712_32x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as6712_32x_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as6712_32x_psu_id[] = { + { "as6712_32x_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as6712_32x_psu_id); + +static struct i2c_driver as6712_32x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as6712_32x_psu", + }, + .probe = as6712_32x_psu_probe, + .remove = as6712_32x_psu_remove, + .id_table = as6712_32x_psu_id, + .address_list = normal_i2c, +}; + +static int as6712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = 0; + int retry_count = 5; + + while (retry_count) { + retry_count--; + + result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) { + msleep(10); + continue; + } + + if (unlikely(result != data_len)) { + result = -EIO; + msleep(10); + continue; + } + + result = 0; + break; + } + + return result; +} + +static struct as6712_32x_psu_data *as6712_32x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as6712_32x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status; + int present = 0; + + dev_dbg(&client->dev, "Starting as6712_32x update\n"); + + /* Read psu status */ + status = as6712_32x_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + /* Read model name */ + memset(data->model_name, 0, sizeof(data->model_name)); + present = !(data->status >> ((data->index-1)*4) & 0x1); + + if (present) { + u8 command; + int model_name_len = 0; + + if (client->addr == 0x38 || client->addr == 0x3b) { + /* cpr_4011_4mxx AC power */ + command = 0x26; + model_name_len = 13; + } + else { /* 0x50 & 0x53 */ + /* um400d01x DC power */ + command = 0x50; + model_name_len = 13; + } + + status = as6712_32x_psu_read_block(client,command,data->model_name, + model_name_len); + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[model_name_len] = '\0'; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as6712_32x_psu_init(void) +{ + extern int platform_accton_as6712_32x(void); + if(!platform_accton_as6712_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as6712_32x_psu_driver); +} + +static void __exit as6712_32x_psu_exit(void) +{ + i2c_del_driver(&as6712_32x_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as6712_32x_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as6712_32x_psu_init); +module_exit(as6712_32x_psu_exit); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-sfp.c new file mode 100644 index 00000000..bb064b46 --- /dev/null +++ 
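as6712_32x_psu_read_block() above wraps the SMBus block read in a bounded retry: up to five attempts with a 10 ms sleep between them, treating a short read as -EIO. The same pattern applied to a single byte read, as a sketch (the helper name is made up):

    #include <linux/delay.h>
    #include <linux/i2c.h>

    static int smbus_read_byte_retry(struct i2c_client *client, u8 command)
    {
        int retries = 5;
        int ret = -EIO;

        while (retries--) {
            ret = i2c_smbus_read_byte_data(client, command);
            if (ret >= 0)
                return ret;          /* success: the byte value */
            msleep(10);              /* brief back-off before retrying */
        }
        return ret;                  /* last error seen */
    }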
b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-sfp.c @@ -0,0 +1,377 @@ +/* + * An hwmon driver for accton as6712_32x sfp + * + * Copyright (C) 2014 Accton Technology Corporation. + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT_INDEX(i) (1ULL << (i)) + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as6712_32x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u64 is_present; /* present status */ +}; + +static struct as6712_32x_sfp_data *as6712_32x_sfp_update_device(struct device *dev, int update_eeprom); +static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf); +static int as6712_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data); +extern int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); +//extern int accton_i2c_cpld_mux_get_index(int adap_index); + +enum as6712_32x_sfp_sysfs_attributes { + SFP_IS_PRESENT, + SFP_EEPROM, + SFP_PORT_NUMBER, + SFP_IS_PRESENT_ALL +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); + +static struct attribute *as6712_32x_sfp_attributes[] = { + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as6712_32x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->port+1); +} + +/* Error-check the CPLD read results. 
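The SFP driver above keeps presence for all 32 front ports in a single u64, using BIT_INDEX(i) == 1ULL << i; the CPLD reports presence active-low, so a set bit means the module is absent. A one-line helper showing how a port is tested against that bitmap, as show_present() does:

    #include <linux/types.h>

    static int sfp_port_present(u64 is_present, int port /* 0-based */)
    {
        return (is_present & BIT_INDEX(port)) ? 0 : 1;   /* 1 == module present */
    }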
*/ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ +do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ +} while(0) + +static ssize_t show_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + + if(attr->index == SFP_IS_PRESENT_ALL) { + int values[4]; + /* + * Report the SFP_PRESENCE status for all ports. + */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], as6712_32x_i2c_cpld_read(0x62, 0xA), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], as6712_32x_i2c_cpld_read(0x62, 0xB), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], as6712_32x_i2c_cpld_read(0x64, 0xA), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], as6712_32x_i2c_cpld_read(0x64, 0xB), 1); + + /* Return values 1 -> 32 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], values[3]); + } + else { /* SFP_IS_PRESENT */ + u8 val; + struct as6712_32x_sfp_data *data = as6712_32x_sfp_update_device(dev, 0); + + if (!data->valid) { + return -EIO; + } + + val = (data->is_present & BIT_INDEX(data->port)) ? 0 : 1; + return sprintf(buf, "%d", val); + } +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as6712_32x_sfp_data *data = as6712_32x_sfp_update_device(dev, 1); + + if (!data->valid) { + return 0; + } + + if ((data->is_present & BIT_INDEX(data->port)) != 0) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as6712_32x_sfp_group = { + .attrs = as6712_32x_sfp_attributes, +}; + +static int as6712_32x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as6712_32x_sfp_data *data; + int status; + + extern int platform_accton_as6712_32x(void); + if(!platform_accton_as6712_32x()) { + return -ENODEV; + } + + if (!i2c_check_functionality(client->adapter, /*I2C_FUNC_SMBUS_BYTE_DATA | */I2C_FUNC_SMBUS_WORD_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as6712_32x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as6712_32x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as6712_32x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as6712_32x_sfp_remove(struct i2c_client *client) +{ + struct as6712_32x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as6712_32x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as6712_32x_sfp1, as6712_32x_sfp2, as6712_32x_sfp3, as6712_32x_sfp4, +as6712_32x_sfp5, as6712_32x_sfp6, as6712_32x_sfp7, as6712_32x_sfp8, +as6712_32x_sfp9, as6712_32x_sfp10, as6712_32x_sfp11,as6712_32x_sfp12, +as6712_32x_sfp13, 
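The VALIDATED_READ() macro above is wrapped in do { ... } while(0) so it behaves like a single statement; it bails out of the calling show function with "READ ERROR" on a negative return, optionally inverts the value because the presence bits are active-low, and masks the result to one register's worth of ports. Its effect for one register, written as a plain helper:

    #include <linux/types.h>

    static int read_present_bits(unsigned short cpld_addr, u8 reg)
    {
        int v = as6712_32x_i2c_cpld_read(cpld_addr, reg);

        if (v < 0)
            return v;          /* caller reports "READ ERROR" */
        return (~v) & 0xFF;    /* invert: a set bit now means "present" */
    }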
as6712_32x_sfp14, as6712_32x_sfp15,as6712_32x_sfp16, +as6712_32x_sfp17, as6712_32x_sfp18, as6712_32x_sfp19,as6712_32x_sfp20, +as6712_32x_sfp21, as6712_32x_sfp22, as6712_32x_sfp23,as6712_32x_sfp24, +as6712_32x_sfp25, as6712_32x_sfp26, as6712_32x_sfp27,as6712_32x_sfp28, +as6712_32x_sfp29, as6712_32x_sfp30, as6712_32x_sfp31,as6712_32x_sfp32 +}; + +static const struct i2c_device_id as6712_32x_sfp_id[] = { +{ "as6712_32x_sfp1", as6712_32x_sfp1 }, { "as6712_32x_sfp2", as6712_32x_sfp2 }, +{ "as6712_32x_sfp3", as6712_32x_sfp3 }, { "as6712_32x_sfp4", as6712_32x_sfp4 }, +{ "as6712_32x_sfp5", as6712_32x_sfp5 }, { "as6712_32x_sfp6", as6712_32x_sfp6 }, +{ "as6712_32x_sfp7", as6712_32x_sfp7 }, { "as6712_32x_sfp8", as6712_32x_sfp8 }, +{ "as6712_32x_sfp9", as6712_32x_sfp9 }, { "as6712_32x_sfp10", as6712_32x_sfp10 }, +{ "as6712_32x_sfp11", as6712_32x_sfp11 }, { "as6712_32x_sfp12", as6712_32x_sfp12 }, +{ "as6712_32x_sfp13", as6712_32x_sfp13 }, { "as6712_32x_sfp14", as6712_32x_sfp14 }, +{ "as6712_32x_sfp15", as6712_32x_sfp15 }, { "as6712_32x_sfp16", as6712_32x_sfp16 }, +{ "as6712_32x_sfp17", as6712_32x_sfp17 }, { "as6712_32x_sfp18", as6712_32x_sfp18 }, +{ "as6712_32x_sfp19", as6712_32x_sfp19 }, { "as6712_32x_sfp20", as6712_32x_sfp20 }, +{ "as6712_32x_sfp21", as6712_32x_sfp21 }, { "as6712_32x_sfp22", as6712_32x_sfp22 }, +{ "as6712_32x_sfp23", as6712_32x_sfp23 }, { "as6712_32x_sfp24", as6712_32x_sfp24 }, +{ "as6712_32x_sfp25", as6712_32x_sfp25 }, { "as6712_32x_sfp26", as6712_32x_sfp26 }, +{ "as6712_32x_sfp27", as6712_32x_sfp27 }, { "as6712_32x_sfp28", as6712_32x_sfp28 }, +{ "as6712_32x_sfp29", as6712_32x_sfp29 }, { "as6712_32x_sfp30", as6712_32x_sfp30 }, +{ "as6712_32x_sfp31", as6712_32x_sfp31 }, { "as6712_32x_sfp32", as6712_32x_sfp32 }, +{} +}; +MODULE_DEVICE_TABLE(i2c, as6712_32x_sfp_id); + + +static struct i2c_driver as6712_32x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as6712_32x_sfp", + }, + .probe = as6712_32x_sfp_probe, + .remove = as6712_32x_sfp_remove, + .id_table = as6712_32x_sfp_id, + .address_list = normal_i2c, +}; + +#if 0 +static int as6712_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) +{ + int result = i2c_smbus_read_byte_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u8)result; + result = 0; + +abort: + return result; +} +#endif + +static int as6712_32x_sfp_read_word(struct i2c_client *client, u8 command, u16 *data) +{ + int result = i2c_smbus_read_word_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u16)result; + result = 0; + +abort: + return result; +} + +#define ALWAYS_UPDATE 1 + +static struct as6712_32x_sfp_data *as6712_32x_sfp_update_device(struct device *dev, int update_eeprom) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as6712_32x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (ALWAYS_UPDATE || time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0, j = 0; + + data->valid = 0; + + /* Read present status of port 1~32 */ + data->is_present = 0; + + for (i = 0; i < 2; i++) { + for (j = 0; j < 2; j++) { + status = as6712_32x_i2c_cpld_read(0x62+i*2, 0xA+j); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x62+i*2, 0xA+j, 
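as6712_32x_sfp_read_word() above fetches the EEPROM two bytes at a time with i2c_smbus_read_word_data(); the update path that follows stores the low byte of each word at the lower EEPROM offset and the high byte after it. The loop, condensed into a sketch with the surrounding locking and cache handling trimmed:

    #include <linux/i2c.h>

    static int read_eeprom_words(struct i2c_client *client, u8 *buf, int len)
    {
        int i;

        for (i = 0; i + 1 < len; i += 2) {
            int w = i2c_smbus_read_word_data(client, i);

            if (w < 0)
                return w;
            buf[i]     = w & 0xff;   /* low byte maps to the lower offset */
            buf[i + 1] = w >> 8;
        }
        return 0;
    }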
status); + goto exit; + } + + data->is_present |= (u64)status << ((i*16) + (j*8)); + } + } + + if (update_eeprom) { + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if ((data->is_present & BIT_INDEX(data->port)) == 0) { + /* read eeprom */ + u16 eeprom_data; + for (i = 0; i < (sizeof(data->eeprom) / 2); i++) { + status = as6712_32x_sfp_read_word(client, i*2, &eeprom_data); + + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); + goto exit; + } + + data->eeprom[i*2] = eeprom_data & 0xff; + data->eeprom[i*2 + 1] = eeprom_data >> 8; + } + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +module_i2c_driver(as6712_32x_sfp_driver); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as6712_32x_sfp driver"); +MODULE_LICENSE("GPL"); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/PKG.yml index 2d66982c..cfd24152 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as6712-32x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as6712-32x-r0 MODULES=onl-platform-modules-x86-64-accton-as6712-32x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml index b3310b8d..f685f030 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml @@ -17,8 +17,8 @@ x86-64-accton-as6712-32x-r0: --parity=no --stop=1 - kernel: - <<: *kernel-3-2 + kernel: + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py index d1779f7d..9ac98c66 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py @@ -8,6 +8,9 @@ class OnlPlatform_x86_64_accton_as6712_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".6712.32" def baseconfig(self): + self.insmod('cpr_4011_4mxx', required=False) + for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: + self.insmod("x86-64-accton-as6712-32x-%s.ko" % m, required=False) ########### initialize I2C bus 0 ########### # initialize CPLD From 646b809358dd1a4c6d6e1640a83feb9510bea5a9 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Sun, 1 Jan 2017 23:13:28 +0000 Subject: [PATCH 228/255] Add arch. 
--- .../amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile index 3ea84a98..d750cf56 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile @@ -14,7 +14,7 @@ include $(ONL)/make/config.mk kernel: $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) - $(ONL)/tools/scripts/kmodbuild.sh linux-3.16.39-mbuild $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl + ARCH=x86_64 $(ONL)/tools/scripts/kmodbuild.sh linux-3.16.39-mbuild $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl clean: rm -rf linux-3.16* kernel-3.16* From f880d433a3ef28456eb9af4cf76a62f61f7d398c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:27:53 +0000 Subject: [PATCH 229/255] - Use the new search syntax for locating the device files Allows compatibility between kernels with different sysfs paths. --- .../onlp/builds/src/module/src/thermali.c | 81 +++++-------------- 1 file changed, 21 insertions(+), 60 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/thermali.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/thermali.c index dc6a5698..98d9a816 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/thermali.c @@ -2,7 +2,7 @@ * * * Copyright 2014 Big Switch Networks, Inc. - * Copyright 2015 Accton Technology Corporation. + * Copyright 2014 Accton Technology Corporation. 
* * Licensed under the Eclipse Public License, Version 1.0 (the * "License"); you may not use this file except in compliance @@ -30,10 +30,6 @@ #include #include "platform_lib.h" -#define prefix_path "/sys/bus/i2c/devices/" -#define filename "temp1_input" -#define LOCAL_DEBUG 0 - #define VALIDATE(_id) \ do { \ if(!ONLP_OID_IS_THERMAL(_id)) { \ @@ -41,19 +37,14 @@ } \ } while(0) -#define OPEN_READ_FILE(fd,fullpath,data,nbytes,len) \ - DEBUG_PRINT("[Debug][%s][%d][openfile: %s]\n", __FUNCTION__, __LINE__, fullpath); \ - if ((fd = open(fullpath, O_RDONLY)) == -1) \ - return ONLP_STATUS_E_INTERNAL; \ - if ((len = read(fd, r_data, nbytes)) <= 0){ \ - close(fd); \ - return ONLP_STATUS_E_INTERNAL;} \ - DEBUG_PRINT("[Debug][%s][%d][read data: %s]\n", __FUNCTION__, __LINE__, r_data); \ - if (close(fd) == -1) \ - return ONLP_STATUS_E_INTERNAL - -enum onlp_thermal_id +int +onlp_thermali_init(void) { + return ONLP_STATUS_OK; +} + + +enum onlp_thermal_id { THERMAL_RESERVED = 0, THERMAL_CPU_CORE, THERMAL_1_ON_MAIN_BROAD, @@ -64,24 +55,23 @@ enum onlp_thermal_id THERMAL_1_ON_PSU2, }; -static char* last_path[] = /* must map with onlp_thermal_id */ -{ +static char* devfiles[] = { /* must map with onlp_thermal_id */ "reserved", NULL, /* CPU Core */ - "38-0048/temp1_input", - "39-0049/temp1_input", - "40-004a/temp1_input", - "41-004b/temp1_input", - "35-003c/psu_temp1_input", - "36-003f/psu_temp1_input", + "/sys/bus/i2c/devices/38-0048*temp1_input", + "/sys/bus/i2c/devices/39-0049*temp1_input", + "/sys/bus/i2c/devices/40-004a*temp1_input", + "/sys/bus/i2c/devices/41-004b*temp1_input", + "/sys/bus/i2c/devices/35-003c*psu_temp1_input", + "/sys/bus/i2c/devices/36-003f*psu_temp1_input", }; static char* cpu_coretemp_files[] = { - "/sys/devices/platform/coretemp.0/temp2_input", - "/sys/devices/platform/coretemp.0/temp3_input", - "/sys/devices/platform/coretemp.0/temp4_input", - "/sys/devices/platform/coretemp.0/temp5_input", + "/sys/devices/platform/coretemp.0*temp2_input", + "/sys/devices/platform/coretemp.0*temp3_input", + "/sys/devices/platform/coretemp.0*temp4_input", + "/sys/devices/platform/coretemp.0*temp5_input", NULL, }; @@ -118,35 +108,13 @@ static onlp_thermal_info_t linfo[] = { } }; -/* - * This will be called to intiialize the thermali subsystem. - */ -int -onlp_thermali_init(void) -{ - return ONLP_STATUS_OK; -} - -/* - * Retrieve the information structure for the given thermal OID. - * - * If the OID is invalid, return ONLP_E_STATUS_INVALID. - * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. - * Otherwise, return ONLP_STATUS_OK with the OID's information. - * - * Note -- it is expected that you fill out the information - * structure even if the sensor described by the OID is not present. 
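The devfiles[] paths above use a "*" between the device directory and the attribute name so the same table keeps working across kernels whose sysfs layouts differ. As an illustration only, and not a claim about how onlp_file_read_int() actually performs its search, here is a userspace sketch that resolves such a pattern with glob(3) and reads an integer from the first match:

    #include <glob.h>
    #include <stdio.h>

    static int read_int_from_pattern(const char *pattern, int *value)
    {
        glob_t g;
        int rv = -1;

        if (glob(pattern, 0, NULL, &g) == 0 && g.gl_pathc > 0) {
            FILE *fp = fopen(g.gl_pathv[0], "r");   /* first match wins */

            if (fp) {
                if (fscanf(fp, "%d", value) == 1)
                    rv = 0;
                fclose(fp);
            }
        }
        globfree(&g);
        return rv;
    }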
- */ int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { - int fd, len, nbytes = 10, temp_base=1, local_id; - char r_data[10] = {0}; - char fullpath[50] = {0}; + int local_id; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - DEBUG_PRINT("\n[Debug][%s][%d][local_id: %d]", __FUNCTION__, __LINE__, local_id); /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; @@ -156,12 +124,5 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) return rv; } - /* get fullpath */ - sprintf(fullpath, "%s%s", prefix_path, last_path[local_id]); - - OPEN_READ_FILE(fd, fullpath, r_data, nbytes, len); - info->mcelsius = atoi(r_data) / temp_base; - DEBUG_PRINT("\n[Debug][%s][%d][save data: %d]\n", __FUNCTION__, __LINE__, info->mcelsius); - - return ONLP_STATUS_OK; + return onlp_file_read_int(&info->mcelsius, devfiles[local_id]); } From fc7bd1dd778cc16f99e8bed2476a6c16767ef443 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:35:20 +0000 Subject: [PATCH 230/255] Initial. --- .../any/kernels/modules/accton_i2c_cpld.c | 330 +++++++++ packages/base/any/kernels/modules/ym2651y.c | 631 ++++++++++++++++++ 2 files changed, 961 insertions(+) create mode 100644 packages/base/any/kernels/modules/accton_i2c_cpld.c create mode 100644 packages/base/any/kernels/modules/ym2651y.c diff --git a/packages/base/any/kernels/modules/accton_i2c_cpld.c b/packages/base/any/kernels/modules/accton_i2c_cpld.c new file mode 100644 index 00000000..7f6d2ea0 --- /dev/null +++ b/packages/base/any/kernels/modules/accton_i2c_cpld.c @@ -0,0 +1,330 @@ +/* + * A hwmon driver for the accton_i2c_cpld + * + * Copyright (C) 2013 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +/* Addresses scanned for accton_i2c_cpld + */ +static const unsigned short normal_i2c[] = { 0x31, 0x35, 0x60, 0x61, 0x62, I2C_CLIENT_END }; + +static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) +{ + int val = 0; + struct i2c_client *client = to_i2c_client(dev); + + val = i2c_smbus_read_byte_data(client, 0x1); + + if (val < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x1) err %d\n", client->addr, val); + } + + return sprintf(buf, "%d", val); +} + +static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); + +static void accton_i2c_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void accton_i2c_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static int accton_i2c_cpld_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_dbg(&client->dev, "i2c_check_functionality failed (0x%x)\n", client->addr); + status = -EIO; + goto exit; + } + + status = sysfs_create_file(&client->dev.kobj, &ver.attr); + if (status) { + goto exit; + } + + dev_info(&client->dev, "chip found\n"); + accton_i2c_cpld_add_client(client); + + return 0; + +exit: + return status; +} + +static int accton_i2c_cpld_remove(struct i2c_client *client) +{ + sysfs_remove_file(&client->dev.kobj, &ver.attr); + accton_i2c_cpld_remove_client(client); + + return 0; +} + +static const struct i2c_device_id accton_i2c_cpld_id[] = { + { "accton_i2c_cpld", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_id); + +static struct i2c_driver accton_i2c_cpld_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "accton_i2c_cpld", + }, + .probe = accton_i2c_cpld_probe, + .remove = accton_i2c_cpld_remove, + .id_table = accton_i2c_cpld_id, + .address_list = normal_i2c, +}; + +int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_read_byte_data(cpld_node->client, reg); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(accton_i2c_cpld_read); + +int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + 
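accton_i2c_cpld_read() and accton_i2c_cpld_write() above both walk the global cpld_client_list under list_lock to find the client whose I2C address matches, then issue the SMBus transfer on it. The lookup, factored into a helper for illustration (the driver open-codes it in each function; the caller here is assumed to hold list_lock):

    #include <linux/list.h>

    static struct i2c_client *cpld_client_find(unsigned short cpld_addr)
    {
        struct cpld_client_node *node;

        list_for_each_entry(node, &cpld_client_list, list) {
            if (node->client->addr == cpld_addr)
                return node->client;
        }
        return NULL;   /* no CPLD registered at this address */
    }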
list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(accton_i2c_cpld_write); + +static int __init accton_i2c_cpld_init(void) +{ + mutex_init(&list_lock); + return i2c_add_driver(&accton_i2c_cpld_driver); +} + +static void __exit accton_i2c_cpld_exit(void) +{ + i2c_del_driver(&accton_i2c_cpld_driver); +} + +static struct dmi_system_id as7512_dmi_table[] = { + { + .ident = "Accton AS7512", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), + }, + }, + { + .ident = "Accton AS7512", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), + }, + }, +}; + +int platform_accton_as7512_32x(void) +{ + return dmi_check_system(as7512_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as7512_32x); + +static struct dmi_system_id as7712_dmi_table[] = { + { + .ident = "Accton AS7712", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), + }, + }, + { + .ident = "Accton AS7712", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), + }, + }, +}; + +int platform_accton_as7712_32x(void) +{ + return dmi_check_system(as7712_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as7712_32x); + +static struct dmi_system_id as5812_54t_dmi_table[] = { + { + .ident = "Accton AS5812 54t", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), + }, + }, + { + .ident = "Accton AS5812 54t", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), + }, + }, +}; + +int platform_accton_as5812_54t(void) +{ + return dmi_check_system(as5812_54t_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as5812_54t); + +static struct dmi_system_id as5512_54x_dmi_table[] = { + { + .ident = "Accton AS5512", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), + }, + }, + { + .ident = "Accton AS5512", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), + }, + }, +}; + +int platform_accton_as5512_54x(void) +{ + return dmi_check_system(as5512_54x_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as5512_54x); + +static struct dmi_system_id as7716_dmi_table[] = { + { + .ident = "Accton AS7716", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), + }, + }, + { + .ident = "Accton AS7716", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), + }, + }, +}; + +int platform_accton_as7716_32x(void) +{ + return dmi_check_system(as7716_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as7716_32x); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_i2c_cpld driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_i2c_cpld_init); +module_exit(accton_i2c_cpld_exit); diff --git a/packages/base/any/kernels/modules/ym2651y.c b/packages/base/any/kernels/modules/ym2651y.c new file mode 100644 index 00000000..2fe455b1 --- /dev/null +++ b/packages/base/any/kernels/modules/ym2651y.c @@ -0,0 +1,631 @@ +/* + * An hwmon driver for the 3Y Power YM-2651Y Power Module + * + * Copyright (C) 2014 Accton Technology Corporation. 
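The platform_accton_*() helpers above all follow the same shape: a small dmi_system_id table that matches either the board or system vendor string plus the product name, wrapped in dmi_check_system(), which returns the number of matching entries, so a non-zero result means "running on that box". Adding a check for another platform looks like this (names are hypothetical; note the empty terminator entry that marks the end of the table):

    #include <linux/dmi.h>
    #include <linux/export.h>

    static struct dmi_system_id example_dmi_table[] = {
        {
            .ident = "Example Vendor EX1234",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example"),
                DMI_MATCH(DMI_PRODUCT_NAME, "EX1234"),
            },
        },
        { }   /* terminator */
    };

    int platform_example_ex1234(void)
    {
        return dmi_check_system(example_dmi_table);
    }
    EXPORT_SYMBOL(platform_example_ex1234);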
+ * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_FAN_DUTY_CYCLE 100 + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x58, 0x5b, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct ym2651y_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 capability; /* Register value */ + u16 status_word; /* Register value */ + u8 fan_fault; /* Register value */ + u8 over_temp; /* Register value */ + u16 v_out; /* Register value */ + u16 i_out; /* Register value */ + u16 p_out; /* Register value */ + u16 temp; /* Register value */ + u16 fan_speed; /* Register value */ + u16 fan_duty_cycle[2]; /* Register value */ + u8 fan_dir[5]; /* Register value */ + u8 pmbus_revision; /* Register value */ + u8 mfr_id[10]; /* Register value */ + u8 mfr_model[10]; /* Register value */ + u8 mfr_revsion[3]; /* Register value */ + u16 mfr_vin_min; /* Register value */ + u16 mfr_vin_max; /* Register value */ + u16 mfr_iin_max; /* Register value */ + u16 mfr_iout_max; /* Register value */ + u16 mfr_pin_max; /* Register value */ + u16 mfr_pout_max; /* Register value */ + u16 mfr_vout_min; /* Register value */ + u16 mfr_vout_max; /* Register value */ +}; + +static ssize_t show_byte(struct device *dev, struct device_attribute *da, + char *buf); +static ssize_t show_word(struct device *dev, struct device_attribute *da, + char *buf); +static ssize_t show_linear(struct device *dev, struct device_attribute *da, + char *buf); +static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, + char *buf); +static ssize_t show_over_temp(struct device *dev, struct device_attribute *da, + char *buf); +static ssize_t show_ascii(struct device *dev, struct device_attribute *da, + char *buf); +static struct ym2651y_data *ym2651y_update_device(struct device *dev); +static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +static int ym2651y_write_word(struct i2c_client *client, u8 reg, u16 value); + +enum ym2651y_sysfs_attributes { + PSU_POWER_ON = 0, + PSU_TEMP_FAULT, + PSU_POWER_GOOD, + PSU_FAN1_FAULT, + PSU_FAN_DIRECTION, + PSU_OVER_TEMP, + PSU_V_OUT, + PSU_I_OUT, + PSU_P_OUT, + PSU_TEMP1_INPUT, + PSU_FAN1_SPEED, + PSU_FAN1_DUTY_CYCLE, + PSU_PMBUS_REVISION, + PSU_MFR_ID, + PSU_MFR_MODEL, + PSU_MFR_REVISION, + PSU_MFR_VIN_MIN, + PSU_MFR_VIN_MAX, + PSU_MFR_VOUT_MIN, + PSU_MFR_VOUT_MAX, + PSU_MFR_IIN_MAX, + PSU_MFR_IOUT_MAX, + PSU_MFR_PIN_MAX, + PSU_MFR_POUT_MAX +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_power_on, 
S_IRUGO, show_word, NULL, PSU_POWER_ON); +static SENSOR_DEVICE_ATTR(psu_temp_fault, S_IRUGO, show_word, NULL, PSU_TEMP_FAULT); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_word, NULL, PSU_POWER_GOOD); +static SENSOR_DEVICE_ATTR(psu_fan1_fault, S_IRUGO, show_fan_fault, NULL, PSU_FAN1_FAULT); +static SENSOR_DEVICE_ATTR(psu_over_temp, S_IRUGO, show_over_temp, NULL, PSU_OVER_TEMP); +static SENSOR_DEVICE_ATTR(psu_v_out, S_IRUGO, show_linear, NULL, PSU_V_OUT); +static SENSOR_DEVICE_ATTR(psu_i_out, S_IRUGO, show_linear, NULL, PSU_I_OUT); +static SENSOR_DEVICE_ATTR(psu_p_out, S_IRUGO, show_linear, NULL, PSU_P_OUT); +static SENSOR_DEVICE_ATTR(psu_temp1_input, S_IRUGO, show_linear, NULL, PSU_TEMP1_INPUT); +static SENSOR_DEVICE_ATTR(psu_fan1_speed_rpm, S_IRUGO, show_linear, NULL, PSU_FAN1_SPEED); +static SENSOR_DEVICE_ATTR(psu_fan1_duty_cycle_percentage, S_IWUSR | S_IRUGO, show_linear, set_fan_duty_cycle, PSU_FAN1_DUTY_CYCLE); +static SENSOR_DEVICE_ATTR(psu_fan_dir, S_IRUGO, show_ascii, NULL, PSU_FAN_DIRECTION); +static SENSOR_DEVICE_ATTR(psu_pmbus_revision,S_IRUGO, show_byte, NULL, PSU_PMBUS_REVISION); +static SENSOR_DEVICE_ATTR(psu_mfr_id, S_IRUGO, show_ascii, NULL, PSU_MFR_ID); +static SENSOR_DEVICE_ATTR(psu_mfr_model, S_IRUGO, show_ascii, NULL, PSU_MFR_MODEL); +static SENSOR_DEVICE_ATTR(psu_mfr_revision, S_IRUGO, show_ascii, NULL, PSU_MFR_REVISION); +static SENSOR_DEVICE_ATTR(psu_mfr_vin_min, S_IRUGO, show_linear, NULL, PSU_MFR_VIN_MIN); +static SENSOR_DEVICE_ATTR(psu_mfr_vin_max, S_IRUGO, show_linear, NULL, PSU_MFR_VIN_MAX); +static SENSOR_DEVICE_ATTR(psu_mfr_vout_min, S_IRUGO, show_linear, NULL, PSU_MFR_VOUT_MIN); +static SENSOR_DEVICE_ATTR(psu_mfr_vout_max, S_IRUGO, show_linear, NULL, PSU_MFR_VOUT_MAX); +static SENSOR_DEVICE_ATTR(psu_mfr_iin_max, S_IRUGO, show_linear, NULL, PSU_MFR_IIN_MAX); +static SENSOR_DEVICE_ATTR(psu_mfr_iout_max, S_IRUGO, show_linear, NULL, PSU_MFR_IOUT_MAX); +static SENSOR_DEVICE_ATTR(psu_mfr_pin_max, S_IRUGO, show_linear, NULL, PSU_MFR_PIN_MAX); +static SENSOR_DEVICE_ATTR(psu_mfr_pout_max, S_IRUGO, show_linear, NULL, PSU_MFR_POUT_MAX); + +static struct attribute *ym2651y_attributes[] = { + &sensor_dev_attr_psu_power_on.dev_attr.attr, + &sensor_dev_attr_psu_temp_fault.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + &sensor_dev_attr_psu_fan1_fault.dev_attr.attr, + &sensor_dev_attr_psu_over_temp.dev_attr.attr, + &sensor_dev_attr_psu_v_out.dev_attr.attr, + &sensor_dev_attr_psu_i_out.dev_attr.attr, + &sensor_dev_attr_psu_p_out.dev_attr.attr, + &sensor_dev_attr_psu_temp1_input.dev_attr.attr, + &sensor_dev_attr_psu_fan1_speed_rpm.dev_attr.attr, + &sensor_dev_attr_psu_fan1_duty_cycle_percentage.dev_attr.attr, + &sensor_dev_attr_psu_fan_dir.dev_attr.attr, + &sensor_dev_attr_psu_pmbus_revision.dev_attr.attr, + &sensor_dev_attr_psu_mfr_id.dev_attr.attr, + &sensor_dev_attr_psu_mfr_model.dev_attr.attr, + &sensor_dev_attr_psu_mfr_revision.dev_attr.attr, + &sensor_dev_attr_psu_mfr_vin_min.dev_attr.attr, + &sensor_dev_attr_psu_mfr_vin_max.dev_attr.attr, + &sensor_dev_attr_psu_mfr_pout_max.dev_attr.attr, + &sensor_dev_attr_psu_mfr_iin_max.dev_attr.attr, + &sensor_dev_attr_psu_mfr_pin_max.dev_attr.attr, + &sensor_dev_attr_psu_mfr_vout_min.dev_attr.attr, + &sensor_dev_attr_psu_mfr_vout_max.dev_attr.attr, + &sensor_dev_attr_psu_mfr_iout_max.dev_attr.attr, + NULL +}; + +static ssize_t show_byte(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ym2651y_data *data = 
ym2651y_update_device(dev); + + if (!data->valid) { + return 0; + } + + return (attr->index == PSU_PMBUS_REVISION) ? sprintf(buf, "%d\n", data->pmbus_revision) : + sprintf(buf, "0\n"); +} + +static ssize_t show_word(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ym2651y_data *data = ym2651y_update_device(dev); + u16 status = 0; + + if (!data->valid) { + return 0; + } + + switch (attr->index) { + case PSU_POWER_ON: /* psu_power_on, low byte bit 6 of status_word, 0=>ON, 1=>OFF */ + status = (data->status_word & 0x40) ? 0 : 1; + break; + case PSU_TEMP_FAULT: /* psu_temp_fault, low byte bit 2 of status_word, 0=>Normal, 1=>temp fault */ + status = (data->status_word & 0x4) >> 2; + break; + case PSU_POWER_GOOD: /* psu_power_good, high byte bit 3 of status_word, 0=>OK, 1=>FAIL */ + status = (data->status_word & 0x800) ? 0 : 1; + break; + } + + return sprintf(buf, "%d\n", status); +} + +static int two_complement_to_int(u16 data, u8 valid_bit, int mask) +{ + u16 valid_data = data & mask; + bool is_negative = valid_data >> (valid_bit - 1); + + return is_negative ? (-(((~valid_data) & mask) + 1)) : valid_data; +} + +static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct ym2651y_data *data = i2c_get_clientdata(client); + int nr = (attr->index == PSU_FAN1_DUTY_CYCLE) ? 0 : 1; + long speed; + int error; + + error = kstrtol(buf, 10, &speed); + if (error) + return error; + + if (speed < 0 || speed > MAX_FAN_DUTY_CYCLE) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->fan_duty_cycle[nr] = speed; + ym2651y_write_word(client, 0x3B + nr, data->fan_duty_cycle[nr]); + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t show_linear(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ym2651y_data *data = ym2651y_update_device(dev); + + u16 value = 0; + int exponent, mantissa; + int multiplier = 1000; + + if (!data->valid) { + return 0; + } + + switch (attr->index) { + case PSU_V_OUT: + value = data->v_out; + break; + case PSU_I_OUT: + value = data->i_out; + break; + case PSU_P_OUT: + value = data->p_out; + break; + case PSU_TEMP1_INPUT: + value = data->temp; + break; + case PSU_FAN1_SPEED: + value = data->fan_speed; + multiplier = 1; + break; + case PSU_FAN1_DUTY_CYCLE: + value = data->fan_duty_cycle[0]; + multiplier = 1; + break; + case PSU_MFR_VIN_MIN: + value = data->mfr_vin_min; + break; + case PSU_MFR_VIN_MAX: + value = data->mfr_vin_max; + break; + case PSU_MFR_VOUT_MIN: + value = data->mfr_vout_min; + break; + case PSU_MFR_VOUT_MAX: + value = data->mfr_vout_max; + break; + case PSU_MFR_PIN_MAX: + value = data->mfr_pin_max; + break; + case PSU_MFR_POUT_MAX: + value = data->mfr_pout_max; + break; + case PSU_MFR_IOUT_MAX: + value = data->mfr_iout_max; + break; + case PSU_MFR_IIN_MAX: + value = data->mfr_iin_max; + break; + } + + exponent = two_complement_to_int(value >> 11, 5, 0x1f); + mantissa = two_complement_to_int(value & 0x7ff, 11, 0x7ff); + + return (exponent >= 0) ? 
sprintf(buf, "%d\n", (mantissa << exponent) * multiplier) : + sprintf(buf, "%d\n", (mantissa * multiplier) / (1 << -exponent)); +} + +static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ym2651y_data *data = ym2651y_update_device(dev); + u8 shift; + + if (!data->valid) { + return 0; + } + + shift = (attr->index == PSU_FAN1_FAULT) ? 7 : 6; + + return sprintf(buf, "%d\n", data->fan_fault >> shift); +} + +static ssize_t show_over_temp(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct ym2651y_data *data = ym2651y_update_device(dev); + + if (!data->valid) { + return 0; + } + + return sprintf(buf, "%d\n", data->over_temp >> 7); +} + +static ssize_t show_ascii(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ym2651y_data *data = ym2651y_update_device(dev); + u8 *ptr = NULL; + + if (!data->valid) { + return 0; + } + + switch (attr->index) { + case PSU_FAN_DIRECTION: /* psu_fan_dir */ + ptr = data->fan_dir + 1; /* Skip the first byte since it is the length of string. */ + break; + case PSU_MFR_ID: /* psu_mfr_id */ + ptr = data->mfr_id + 1; /* The first byte is the count byte of string. */; + break; + case PSU_MFR_MODEL: /* psu_mfr_model */ + ptr = data->mfr_model + 1; /* The first byte is the count byte of string. */ + break; + case PSU_MFR_REVISION: /* psu_mfr_revision */ + ptr = data->mfr_revsion + 1; /* The first byte is the count byte of string. */ + break; + default: + return 0; + } + + return sprintf(buf, "%s\n", ptr); +} + +static const struct attribute_group ym2651y_group = { + .attrs = ym2651y_attributes, +}; + +static int ym2651y_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct ym2651y_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct ym2651y_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &ym2651y_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &ym2651y_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int ym2651y_remove(struct i2c_client *client) +{ + struct ym2651y_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &ym2651y_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id ym2651y_id[] = { + { "ym2651", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, ym2651y_id); + +static struct i2c_driver ym2651y_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "ym2651", + }, + .probe = ym2651y_probe, + .remove = ym2651y_remove, + .id_table = ym2651y_id, + .address_list = normal_i2c, +}; + +static int ym2651y_read_byte(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + 
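/*
 * Editor's note — illustrative sketch, not part of the original patch:
 * show_linear() above decodes the PMBus "LINEAR11" encoding used by the
 * YM-2651Y registers: bits 15..11 are a signed 5-bit exponent N and bits
 * 10..0 a signed 11-bit mantissa Y, so the reported value is Y * 2^N.
 * The standalone userspace example below performs the same decode on a
 * raw register word; the sample word 0xD204 is a made-up value chosen
 * for the arithmetic, not a captured register dump.
 */
#include <stdio.h>

/* Interpret the low 'bits' bits of v as a two's-complement integer,
 * mirroring the driver's two_complement_to_int() helper. */
static int sign_extend(unsigned int v, int bits)
{
    unsigned int mask = (1u << bits) - 1;

    v &= mask;
    return (v >> (bits - 1)) ? -(int)(((~v) & mask) + 1) : (int)v;
}

/* Decode a LINEAR11 word into milli-units (same scaling as show_linear
 * with multiplier = 1000). */
static int linear11_to_milli(unsigned int word)
{
    int exponent = sign_extend(word >> 11, 5);
    int mantissa = sign_extend(word & 0x7ff, 11);

    return (exponent >= 0) ? (mantissa << exponent) * 1000
                           : (mantissa * 1000) / (1 << -exponent);
}

int main(void)
{
    /* 0xD204: exponent = 0b11010 = -6, mantissa = 0x204 = 516,
     * so the value is 516 / 64 = 8.0625 -> 8062 milli-units. */
    printf("%d\n", linear11_to_milli(0xD204));
    return 0;
}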
+static int ym2651y_read_word(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_word_data(client, reg); +} + +static int ym2651y_write_word(struct i2c_client *client, u8 reg, u16 value) +{ + return i2c_smbus_write_word_data(client, reg, value); +} + +static int ym2651y_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +struct reg_data_byte { + u8 reg; + u8 *value; +}; + +struct reg_data_word { + u8 reg; + u16 *value; +}; + +static struct ym2651y_data *ym2651y_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ym2651y_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int i, status; + u8 command; + struct reg_data_byte regs_byte[] = { {0x19, &data->capability}, + {0x7d, &data->over_temp}, + {0x81, &data->fan_fault}, + {0x98, &data->pmbus_revision}}; + struct reg_data_word regs_word[] = { {0x79, &data->status_word}, + {0x8b, &data->v_out}, + {0x8c, &data->i_out}, + {0x96, &data->p_out}, + {0x8d, &data->temp}, + {0x3b, &(data->fan_duty_cycle[0])}, + {0x3c, &(data->fan_duty_cycle[1])}, + {0x90, &data->fan_speed}, + {0xa0, &data->mfr_vin_min}, + {0xa1, &data->mfr_vin_max}, + {0xa2, &data->mfr_iin_max}, + {0xa3, &data->mfr_pin_max}, + {0xa4, &data->mfr_vout_min}, + {0xa5, &data->mfr_vout_max}, + {0xa6, &data->mfr_iout_max}, + {0xa7, &data->mfr_pout_max}}; + + dev_dbg(&client->dev, "Starting ym2651 update\n"); + data->valid = 0; + + /* Read byte data */ + for (i = 0; i < ARRAY_SIZE(regs_byte); i++) { + status = ym2651y_read_byte(client, regs_byte[i].reg); + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", + regs_byte[i].reg, status); + goto exit; + } + else { + *(regs_byte[i].value) = status; + } + } + + /* Read word data */ + for (i = 0; i < ARRAY_SIZE(regs_word); i++) { + status = ym2651y_read_word(client, regs_word[i].reg); + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", + regs_word[i].reg, status); + goto exit; + } + else { + *(regs_word[i].value) = status; + } + } + + /* Read fan_direction */ + command = 0xC3; + status = ym2651y_read_block(client, command, data->fan_dir, + ARRAY_SIZE(data->fan_dir)-1); + data->fan_dir[ARRAY_SIZE(data->fan_dir)-1] = '\0'; + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", command, status); + goto exit; + } + + /* Read mfr_id */ + command = 0x99; + status = ym2651y_read_block(client, command, data->mfr_id, + ARRAY_SIZE(data->mfr_id)-1); + data->mfr_id[ARRAY_SIZE(data->mfr_id)-1] = '\0'; + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", command, status); + goto exit; + } + + /* Read mfr_model */ + command = 0x9a; + status = ym2651y_read_block(client, command, data->mfr_model, + ARRAY_SIZE(data->mfr_model)-1); + data->mfr_model[ARRAY_SIZE(data->mfr_model)-1] = '\0'; + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", command, status); + goto exit; + } + + /* Read mfr_revsion */ + command = 0x9b; + status = ym2651y_read_block(client, command, data->mfr_revsion, + ARRAY_SIZE(data->mfr_revsion)-1); + data->mfr_revsion[ARRAY_SIZE(data->mfr_revsion)-1] = '\0'; + + if (status < 0) { + dev_dbg(&client->dev, "reg %d, err %d\n", command, status); + goto exit; + 
} + + data->last_updated = jiffies; + data->valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init ym2651y_init(void) +{ + return i2c_add_driver(&ym2651y_driver); +} + +static void __exit ym2651y_exit(void) +{ + i2c_del_driver(&ym2651y_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("3Y Power YM-2651Y driver"); +MODULE_LICENSE("GPL"); + +module_init(ym2651y_init); +module_exit(ym2651y_exit); + From 1d730ef5047ff3346713a65f735599bdab61c18e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:35:52 +0000 Subject: [PATCH 231/255] AS5812-54T Kernel Modules. --- .../x86-64-accton-as5812-54t/modules/Makefile | 1 + .../x86-64-accton-as5812-54t/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as5812-54t-fan.c | 442 +++++++++++++ .../builds/x86-64-accton-as5812-54t-leds.c | 601 ++++++++++++++++++ .../builds/x86-64-accton-as5812-54t-psu.c | 295 +++++++++ .../builds/x86-64-accton-as5812-54t-sfp.c | 318 +++++++++ 8 files changed, 1664 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/PKG.yml new file mode 100644 index 00000000..7701094b --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as5812-54t ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile new file mode 100644 index 00000000..46ebca16 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := 
onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as5812-54t +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-fan.c new file mode 100644 index 00000000..bad9245e --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-fan.c @@ -0,0 +1,442 @@ +/* + * A hwmon driver for the Accton as5812 54t fan + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAN_MAX_NUMBER 5 +#define FAN_SPEED_CPLD_TO_RPM_STEP 150 +#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 +#define FAN_DUTY_CYCLE_MIN 0 +#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ + +#define CPLD_REG_FAN_STATUS_OFFSET 0xC +#define CPLD_REG_FANR_STATUS_OFFSET 0x1F +#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E + +#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 +#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 +#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 +#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 +#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 + +#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 +#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 +#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A +#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B +#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C + +#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD + +#define CPLD_FAN1_INFO_BIT_MASK 0x1 +#define CPLD_FAN2_INFO_BIT_MASK 0x2 +#define CPLD_FAN3_INFO_BIT_MASK 0x4 +#define CPLD_FAN4_INFO_BIT_MASK 0x8 +#define CPLD_FAN5_INFO_BIT_MASK 0x10 + +#define PROJECT_NAME + +#define LOCAL_DEBUG 0 + +static struct accton_as5812_54t_fan *fan_data = NULL; + +struct accton_as5812_54t_fan { + struct platform_device *pdev; + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ + u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ + u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ + u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ + u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ + u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ +}; + +/*******************/ +#define MAKE_FAN_MASK_OR_REG(name,type) \ + CPLD_FAN##type##1_##name, \ + CPLD_FAN##type##2_##name, \ + CPLD_FAN##type##3_##name, \ + CPLD_FAN##type##4_##name, \ + CPLD_FAN##type##5_##name, + +/* fan related data + */ +static const u8 fan_info_mask[] = { + 
MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) +}; + +static const u8 fan_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) +}; + +static const u8 fanr_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) +}; + +/*******************/ +#define DEF_FAN_SET(id) \ + FAN##id##_FAULT, \ + FAN##id##_SPEED, \ + FAN##id##_DUTY_CYCLE, \ + FAN##id##_DIRECTION, \ + FANR##id##_FAULT, \ + FANR##id##_SPEED, + +enum sysfs_fan_attributes { + DEF_FAN_SET(1) + DEF_FAN_SET(2) + DEF_FAN_SET(3) + DEF_FAN_SET(4) + DEF_FAN_SET(5) +}; +/*******************/ +static void accton_as5812_54t_fan_update_device(struct device *dev); +static int accton_as5812_54t_fan_read_value(u8 reg); +static int accton_as5812_54t_fan_write_value(u8 reg, u8 value); + +static ssize_t fan_set_duty_cycle(struct device *dev, + struct device_attribute *da,const char *buf, size_t count); +static ssize_t fan_show_value(struct device *dev, + struct device_attribute *da, char *buf); + +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + + +/*******************/ +#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ + fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); + +#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) + +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) +/*******************/ + +#define _MAKE_FAN_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ + &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, + +#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) + +static struct attribute *accton_as5812_54t_fan_attributes[] = { + /* fan related attributes */ + MAKE_FAN_ATTR(PROJECT_NAME,1) + MAKE_FAN_ATTR(PROJECT_NAME,2) + MAKE_FAN_ATTR(PROJECT_NAME,3) + MAKE_FAN_ATTR(PROJECT_NAME,4) + MAKE_FAN_ATTR(PROJECT_NAME,5) + NULL +}; +/*******************/ + +/* fan related functions + */ +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + ssize_t ret = 0; + int data_index, type_index; + + accton_as5812_54t_fan_update_device(dev); + + if (fan_data->valid == 0) { + return ret; + } + + type_index = attr->index%FAN2_FAULT; + data_index = attr->index/FAN2_FAULT; + + switch (type_index) { + case FAN1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, 
type_index, data_index); + break; + case FAN1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DUTY_CYCLE: + ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DIRECTION: + ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + default: + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); + break; + } + + return ret; +} +/*******************/ +static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) { + + int error, value; + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) + return -EINVAL; + + accton_as5812_54t_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); + + fan_data->valid = 0; + + return count; +} + +static const struct attribute_group accton_as5812_54t_fan_group = { + .attrs = accton_as5812_54t_fan_attributes, +}; + +static int accton_as5812_54t_fan_read_value(u8 reg) +{ + return accton_i2c_cpld_read(0x60, reg); +} + +static int accton_as5812_54t_fan_write_value(u8 reg, u8 value) +{ + return accton_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5812_54t_fan_update_device(struct device *dev) +{ + int speed, r_speed, fault, r_fault, ctrl_speed, direction; + int i; + + mutex_lock(&fan_data->update_lock); + + if (LOCAL_DEBUG) + printk ("Starting accton_as5812_54t_fan update \n"); + + if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { + /* do nothing */ + goto _exit; + } + + fan_data->valid = 0; + + if (LOCAL_DEBUG) + printk ("Starting accton_as5812_54t_fan update 2 \n"); + + fault = accton_as5812_54t_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); + r_fault = accton_as5812_54t_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); + direction = accton_as5812_54t_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); + ctrl_speed = accton_as5812_54t_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); + + if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); + + for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; + if (LOCAL_DEBUG) + printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); + + fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; + fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; + fan_data->duty_cycle[i] = 
ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; + + /* fan speed + */ + speed = accton_as5812_54t_fan_read_value(fan_speed_reg[i]); + r_speed = accton_as5812_54t_fan_read_value(fanr_speed_reg[i]); + if ( (speed < 0) || (r_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); + + fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; + fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; + } + + /* finish to update */ + fan_data->last_updated = jiffies; + fan_data->valid = 1; + +_exit: + mutex_unlock(&fan_data->update_lock); +} + +static int accton_as5812_54t_fan_probe(struct platform_device *pdev) +{ + int status = -1; + + /* Register sysfs hooks */ + status = sysfs_create_group(&pdev->dev.kobj, &accton_as5812_54t_fan_group); + if (status) { + goto exit; + + } + + fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(fan_data->hwmon_dev)) { + status = PTR_ERR(fan_data->hwmon_dev); + goto exit_remove; + } + + dev_info(&pdev->dev, "accton_as5812_54t_fan\n"); + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &accton_as5812_54t_fan_group); +exit: + return status; +} + +static int accton_as5812_54t_fan_remove(struct platform_device *pdev) +{ + hwmon_device_unregister(fan_data->hwmon_dev); + sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5812_54t_fan_group); + + return 0; +} + +#define DRVNAME "as5812_54t_fan" + +static struct platform_driver accton_as5812_54t_fan_driver = { + .probe = accton_as5812_54t_fan_probe, + .remove = accton_as5812_54t_fan_remove, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5812_54t_fan_init(void) +{ + int ret; + + extern int platform_accton_as5812_54t(void); + if (!platform_accton_as5812_54t()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as5812_54t_fan_driver); + if (ret < 0) { + goto exit; + } + + fan_data = kzalloc(sizeof(struct accton_as5812_54t_fan), GFP_KERNEL); + if (!fan_data) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5812_54t_fan_driver); + goto exit; + } + + mutex_init(&fan_data->update_lock); + fan_data->valid = 0; + + fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(fan_data->pdev)) { + ret = PTR_ERR(fan_data->pdev); + platform_driver_unregister(&accton_as5812_54t_fan_driver); + kfree(fan_data); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5812_54t_fan_exit(void) +{ + platform_device_unregister(fan_data->pdev); + platform_driver_unregister(&accton_as5812_54t_fan_driver); + kfree(fan_data); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5812_54t_fan driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_as5812_54t_fan_init); +module_exit(accton_as5812_54t_fan_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-leds.c new file mode 100644 index 00000000..011f62e7 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-leds.c @@ -0,0 +1,601 @@ +/* + * A LED driver for the accton_as5812_54t_led + * + * Copyright (C) 2015 Accton Technology Corporation. 
+ * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include + +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "as5812_54t_led" + +struct accton_as5812_54t_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED + 1 = PSU1/PSU2 LED + 2 = FAN1-4 LED + 3 = FAN5-6 LED */ +}; + +static struct accton_as5812_54t_led_data *ledctl = NULL; + +/* LED related data + */ +#define LED_TYPE_PSU1_REG_MASK 0x03 +#define LED_MODE_PSU1_GREEN_MASK 0x02 +#define LED_MODE_PSU1_AMBER_MASK 0x01 +#define LED_MODE_PSU1_OFF_MASK 0x03 +#define LED_MODE_PSU1_AUTO_MASK 0x00 + +#define LED_TYPE_PSU2_REG_MASK 0x0C +#define LED_MODE_PSU2_GREEN_MASK 0x08 +#define LED_MODE_PSU2_AMBER_MASK 0x04 +#define LED_MODE_PSU2_OFF_MASK 0x0C +#define LED_MODE_PSU2_AUTO_MASK 0x00 + +#define LED_TYPE_DIAG_REG_MASK 0x0C +#define LED_MODE_DIAG_GREEN_MASK 0x08 +#define LED_MODE_DIAG_AMBER_MASK 0x04 +#define LED_MODE_DIAG_OFF_MASK 0x0C + +#define LED_TYPE_FAN_REG_MASK 0x03 +#define LED_MODE_FAN_GREEN_MASK 0x02 +#define LED_MODE_FAN_AMBER_MASK 0x01 +#define LED_MODE_FAN_OFF_MASK 0x03 +#define LED_MODE_FAN_AUTO_MASK 0x00 + +#define LED_TYPE_FAN1_REG_MASK 0x03 +#define LED_TYPE_FAN2_REG_MASK 0x0C +#define LED_TYPE_FAN3_REG_MASK 0x30 +#define LED_TYPE_FAN4_REG_MASK 0xC0 +#define LED_TYPE_FAN5_REG_MASK 0x03 +#define LED_TYPE_FAN6_REG_MASK 0x0C + +#define LED_MODE_FANX_GREEN_MASK 0x01 +#define LED_MODE_FANX_RED_MASK 0x02 +#define LED_MODE_FANX_OFF_MASK 0x00 + +#define LED_TYPE_LOC_REG_MASK 0x30 +#define LED_MODE_LOC_ON_MASK 0x00 +#define LED_MODE_LOC_OFF_MASK 0x10 +#define LED_MODE_LOC_BLINK_MASK 0x20 + +static const u8 led_reg[] = { + 0xA, /* LOC/DIAG/FAN LED*/ + 0xB, /* PSU1/PSU2 LED */ + 0x16, /* FAN1-4 LED */ + 0x17, /* FAN4-6 LED */ +}; + +enum led_type { + LED_TYPE_PSU1, + LED_TYPE_PSU2, + LED_TYPE_DIAG, + LED_TYPE_FAN, + LED_TYPE_FAN1, + LED_TYPE_FAN2, + LED_TYPE_FAN3, + LED_TYPE_FAN4, + LED_TYPE_FAN5, + LED_TYPE_LOC +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER, + LED_MODE_AMBER_BLINK, + LED_MODE_RED, + LED_MODE_RED_BLINK, + LED_MODE_BLUE, + LED_MODE_BLUE_BLINK, + LED_MODE_AUTO, + LED_MODE_UNKNOWN +}; + +struct led_type_mode { + enum led_type type; + int 
type_mask; + enum led_light_mode mode; + int mode_mask; +}; + +static struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} +}; + + +struct fanx_info_s { + u8 cname; /* device name */ + enum led_type type; + u8 reg_id; /* map to led_reg & reg_val */ +}; + +static struct fanx_info_s fanx_info[] = { + {'1', LED_TYPE_FAN1, 2}, + {'2', LED_TYPE_FAN2, 2}, + {'3', LED_TYPE_FAN3, 2}, + {'4', LED_TYPE_FAN4, 2}, + {'5', LED_TYPE_FAN5, 3} +}; + +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if ((led_type_mode_data[i].type_mask & reg_val) == + led_type_mode_data[i].mode_mask) + { + return led_type_mode_data[i].mode; + } + } + + return 
LED_MODE_UNKNOWN; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + reg_val = led_type_mode_data[i].mode_mask | + (reg_val & (~led_type_mode_data[i].type_mask)); + } + + return reg_val; +} + +static int accton_as5812_54t_led_read_value(u8 reg) +{ + return accton_i2c_cpld_read(0x60, reg); +} + +static int accton_as5812_54t_led_write_value(u8 reg, u8 value) +{ + return accton_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5812_54t_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as5812_54t_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as5812_54t_led_read_value(led_reg[i]); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5812_54t_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as5812_54t_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as5812_54t_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5812_54t_led_psu_1_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); +} + +static enum led_brightness accton_as5812_54t_led_psu_1_get(struct led_classdev *cdev) +{ + accton_as5812_54t_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); +} + +static void accton_as5812_54t_led_psu_2_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); +} + +static enum led_brightness accton_as5812_54t_led_psu_2_get(struct led_classdev *cdev) +{ + accton_as5812_54t_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); +} + +static void accton_as5812_54t_led_fan_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); +} + +static enum led_brightness accton_as5812_54t_led_fan_get(struct led_classdev *cdev) +{ + accton_as5812_54t_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); +} + + +static void accton_as5812_54t_led_fanx_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (led_cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + 
accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); + return; + } + } +} + + +static enum led_brightness accton_as5812_54t_led_fanx_get(struct led_classdev *cdev) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as5812_54t_led_update(); + return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); + } + } + + + return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); +} + + +static void accton_as5812_54t_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); +} + +static enum led_brightness accton_as5812_54t_led_diag_get(struct led_classdev *cdev) +{ + accton_as5812_54t_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as5812_54t_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); +} + +static enum led_brightness accton_as5812_54t_led_loc_get(struct led_classdev *cdev) +{ + accton_as5812_54t_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static struct led_classdev accton_as5812_54t_leds[] = { + [LED_TYPE_PSU1] = { + .name = "accton_as5812_54t_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_psu_1_set, + .brightness_get = accton_as5812_54t_led_psu_1_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as5812_54t_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_psu_2_set, + .brightness_get = accton_as5812_54t_led_psu_2_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN] = { + .name = "accton_as5812_54t_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_fan_set, + .brightness_get = accton_as5812_54t_led_fan_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN1] = { + .name = "accton_as5812_54t_led::fan1", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_fanx_set, + .brightness_get = accton_as5812_54t_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN2] = { + .name = "accton_as5812_54t_led::fan2", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_fanx_set, + .brightness_get = accton_as5812_54t_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN3] = { + .name = "accton_as5812_54t_led::fan3", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_fanx_set, + .brightness_get = accton_as5812_54t_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN4] = { + .name = "accton_as5812_54t_led::fan4", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_fanx_set, + .brightness_get = accton_as5812_54t_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN5] = { + .name = "accton_as5812_54t_led::fan5", + .default_trigger = "unused", + .brightness_set = 
accton_as5812_54t_led_fanx_set, + .brightness_get = accton_as5812_54t_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_DIAG] = { + .name = "accton_as5812_54t_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_diag_set, + .brightness_get = accton_as5812_54t_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_LOC] = { + .name = "accton_as5812_54t_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as5812_54t_led_loc_set, + .brightness_get = accton_as5812_54t_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as5812_54t_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { + led_classdev_suspend(&accton_as5812_54t_leds[i]); + } + + return 0; +} + +static int accton_as5812_54t_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { + led_classdev_resume(&accton_as5812_54t_leds[i]); + } + + return 0; +} + +static int accton_as5812_54t_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as5812_54t_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as5812_54t_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as5812_54t_leds[i]); + } + } + + return ret; +} + +static int accton_as5812_54t_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { + led_classdev_unregister(&accton_as5812_54t_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as5812_54t_led_driver = { + .probe = accton_as5812_54t_led_probe, + .remove = accton_as5812_54t_led_remove, + .suspend = accton_as5812_54t_led_suspend, + .resume = accton_as5812_54t_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5812_54t_led_init(void) +{ + int ret; + + extern int platform_accton_as5812_54t(void); + if (!platform_accton_as5812_54t()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as5812_54t_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as5812_54t_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5812_54t_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as5812_54t_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5812_54t_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as5812_54t_led_driver); + kfree(ledctl); +} + +module_init(accton_as5812_54t_led_init); +module_exit(accton_as5812_54t_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5812_54t_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-psu.c 
b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-psu.c new file mode 100644 index 00000000..bf1b79ec --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-psu.c @@ -0,0 +1,295 @@ +/* + * An hwmon driver for accton as5812_54t Power Module + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as5812_54t_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5812_54t_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[14]; /* Model name, read from eeprom */ +}; + +static struct as5812_54t_psu_data *as5812_54t_psu_update_device(struct device *dev); + +enum as5812_54t_psu_sysfs_attributes { + PSU_INDEX, + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as5812_54t_psu_attributes[] = { + &sensor_dev_attr_psu_index.dev_attr.attr, + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_index(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54t_psu_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->index); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5812_54t_psu_data 
*data = as5812_54t_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((data->index - 1) * 4) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as5812_54t_psu_data *data = as5812_54t_psu_update_device(dev); + + return sprintf(buf, "%s", data->model_name); +} + +static const struct attribute_group as5812_54t_psu_group = { + .attrs = as5812_54t_psu_attributes, +}; + +static int as5812_54t_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5812_54t_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5812_54t_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as5812_54t_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x38 || client->addr == 0x50) { + data->index = 1; + } + else if (client->addr == 0x3b || client->addr == 0x53) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5812_54t_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5812_54t_psu_remove(struct i2c_client *client) +{ + struct as5812_54t_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5812_54t_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as5812_54t_psu_id[] = { + { "as5812_54t_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as5812_54t_psu_id); + +static struct i2c_driver as5812_54t_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5812_54t_psu", + }, + .probe = as5812_54t_psu_probe, + .remove = as5812_54t_psu_remove, + .id_table = as5812_54t_psu_id, + .address_list = normal_i2c, +}; + +static int as5812_54t_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as5812_54t_psu_data *as5812_54t_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54t_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + + dev_dbg(&client->dev, "Starting as5812_54t update\n"); + + /* Read model name */ + if (client->addr == 0x38 || client->addr == 0x3b) { + /* AC power */ + status = as5812_54t_psu_read_block(client, 0x26, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + else { + /* DC power */ + status = 
as5812_54t_psu_read_block(client, 0x50, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + + /* Read psu status */ + status = accton_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as5812_54t_psu_init(void) +{ + extern int platform_accton_as5812_54t(void); + if (!platform_accton_as5812_54t()) { + return -ENODEV; + } + + return i2c_add_driver(&as5812_54t_psu_driver); +} + +static void __exit as5812_54t_psu_exit(void) +{ + i2c_del_driver(&as5812_54t_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5812_54t_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as5812_54t_psu_init); +module_exit(as5812_54t_psu_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-sfp.c new file mode 100644 index 00000000..88bf552d --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/x86-64-accton-as5812-54t-sfp.c @@ -0,0 +1,318 @@ +/* + * An hwmon driver for accton as5812_54t sfp + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define QSFP_PORT_START_INDEX 49 +#define BIT_INDEX(i) (1ULL << (i)) + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5812_54t_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u8 status; /* bit0:port49, bit1:port50 and so on */ +}; + +static struct as5812_54t_sfp_data *as5812_54t_sfp_update_device(struct device *dev, int update_eeprom); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +enum as5812_54t_sfp_sysfs_attributes { + SFP_IS_PRESENT, + SFP_PORT_NUMBER, + SFP_EEPROM, + SFP_IS_PRESENT_ALL, +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_status, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_status,NULL, SFP_IS_PRESENT_ALL); + +static struct attribute *as5812_54t_sfp_attributes[] = { + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54t_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n",data->port); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5812_54t_sfp_data *data = as5812_54t_sfp_update_device(dev, 0); + + if (attr->index == SFP_IS_PRESENT) { + u8 val; + + val = (data->status & BIT_INDEX(data->port - QSFP_PORT_START_INDEX)) ? 
0 : 1; + return sprintf(buf, "%d", val); + } + else { /* SFP_IS_PRESENT_ALL */ + return sprintf(buf, "%.2x\n", ~data->status); + } +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as5812_54t_sfp_data *data = as5812_54t_sfp_update_device(dev, 1); + + if (!data->valid) { + return 0; + } + + if ((data->status & BIT_INDEX(data->port - QSFP_PORT_START_INDEX)) != 0) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as5812_54t_sfp_group = { + .attrs = as5812_54t_sfp_attributes, +}; + +static int as5812_54t_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5812_54t_sfp_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5812_54t_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as5812_54t_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5812_54t_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5812_54t_sfp_remove(struct i2c_client *client) +{ + struct as5812_54t_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5812_54t_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as5812_54t_qsfp49 = 49, +as5812_54t_qsfp50, +as5812_54t_qsfp51, +as5812_54t_qsfp52, +as5812_54t_qsfp53, +as5812_54t_qsfp54 +}; + +static const struct i2c_device_id as5812_54t_sfp_id[] = { +{ "as5812_54t_qsfp49", as5812_54t_qsfp49 }, { "as5812_54t_qsfp50", as5812_54t_qsfp50 }, +{ "as5812_54t_qsfp51", as5812_54t_qsfp51 }, { "as5812_54t_qsfp52", as5812_54t_qsfp52 }, +{ "as5812_54t_qsfp53", as5812_54t_qsfp53 }, { "as5812_54t_qsfp54", as5812_54t_qsfp54 }, +{} +}; +MODULE_DEVICE_TABLE(i2c, as5812_54t_sfp_id); + +static struct i2c_driver as5812_54t_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5812_54t_sfp", + }, + .probe = as5812_54t_sfp_probe, + .remove = as5812_54t_sfp_remove, + .id_table = as5812_54t_sfp_id, + .address_list = normal_i2c, +}; + +static int as5812_54t_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) +{ + int result = i2c_smbus_read_byte_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u8)result; + result = 0; + +abort: + return result; +} + +static struct as5812_54t_sfp_data *as5812_54t_sfp_update_device(struct device *dev, int update_eeprom) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54t_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid || update_eeprom) { + int status = -1; + int i = 0; + + data->valid 
= 0; + //dev_dbg(&client->dev, "Starting as5812_54t sfp status update\n"); + data->status = 0xFF; + + /* + * Bring QSFPs out of reset, + * This is a temporary fix until the QSFP+_MOD_RST register + * can be exposed through the driver. + */ + accton_i2c_cpld_write(0x60, 0x23, 0x3F); + + /* Read present status of port 49-54(QSFP port) */ + status = accton_i2c_cpld_read(0x60, 0x22); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x60) reg(0x22) err %d\n", status); + } + else { + data->status = status & 0x3F; /* (u32)status */ + } + + if (update_eeprom) { + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if ((data->status & BIT_INDEX(data->port - QSFP_PORT_START_INDEX)) == 0) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom); i++) { + status = as5812_54t_sfp_read_byte(client, i, data->eeprom + i); + + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", + data->port); + goto exit; + } + } + } + } + + data->valid = 1; + data->last_updated = jiffies; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as5812_54t_sfp_init(void) +{ + extern int platform_accton_as5812_54t(void); + if (!platform_accton_as5812_54t()) { + return -ENODEV; + } + + return i2c_add_driver(&as5812_54t_sfp_driver); +} + +static void __exit as5812_54t_sfp_exit(void) +{ + i2c_del_driver(&as5812_54t_sfp_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5812_54t_sfp driver"); +MODULE_LICENSE("GPL"); + +module_init(as5812_54t_sfp_init); +module_exit(as5812_54t_sfp_exit); + From 4e437e2af0957976fe3214a0103f16e21c2dd453 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:36:13 +0000 Subject: [PATCH 232/255] AS5812-54X Kernel Modules. 
--- .../x86-64-accton-as5812-54x/modules/Makefile | 1 + .../x86-64-accton-as5812-54x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as5812-54x-cpld.c | 395 ++++++++++++ .../builds/x86-64-accton-as5812-54x-fan.c | 442 +++++++++++++ .../builds/x86-64-accton-as5812-54x-leds.c | 597 ++++++++++++++++++ .../builds/x86-64-accton-as5812-54x-psu.c | 294 +++++++++ .../builds/x86-64-accton-as5812-54x-sfp.c | 508 +++++++++++++++ 9 files changed, 2244 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/PKG.yml new file mode 100644 index 00000000..dbd3f513 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as5812-54x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile new file mode 100644 index 00000000..9fc1159e --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as5812-54x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c new file mode 100644 index 00000000..710c5202 --- /dev/null +++ 
b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c @@ -0,0 +1,395 @@ +/* + * An I2C multiplexer dirver for accton as5812 CPLD + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This module supports the accton cpld that hold the channel select + * mechanism for other i2c slave devices, such as SFP. + * This includes the: + * Accton as5812_54x CPLD1/CPLD2/CPLD3 + * + * Based on: + * pca954x.c from Kumar Gala + * Copyright (C) 2006 + * + * Based on: + * pca954x.c from Ken Harrenstien + * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) + * + * Based on: + * i2c-virtual_cb.c from Brian Kuschak + * and + * pca9540.c from Jean Delvare . + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct dmi_system_id as5812_54x_dmi_table[] = { + { + .ident = "Accton AS5812-54X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), + }, + }, + { + .ident = "Accton AS5812-54X", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), + }, + }, +}; + +int platform_accton_as5812_54x(void) +{ + return dmi_check_system(as5812_54x_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as5812_54x); + +#define NUM_OF_CPLD1_CHANS 0x0 +#define NUM_OF_CPLD2_CHANS 0x18 +#define NUM_OF_CPLD3_CHANS 0x1E +#define CPLD_CHANNEL_SELECT_REG 0x2 +#define CPLD_DESELECT_CHANNEL 0xFF + +#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +enum cpld_mux_type { + as5812_54x_cpld2, + as5812_54x_cpld3, + as5812_54x_cpld1 +}; + +struct accton_i2c_cpld_mux { + enum cpld_mux_type type; + struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; + u8 last_chan; /* last register value */ +}; + +struct chip_desc { + u8 nchans; + u8 deselectChan; +}; + +/* Provide specs for the PCA954x types we know about */ +static const struct chip_desc chips[] = { + [as5812_54x_cpld1] = { + .nchans = NUM_OF_CPLD1_CHANS, + .deselectChan = CPLD_DESELECT_CHANNEL, + }, + [as5812_54x_cpld2] = { + .nchans = NUM_OF_CPLD2_CHANS, + .deselectChan = CPLD_DESELECT_CHANNEL, + }, + [as5812_54x_cpld3] = { + .nchans = NUM_OF_CPLD3_CHANS, + .deselectChan = CPLD_DESELECT_CHANNEL, + } +}; + +static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { + { "as5812_54x_cpld1", as5812_54x_cpld1 }, + { "as5812_54x_cpld2", as5812_54x_cpld2 }, + { "as5812_54x_cpld3", as5812_54x_cpld3 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); + +/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() + for this as they will try to lock adapter a second time */ +static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, + struct i2c_client *client, u8 val) +{ + unsigned long orig_jiffies; + unsigned short flags; + union i2c_smbus_data data; + int try; + s32 res = -EIO; + + data.byte = val; + flags = client->flags; + flags &= I2C_M_TEN | I2C_CLIENT_PEC; + + if (adap->algo->smbus_xfer) { + /* Retry automatically on arbitration loss */ + orig_jiffies = jiffies; + for (res = 0, try = 0; try <= adap->retries; try++) { + res = adap->algo->smbus_xfer(adap, client->addr, flags, + I2C_SMBUS_WRITE, CPLD_CHANNEL_SELECT_REG, + I2C_SMBUS_BYTE_DATA, &data); + if (res != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + adap->timeout)) + break; + } + } + + return res; +} + +static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + u8 regval; + int ret = 0; + regval = chan; + + /* Only select the channel if its different from the last channel */ + if (data->last_chan != regval) { + ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); + data->last_chan = regval; + } + + return ret; +} + +static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + + /* Deselect active channel */ + data->last_chan = chips[data->type].deselectChan; + + return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); +} + +static void accton_i2c_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void accton_i2c_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) +{ + u8 reg = 0x1; + struct i2c_client *client; + int len; + + client = to_i2c_client(dev); + len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); + + return len; +} + +static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); + +/* + * I2C init/probing/exit functions + */ +static int accton_i2c_cpld_mux_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); + int chan=0; + struct accton_i2c_cpld_mux *data; + int ret = -ENODEV; + + if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) + goto err; + + data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err; + } + + i2c_set_clientdata(client, data); + + data->type = id->driver_data; + + if (data->type == as5812_54x_cpld2 || data->type == as5812_54x_cpld3) { + data->last_chan = chips[data->type].deselectChan; /* force the first selection */ 
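+		/*
+		 * Each channel registered below becomes its own virtual I2C
+		 * adapter, so every downstream device (e.g. an SFP EEPROM)
+		 * appears on a dedicated bus.  Per the chips[] table, CPLD2
+		 * exposes 0x18 (24) channels and CPLD3 exposes 0x1E (30),
+		 * presumably matching the 54 front-panel ports; CPLD1 exposes
+		 * none and is skipped by the enclosing type check.
+		 */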
+ + /* Now create an adapter for each channel */ + for (chan = 0; chan < chips[data->type].nchans; chan++) { + data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, + I2C_CLASS_HWMON | I2C_CLASS_SPD, + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); + + if (data->virt_adaps[chan] == NULL) { + ret = -ENODEV; + dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); + goto virt_reg_failed; + } + } + + dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", + chan, client->name); + } + + accton_i2c_cpld_add_client(client); + + ret = sysfs_create_file(&client->dev.kobj, &ver.attr); + if (ret) + goto virt_reg_failed; + + return 0; + +virt_reg_failed: + for (chan--; chan >= 0; chan--) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + } + + kfree(data); +err: + return ret; +} + +static int accton_i2c_cpld_mux_remove(struct i2c_client *client) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + const struct chip_desc *chip = &chips[data->type]; + int chan; + + sysfs_remove_file(&client->dev.kobj, &ver.attr); + + for (chan = 0; chan < chip->nchans; ++chan) { + if (data->virt_adaps[chan]) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + data->virt_adaps[chan] = NULL; + } + } + + kfree(data); + accton_i2c_cpld_remove_client(client); + + return 0; +} + +int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_read_byte_data(cpld_node->client, reg); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as5812_54x_i2c_cpld_read); + +int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as5812_54x_i2c_cpld_write); + +static struct i2c_driver accton_i2c_cpld_mux_driver = { + .driver = { + .name = "as5812_54x_cpld", + .owner = THIS_MODULE, + }, + .probe = accton_i2c_cpld_mux_probe, + .remove = accton_i2c_cpld_mux_remove, + .id_table = accton_i2c_cpld_mux_id, +}; + +static int __init accton_i2c_cpld_mux_init(void) +{ + mutex_init(&list_lock); + return i2c_add_driver(&accton_i2c_cpld_mux_driver); +} + +static void __exit accton_i2c_cpld_mux_exit(void) +{ + i2c_del_driver(&accton_i2c_cpld_mux_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_i2c_cpld_mux_init); +module_exit(accton_i2c_cpld_mux_exit); + + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-fan.c new file mode 100644 index 00000000..3e25db1e --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-fan.c @@ -0,0 
+1,442 @@ +/* + * A hwmon driver for the Accton as5812 54x fan + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAN_MAX_NUMBER 5 +#define FAN_SPEED_CPLD_TO_RPM_STEP 150 +#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 +#define FAN_DUTY_CYCLE_MIN 0 +#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ + +#define CPLD_REG_FAN_STATUS_OFFSET 0xC +#define CPLD_REG_FANR_STATUS_OFFSET 0x1F +#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E + +#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 +#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 +#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 +#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 +#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 + +#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 +#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 +#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A +#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B +#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C + +#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD + +#define CPLD_FAN1_INFO_BIT_MASK 0x1 +#define CPLD_FAN2_INFO_BIT_MASK 0x2 +#define CPLD_FAN3_INFO_BIT_MASK 0x4 +#define CPLD_FAN4_INFO_BIT_MASK 0x8 +#define CPLD_FAN5_INFO_BIT_MASK 0x10 + +#define PROJECT_NAME + +#define LOCAL_DEBUG 0 + +static struct accton_as5812_54x_fan *fan_data = NULL; + +struct accton_as5812_54x_fan { + struct platform_device *pdev; + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ + u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ + u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ + u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ + u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ + u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ +}; + +/*******************/ +#define MAKE_FAN_MASK_OR_REG(name,type) \ + CPLD_FAN##type##1_##name, \ + CPLD_FAN##type##2_##name, \ + CPLD_FAN##type##3_##name, \ + CPLD_FAN##type##4_##name, \ + CPLD_FAN##type##5_##name, + +/* fan related data + */ +static const u8 fan_info_mask[] = { + MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) +}; + +static const u8 fan_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) +}; + +static const u8 fanr_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) +}; + +/*******************/ +#define DEF_FAN_SET(id) \ + FAN##id##_FAULT, \ + FAN##id##_SPEED, \ + FAN##id##_DUTY_CYCLE, \ + FAN##id##_DIRECTION, \ + FANR##id##_FAULT, \ + FANR##id##_SPEED, + +enum sysfs_fan_attributes { + DEF_FAN_SET(1) + DEF_FAN_SET(2) + DEF_FAN_SET(3) + DEF_FAN_SET(4) + DEF_FAN_SET(5) +}; +/*******************/ +static void 
accton_as5812_54x_fan_update_device(struct device *dev); +static int accton_as5812_54x_fan_read_value(u8 reg); +static int accton_as5812_54x_fan_write_value(u8 reg, u8 value); + +static ssize_t fan_set_duty_cycle(struct device *dev, + struct device_attribute *da,const char *buf, size_t count); +static ssize_t fan_show_value(struct device *dev, + struct device_attribute *da, char *buf); + +extern int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + + +/*******************/ +#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ + fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); + +#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) + +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) +/*******************/ + +#define _MAKE_FAN_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ + &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, + +#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) + +static struct attribute *accton_as5812_54x_fan_attributes[] = { + /* fan related attributes */ + MAKE_FAN_ATTR(PROJECT_NAME,1) + MAKE_FAN_ATTR(PROJECT_NAME,2) + MAKE_FAN_ATTR(PROJECT_NAME,3) + MAKE_FAN_ATTR(PROJECT_NAME,4) + MAKE_FAN_ATTR(PROJECT_NAME,5) + NULL +}; +/*******************/ + +/* fan related functions + */ +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + ssize_t ret = 0; + int data_index, type_index; + + accton_as5812_54x_fan_update_device(dev); + + if (fan_data->valid == 0) { + return ret; + } + + type_index = attr->index%FAN2_FAULT; + data_index = attr->index/FAN2_FAULT; + + switch (type_index) { + case FAN1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DUTY_CYCLE: + ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DIRECTION: + ret = sprintf(buf, "%d\n", 
fan_data->direction[data_index]); /* presnet, need to modify*/ + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + default: + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); + break; + } + + return ret; +} +/*******************/ +static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) { + + int error, value; + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) + return -EINVAL; + + accton_as5812_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); + + fan_data->valid = 0; + + return count; +} + +static const struct attribute_group accton_as5812_54x_fan_group = { + .attrs = accton_as5812_54x_fan_attributes, +}; + +static int accton_as5812_54x_fan_read_value(u8 reg) +{ + return as5812_54x_i2c_cpld_read(0x60, reg); +} + +static int accton_as5812_54x_fan_write_value(u8 reg, u8 value) +{ + return as5812_54x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5812_54x_fan_update_device(struct device *dev) +{ + int speed, r_speed, fault, r_fault, ctrl_speed, direction; + int i; + + mutex_lock(&fan_data->update_lock); + + if (LOCAL_DEBUG) + printk ("Starting accton_as5812_54x_fan update \n"); + + if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { + /* do nothing */ + goto _exit; + } + + fan_data->valid = 0; + + if (LOCAL_DEBUG) + printk ("Starting accton_as5812_54x_fan update 2 \n"); + + fault = accton_as5812_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); + r_fault = accton_as5812_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); + direction = accton_as5812_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); + ctrl_speed = accton_as5812_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); + + if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); + + for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; + if (LOCAL_DEBUG) + printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); + + fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; + fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; + fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; + + /* fan speed + */ + speed = accton_as5812_54x_fan_read_value(fan_speed_reg[i]); + r_speed = accton_as5812_54x_fan_read_value(fanr_speed_reg[i]); + if ( (speed < 0) || (r_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); + + fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; + fan_data->r_speed[i] = r_speed * 
FAN_SPEED_CPLD_TO_RPM_STEP; + } + + /* finish to update */ + fan_data->last_updated = jiffies; + fan_data->valid = 1; + +_exit: + mutex_unlock(&fan_data->update_lock); +} + +static int accton_as5812_54x_fan_probe(struct platform_device *pdev) +{ + int status = -1; + + /* Register sysfs hooks */ + status = sysfs_create_group(&pdev->dev.kobj, &accton_as5812_54x_fan_group); + if (status) { + goto exit; + + } + + fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(fan_data->hwmon_dev)) { + status = PTR_ERR(fan_data->hwmon_dev); + goto exit_remove; + } + + dev_info(&pdev->dev, "accton_as5812_54x_fan\n"); + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &accton_as5812_54x_fan_group); +exit: + return status; +} + +static int accton_as5812_54x_fan_remove(struct platform_device *pdev) +{ + hwmon_device_unregister(fan_data->hwmon_dev); + sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5812_54x_fan_group); + + return 0; +} + +#define DRVNAME "as5812_54x_fan" + +static struct platform_driver accton_as5812_54x_fan_driver = { + .probe = accton_as5812_54x_fan_probe, + .remove = accton_as5812_54x_fan_remove, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5812_54x_fan_init(void) +{ + int ret; + + extern int platform_accton_as5812_54x(void); + if(!platform_accton_as5812_54x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as5812_54x_fan_driver); + if (ret < 0) { + goto exit; + } + + fan_data = kzalloc(sizeof(struct accton_as5812_54x_fan), GFP_KERNEL); + if (!fan_data) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5812_54x_fan_driver); + goto exit; + } + + mutex_init(&fan_data->update_lock); + fan_data->valid = 0; + + fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(fan_data->pdev)) { + ret = PTR_ERR(fan_data->pdev); + platform_driver_unregister(&accton_as5812_54x_fan_driver); + kfree(fan_data); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5812_54x_fan_exit(void) +{ + platform_device_unregister(fan_data->pdev); + platform_driver_unregister(&accton_as5812_54x_fan_driver); + kfree(fan_data); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5812_54x_fan driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_as5812_54x_fan_init); +module_exit(accton_as5812_54x_fan_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-leds.c new file mode 100644 index 00000000..b7018683 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-leds.c @@ -0,0 +1,597 @@ +/* + * A LED driver for the accton_as5812_54x_led + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include + +extern int as5812_54x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "as5812_54x_led" + +struct accton_as5812_54x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED + 1 = PSU1/PSU2 LED + 2 = FAN1-4 LED + 3 = FAN5-6 LED */ +}; + +static struct accton_as5812_54x_led_data *ledctl = NULL; + +/* LED related data + */ +#define LED_TYPE_PSU1_REG_MASK 0x03 +#define LED_MODE_PSU1_GREEN_MASK 0x02 +#define LED_MODE_PSU1_AMBER_MASK 0x01 +#define LED_MODE_PSU1_OFF_MASK 0x03 +#define LED_MODE_PSU1_AUTO_MASK 0x00 + +#define LED_TYPE_PSU2_REG_MASK 0x0C +#define LED_MODE_PSU2_GREEN_MASK 0x08 +#define LED_MODE_PSU2_AMBER_MASK 0x04 +#define LED_MODE_PSU2_OFF_MASK 0x0C +#define LED_MODE_PSU2_AUTO_MASK 0x00 + +#define LED_TYPE_DIAG_REG_MASK 0x0C +#define LED_MODE_DIAG_GREEN_MASK 0x08 +#define LED_MODE_DIAG_AMBER_MASK 0x04 +#define LED_MODE_DIAG_OFF_MASK 0x0C + +#define LED_TYPE_FAN_REG_MASK 0x03 +#define LED_MODE_FAN_GREEN_MASK 0x02 +#define LED_MODE_FAN_AMBER_MASK 0x01 +#define LED_MODE_FAN_OFF_MASK 0x03 +#define LED_MODE_FAN_AUTO_MASK 0x00 + +#define LED_TYPE_FAN1_REG_MASK 0x03 +#define LED_TYPE_FAN2_REG_MASK 0x0C +#define LED_TYPE_FAN3_REG_MASK 0x30 +#define LED_TYPE_FAN4_REG_MASK 0xC0 +#define LED_TYPE_FAN5_REG_MASK 0x03 +#define LED_TYPE_FAN6_REG_MASK 0x0C + +#define LED_MODE_FANX_GREEN_MASK 0x01 +#define LED_MODE_FANX_RED_MASK 0x02 +#define LED_MODE_FANX_OFF_MASK 0x00 + +#define LED_TYPE_LOC_REG_MASK 0x30 +#define LED_MODE_LOC_ON_MASK 0x00 +#define LED_MODE_LOC_OFF_MASK 0x10 +#define LED_MODE_LOC_BLINK_MASK 0x20 + +static const u8 led_reg[] = { + 0xA, /* LOC/DIAG/FAN LED*/ + 0xB, /* PSU1/PSU2 LED */ + 0x16, /* FAN1-4 LED */ + 0x17, /* FAN4-6 LED */ +}; + +enum led_type { + LED_TYPE_PSU1, + LED_TYPE_PSU2, + LED_TYPE_DIAG, + LED_TYPE_FAN, + LED_TYPE_FAN1, + LED_TYPE_FAN2, + LED_TYPE_FAN3, + LED_TYPE_FAN4, + LED_TYPE_FAN5, + LED_TYPE_LOC +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_AMBER, + LED_MODE_RED, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER_BLINK, + LED_MODE_RED_BLINK, + LED_MODE_AUTO, +}; + +struct led_type_mode { + enum led_type type; + int type_mask; + enum led_light_mode mode; + int mode_mask; +}; + +static struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, 
LED_MODE_PSU2_AMBER_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} +}; + + +struct fanx_info_s { + u8 cname; /* device name */ + enum led_type type; + u8 reg_id; /* map to led_reg & reg_val */ +}; + +static struct fanx_info_s fanx_info[] = { + {'1', LED_TYPE_FAN1, 2}, + {'2', LED_TYPE_FAN2, 2}, + {'3', LED_TYPE_FAN3, 2}, + {'4', LED_TYPE_FAN4, 2}, + {'5', LED_TYPE_FAN5, 3} +}; + +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if ((led_type_mode_data[i].type_mask & reg_val) == + led_type_mode_data[i].mode_mask) + { + return led_type_mode_data[i].mode; + } + } + + return 0; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + reg_val = led_type_mode_data[i].mode_mask | + (reg_val & (~led_type_mode_data[i].type_mask)); + } + + return reg_val; +} + +static int accton_as5812_54x_led_read_value(u8 reg) +{ + return as5812_54x_i2c_cpld_read(0x60, reg); +} + +static int accton_as5812_54x_led_write_value(u8 reg, u8 value) +{ + return 
as5812_54x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5812_54x_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as5812_54x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as5812_54x_led_read_value(led_reg[i]); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5812_54x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as5812_54x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as5812_54x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5812_54x_led_psu_1_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); +} + +static enum led_brightness accton_as5812_54x_led_psu_1_get(struct led_classdev *cdev) +{ + accton_as5812_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); +} + +static void accton_as5812_54x_led_psu_2_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); +} + +static enum led_brightness accton_as5812_54x_led_psu_2_get(struct led_classdev *cdev) +{ + accton_as5812_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); +} + +static void accton_as5812_54x_led_fan_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); +} + +static enum led_brightness accton_as5812_54x_led_fan_get(struct led_classdev *cdev) +{ + accton_as5812_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); +} + + +static void accton_as5812_54x_led_fanx_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (led_cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); + return; + } + } +} + + +static enum led_brightness accton_as5812_54x_led_fanx_get(struct led_classdev *cdev) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as5812_54x_led_update(); + return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); + } + } + + + return led_reg_val_to_light_mode(LED_TYPE_FAN1, 
ledctl->reg_val[2]); +} + + +static void accton_as5812_54x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); +} + +static enum led_brightness accton_as5812_54x_led_diag_get(struct led_classdev *cdev) +{ + accton_as5812_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as5812_54x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); +} + +static enum led_brightness accton_as5812_54x_led_loc_get(struct led_classdev *cdev) +{ + accton_as5812_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static struct led_classdev accton_as5812_54x_leds[] = { + [LED_TYPE_PSU1] = { + .name = "accton_as5812_54x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_psu_1_set, + .brightness_get = accton_as5812_54x_led_psu_1_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as5812_54x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_psu_2_set, + .brightness_get = accton_as5812_54x_led_psu_2_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN] = { + .name = "accton_as5812_54x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_fan_set, + .brightness_get = accton_as5812_54x_led_fan_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN1] = { + .name = "accton_as5812_54x_led::fan1", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_fanx_set, + .brightness_get = accton_as5812_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN2] = { + .name = "accton_as5812_54x_led::fan2", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_fanx_set, + .brightness_get = accton_as5812_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN3] = { + .name = "accton_as5812_54x_led::fan3", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_fanx_set, + .brightness_get = accton_as5812_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN4] = { + .name = "accton_as5812_54x_led::fan4", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_fanx_set, + .brightness_get = accton_as5812_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN5] = { + .name = "accton_as5812_54x_led::fan5", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_fanx_set, + .brightness_get = accton_as5812_54x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_DIAG] = { + .name = "accton_as5812_54x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_diag_set, + .brightness_get = accton_as5812_54x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_LOC] = { + .name = "accton_as5812_54x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as5812_54x_led_loc_set, + .brightness_get = accton_as5812_54x_led_loc_get, + .flags = 
LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as5812_54x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { + led_classdev_suspend(&accton_as5812_54x_leds[i]); + } + + return 0; +} + +static int accton_as5812_54x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { + led_classdev_resume(&accton_as5812_54x_leds[i]); + } + + return 0; +} + +static int accton_as5812_54x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as5812_54x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as5812_54x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as5812_54x_leds[i]); + } + } + + return ret; +} + +static int accton_as5812_54x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { + led_classdev_unregister(&accton_as5812_54x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as5812_54x_led_driver = { + .probe = accton_as5812_54x_led_probe, + .remove = accton_as5812_54x_led_remove, + .suspend = accton_as5812_54x_led_suspend, + .resume = accton_as5812_54x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5812_54x_led_init(void) +{ + int ret; + + extern int platform_accton_as5812_54x(void); + if(!platform_accton_as5812_54x()) { + return -ENODEV; + } + ret = platform_driver_register(&accton_as5812_54x_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as5812_54x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5812_54x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as5812_54x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5812_54x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as5812_54x_led_driver); + kfree(ledctl); +} + +module_init(accton_as5812_54x_led_init); +module_exit(accton_as5812_54x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5812_54x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-psu.c new file mode 100644 index 00000000..0d299807 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-psu.c @@ -0,0 +1,294 @@ +/* + * An hwmon driver for accton as5812_54x Power Module + * + * Copyright (C) 2015 Accton Technology Corporation. 
+ * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as5812_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5812_54x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[14]; /* Model name, read from eeprom */ +}; + +static struct as5812_54x_psu_data *as5812_54x_psu_update_device(struct device *dev); + +enum as5812_54x_psu_sysfs_attributes { + PSU_INDEX, + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as5812_54x_psu_attributes[] = { + &sensor_dev_attr_psu_index.dev_attr.attr, + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_index(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54x_psu_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->index); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5812_54x_psu_data *data = as5812_54x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((data->index - 1) * 4) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct 
as5812_54x_psu_data *data = as5812_54x_psu_update_device(dev); + + return sprintf(buf, "%s", data->model_name); +} + +static const struct attribute_group as5812_54x_psu_group = { + .attrs = as5812_54x_psu_attributes, +}; + +static int as5812_54x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5812_54x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5812_54x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as5812_54x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x38 || client->addr == 0x50) { + data->index = 1; + } + else if (client->addr == 0x3b || client->addr == 0x53) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5812_54x_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5812_54x_psu_remove(struct i2c_client *client) +{ + struct as5812_54x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5812_54x_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as5812_54x_psu_id[] = { + { "as5812_54x_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as5812_54x_psu_id); + +static struct i2c_driver as5812_54x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5812_54x_psu", + }, + .probe = as5812_54x_psu_probe, + .remove = as5812_54x_psu_remove, + .id_table = as5812_54x_psu_id, + .address_list = normal_i2c, +}; + +static int as5812_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as5812_54x_psu_data *as5812_54x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + + dev_dbg(&client->dev, "Starting as5812_54x update\n"); + + /* Read model name */ + if (client->addr == 0x38 || client->addr == 0x3b) { + /* AC power */ + status = as5812_54x_psu_read_block(client, 0x26, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + else { + /* DC power */ + status = as5812_54x_psu_read_block(client, 0x50, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + + /* Read psu status */ + status = as5812_54x_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + 
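+			/*
+			 * On a CPLD read error only a debug message is logged;
+			 * the previously cached status byte is kept and 'valid'
+			 * is still set below, so stale presence/power-good data
+			 * may be reported until the next refresh interval.
+			 */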
dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as5812_54x_psu_init(void) +{ + extern int platform_accton_as5812_54x(void); + if(!platform_accton_as5812_54x()) { + return -ENODEV; + } + return i2c_add_driver(&as5812_54x_psu_driver); +} + +static void __exit as5812_54x_psu_exit(void) +{ + i2c_del_driver(&as5812_54x_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5812_54x_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as5812_54x_psu_init); +module_exit(as5812_54x_psu_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-sfp.c new file mode 100644 index 00000000..44727e22 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-sfp.c @@ -0,0 +1,508 @@ +/* + * An hwmon driver for accton as5812_54x sfp + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NUM_OF_SFP_PORT 54 +#define BIT_INDEX(i) (1ULL << (i)) + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5812_54x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u64 status[4]; /* bit0:port0, bit1:port1 and so on */ + /* index 0 => is_present + 1 => tx_fail + 2 => tx_disable + 3 => rx_loss */ +}; + +/* The table maps active port to cpld port. + * Array index 0 is for active port 1, + * index 1 for active port 2, and so on. + * The array content implies cpld port index. 
+ */ +static const u8 cpld_to_front_port_table[] = +{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 52, 50, 53, 51, 54}; + +#define CPLD_PORT_TO_FRONT_PORT(port) (cpld_to_front_port_table[port]) + +static struct as5812_54x_sfp_data *as5812_54x_sfp_update_device(struct device *dev, int update_eeprom); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +extern int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +enum as5812_54x_sfp_sysfs_attributes { + SFP_IS_PRESENT, + SFP_TX_FAULT, + SFP_TX_DISABLE, + SFP_RX_LOSS, + SFP_PORT_NUMBER, + SFP_EEPROM, + SFP_RX_LOS_ALL, + SFP_IS_PRESENT_ALL, +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_status, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, show_status, NULL, SFP_TX_FAULT); +static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, show_status, set_tx_disable, SFP_TX_DISABLE); +static SENSOR_DEVICE_ATTR(sfp_rx_loss, S_IRUGO, show_status,NULL, SFP_RX_LOSS); +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); +static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, show_status,NULL, SFP_RX_LOS_ALL); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_status,NULL, SFP_IS_PRESENT_ALL); + +static struct attribute *as5812_54x_sfp_attributes[] = { + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, + &sensor_dev_attr_sfp_rx_loss.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los_all.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5812_54x_sfp_data *data; + u8 val; + int values[7]; + + /* Error-check the CPLD read results. */ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ + do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ + } while(0) + + if(attr->index == SFP_RX_LOS_ALL) { + /* + * Report the RX_LOS status for all ports. + * This does not depend on the currently active SFP selector. 
+ */ + + /* RX_LOS Ports 1-8 */ + VALIDATED_READ(buf, values[0], as5812_54x_i2c_cpld_read(0x61, 0x0F), 0); + /* RX_LOS Ports 9-16 */ + VALIDATED_READ(buf, values[1], as5812_54x_i2c_cpld_read(0x61, 0x10), 0); + /* RX_LOS Ports 17-24 */ + VALIDATED_READ(buf, values[2], as5812_54x_i2c_cpld_read(0x61, 0x11), 0); + /* RX_LOS Ports 25-32 */ + VALIDATED_READ(buf, values[3], as5812_54x_i2c_cpld_read(0x62, 0x0F), 0); + /* RX_LOS Ports 33-40 */ + VALIDATED_READ(buf, values[4], as5812_54x_i2c_cpld_read(0x62, 0x10), 0); + /* RX_LOS Ports 41-48 */ + VALIDATED_READ(buf, values[5], as5812_54x_i2c_cpld_read(0x62, 0x11), 0); + + /** Return values 1 -> 48 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], + values[3], values[4], values[5]); + } + + if(attr->index == SFP_IS_PRESENT_ALL) { + /* + * Report the SFP_PRESENCE status for all ports. + * This does not depend on the currently active SFP selector. + */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], as5812_54x_i2c_cpld_read(0x61, 0x6), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], as5812_54x_i2c_cpld_read(0x61, 0x7), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], as5812_54x_i2c_cpld_read(0x61, 0x8), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], as5812_54x_i2c_cpld_read(0x62, 0x6), 1); + /* SFP_PRESENT Ports 33-40 */ + VALIDATED_READ(buf, values[4], as5812_54x_i2c_cpld_read(0x62, 0x7), 1); + /* SFP_PRESENT Ports 41-48 */ + VALIDATED_READ(buf, values[5], as5812_54x_i2c_cpld_read(0x62, 0x8), 1); + /* QSFP_PRESENT Ports 49-54 */ + VALIDATED_READ(buf, values[6], as5812_54x_i2c_cpld_read(0x62, 0x14), 1); + + /* Return values 1 -> 54 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], + values[3], values[4], values[5], + values[6] & 0x3F); + } + /* + * The remaining attributes are gathered on a per-selected-sfp basis. + */ + data = as5812_54x_sfp_update_device(dev, 0); + if (attr->index == SFP_IS_PRESENT) { + val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 0 : 1; + } + else { + val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 
1 : 0; + } + + return sprintf(buf, "%d", val); +} + +static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); + unsigned short cpld_addr = 0; + u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; + long disable; + int error; + + /* Tx disable is not supported for QSFP ports(49-54) */ + if (data->port >= 48) { + return -EINVAL; + } + + error = kstrtol(buf, 10, &disable); + if (error) { + return error; + } + + mutex_lock(&data->update_lock); + + if(data->port < 24) { + cpld_addr = 0x61; + cpld_reg = 0xC + data->port / 8; + cpld_bit = 1 << (data->port % 8); + } + else { + cpld_addr = 0x62; + cpld_reg = 0xC + (data->port - 24) / 8; + cpld_bit = 1 << (data->port % 8); + } + + cpld_val = as5812_54x_i2c_cpld_read(cpld_addr, cpld_reg); + + /* Update tx_disable status */ + if (disable) { + data->status[SFP_TX_DISABLE] |= BIT_INDEX(data->port); + cpld_val |= cpld_bit; + } + else { + data->status[SFP_TX_DISABLE] &= ~BIT_INDEX(data->port); + cpld_val &= ~cpld_bit; + } + + as5812_54x_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); + + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as5812_54x_sfp_data *data = as5812_54x_sfp_update_device(dev, 1); + + if (!data->valid) { + return 0; + } + + if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) != 0) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as5812_54x_sfp_group = { + .attrs = as5812_54x_sfp_attributes, +}; + +static int as5812_54x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5812_54x_sfp_data *data; + int status; + + extern int platform_accton_as5812_54x(void); + if(!platform_accton_as5812_54x()) { + return -ENODEV; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5812_54x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as5812_54x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5812_54x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5812_54x_sfp_remove(struct i2c_client *client) +{ + struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5812_54x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as5812_54x_sfp1, as5812_54x_sfp2, as5812_54x_sfp3, as5812_54x_sfp4, +as5812_54x_sfp5, as5812_54x_sfp6, as5812_54x_sfp7, as5812_54x_sfp8, +as5812_54x_sfp9, as5812_54x_sfp10, as5812_54x_sfp11,as5812_54x_sfp12, +as5812_54x_sfp13, as5812_54x_sfp14, as5812_54x_sfp15,as5812_54x_sfp16, +as5812_54x_sfp17, as5812_54x_sfp18, 
as5812_54x_sfp19,as5812_54x_sfp20, +as5812_54x_sfp21, as5812_54x_sfp22, as5812_54x_sfp23,as5812_54x_sfp24, +as5812_54x_sfp25, as5812_54x_sfp26, as5812_54x_sfp27,as5812_54x_sfp28, +as5812_54x_sfp29, as5812_54x_sfp30, as5812_54x_sfp31,as5812_54x_sfp32, +as5812_54x_sfp33, as5812_54x_sfp34, as5812_54x_sfp35,as5812_54x_sfp36, +as5812_54x_sfp37, as5812_54x_sfp38, as5812_54x_sfp39,as5812_54x_sfp40, +as5812_54x_sfp41, as5812_54x_sfp42, as5812_54x_sfp43,as5812_54x_sfp44, +as5812_54x_sfp45, as5812_54x_sfp46, as5812_54x_sfp47,as5812_54x_sfp48, +as5812_54x_sfp49, as5812_54x_sfp52, as5812_54x_sfp50,as5812_54x_sfp53, +as5812_54x_sfp51, as5812_54x_sfp54 +}; + +static const struct i2c_device_id as5812_54x_sfp_id[] = { +{ "as5812_54x_sfp1", as5812_54x_sfp1 }, { "as5812_54x_sfp2", as5812_54x_sfp2 }, +{ "as5812_54x_sfp3", as5812_54x_sfp3 }, { "as5812_54x_sfp4", as5812_54x_sfp4 }, +{ "as5812_54x_sfp5", as5812_54x_sfp5 }, { "as5812_54x_sfp6", as5812_54x_sfp6 }, +{ "as5812_54x_sfp7", as5812_54x_sfp7 }, { "as5812_54x_sfp8", as5812_54x_sfp8 }, +{ "as5812_54x_sfp9", as5812_54x_sfp9 }, { "as5812_54x_sfp10", as5812_54x_sfp10 }, +{ "as5812_54x_sfp11", as5812_54x_sfp11 }, { "as5812_54x_sfp12", as5812_54x_sfp12 }, +{ "as5812_54x_sfp13", as5812_54x_sfp13 }, { "as5812_54x_sfp14", as5812_54x_sfp14 }, +{ "as5812_54x_sfp15", as5812_54x_sfp15 }, { "as5812_54x_sfp16", as5812_54x_sfp16 }, +{ "as5812_54x_sfp17", as5812_54x_sfp17 }, { "as5812_54x_sfp18", as5812_54x_sfp18 }, +{ "as5812_54x_sfp19", as5812_54x_sfp19 }, { "as5812_54x_sfp20", as5812_54x_sfp20 }, +{ "as5812_54x_sfp21", as5812_54x_sfp21 }, { "as5812_54x_sfp22", as5812_54x_sfp22 }, +{ "as5812_54x_sfp23", as5812_54x_sfp23 }, { "as5812_54x_sfp24", as5812_54x_sfp24 }, +{ "as5812_54x_sfp25", as5812_54x_sfp25 }, { "as5812_54x_sfp26", as5812_54x_sfp26 }, +{ "as5812_54x_sfp27", as5812_54x_sfp27 }, { "as5812_54x_sfp28", as5812_54x_sfp28 }, +{ "as5812_54x_sfp29", as5812_54x_sfp29 }, { "as5812_54x_sfp30", as5812_54x_sfp30 }, +{ "as5812_54x_sfp31", as5812_54x_sfp31 }, { "as5812_54x_sfp32", as5812_54x_sfp32 }, +{ "as5812_54x_sfp33", as5812_54x_sfp33 }, { "as5812_54x_sfp34", as5812_54x_sfp34 }, +{ "as5812_54x_sfp35", as5812_54x_sfp35 }, { "as5812_54x_sfp36", as5812_54x_sfp36 }, +{ "as5812_54x_sfp37", as5812_54x_sfp37 }, { "as5812_54x_sfp38", as5812_54x_sfp38 }, +{ "as5812_54x_sfp39", as5812_54x_sfp39 }, { "as5812_54x_sfp40", as5812_54x_sfp40 }, +{ "as5812_54x_sfp41", as5812_54x_sfp41 }, { "as5812_54x_sfp42", as5812_54x_sfp42 }, +{ "as5812_54x_sfp43", as5812_54x_sfp43 }, { "as5812_54x_sfp44", as5812_54x_sfp44 }, +{ "as5812_54x_sfp45", as5812_54x_sfp45 }, { "as5812_54x_sfp46", as5812_54x_sfp46 }, +{ "as5812_54x_sfp47", as5812_54x_sfp47 }, { "as5812_54x_sfp48", as5812_54x_sfp48 }, +{ "as5812_54x_sfp49", as5812_54x_sfp49 }, { "as5812_54x_sfp50", as5812_54x_sfp50 }, +{ "as5812_54x_sfp51", as5812_54x_sfp51 }, { "as5812_54x_sfp52", as5812_54x_sfp52 }, +{ "as5812_54x_sfp53", as5812_54x_sfp53 }, { "as5812_54x_sfp54", as5812_54x_sfp54 }, + +{} +}; +MODULE_DEVICE_TABLE(i2c, as5812_54x_sfp_id); + +static struct i2c_driver as5812_54x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5812_54x_sfp", + }, + .probe = as5812_54x_sfp_probe, + .remove = as5812_54x_sfp_remove, + .id_table = as5812_54x_sfp_id, + .address_list = normal_i2c, +}; + +static int as5812_54x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) +{ + int result = i2c_smbus_read_byte_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, 
command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u8)result; + result = 0; + +abort: + return result; +} + +#define ALWAYS_UPDATE_DEVICE 1 + +static struct as5812_54x_sfp_data *as5812_54x_sfp_update_device(struct device *dev, int update_eeprom) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (ALWAYS_UPDATE_DEVICE || time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0, j = 0; + + data->valid = 0; + //dev_dbg(&client->dev, "Starting as5812_54x sfp status update\n"); + memset(data->status, 0, sizeof(data->status)); + + /* Read status of port 1~48(SFP port) */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 12; j++) { + status = as5812_54x_i2c_cpld_read(0x61+i, 0x6+j); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + goto exit; + } + + data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); + } + } + + /* + * Bring QSFPs out of reset, + * This is a temporary fix until the QSFP+_MOD_RST register + * can be exposed through the driver. + */ + as5812_54x_i2c_cpld_write(0x62, 0x15, 0x3F); + + /* Read present status of port 49-54(QSFP port) */ + status = as5812_54x_i2c_cpld_read(0x62, 0x14); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); + } + else { + data->status[SFP_IS_PRESENT] |= (u64)status << 48; + } + + if (update_eeprom) { + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) == 0) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom); i++) { + status = as5812_54x_sfp_read_byte(client, i, data->eeprom + i); + + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", + CPLD_PORT_TO_FRONT_PORT(data->port)); + goto exit; + } + } + } + } + + data->valid = 1; + data->last_updated = jiffies; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +module_i2c_driver(as5812_54x_sfp_driver); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5812_54x_sfp driver"); +MODULE_LICENSE("GPL"); From 98b33a3a4bbd1c5916ba1a618acf37620a7fe4e2 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:36:38 +0000 Subject: [PATCH 233/255] AS6812-32X Kernel Modules. 
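Before moving on to the AS6812-32X modules, a quick note on consuming the AS5812-54X SFP driver above: its sfp_is_present_all attribute prints seven space-separated hex bytes, one bit per port, and the driver has already inverted the active-low CPLD value, so a set bit means a module is present. The short user-space sketch below parses that output. The sysfs path is only an illustrative assumption (the real path depends on which i2c bus and address the client is instantiated at), and the last byte uses the driver's 49-54 QSFP labeling rather than the front-panel remapping table.

/*
 * Minimal user-space sketch (not part of the patches in this series):
 * parse the space-separated hex bytes reported by sfp_is_present_all
 * and print which ports report a module present.
 */
#include <stdio.h>

int main(void)
{
    /* Assumed path; adjust to wherever the sfp client is instantiated. */
    const char *path = "/sys/bus/i2c/devices/4-0050/sfp_is_present_all";
    unsigned int v[7];
    FILE *fp = fopen(path, "r");

    if (!fp) {
        perror(path);
        return 1;
    }
    if (fscanf(fp, "%x %x %x %x %x %x %x",
               &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6]) != 7) {
        fprintf(stderr, "unexpected format in %s\n", path);
        fclose(fp);
        return 1;
    }
    fclose(fp);

    /* Bytes 0-5 cover SFP ports 1-48, eight ports per byte, LSB first;
     * byte 6 carries the six QSFP bits the driver labels as ports 49-54.
     * A set bit means "present" because the driver inverts the CPLD value. */
    for (int byte = 0; byte < 7; byte++) {
        int nbits = (byte == 6) ? 6 : 8;
        for (int bit = 0; bit < nbits; bit++) {
            if (v[byte] & (1u << bit))
                printf("port %d: present\n", byte * 8 + bit + 1);
        }
    }
    return 0;
}

The same parsing loop works for sfp_rx_los_all, except that attribute emits only six bytes (SFP ports 1-48) and a set bit reflects the raw RX_LOS register bit rather than presence.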
--- .../x86-64-accton-as6812-32x/modules/Makefile | 1 + .../x86-64-accton-as6812-32x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as6812-32x-cpld.c | 390 +++++++++++ .../builds/x86-64-accton-as6812-32x-fan.c | 434 ++++++++++++ .../builds/x86-64-accton-as6812-32x-leds.c | 617 ++++++++++++++++++ .../builds/x86-64-accton-as6812-32x-psu.c | 305 +++++++++ .../builds/x86-64-accton-as6812-32x-sfp.c | 372 +++++++++++ 9 files changed, 2126 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/PKG.yml new file mode 100644 index 00000000..44e45136 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as6812-32x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile new file mode 100644 index 00000000..81d692dd --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as6812-32x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c new file mode 100644 index 00000000..dafa5e1b --- /dev/null +++ 
b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c @@ -0,0 +1,390 @@ +/* + * I2C multiplexer for accton as6812 CPLD + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This module supports the accton cpld that hold the channel select + * mechanism for other i2c slave devices, such as SFP. + * This includes the: + * Accton as6812_32x CPLD1/CPLD2/CPLD3 + * + * Based on: + * pca954x.c from Kumar Gala + * Copyright (C) 2006 + * + * Based on: + * pca954x.c from Ken Harrenstien + * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) + * + * Based on: + * i2c-virtual_cb.c from Brian Kuschak + * and + * pca9540.c from Jean Delvare . + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct dmi_system_id as6812_dmi_table[] = { + { + .ident = "Accton AS6812", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), + }, + }, + { + .ident = "Accton AS6812", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), + }, + }, +}; + +int platform_accton_as6812_32x(void) +{ + return dmi_check_system(as6812_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as6812_32x); + +#define NUM_OF_CPLD1_CHANS 0x0 +#define NUM_OF_CPLD2_CHANS 0x10 +#define NUM_OF_CPLD3_CHANS 0x10 +#define NUM_OF_ALL_CPLD_CHANS (NUM_OF_CPLD2_CHANS + NUM_OF_CPLD3_CHANS) +#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS + +static LIST_HEAD(cpld_client_list); +static struct mutex list_lock; + +struct cpld_client_node { + struct i2c_client *client; + struct list_head list; +}; + +enum cpld_mux_type { + as6812_32x_cpld2, + as6812_32x_cpld3, + as6812_32x_cpld1 +}; + +struct accton_i2c_cpld_mux { + enum cpld_mux_type type; + struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; + u8 last_chan; /* last register value */ +}; + +struct chip_desc { + u8 nchans; + u8 deselectChan; +}; + +/* Provide specs for the PCA954x types we know about */ +static const struct chip_desc chips[] = { + [as6812_32x_cpld1] = { + .nchans = NUM_OF_CPLD1_CHANS, + .deselectChan = NUM_OF_CPLD1_CHANS, + }, + [as6812_32x_cpld2] = { + .nchans = NUM_OF_CPLD2_CHANS, + .deselectChan = NUM_OF_CPLD2_CHANS, + }, + [as6812_32x_cpld3] = { + .nchans = NUM_OF_CPLD3_CHANS, + .deselectChan = NUM_OF_CPLD3_CHANS, + } +}; + +static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { + { "as6812_32x_cpld1", as6812_32x_cpld1 }, + { "as6812_32x_cpld2", as6812_32x_cpld2 }, + { "as6812_32x_cpld3", as6812_32x_cpld3 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); + +/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() + for this as they will try to lock adapter a second time */ +static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, + struct i2c_client *client, u8 val) +{ + unsigned long orig_jiffies; + unsigned short flags; + union i2c_smbus_data data; + int try; + s32 res = -EIO; + + data.byte = val; + flags = client->flags; + flags &= I2C_M_TEN | I2C_CLIENT_PEC; + + if (adap->algo->smbus_xfer) { + /* Retry automatically on arbitration loss */ + orig_jiffies = jiffies; + for (res = 0, try = 0; try <= adap->retries; try++) { + res = adap->algo->smbus_xfer(adap, client->addr, flags, + I2C_SMBUS_WRITE, 0x2, + I2C_SMBUS_BYTE_DATA, &data); + if (res != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + adap->timeout)) + break; + } + } + + return res; +} + +static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + u8 regval; + int ret = 0; + regval = chan; + + /* Only select the channel if its different from the last channel */ + if (data->last_chan != regval) { + ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); + data->last_chan = regval; + } + + return ret; +} + +static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, + void *client, u32 chan) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + + /* Deselect active channel */ + data->last_chan = chips[data->type].deselectChan; + + return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); +} + +static void accton_i2c_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + + if (!node) { + dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +static void accton_i2c_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + + mutex_unlock(&list_lock); +} + +static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) +{ + u8 reg = 0x1; + struct i2c_client *client; + int len; + + client = to_i2c_client(dev); + len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); + + return len; +} + +static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); + +/* + * I2C init/probing/exit functions + */ +static int accton_i2c_cpld_mux_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); + int chan=0; + struct accton_i2c_cpld_mux *data; + int ret = -ENODEV; + + if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) + goto err; + + data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err; + } + + i2c_set_clientdata(client, data); + + data->type = id->driver_data; + + if (data->type == as6812_32x_cpld2 || data->type == as6812_32x_cpld3) { + data->last_chan = chips[data->type].deselectChan; /* force the first selection */ + + /* Now create an 
adapter for each channel */ + for (chan = 0; chan < chips[data->type].nchans; chan++) { + data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, + I2C_CLASS_HWMON | I2C_CLASS_SPD, + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); + + if (data->virt_adaps[chan] == NULL) { + ret = -ENODEV; + dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); + goto virt_reg_failed; + } + } + + dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", + chan, client->name); + } + + accton_i2c_cpld_add_client(client); + + ret = sysfs_create_file(&client->dev.kobj, &ver.attr); + if (ret) + goto virt_reg_failed; + + return 0; + +virt_reg_failed: + for (chan--; chan >= 0; chan--) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + } + kfree(data); +err: + return ret; +} + +static int accton_i2c_cpld_mux_remove(struct i2c_client *client) +{ + struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); + const struct chip_desc *chip = &chips[data->type]; + int chan; + + sysfs_remove_file(&client->dev.kobj, &ver.attr); + + for (chan = 0; chan < chip->nchans; ++chan) { + if (data->virt_adaps[chan]) { + i2c_del_mux_adapter(data->virt_adaps[chan]); + data->virt_adaps[chan] = NULL; + } + } + + kfree(data); + accton_i2c_cpld_remove_client(client); + + return 0; +} + +int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EPERM; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_read_byte_data(cpld_node->client, reg); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as6812_32x_i2c_cpld_read); + +int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + + mutex_lock(&list_lock); + + list_for_each(list_node, &cpld_client_list) + { + cpld_node = list_entry(list_node, struct cpld_client_node, list); + + if (cpld_node->client->addr == cpld_addr) { + ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); + break; + } + } + + mutex_unlock(&list_lock); + + return ret; +} +EXPORT_SYMBOL(as6812_32x_i2c_cpld_write); + +static struct i2c_driver accton_i2c_cpld_mux_driver = { + .driver = { + .name = "as6812_32x_cpld", + .owner = THIS_MODULE, + }, + .probe = accton_i2c_cpld_mux_probe, + .remove = accton_i2c_cpld_mux_remove, + .id_table = accton_i2c_cpld_mux_id, +}; + +static int __init accton_i2c_cpld_mux_init(void) +{ + mutex_init(&list_lock); + return i2c_add_driver(&accton_i2c_cpld_mux_driver); +} + +static void __exit accton_i2c_cpld_mux_exit(void) +{ + i2c_del_driver(&accton_i2c_cpld_mux_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_i2c_cpld_mux_init); +module_exit(accton_i2c_cpld_mux_exit); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-fan.c new file mode 100644 index 00000000..f0555674 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-fan.c @@ -0,0 +1,434 @@ +/* + * A hwmon 
driver for the Accton as6812 32x fan contrl + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAN_MAX_NUMBER 5 +#define FAN_SPEED_CPLD_TO_RPM_STEP 150 +#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 +#define FAN_DUTY_CYCLE_MIN 0 +#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ + +#define CPLD_REG_FAN_STATUS_OFFSET 0xC +#define CPLD_REG_FANR_STATUS_OFFSET 0x17 +#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E + +#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 +#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 +#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 +#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 +#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 + +#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 +#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 +#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A +#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B +#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C + +#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD + +#define CPLD_FAN1_INFO_BIT_MASK 0x1 +#define CPLD_FAN2_INFO_BIT_MASK 0x2 +#define CPLD_FAN3_INFO_BIT_MASK 0x4 +#define CPLD_FAN4_INFO_BIT_MASK 0x8 +#define CPLD_FAN5_INFO_BIT_MASK 0x10 + +#define PROJECT_NAME + +#define DEBUG_MODE 0 + +#if (DEBUG_MODE == 1) + #define DEBUG_PRINT(format, ...) printk(format, __VA_ARGS__) +#else + #define DEBUG_PRINT(format, ...) 
+#endif + +static struct accton_as6812_32x_fan *fan_data = NULL; + +struct accton_as6812_32x_fan { + struct platform_device *pdev; + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ + u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ + u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ + u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ + u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ + u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ +}; + +/*******************/ +#define MAKE_FAN_MASK_OR_REG(name,type) \ + CPLD_FAN##type##1_##name, \ + CPLD_FAN##type##2_##name, \ + CPLD_FAN##type##3_##name, \ + CPLD_FAN##type##4_##name, \ + CPLD_FAN##type##5_##name, + +/* fan related data + */ +static const u8 fan_info_mask[] = { + MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) +}; + +static const u8 fan_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) +}; + +static const u8 fanr_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) +}; + +/*******************/ +#define DEF_FAN_SET(id) \ + FAN##id##_FAULT, \ + FAN##id##_SPEED, \ + FAN##id##_DUTY_CYCLE, \ + FAN##id##_DIRECTION, \ + FANR##id##_FAULT, \ + FANR##id##_SPEED, + +enum sysfs_fan_attributes { + DEF_FAN_SET(1) + DEF_FAN_SET(2) + DEF_FAN_SET(3) + DEF_FAN_SET(4) + DEF_FAN_SET(5) +}; +/*******************/ +static void accton_as6812_32x_fan_update_device(struct device *dev); +static int accton_as6812_32x_fan_read_value(u8 reg); +static int accton_as6812_32x_fan_write_value(u8 reg, u8 value); + +static ssize_t fan_set_duty_cycle(struct device *dev, + struct device_attribute *da,const char *buf, size_t count); +static ssize_t fan_show_value(struct device *dev, + struct device_attribute *da, char *buf); + +extern int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + + +/*******************/ +#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ + fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); + +#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) + +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) +/*******************/ + +#define _MAKE_FAN_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ + &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ + 
&sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, + +#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) + +static struct attribute *accton_as6812_32x_fan_attributes[] = { + /* fan related attributes */ + MAKE_FAN_ATTR(PROJECT_NAME,1) + MAKE_FAN_ATTR(PROJECT_NAME,2) + MAKE_FAN_ATTR(PROJECT_NAME,3) + MAKE_FAN_ATTR(PROJECT_NAME,4) + MAKE_FAN_ATTR(PROJECT_NAME,5) + NULL +}; +/*******************/ + +/* fan related functions + */ +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + ssize_t ret = 0; + int data_index, type_index; + + accton_as6812_32x_fan_update_device(dev); + + if (fan_data->valid == 0) { + return ret; + } + + type_index = attr->index%FAN2_FAULT; + data_index = attr->index/FAN2_FAULT; + + switch (type_index) { + case FAN1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->status[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DUTY_CYCLE: + ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DIRECTION: + ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); + DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + default: + DEBUG_PRINT("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); + break; + } + + return ret; +} +/*******************/ +static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) { + + int error, value; + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) + return -EINVAL; + + accton_as6812_32x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); + + fan_data->valid = 0; + + return count; +} + +static const struct attribute_group accton_as6812_32x_fan_group = { + .attrs = accton_as6812_32x_fan_attributes, +}; + +static int accton_as6812_32x_fan_read_value(u8 reg) +{ + return as6812_32x_i2c_cpld_read(0x60, reg); +} + +static int accton_as6812_32x_fan_write_value(u8 reg, u8 value) +{ + return as6812_32x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as6812_32x_fan_update_device(struct device *dev) +{ + int speed, r_speed, fault, r_fault, direction, ctrl_speed; + int i; + + mutex_lock(&fan_data->update_lock); + + DEBUG_PRINT("Starting accton_as6812_32x_fan update \n"); + + if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { + /* do nothing */ + goto _exit; + } + + fan_data->valid = 0; + + DEBUG_PRINT("Starting accton_as6812_32x_fan update 2 \n"); + + fault = 
accton_as6812_32x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); + r_fault = accton_as6812_32x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); + direction = accton_as6812_32x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); + ctrl_speed = accton_as6812_32x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); + + if ( (fault < 0) || (r_fault < 0) || (ctrl_speed < 0) ) + { + DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + DEBUG_PRINT("[fan:] fault:%d, r_fault=%d, ctrl_speed=%d \n",fault, r_fault, ctrl_speed); + + for (i = 0; i < FAN_MAX_NUMBER; i++) + { + /* Update fan data + */ + + /* fan fault + * 0: normal, 1:abnormal + * Each FAN-tray module has two fans. + */ + fan_data->status[i] = (fault & fan_info_mask[i]) >> i; + DEBUG_PRINT("[fan%d:] fail=%d \n",i, fan_data->status[i]); + + fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; + fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; + fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; + + /* fan speed + */ + speed = accton_as6812_32x_fan_read_value(fan_speed_reg[i]); + r_speed = accton_as6812_32x_fan_read_value(fanr_speed_reg[i]); + if ( (speed < 0) || (r_speed < 0) ) + { + DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + DEBUG_PRINT("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); + + fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; + fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; + } + + /* finish to update */ + fan_data->last_updated = jiffies; + fan_data->valid = 1; + +_exit: + mutex_unlock(&fan_data->update_lock); +} + +static int accton_as6812_32x_fan_probe(struct platform_device *pdev) +{ + int status = -1; + + /* Register sysfs hooks */ + status = sysfs_create_group(&pdev->dev.kobj, &accton_as6812_32x_fan_group); + if (status) { + goto exit; + + } + + fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(fan_data->hwmon_dev)) { + status = PTR_ERR(fan_data->hwmon_dev); + goto exit_remove; + } + + dev_info(&pdev->dev, "accton_as6812_32x_fan\n"); + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &accton_as6812_32x_fan_group); +exit: + return status; +} + +static int accton_as6812_32x_fan_remove(struct platform_device *pdev) +{ + hwmon_device_unregister(fan_data->hwmon_dev); + sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as6812_32x_fan_group); + + return 0; +} + +#define DRVNAME "as6812_32x_fan" + +static struct platform_driver accton_as6812_32x_fan_driver = { + .probe = accton_as6812_32x_fan_probe, + .remove = accton_as6812_32x_fan_remove, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as6812_32x_fan_init(void) +{ + int ret; + + extern int platform_accton_as6812_32x(void); + if(!platform_accton_as6812_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as6812_32x_fan_driver); + if (ret < 0) { + goto exit; + } + + fan_data = kzalloc(sizeof(struct accton_as6812_32x_fan), GFP_KERNEL); + if (!fan_data) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as6812_32x_fan_driver); + goto exit; + } + + mutex_init(&fan_data->update_lock); + fan_data->valid = 0; + + fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(fan_data->pdev)) { + ret = PTR_ERR(fan_data->pdev); + platform_driver_unregister(&accton_as6812_32x_fan_driver); + kfree(fan_data); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as6812_32x_fan_exit(void) +{ + 
platform_device_unregister(fan_data->pdev); + platform_driver_unregister(&accton_as6812_32x_fan_driver); + kfree(fan_data); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as6812_32x_fan driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_as6812_32x_fan_init); +module_exit(accton_as6812_32x_fan_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-leds.c new file mode 100644 index 00000000..fd54ce06 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-leds.c @@ -0,0 +1,617 @@ +/* + * A LED driver for the accton_as6812_32x_led + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include + +extern int as6812_32x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "as6812_32x_led" + +struct accton_as6812_32x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED + 1 = PSU1/PSU2 LED + 2 = FAN1-4 LED + 3 = FAN5-6 LED */ +}; + +static struct accton_as6812_32x_led_data *ledctl = NULL; + +/* LED related data + */ +#define LED_TYPE_PSU1_REG_MASK 0x03 +#define LED_MODE_PSU1_GREEN_MASK 0x02 +#define LED_MODE_PSU1_AMBER_MASK 0x01 +#define LED_MODE_PSU1_OFF_MASK 0x03 +#define LED_MODE_PSU1_AUTO_MASK 0x00 + +#define LED_TYPE_PSU2_REG_MASK 0x0C +#define LED_MODE_PSU2_GREEN_MASK 0x08 +#define LED_MODE_PSU2_AMBER_MASK 0x04 +#define LED_MODE_PSU2_OFF_MASK 0x0C +#define LED_MODE_PSU2_AUTO_MASK 0x00 + +#define LED_TYPE_DIAG_REG_MASK 0x0C +#define LED_MODE_DIAG_GREEN_MASK 0x08 +#define LED_MODE_DIAG_AMBER_MASK 0x04 +#define LED_MODE_DIAG_OFF_MASK 0x0C +#define LED_MODE_DIAG_BLINK_MASK 0x48 + +#define LED_TYPE_FAN_REG_MASK 0x03 +#define LED_MODE_FAN_GREEN_MASK 0x02 +#define LED_MODE_FAN_AMBER_MASK 0x01 +#define LED_MODE_FAN_OFF_MASK 0x03 +#define LED_MODE_FAN_AUTO_MASK 0x00 + +#define LED_TYPE_FAN1_REG_MASK 0x03 +#define LED_TYPE_FAN2_REG_MASK 0xC0 +#define LED_TYPE_FAN3_REG_MASK 0x30 +#define LED_TYPE_FAN4_REG_MASK 0x0C +#define LED_TYPE_FAN5_REG_MASK 0x03 + +#define LED_MODE_FANX_GREEN_MASK 
0x01 +#define LED_MODE_FANX_RED_MASK 0x02 +#define LED_MODE_FANX_OFF_MASK 0x00 + +#define LED_TYPE_LOC_REG_MASK 0x30 +#define LED_MODE_LOC_ON_MASK 0x00 +#define LED_MODE_LOC_OFF_MASK 0x10 +#define LED_MODE_LOC_BLINK_MASK 0x20 + +static const u8 led_reg[] = { + 0xA, /* LOC/DIAG/FAN LED*/ + 0xB, /* PSU1/PSU2 LED */ + 0xE, /* FAN2-5 LED */ + 0xF, /* FAN1 LED */ +}; + +enum led_type { + LED_TYPE_PSU1, + LED_TYPE_PSU2, + LED_TYPE_DIAG, + LED_TYPE_FAN, + LED_TYPE_FAN1, + LED_TYPE_FAN2, + LED_TYPE_FAN3, + LED_TYPE_FAN4, + LED_TYPE_FAN5, + LED_TYPE_LOC +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_AMBER, + LED_MODE_RED, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER_BLINK, + LED_MODE_RED_BLINK, + LED_MODE_AUTO, +}; + +struct led_type_mode { + enum led_type type; + int type_mask; + enum led_light_mode mode; + int mode_mask; +}; + +static struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, +{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, +{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, +{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, +{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN_BLINK, LED_MODE_DIAG_BLINK_MASK}, +{LED_TYPE_LOC, 
LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} +}; + + +struct fanx_info_s { + u8 cname; /* device name */ + enum led_type type; + u8 reg_id; /* map to led_reg & reg_val */ +}; + +static struct fanx_info_s fanx_info[] = { + {'1', LED_TYPE_FAN1, 3}, + {'2', LED_TYPE_FAN2, 2}, + {'3', LED_TYPE_FAN3, 2}, + {'4', LED_TYPE_FAN4, 2}, + {'5', LED_TYPE_FAN5, 2}, +}; + +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if (type == LED_TYPE_DIAG) + { /* special case : bit 6 - meaning blinking */ + if (0x40 & reg_val) + return LED_MODE_GREEN_BLINK; + } + if ((led_type_mode_data[i].type_mask & reg_val) == + led_type_mode_data[i].mode_mask) + { + return led_type_mode_data[i].mode; + } + } + + return 0; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + if (type == LED_TYPE_DIAG) + { + if (mode == LED_MODE_GREEN_BLINK) + { /* special case : bit 6 - meaning blinking */ + reg_val = 0x48 | (reg_val & ~0x4C); + break; + } + else + { /* for diag led, other case must cancel bit 6 first */ + reg_val = reg_val & ~0x40; + } + } + reg_val = led_type_mode_data[i].mode_mask | + (reg_val & (~led_type_mode_data[i].type_mask)); + break; + } + + return reg_val; +} + +static int accton_as6812_32x_led_read_value(u8 reg) +{ + return as6812_32x_i2c_cpld_read(0x60, reg); +} + +static int accton_as6812_32x_led_write_value(u8 reg, u8 value) +{ + return as6812_32x_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as6812_32x_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as6812_32x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as6812_32x_led_read_value(led_reg[i]); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as6812_32x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as6812_32x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as6812_32x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as6812_32x_led_psu_1_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); +} + +static enum led_brightness accton_as6812_32x_led_psu_1_get(struct led_classdev *cdev) +{ + 
accton_as6812_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); +} + +static void accton_as6812_32x_led_psu_2_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); +} + +static enum led_brightness accton_as6812_32x_led_psu_2_get(struct led_classdev *cdev) +{ + accton_as6812_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); +} + +static void accton_as6812_32x_led_fan_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); +} + +static enum led_brightness accton_as6812_32x_led_fan_get(struct led_classdev *cdev) +{ + accton_as6812_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); +} + + +static void accton_as6812_32x_led_fanx_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (led_cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); + return; + } + } +} + + +static enum led_brightness accton_as6812_32x_led_fanx_get(struct led_classdev *cdev) +{ + enum led_type led_type1; + int reg_id; + int i, nsize; + int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); + + for(i=0;iname); + + if (cdev->name[nsize-1] == fanx_info[i].cname) + { + led_type1 = fanx_info[i].type; + reg_id = fanx_info[i].reg_id; + accton_as6812_32x_led_update(); + return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); + } + } + + + return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); +} + + +static void accton_as6812_32x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); +} + +static enum led_brightness accton_as6812_32x_led_diag_get(struct led_classdev *cdev) +{ + accton_as6812_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as6812_32x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); +} + +static enum led_brightness accton_as6812_32x_led_loc_get(struct led_classdev *cdev) +{ + accton_as6812_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static struct led_classdev accton_as6812_32x_leds[] = { + [LED_TYPE_PSU1] = { + .name = "accton_as6812_32x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_psu_1_set, + .brightness_get = accton_as6812_32x_led_psu_1_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as6812_32x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_psu_2_set, + .brightness_get = accton_as6812_32x_led_psu_2_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN] = { + .name = "accton_as6812_32x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_fan_set, + .brightness_get = accton_as6812_32x_led_fan_get, + .flags = 
LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN1] = { + .name = "accton_as6812_32x_led::fan1", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_fanx_set, + .brightness_get = accton_as6812_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN2] = { + .name = "accton_as6812_32x_led::fan2", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_fanx_set, + .brightness_get = accton_as6812_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN3] = { + .name = "accton_as6812_32x_led::fan3", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_fanx_set, + .brightness_get = accton_as6812_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN4] = { + .name = "accton_as6812_32x_led::fan4", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_fanx_set, + .brightness_get = accton_as6812_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN5] = { + .name = "accton_as6812_32x_led::fan5", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_fanx_set, + .brightness_get = accton_as6812_32x_led_fanx_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_DIAG] = { + .name = "accton_as6812_32x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_diag_set, + .brightness_get = accton_as6812_32x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_LOC] = { + .name = "accton_as6812_32x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as6812_32x_led_loc_set, + .brightness_get = accton_as6812_32x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as6812_32x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { + led_classdev_suspend(&accton_as6812_32x_leds[i]); + } + + return 0; +} + +static int accton_as6812_32x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { + led_classdev_resume(&accton_as6812_32x_leds[i]); + } + + return 0; +} + +static int accton_as6812_32x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as6812_32x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as6812_32x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as6812_32x_leds[j]); + } + } + + return ret; +} + +static int accton_as6812_32x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { + led_classdev_unregister(&accton_as6812_32x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as6812_32x_led_driver = { + .probe = accton_as6812_32x_led_probe, + .remove = accton_as6812_32x_led_remove, + .suspend = accton_as6812_32x_led_suspend, + .resume = accton_as6812_32x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init
accton_as6812_32x_led_init(void) +{ + int ret; + + extern int platform_accton_as6812_32x(void); + if(!platform_accton_as6812_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as6812_32x_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as6812_32x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as6812_32x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as6812_32x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as6812_32x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as6812_32x_led_driver); + kfree(ledctl); +} + +module_init(accton_as6812_32x_led_init); +module_exit(accton_as6812_32x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as6812_32x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-psu.c new file mode 100644 index 00000000..dfee68b1 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-psu.c @@ -0,0 +1,305 @@ +/* + * An hwmon driver for accton as6812_32x Power Module + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as6812_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as6812_32x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[14]; /* Model name, read from eeprom */ +}; + +static struct as6812_32x_psu_data *as6812_32x_psu_update_device(struct device *dev); + +enum as6812_32x_psu_sysfs_attributes { + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as6812_32x_psu_attributes[] = { + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as6812_32x_psu_data *data = as6812_32x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((data->index-1)*4) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = data->status >> ((data->index-1)*4 + 1) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as6812_32x_psu_data *data = as6812_32x_psu_update_device(dev); + + return sprintf(buf, "%s\n", data->model_name); +} + +static const struct attribute_group as6812_32x_psu_group = { + .attrs = as6812_32x_psu_attributes, +}; + +static int as6812_32x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as6812_32x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as6812_32x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as6812_32x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x50 || client->addr == 0x38) { + data->index = 1; + } + else if (client->addr == 0x53 || client->addr == 0x3b) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), 
client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as6812_32x_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as6812_32x_psu_remove(struct i2c_client *client) +{ + struct as6812_32x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as6812_32x_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as6812_32x_psu_id[] = { + { "as6812_32x_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as6812_32x_psu_id); + +static struct i2c_driver as6812_32x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as6812_32x_psu", + }, + .probe = as6812_32x_psu_probe, + .remove = as6812_32x_psu_remove, + .id_table = as6812_32x_psu_id, + .address_list = normal_i2c, +}; + +static int as6812_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = 0; + int retry_count = 5; + + while (retry_count) { + retry_count--; + + result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) { + msleep(10); + continue; + } + + if (unlikely(result != data_len)) { + result = -EIO; + msleep(10); + continue; + } + + result = 0; + break; + } + + return result; +} + +static struct as6812_32x_psu_data *as6812_32x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as6812_32x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status; + int present = 0; + + dev_dbg(&client->dev, "Starting as6812_32x update\n"); + + /* Read psu status */ + status = as6812_32x_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + /* Read model name */ + memset(data->model_name, 0, sizeof(data->model_name)); + present = !(data->status >> ((data->index-1)*4) & 0x1); + + if (present) { + u8 command; + int model_name_len = 0; + + if (client->addr == 0x38 || client->addr == 0x3b) { + /* cpr_4011_4mxx AC power */ + command = 0x26; + model_name_len = 13; + } + else { /* 0x50 & 0x53 */ + /* ym2651 AC power */ + command = 0x20; + model_name_len = 8; + } + + status = as6812_32x_psu_read_block(client,command,data->model_name, + model_name_len); + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[model_name_len] = '\0'; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as6812_32x_psu_init(void) +{ + extern int platform_accton_as6812_32x(void); + if(!platform_accton_as6812_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as6812_32x_psu_driver); +} + +static void __exit as6812_32x_psu_exit(void) +{ + i2c_del_driver(&as6812_32x_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as6812_32x_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as6812_32x_psu_init); +module_exit(as6812_32x_psu_exit); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-sfp.c new file mode 100644 index 00000000..023e949b --- /dev/null +++ 
b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-sfp.c @@ -0,0 +1,372 @@ +/* + * An hwmon driver for accton as6812_32x sfp + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT_INDEX(i) (1ULL << (i)) + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as6812_32x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u64 is_present; /* present status */ +}; + +static struct as6812_32x_sfp_data *as6812_32x_sfp_update_device(struct device *dev, int update_eeprom); +static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf); +static int as6812_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data); +extern int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); +//extern int accton_i2c_cpld_mux_get_index(int adap_index); + +enum as6812_32x_sfp_sysfs_attributes { + SFP_IS_PRESENT, + SFP_EEPROM, + SFP_PORT_NUMBER, + SFP_IS_PRESENT_ALL +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); + +static struct attribute *as6812_32x_sfp_attributes[] = { + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as6812_32x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->port+1); +} + +/* Error-check the CPLD read results. 
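+ * A negative result from the CPLD read short-circuits the show routine and
+ * reports "READ ERROR" through sysfs; otherwise the value is optionally
+ * bit-inverted and masked down to a single byte before it is formatted.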
*/ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ +do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ +} while(0) + +static ssize_t show_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + + if(attr->index == SFP_IS_PRESENT_ALL) { + int values[4]; + /* + * Report the SFP_PRESENCE status for all ports. + */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], as6812_32x_i2c_cpld_read(0x62, 0xA), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], as6812_32x_i2c_cpld_read(0x62, 0xB), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], as6812_32x_i2c_cpld_read(0x64, 0xA), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], as6812_32x_i2c_cpld_read(0x64, 0xB), 1); + + /* Return values 1 -> 32 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], values[3]); + } + else { /* SFP_IS_PRESENT */ + u8 val; + struct as6812_32x_sfp_data *data = as6812_32x_sfp_update_device(dev, 0); + + if (!data->valid) { + return -EIO; + } + + val = (data->is_present & BIT_INDEX(data->port)) ? 0 : 1; + return sprintf(buf, "%d", val); + } +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as6812_32x_sfp_data *data = as6812_32x_sfp_update_device(dev, 1); + + if (!data->valid) { + return 0; + } + + if ((data->is_present & BIT_INDEX(data->port)) != 0) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as6812_32x_sfp_group = { + .attrs = as6812_32x_sfp_attributes, +}; + +static int as6812_32x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as6812_32x_sfp_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, /*I2C_FUNC_SMBUS_BYTE_DATA | */I2C_FUNC_SMBUS_WORD_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as6812_32x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as6812_32x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as6812_32x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as6812_32x_sfp_remove(struct i2c_client *client) +{ + struct as6812_32x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as6812_32x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as6812_32x_sfp1, as6812_32x_sfp2, as6812_32x_sfp3, as6812_32x_sfp4, +as6812_32x_sfp5, as6812_32x_sfp6, as6812_32x_sfp7, as6812_32x_sfp8, +as6812_32x_sfp9, as6812_32x_sfp10, as6812_32x_sfp11,as6812_32x_sfp12, +as6812_32x_sfp13, as6812_32x_sfp14, as6812_32x_sfp15,as6812_32x_sfp16, +as6812_32x_sfp17, as6812_32x_sfp18, 
as6812_32x_sfp19,as6812_32x_sfp20, +as6812_32x_sfp21, as6812_32x_sfp22, as6812_32x_sfp23,as6812_32x_sfp24, +as6812_32x_sfp25, as6812_32x_sfp26, as6812_32x_sfp27,as6812_32x_sfp28, +as6812_32x_sfp29, as6812_32x_sfp30, as6812_32x_sfp31,as6812_32x_sfp32 +}; + +static const struct i2c_device_id as6812_32x_sfp_id[] = { +{ "as6812_32x_sfp1", as6812_32x_sfp1 }, { "as6812_32x_sfp2", as6812_32x_sfp2 }, +{ "as6812_32x_sfp3", as6812_32x_sfp3 }, { "as6812_32x_sfp4", as6812_32x_sfp4 }, +{ "as6812_32x_sfp5", as6812_32x_sfp5 }, { "as6812_32x_sfp6", as6812_32x_sfp6 }, +{ "as6812_32x_sfp7", as6812_32x_sfp7 }, { "as6812_32x_sfp8", as6812_32x_sfp8 }, +{ "as6812_32x_sfp9", as6812_32x_sfp9 }, { "as6812_32x_sfp10", as6812_32x_sfp10 }, +{ "as6812_32x_sfp11", as6812_32x_sfp11 }, { "as6812_32x_sfp12", as6812_32x_sfp12 }, +{ "as6812_32x_sfp13", as6812_32x_sfp13 }, { "as6812_32x_sfp14", as6812_32x_sfp14 }, +{ "as6812_32x_sfp15", as6812_32x_sfp15 }, { "as6812_32x_sfp16", as6812_32x_sfp16 }, +{ "as6812_32x_sfp17", as6812_32x_sfp17 }, { "as6812_32x_sfp18", as6812_32x_sfp18 }, +{ "as6812_32x_sfp19", as6812_32x_sfp19 }, { "as6812_32x_sfp20", as6812_32x_sfp20 }, +{ "as6812_32x_sfp21", as6812_32x_sfp21 }, { "as6812_32x_sfp22", as6812_32x_sfp22 }, +{ "as6812_32x_sfp23", as6812_32x_sfp23 }, { "as6812_32x_sfp24", as6812_32x_sfp24 }, +{ "as6812_32x_sfp25", as6812_32x_sfp25 }, { "as6812_32x_sfp26", as6812_32x_sfp26 }, +{ "as6812_32x_sfp27", as6812_32x_sfp27 }, { "as6812_32x_sfp28", as6812_32x_sfp28 }, +{ "as6812_32x_sfp29", as6812_32x_sfp29 }, { "as6812_32x_sfp30", as6812_32x_sfp30 }, +{ "as6812_32x_sfp31", as6812_32x_sfp31 }, { "as6812_32x_sfp32", as6812_32x_sfp32 }, +{} +}; +MODULE_DEVICE_TABLE(i2c, as6812_32x_sfp_id); + + +static struct i2c_driver as6812_32x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as6812_32x_sfp", + }, + .probe = as6812_32x_sfp_probe, + .remove = as6812_32x_sfp_remove, + .id_table = as6812_32x_sfp_id, + .address_list = normal_i2c, +}; + +#if 0 +static int as6812_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) +{ + int result = i2c_smbus_read_byte_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u8)result; + result = 0; + +abort: + return result; +} +#endif + +static int as6812_32x_sfp_read_word(struct i2c_client *client, u8 command, u16 *data) +{ + int result = i2c_smbus_read_word_data(client, command); + + if (unlikely(result < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u16)result; + result = 0; + +abort: + return result; +} + +#define ALWAYS_UPDATE 1 + +static struct as6812_32x_sfp_data *as6812_32x_sfp_update_device(struct device *dev, int update_eeprom) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as6812_32x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (ALWAYS_UPDATE || time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0, j = 0; + + data->valid = 0; + + /* Read present status of port 1~32 */ + data->is_present = 0; + + for (i = 0; i < 2; i++) { + for (j = 0; j < 2; j++) { + status = as6812_32x_i2c_cpld_read(0x62+i*2, 0xA+j); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x62+i*2, 0xA+j, status); + goto exit; + } + + data->is_present |= (u64)status << ((i*16) + (j*8)); + } + } + + 
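+ /*
+ * Presence bitmap layout implied by the loop above: bits 0-7 come from
+ * CPLD 0x62 reg 0xA (ports 1-8), bits 8-15 from 0x62/0xB (ports 9-16),
+ * bits 16-23 from 0x64/0xA (ports 17-24) and bits 24-31 from 0x64/0xB
+ * (ports 25-32), since each byte is shifted by i*16 + j*8. A cleared bit
+ * means the module is present, matching the `? 0 : 1` tests in
+ * show_present() and show_eeprom().
+ */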
if (update_eeprom) { + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if ((data->is_present & BIT_INDEX(data->port)) == 0) { + /* read eeprom */ + u16 eeprom_data; + for (i = 0; i < (sizeof(data->eeprom) / 2); i++) { + status = as6812_32x_sfp_read_word(client, i*2, &eeprom_data); + + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); + goto exit; + } + + data->eeprom[i*2] = eeprom_data & 0xff; + data->eeprom[i*2 + 1] = eeprom_data >> 8; + } + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +module_i2c_driver(as6812_32x_sfp_driver); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as6812_32x_sfp driver"); +MODULE_LICENSE("GPL"); From 5e30bd16aff461f64a09cba54c3780388a2761d5 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:37:00 +0000 Subject: [PATCH 234/255] AS7712-32X Kernel Modules. --- .../x86-64-accton-as7712-32x/modules/Makefile | 1 + .../x86-64-accton-as7712-32x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as7712-32x-fan.c | 452 ++++++++++++++++++ .../builds/x86-64-accton-as7712-32x-leds.c | 443 +++++++++++++++++ .../builds/x86-64-accton-as7712-32x-psu.c | 293 ++++++++++++ .../builds/x86-64-accton-as7712-32x-sfp.c | 356 ++++++++++++++ 8 files changed, 1552 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/PKG.yml new file mode 100644 index 00000000..787399c5 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as7712-32x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ 
b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile new file mode 100644 index 00000000..3be04999 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as7712-32x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c new file mode 100644 index 00000000..dae9a9e7 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c @@ -0,0 +1,452 @@ +/* + * A hwmon driver for the Accton as7712 32x fan + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "as7712_32x_fan" + +static struct as7712_32x_fan_data *as7712_32x_fan_update_device(struct device *dev); +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +/* fan related data, the index should match sysfs_fan_attributes + */ +static const u8 fan_reg[] = { + 0x0F, /* fan 1-6 present status */ + 0x11, /* fan PWM(for all fan) */ + 0x12, /* front fan 1 speed(rpm) */ + 0x13, /* front fan 2 speed(rpm) */ + 0x14, /* front fan 3 speed(rpm) */ + 0x15, /* front fan 4 speed(rpm) */ + 0x16, /* front fan 5 speed(rpm) */ + 0x17, /* front fan 6 speed(rpm) */ + 0x22, /* rear fan 1 speed(rpm) */ + 0x23, /* rear fan 2 speed(rpm) */ + 0x24, /* rear fan 3 speed(rpm) */ + 0x25, /* rear fan 4 speed(rpm) */ + 0x26, /* rear fan 5 speed(rpm) */ + 0x27, /* rear fan 6 speed(rpm) */ +}; + +/* Each client has this additional data */ +struct as7712_32x_fan_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */ +}; + +enum fan_id { + FAN1_ID, + FAN2_ID, + FAN3_ID, + FAN4_ID, + FAN5_ID, + FAN6_ID +}; + +enum sysfs_fan_attributes { + FAN_PRESENT_REG, + FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ + FAN1_FRONT_SPEED_RPM, + FAN2_FRONT_SPEED_RPM, + FAN3_FRONT_SPEED_RPM, + FAN4_FRONT_SPEED_RPM, + FAN5_FRONT_SPEED_RPM, + FAN6_FRONT_SPEED_RPM, + FAN1_REAR_SPEED_RPM, + FAN2_REAR_SPEED_RPM, + FAN3_REAR_SPEED_RPM, + FAN4_REAR_SPEED_RPM, + FAN5_REAR_SPEED_RPM, + FAN6_REAR_SPEED_RPM, + FAN1_PRESENT, + FAN2_PRESENT, + FAN3_PRESENT, + FAN4_PRESENT, + FAN5_PRESENT, + FAN6_PRESENT, + FAN1_FAULT, + FAN2_FAULT, + FAN3_FAULT, + FAN4_FAULT, + FAN5_FAULT, + FAN6_FAULT +}; + +/* Define attributes + */ +#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT) +#define DECLARE_FAN_FAULT_ATTR(index) &sensor_dev_attr_fan##index##_fault.dev_attr.attr + +#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION) +#define DECLARE_FAN_DIRECTION_ATTR(index) &sensor_dev_attr_fan##index##_direction.dev_attr.attr + +#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE) +#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr + +#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT) +#define DECLARE_FAN_PRESENT_ATTR(index) &sensor_dev_attr_fan##index##_present.dev_attr.attr + +#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_FRONT_SPEED_RPM);\ + static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, 
FAN##index##_REAR_SPEED_RPM) +#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr + +/* 6 fan fault attributes in this platform */ +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); +/* 6 fan speed(rpm) attributes in this platform */ +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); +/* 6 fan present attributes in this platform */ +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); +/* 1 fan duty cycle attribute in this platform */ +DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); + +static struct attribute *as7712_32x_fan_attributes[] = { + /* fan related attributes */ + DECLARE_FAN_FAULT_ATTR(1), + DECLARE_FAN_FAULT_ATTR(2), + DECLARE_FAN_FAULT_ATTR(3), + DECLARE_FAN_FAULT_ATTR(4), + DECLARE_FAN_FAULT_ATTR(5), + DECLARE_FAN_FAULT_ATTR(6), + DECLARE_FAN_SPEED_RPM_ATTR(1), + DECLARE_FAN_SPEED_RPM_ATTR(2), + DECLARE_FAN_SPEED_RPM_ATTR(3), + DECLARE_FAN_SPEED_RPM_ATTR(4), + DECLARE_FAN_SPEED_RPM_ATTR(5), + DECLARE_FAN_SPEED_RPM_ATTR(6), + DECLARE_FAN_PRESENT_ATTR(1), + DECLARE_FAN_PRESENT_ATTR(2), + DECLARE_FAN_PRESENT_ATTR(3), + DECLARE_FAN_PRESENT_ATTR(4), + DECLARE_FAN_PRESENT_ATTR(5), + DECLARE_FAN_PRESENT_ATTR(6), + DECLARE_FAN_DUTY_CYCLE_ATTR(), + NULL +}; + +#define FAN_DUTY_CYCLE_REG_MASK 0xF +#define FAN_MAX_DUTY_CYCLE 100 +#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 + +static int as7712_32x_fan_read_value(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int as7712_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + +/* fan utility functions + */ +static u32 reg_val_to_duty_cycle(u8 reg_val) +{ + reg_val &= FAN_DUTY_CYCLE_REG_MASK; + return ((u32)(reg_val+1) * 625 + 75)/ 100; +} + +static u8 duty_cycle_to_reg_val(u8 duty_cycle) +{ + return ((u32)duty_cycle * 100 / 625) - 1; +} + +static u32 reg_val_to_speed_rpm(u8 reg_val) +{ + return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; +} + +static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) +{ + u8 mask = (1 << id); + + reg_val &= mask; + + return reg_val ? 
0 : 1; +} + +static u8 is_fan_fault(struct as7712_32x_fan_data *data, enum fan_id id) +{ + u8 ret = 1; + int front_fan_index = FAN1_FRONT_SPEED_RPM + id; + int rear_fan_index = FAN1_REAR_SPEED_RPM + id; + + /* Check if the speed of front or rear fan is ZERO, + */ + if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && + reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { + ret = 0; + } + + return ret; +} + +static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + int error, value; + struct i2c_client *client = to_i2c_client(dev); + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < 0 || value > FAN_MAX_DUTY_CYCLE) + return -EINVAL; + + as7712_32x_fan_write_value(client, 0x33, 0); /* Disable fan speed watch dog */ + as7712_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); + return count; +} + +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7712_32x_fan_data *data = as7712_32x_fan_update_device(dev); + ssize_t ret = 0; + + if (data->valid) { + switch (attr->index) { + case FAN_DUTY_CYCLE_PERCENTAGE: + { + u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); + ret = sprintf(buf, "%u\n", duty_cycle); + break; + } + case FAN1_FRONT_SPEED_RPM: + case FAN2_FRONT_SPEED_RPM: + case FAN3_FRONT_SPEED_RPM: + case FAN4_FRONT_SPEED_RPM: + case FAN5_FRONT_SPEED_RPM: + case FAN6_FRONT_SPEED_RPM: + case FAN1_REAR_SPEED_RPM: + case FAN2_REAR_SPEED_RPM: + case FAN3_REAR_SPEED_RPM: + case FAN4_REAR_SPEED_RPM: + case FAN5_REAR_SPEED_RPM: + case FAN6_REAR_SPEED_RPM: + ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); + break; + case FAN1_PRESENT: + case FAN2_PRESENT: + case FAN3_PRESENT: + case FAN4_PRESENT: + case FAN5_PRESENT: + case FAN6_PRESENT: + ret = sprintf(buf, "%d\n", + reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], + attr->index - FAN1_PRESENT)); + break; + case FAN1_FAULT: + case FAN2_FAULT: + case FAN3_FAULT: + case FAN4_FAULT: + case FAN5_FAULT: + case FAN6_FAULT: + ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); + break; + default: + break; + } + } + + return ret; +} + +static const struct attribute_group as7712_32x_fan_group = { + .attrs = as7712_32x_fan_attributes, +}; + +static struct as7712_32x_fan_data *as7712_32x_fan_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7712_32x_fan_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || + !data->valid) { + int i; + + dev_dbg(&client->dev, "Starting as7712_32x_fan update\n"); + data->valid = 0; + + /* Update fan data + */ + for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { + int status = as7712_32x_fan_read_value(client, fan_reg[i]); + + if (status < 0) { + data->valid = 0; + mutex_unlock(&data->update_lock); + dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); + return data; + } + else { + data->reg_val[i] = status; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int as7712_32x_fan_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7712_32x_fan_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, 
I2C_FUNC_SMBUS_BYTE_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7712_32x_fan_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7712_32x_fan_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: fan '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7712_32x_fan_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7712_32x_fan_remove(struct i2c_client *client) +{ + struct as7712_32x_fan_data *data = i2c_get_clientdata(client); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7712_32x_fan_group); + + return 0; +} + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; + +static const struct i2c_device_id as7712_32x_fan_id[] = { + { "as7712_32x_fan", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as7712_32x_fan_id); + +static struct i2c_driver as7712_32x_fan_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = DRVNAME, + }, + .probe = as7712_32x_fan_probe, + .remove = as7712_32x_fan_remove, + .id_table = as7712_32x_fan_id, + .address_list = normal_i2c, +}; + +static int __init as7712_32x_fan_init(void) +{ + extern int platform_accton_as7712_32x(void); + if (!platform_accton_as7712_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7712_32x_fan_driver); +} + +static void __exit as7712_32x_fan_exit(void) +{ + i2c_del_driver(&as7712_32x_fan_driver); +} + +module_init(as7712_32x_fan_init); +module_exit(as7712_32x_fan_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as7712_32x_fan driver"); +MODULE_LICENSE("GPL"); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-leds.c new file mode 100644 index 00000000..747d39a7 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-leds.c @@ -0,0 +1,443 @@ +/* + * A LED driver for the accton_as7712_32x_led + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "accton_as7712_32x_led" + +struct accton_as7712_32x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[1]; /* only 1 register*/ +}; + +static struct accton_as7712_32x_led_data *ledctl = NULL; + +/* LED related data + */ + +#define LED_CNTRLER_I2C_ADDRESS (0x60) + +#define LED_TYPE_DIAG_REG_MASK (0x3) +#define LED_MODE_DIAG_GREEN_VALUE (0x02) +#define LED_MODE_DIAG_RED_VALUE (0x01) +#define LED_MODE_DIAG_AMBER_VALUE (0x00) /*It's yellow actually. Green+Red=Yellow*/ +#define LED_MODE_DIAG_OFF_VALUE (0x03) + + +#define LED_TYPE_LOC_REG_MASK (0x80) +#define LED_MODE_LOC_ON_VALUE (0) +#define LED_MODE_LOC_OFF_VALUE (0x80) + +enum led_type { + LED_TYPE_DIAG, + LED_TYPE_LOC, + LED_TYPE_FAN, + LED_TYPE_PSU1, + LED_TYPE_PSU2 +}; + +struct led_reg { + u32 types; + u8 reg_addr; +}; + +static const struct led_reg led_reg_map[] = { + {(1<update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as7712_32x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as7712_32x_led_read_value(led_reg_map[i].reg_addr); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg_map[i].reg_addr, status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as7712_32x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + enum led_type type) +{ + int reg_val; + u8 reg ; + mutex_lock(&ledctl->update_lock); + + if( !accton_getLedReg(type, &reg)) + { + dev_dbg(&ledctl->pdev->dev, "Not match item for %d.\n", type); + } + + reg_val = accton_as7712_32x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as7712_32x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + + +static void accton_as7712_32x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as7712_32x_led_set(led_cdev, led_light_mode, LED_TYPE_DIAG); +} + +static enum led_brightness accton_as7712_32x_led_diag_get(struct led_classdev *cdev) +{ + accton_as7712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as7712_32x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as7712_32x_led_set(led_cdev, led_light_mode, LED_TYPE_LOC); +} + +static enum led_brightness accton_as7712_32x_led_loc_get(struct led_classdev *cdev) +{ +
accton_as7712_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static void accton_as7712_32x_led_auto_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ +} + +static enum led_brightness accton_as7712_32x_led_auto_get(struct led_classdev *cdev) +{ + return LED_MODE_AUTO; +} + +static struct led_classdev accton_as7712_32x_leds[] = { + [LED_TYPE_DIAG] = { + .name = "accton_as7712_32x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as7712_32x_led_diag_set, + .brightness_get = accton_as7712_32x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_RED, + }, + [LED_TYPE_LOC] = { + .name = "accton_as7712_32x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as7712_32x_led_loc_set, + .brightness_get = accton_as7712_32x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_BLUE, + }, + [LED_TYPE_FAN] = { + .name = "accton_as7712_32x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as7712_32x_led_auto_set, + .brightness_get = accton_as7712_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU1] = { + .name = "accton_as7712_32x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as7712_32x_led_auto_set, + .brightness_get = accton_as7712_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as7712_32x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as7712_32x_led_auto_set, + .brightness_get = accton_as7712_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as7712_32x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { + led_classdev_suspend(&accton_as7712_32x_leds[i]); + } + + return 0; +} + +static int accton_as7712_32x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { + led_classdev_resume(&accton_as7712_32x_leds[i]); + } + + return 0; +} + +static int accton_as7712_32x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as7712_32x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as7712_32x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as7712_32x_leds[j]); + } + } + + return ret; +} + +static int accton_as7712_32x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { + led_classdev_unregister(&accton_as7712_32x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as7712_32x_led_driver = { + .probe = accton_as7712_32x_led_probe, + .remove = accton_as7712_32x_led_remove, + .suspend = accton_as7712_32x_led_suspend, + .resume = accton_as7712_32x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as7712_32x_led_init(void) +{ + int ret; + + extern int platform_accton_as7712_32x(void); + if (!platform_accton_as7712_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as7712_32x_led_driver); + if
(ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as7712_32x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as7712_32x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as7712_32x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as7712_32x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as7712_32x_led_driver); + kfree(ledctl); +} + +module_init(accton_as7712_32x_led_init); +module_exit(accton_as7712_32x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as7712_32x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c new file mode 100644 index 00000000..4de2db3a --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c @@ -0,0 +1,293 @@ +/* + * An hwmon driver for accton as7712_32x Power Module + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as7712_32x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[9]; /* Model name, read from eeprom */ +}; + +static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev); + +enum as7712_32x_psu_sysfs_attributes { + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as7712_32x_psu_attributes[] = { + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> (1-data->index) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = (data->status >> (3-data->index) & 0x1); + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); + + return sprintf(buf, "%s\n", data->model_name); +} + +static const struct attribute_group as7712_32x_psu_group = { + .attrs = as7712_32x_psu_attributes, +}; + +static int as7712_32x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7712_32x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7712_32x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + data->index = dev_id->driver_data; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7712_32x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7712_32x_psu_group); +exit_free: + kfree(data); +exit: + + return status; 
+} + +static int as7712_32x_psu_remove(struct i2c_client *client) +{ + struct as7712_32x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7712_32x_psu_group); + kfree(data); + + return 0; +} + +enum psu_index +{ + as7712_32x_psu1, + as7712_32x_psu2 +}; + +static const struct i2c_device_id as7712_32x_psu_id[] = { + { "as7712_32x_psu1", as7712_32x_psu1 }, + { "as7712_32x_psu2", as7712_32x_psu2 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as7712_32x_psu_id); + +static struct i2c_driver as7712_32x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as7712_32x_psu", + }, + .probe = as7712_32x_psu_probe, + .remove = as7712_32x_psu_remove, + .id_table = as7712_32x_psu_id, + .address_list = normal_i2c, +}; + +static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = 0; + int retry_count = 5; + + while (retry_count) { + retry_count--; + + result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) { + msleep(10); + continue; + } + + if (unlikely(result != data_len)) { + result = -EIO; + msleep(10); + continue; + } + + result = 0; + break; + } + + return result; +} + +static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7712_32x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status; + int power_good = 0; + + dev_dbg(&client->dev, "Starting as7712_32x update\n"); + + /* Read psu status */ + status = accton_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + /* Read model name */ + memset(data->model_name, 0, sizeof(data->model_name)); + power_good = (data->status >> (3-data->index) & 0x1); + + if (power_good) { + status = as7712_32x_psu_read_block(client, 0x20, data->model_name, + ARRAY_SIZE(data->model_name)-1); + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as7712_32x_psu_init(void) +{ + extern int platform_accton_as7712_32x(void); + if (!platform_accton_as7712_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7712_32x_psu_driver); +} + +static void __exit as7712_32x_psu_exit(void) +{ + i2c_del_driver(&as7712_32x_psu_driver); +} + +module_init(as7712_32x_psu_init); +module_exit(as7712_32x_psu_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as7712_32x_psu driver"); +MODULE_LICENSE("GPL"); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-sfp.c new file mode 100644 index 00000000..5953ae6d --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-sfp.c @@ -0,0 +1,356 @@ +/* + * An hwmon driver for accton as7712_32x sfp + * + * Copyright (C) 2014 Accton Technology Corporation. 
+ * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT_INDEX(i) (1UL << (i)) + + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as7712_32x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u32 is_present; /* present status */ +}; + +static struct as7712_32x_sfp_data *as7712_32x_sfp_update_device(struct device *dev); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +enum as7712_32x_sfp_sysfs_attributes { + SFP_PORT_NUMBER, + SFP_IS_PRESENT, + SFP_IS_PRESENT_ALL, + SFP_EEPROM +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); + +static struct attribute *as7712_32x_sfp_attributes[] = { + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7712_32x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->port+1); +} + +/* Error-check the CPLD read results. */ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ +do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ +} while(0) + +static ssize_t show_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + + if(attr->index == SFP_IS_PRESENT_ALL) { + int values[4]; + /* + * Report the SFP_PRESENCE status for all ports. 
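+	 *
+	 * Each of the four CPLD registers (0x30-0x33 on the device at
+	 * 0x60) carries one presence bit per port for a group of eight
+	 * ports.  The raw value is active low, so VALIDATED_READ() is
+	 * asked to invert it before masking to 0xFF; a set bit in the
+	 * reported values therefore means a module is present.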
+ */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); + + /* Return values 1 -> 32 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], values[3]); + } + else { /* SFP_IS_PRESENT */ + struct as7712_32x_sfp_data *data = as7712_32x_sfp_update_device(dev); + + if (!data->valid) { + return -EIO; + } + + return sprintf(buf, "%d\n", data->is_present); + } +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as7712_32x_sfp_data *data = as7712_32x_sfp_update_device(dev); + + if (!data->valid) { + return 0; + } + + if (!data->is_present) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as7712_32x_sfp_group = { + .attrs = as7712_32x_sfp_attributes, +}; + +static int as7712_32x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7712_32x_sfp_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7712_32x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7712_32x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7712_32x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7712_32x_sfp_remove(struct i2c_client *client) +{ + struct as7712_32x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7712_32x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as7712_32x_sfp1, as7712_32x_sfp2, as7712_32x_sfp3, as7712_32x_sfp4, +as7712_32x_sfp5, as7712_32x_sfp6, as7712_32x_sfp7, as7712_32x_sfp8, +as7712_32x_sfp9, as7712_32x_sfp10,as7712_32x_sfp11,as7712_32x_sfp12, +as7712_32x_sfp13,as7712_32x_sfp14,as7712_32x_sfp15,as7712_32x_sfp16, +as7712_32x_sfp17,as7712_32x_sfp18,as7712_32x_sfp19,as7712_32x_sfp20, +as7712_32x_sfp21,as7712_32x_sfp22,as7712_32x_sfp23,as7712_32x_sfp24, +as7712_32x_sfp25,as7712_32x_sfp26,as7712_32x_sfp27,as7712_32x_sfp28, +as7712_32x_sfp29,as7712_32x_sfp30,as7712_32x_sfp31,as7712_32x_sfp32 +}; + +static const struct i2c_device_id as7712_32x_sfp_id[] = { +{ "as7712_32x_sfp1", as7712_32x_sfp1 }, { "as7712_32x_sfp2", as7712_32x_sfp2 }, +{ "as7712_32x_sfp3", as7712_32x_sfp3 }, { "as7712_32x_sfp4", as7712_32x_sfp4 }, +{ "as7712_32x_sfp5", as7712_32x_sfp5 }, { "as7712_32x_sfp6", as7712_32x_sfp6 }, +{ "as7712_32x_sfp7", as7712_32x_sfp7 }, { "as7712_32x_sfp8", as7712_32x_sfp8 }, +{ "as7712_32x_sfp9", 
as7712_32x_sfp9 }, { "as7712_32x_sfp10", as7712_32x_sfp10 }, +{ "as7712_32x_sfp11", as7712_32x_sfp11 }, { "as7712_32x_sfp12", as7712_32x_sfp12 }, +{ "as7712_32x_sfp13", as7712_32x_sfp13 }, { "as7712_32x_sfp14", as7712_32x_sfp14 }, +{ "as7712_32x_sfp15", as7712_32x_sfp15 }, { "as7712_32x_sfp16", as7712_32x_sfp16 }, +{ "as7712_32x_sfp17", as7712_32x_sfp17 }, { "as7712_32x_sfp18", as7712_32x_sfp18 }, +{ "as7712_32x_sfp19", as7712_32x_sfp19 }, { "as7712_32x_sfp20", as7712_32x_sfp20 }, +{ "as7712_32x_sfp21", as7712_32x_sfp21 }, { "as7712_32x_sfp22", as7712_32x_sfp22 }, +{ "as7712_32x_sfp23", as7712_32x_sfp23 }, { "as7712_32x_sfp24", as7712_32x_sfp24 }, +{ "as7712_32x_sfp25", as7712_32x_sfp25 }, { "as7712_32x_sfp26", as7712_32x_sfp26 }, +{ "as7712_32x_sfp27", as7712_32x_sfp27 }, { "as7712_32x_sfp28", as7712_32x_sfp28 }, +{ "as7712_32x_sfp29", as7712_32x_sfp29 }, { "as7712_32x_sfp30", as7712_32x_sfp30 }, +{ "as7712_32x_sfp31", as7712_32x_sfp31 }, { "as7712_32x_sfp32", as7712_32x_sfp32 }, +{} +}; +MODULE_DEVICE_TABLE(i2c, as7712_32x_sfp_id); + +static struct i2c_driver as7712_32x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as7712_32x_sfp", + }, + .probe = as7712_32x_sfp_probe, + .remove = as7712_32x_sfp_remove, + .id_table = as7712_32x_sfp_id, + .address_list = normal_i2c, +}; + +static int as7712_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as7712_32x_sfp_data *as7712_32x_sfp_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7712_32x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0; + u8 cpld_reg = 0x30 + (data->port/8); + + data->valid = 0; + + /* Read present status of the specified port number */ + data->is_present = 0; + status = accton_i2c_cpld_read(0x60, cpld_reg); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); + goto exit; + } + + data->is_present = (status & (1 << (data->port % 8))) ? 
0 : 1; + + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if (data->is_present) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { + status = as7712_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, + data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), + I2C_SMBUS_BLOCK_MAX); + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); + goto exit; + } + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as7712_32x_sfp_init(void) +{ + extern int platform_accton_as7712_32x(void); + if (!platform_accton_as7712_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7712_32x_sfp_driver); +} + +static void __exit as7712_32x_sfp_exit(void) +{ + i2c_del_driver(&as7712_32x_sfp_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as7712_32x_sfp driver"); +MODULE_LICENSE("GPL"); + +module_init(as7712_32x_sfp_init); +module_exit(as7712_32x_sfp_exit); From 42666e565fcb29ec5f1a9defef82f97de1621a3c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:37:16 +0000 Subject: [PATCH 235/255] The module list needs to be quoted. --- .../amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile index d750cf56..e4df12e2 100644 --- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile +++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile @@ -14,7 +14,7 @@ include $(ONL)/make/config.mk kernel: $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) - ARCH=x86_64 $(ONL)/tools/scripts/kmodbuild.sh linux-3.16.39-mbuild $(wildcard $(ONL)/packages/base/any/kernels/modules/*) onl + ARCH=x86_64 $(ONL)/tools/scripts/kmodbuild.sh linux-3.16.39-mbuild "$(wildcard $(ONL)/packages/base/any/kernels/modules/*)" onl clean: rm -rf linux-3.16* kernel-3.16* From 323d18e67a9678573363c495d992afe26dd9d951 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:38:03 +0000 Subject: [PATCH 236/255] - Kernel Modules - Upgrade to 3.16 LTS --- .../x86-64-accton-as7712-32x/platform-config/r0/PKG.yml | 2 +- .../r0/src/lib/x86-64-accton-as7712-32x-r0.yml | 4 ++-- .../r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/PKG.yml index b8e054ad..34ebf81e 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as7712-32x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as7712-32x-r0 MODULES=onl-platform-modules-x86-64-accton-as7712-32x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml 
b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml index 1220adf4..1aef0594 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml @@ -17,8 +17,8 @@ x86-64-accton-as7712-32x-r0: --parity=no --stop=1 - kernel: - <<: *kernel-3-2 + kernel: + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py index 06580f03..7b91b11a 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py @@ -8,6 +8,10 @@ class OnlPlatform_x86_64_accton_as7712_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".7712.32" def baseconfig(self): + self.insmod('ym2651') + for m in [ 'fan', 'psu', 'leds', 'sfp' ]: + self.insmod("x86-64-accton-as6812-32x-%s.ko" % m, required=False) + ########### initialize I2C bus 0 ########### self.new_i2c_devices([ From c88d8f9bf2b7e9be1d4eac45e7fc8f328f4f6725 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:38:27 +0000 Subject: [PATCH 237/255] - Kernel Modules - Upgrade to 3.16 LTS --- .../x86-64-accton-as6812-32x/platform-config/r0/PKG.yml | 2 +- .../r0/src/lib/x86-64-accton-as6812-32x-r0.yml | 4 ++-- .../r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/PKG.yml index 4fd78b2e..05ced785 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as6812-32x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as6812-32x-r0 MODULES=onl-platform-modules-x86-64-accton-as6812-32x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml index a7b75803..18dda3ba 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml @@ -17,8 +17,8 @@ x86-64-accton-as6812-32x-r0: --parity=no --stop=1 - kernel: - <<: *kernel-3-2 + kernel: + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py index b7abeda4..2dd65d5d 100644 
--- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py @@ -9,6 +9,10 @@ class OnlPlatform_x86_64_accton_as6812_32x_r0(OnlPlatformAccton, def baseconfig(self): + self.insmod('cpr_4011_4mxx', required=False) + for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: + self.insmod("x86-64-accton-as6812-32x-%s.ko" % m, required=False) + ########### initialize I2C bus 0 ########### # initialize CPLD self.new_i2c_devices( From 6a51959b37240a39e06985ae9042b3f7226d814c Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:39:11 +0000 Subject: [PATCH 238/255] - Kernel Modules - Upgrade to 3.16 LTS --- .../x86-64-accton-as5812-54t/platform-config/r0/PKG.yml | 2 +- .../r0/src/lib/x86-64-accton-as5812-54t-r0.yml | 4 ++-- .../r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/PKG.yml index 5b0cb10e..9af5bae9 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5812-54t-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5812-54t-r0 MODULES=onl-platform-modules-x86-64-accton-as5812-54t:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml index a88f81cf..62190b5a 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml @@ -17,8 +17,8 @@ x86-64-accton-as5812-54t-r0: --parity=no --stop=1 - kernel: - <<: *kernel-3-2 + kernel: + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py index 1ca9ab33..c372a4e7 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py @@ -10,6 +10,10 @@ class OnlPlatform_x86_64_accton_as5812_54t_r0(OnlPlatformAccton, def baseconfig(self): ########### initialize I2C bus 0 ########### + self.insmod("accton_i2c_cpld", required=False) + self.insmod("cpr_4011_4mxx", required=False) + for m in [ "sfp", "psu", "fan", "leds" ]: + self.insmod("x86-64-accton-as5812-54t-%s" % m, required=False) # initialize CPLDs self.new_i2c_device('accton_i2c_cpld', 0x60, 0) From 75e7f65ded9226023169007b224b78c4ad81e5c5 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 02:39:17 
+0000 Subject: [PATCH 239/255] - Kernel Modules - Upgrade to 3.16 LTS --- .../x86-64-accton-as5812-54x/platform-config/r0/PKG.yml | 2 +- .../r0/src/lib/x86-64-accton-as5812-54x-r0.yml | 4 ++-- .../r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/PKG.yml index ebd6029f..6af12173 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5812-54x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5812-54x-r0 MODULES=onl-platform-modules-x86-64-accton-as5812-54x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml index 95817d8a..014340dc 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml @@ -17,8 +17,8 @@ x86-64-accton-as5812-54x-r0: --parity=no --stop=1 - kernel: - <<: *kernel-3-2 + kernel: + <<: *kernel-3-16 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py index 536b1879..f6c28147 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py @@ -9,6 +9,10 @@ class OnlPlatform_x86_64_accton_as5812_54x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".5812.54.1" def baseconfig(self): + self.insmod('cpr_4011_4mxx', required=False) + for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: + self.insmod("x86-64-accton-as5812-54x-%s.ko" % m, required=False) + ########### initialize I2C bus 0 ########### # initialize CPLDs From 2ba24e27207c6f36ae431ae0326a1cc23acbbdad Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 15:23:02 +0000 Subject: [PATCH 240/255] Fix modules. 
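The AS7712-32X baseconfig loaded the wrong modules: the PSU PMBus driver is
ym2651y rather than ym2651, the CPLD mux driver was never inserted, and the
per-subsystem modules were spelled with the AS6812 prefix.  accton_i2c_cpld
has to be loaded before the platform modules because the fan/psu/sfp drivers
resolve the CPLD accessors at insmod time.  A minimal sketch of that
dependency, assuming the symbols are exported by the accton_i2c_cpld module
as on the other Accton platforms:

    /* Declared in x86-64-accton-as7712-32x-psu.c and -sfp.c; the symbols
     * are provided by the separately loaded CPLD module, so it must be
     * inserted first. */
    extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg);
    extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value);
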
--- .../r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py index 7b91b11a..4040d2a7 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py @@ -8,9 +8,10 @@ class OnlPlatform_x86_64_accton_as7712_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".7712.32" def baseconfig(self): - self.insmod('ym2651') + self.insmod('ym2651y') + self.insmod('accton_i2c_cpld') for m in [ 'fan', 'psu', 'leds', 'sfp' ]: - self.insmod("x86-64-accton-as6812-32x-%s.ko" % m, required=False) + self.insmod("x86-64-accton-as7712-32x-%s.ko" % m, required=False) ########### initialize I2C bus 0 ########### self.new_i2c_devices([ From bef2b6bbb77e3c8ce76e19ee2b856a08ad32ee75 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 15:23:17 +0000 Subject: [PATCH 241/255] Use the existing APIs to read the presence data. This also fixes an atoi buffer error when the input was not properly terminated. --- .../onlp/builds/src/module/src/sfpi.c | 29 ++++++------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/sfpi.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/sfpi.c index ac02ac4e..10288992 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/sfpi.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/onlp/builds/src/module/src/sfpi.c @@ -32,31 +32,22 @@ #include #include "platform_lib.h" +#include #define MAX_SFP_PATH 64 static char sfp_node_path[MAX_SFP_PATH] = {0}; #define FRONT_PORT_TO_CPLD_MUX_INDEX(port) (port+2) -static int +static int as6812_32x_sfp_node_read_int(char *node_path, int *value, int data_len) { - int ret = 0; - char buf[8]; - *value = 0; - - ret = deviceNodeReadString(node_path, buf, sizeof(buf), data_len); - - if (ret == 0) { - *value = atoi(buf); - } - - return ret; + return onlp_file_read_int(value, node_path); } -static char* +static char* as6812_32x_sfp_get_port_path(int port, char *node_name) { - sprintf(sfp_node_path, "/sys/bus/i2c/devices/%d-0050/%s", + sprintf(sfp_node_path, "/sys/bus/i2c/devices/%d-0050/%s", FRONT_PORT_TO_CPLD_MUX_INDEX(port), node_name); @@ -71,7 +62,7 @@ as6812_32x_sfp_get_port_path(int port, char *node_name) int onlp_sfpi_init(void) { - /* Called at initialization time */ + /* Called at initialization time */ return ONLP_STATUS_OK; } @@ -83,7 +74,7 @@ onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) */ int p; AIM_BITMAP_CLR_ALL(bmap); - + for(p = 0; p < 32; p++) { AIM_BITMAP_SET(bmap, p); } @@ -106,7 +97,6 @@ onlp_sfpi_is_present(int port) AIM_LOG_ERROR("Unable to read present status from port(%d)\r\n", port); return ONLP_STATUS_E_INTERNAL; } - return present; } @@ -119,7 +109,7 @@ onlp_sfpi_presence_bitmap_get(onlp_sfp_bitmap_t* dst) path = as6812_32x_sfp_get_port_path(0, "sfp_is_present_all"); fp = fopen(path, "r"); - + if(fp == NULL) { AIM_LOG_ERROR("Unable to open the sfp_is_present_all device file."); return 
ONLP_STATUS_E_INTERNAL; @@ -172,7 +162,7 @@ onlp_sfpi_eeprom_read(int port, uint8_t data[256]) * Return OK if eeprom is read */ memset(data, 0, 256); - + if (deviceNodeReadBinary(path, (char*)data, 256, 256) != 0) { AIM_LOG_ERROR("Unable to read eeprom from port(%d)\r\n", port); return ONLP_STATUS_E_INTERNAL; @@ -186,4 +176,3 @@ onlp_sfpi_denit(void) { return ONLP_STATUS_OK; } - From 65d0cf8f43f7ad1174c80f2c0473fb2c85c986f4 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 15:25:08 +0000 Subject: [PATCH 242/255] Zero the buffer on behalf of the client. --- packages/base/any/onlp/src/onlplib/module/src/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/base/any/onlp/src/onlplib/module/src/file.c b/packages/base/any/onlp/src/onlplib/module/src/file.c index 1380c2f4..a1abd74d 100644 --- a/packages/base/any/onlp/src/onlplib/module/src/file.c +++ b/packages/base/any/onlp/src/onlplib/module/src/file.c @@ -124,6 +124,7 @@ onlp_file_vread(uint8_t* data, int max, int* len, const char* fmt, va_list vargs rv = fd; } else { + memset(data, 0, max); if ((*len = read(fd, data, max)) <= 0) { AIM_LOG_ERROR("Failed to read input file '%s'", fname); rv = ONLP_STATUS_E_INTERNAL; From b244824f7828bb8f4a4b982d01d04e07f6a29a04 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 15:30:57 +0000 Subject: [PATCH 243/255] Use the new search API to improve compatibility between kernels. --- .../onlp/builds/src/module/src/thermali.c | 49 +++++-------------- 1 file changed, 13 insertions(+), 36 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/thermali.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/thermali.c index e39d7817..94f5353c 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/onlp/builds/src/module/src/thermali.c @@ -30,8 +30,6 @@ #include #include "platform_lib.h" -#define prefix_path "/sys/bus/i2c/devices/" - #define VALIDATE(_id) \ do { \ if(!ONLP_OID_IS_THERMAL(_id)) { \ @@ -39,17 +37,6 @@ } \ } while(0) -#define OPEN_READ_FILE(fd,fullpath,data,nbytes,len) \ - DEBUG_PRINT("[Debug][%s][%d][openfile: %s]\n", __FUNCTION__, __LINE__, fullpath); \ - if ((fd = open(fullpath, O_RDONLY)) == -1) \ - return ONLP_STATUS_E_INTERNAL; \ - if ((len = read(fd, r_data, nbytes)) <= 0){ \ - close(fd); \ - return ONLP_STATUS_E_INTERNAL;} \ - DEBUG_PRINT("[Debug][%s][%d][read data: %s]\n", __FUNCTION__, __LINE__, r_data); \ - if (close(fd) == -1) \ - return ONLP_STATUS_E_INTERNAL - enum onlp_thermal_id { THERMAL_RESERVED = 0, @@ -61,24 +48,24 @@ enum onlp_thermal_id THERMAL_1_ON_PSU2, }; -static char* last_path[] = /* must map with onlp_thermal_id */ +static char* devfiles__[] = /* must map with onlp_thermal_id */ { "reserved", NULL, /* CPU_CORE files */ - "3-0048/temp1_input", - "3-0049/temp1_input", - "3-004a/temp1_input", - "3-004b/temp1_input", - "11-005b/psu_temp1_input", - "10-0058/psu_temp1_input", + "/sys/bus/i2c/devices/3-0048*temp1_input", + "/sys/bus/i2c/devices/3-0049*temp1_input", + "/sys/bus/i2c/devices/3-004a*temp1_input", + "/sys/bus/i2c/devices/3-004b*temp1_input", + "/sys/bus/i2c/devices/11-005b*psu_temp1_input", + "/sys/bus/i2c/devices/10-0058*psu_temp1_input", }; static char* cpu_coretemp_files[] = { - "/sys/devices/platform/coretemp.0/temp2_input", - "/sys/devices/platform/coretemp.0/temp3_input", - 
"/sys/devices/platform/coretemp.0/temp4_input", - "/sys/devices/platform/coretemp.0/temp5_input", + "/sys/devices/platform/coretemp.0*temp2_input", + "/sys/devices/platform/coretemp.0*temp3_input", + "/sys/devices/platform/coretemp.0*temp4_input", + "/sys/devices/platform/coretemp.0*temp5_input", NULL, }; @@ -137,9 +124,7 @@ onlp_thermali_init(void) int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { - int fd, len, nbytes = 10, temp_base=1, local_id; - char r_data[10] = {0}; - char fullpath[50] = {0}; + int local_id; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); @@ -152,13 +137,5 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) return rv; } - /* get fullpath */ - sprintf(fullpath, "%s%s", prefix_path, last_path[local_id]); - - OPEN_READ_FILE(fd, fullpath, r_data, nbytes, len); - info->mcelsius = atoi(r_data) / temp_base; - DEBUG_PRINT("\n[Debug][%s][%d][save data: %d]\n", __FUNCTION__, __LINE__, info->mcelsius); - - return ONLP_STATUS_OK; + return onlp_file_read_int(&info->mcelsius, devfiles__[local_id]); } - From aecfcdccba2e1afb794aa191d212f7b60158b097 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Mon, 2 Jan 2017 15:35:44 +0000 Subject: [PATCH 244/255] Use the new search API to improve compatibility between kernels. --- .../onlp/builds/src/module/src/thermali.c | 60 ++++--------------- 1 file changed, 12 insertions(+), 48 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/onlp/builds/src/module/src/thermali.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/onlp/builds/src/module/src/thermali.c index 2460563e..c04d425e 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/onlp/builds/src/module/src/thermali.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/onlp/builds/src/module/src/thermali.c @@ -29,10 +29,6 @@ #include #include -#define prefix_path "/sys/bus/i2c/devices/" -#define filename "temp1_input" -#define LOCAL_DEBUG 0 - #define VALIDATE(_id) \ do { \ if(!ONLP_OID_IS_THERMAL(_id)) { \ @@ -40,21 +36,6 @@ } \ } while(0) - -#define OPEN_READ_FILE(fd,fullpath,data,nbytes,len) \ - if (LOCAL_DEBUG) \ - printf("[Debug][%s][%d][openfile: %s]\n", __FUNCTION__, __LINE__, fullpath); \ - if ((fd = open(fullpath, O_RDONLY)) == -1) \ - return ONLP_STATUS_E_INTERNAL; \ - if ((len = read(fd, r_data, nbytes)) <= 0){ \ - close(fd); \ - return ONLP_STATUS_E_INTERNAL;} \ - if (LOCAL_DEBUG) \ - printf("[Debug][%s][%d][read data: %s]\n", __FUNCTION__, __LINE__, r_data); \ - if (close(fd) == -1) \ - return ONLP_STATUS_E_INTERNAL - - enum onlp_thermal_id { THERMAL_RESERVED = 0, @@ -66,23 +47,23 @@ enum onlp_thermal_id THERMAL_1_ON_PSU2, }; -static char* last_path[] = /* must map with onlp_thermal_id */ +static char* devfiles__[] = /* must map with onlp_thermal_id */ { "reserved", NULL, /* CPU_CORE files */ - "15-0048/", - "16-0049/", - "17-004a/", - "11-003c/psu_", - "12-003f/psu_", + "/sys/bus/i2c/devices/15-0048*temp1_input", + "/sys/bus/i2c/devices/16-0049*temp1_input", + "/sys/bus/i2c/devices/17-004a*temp1_input", + "/sys/bus/i2c/devices/11-003c*psu_temp1_input", + "/sys/bus/i2c/devices/12-003f*psu_temp1_input", }; static char* cpu_coretemp_files[] = { - "/sys/devices/platform/coretemp.0/temp2_input", - "/sys/devices/platform/coretemp.0/temp3_input", - "/sys/devices/platform/coretemp.0/temp4_input", - "/sys/devices/platform/coretemp.0/temp5_input", + "/sys/devices/platform/coretemp.0*temp2_input", + "/sys/devices/platform/coretemp.0*temp3_input", + 
"/sys/devices/platform/coretemp.0*temp4_input", + "/sys/devices/platform/coretemp.0*temp5_input", NULL, }; @@ -142,16 +123,11 @@ onlp_thermali_init(void) int onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) { - int fd, len, nbytes = 10, temp_base=1, local_id; - char r_data[10] = {0}; - char fullpath[50] = {0}; + int local_id; VALIDATE(id); local_id = ONLP_OID_ID_GET(id); - if (LOCAL_DEBUG) - printf("\n[Debug][%s][%d][local_id: %d]", __FUNCTION__, __LINE__, local_id); - /* Set the onlp_oid_hdr_t and capabilities */ *info = linfo[local_id]; @@ -160,17 +136,5 @@ onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) return rv; } - /* get fullpath */ - sprintf(fullpath, "%s%s%s", prefix_path, last_path[local_id], filename); - - OPEN_READ_FILE(fd,fullpath,r_data,nbytes,len); - - info->mcelsius = atoi(r_data)/temp_base; - - if (LOCAL_DEBUG) - printf("\n[Debug][%s][%d][save data: %d]\n", __FUNCTION__, __LINE__, info->mcelsius); - - return ONLP_STATUS_OK; + return onlp_file_read_int(&info->mcelsius, devfiles__[local_id]); } - - From afba81d0235fe518bacecb0a21a3fb80256cb6c4 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 00:09:52 +0000 Subject: [PATCH 245/255] The kernel drivers for the AS5712, AS6712, AS5812{t,x}, AS6812, and AS7712 are not modules for the 3.2 kernel. The drivers for the 7716, 7512, and 5512 will be migrated to modules as well as the next step. --- .../kernels/3.2.65-1+deb7u2/patches/series | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index e3e983f4..08b76145 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -234,22 +234,22 @@ network-core-proto-down.patch network-bonding-clag-proto-down.patch network-bridge-disable-multiple-sub-intfs-on-same-port.patch network-virtio-proto-down.patch -driver-hwmon-cpr-4011-4mxx.patch -platform-accton-as5712_54x-device-drivers.patch -platform-accton-as6712_32x-device-drivers.patch +#driver-hwmon-cpr-4011-4mxx.patch +#platform-accton-as5712_54x-device-drivers.patch +#platform-accton-as6712_32x-device-drivers.patch overlayfs_notify.patch -platform-accton-as7512_32x-device-drivers.patch +#platform-accton-as7512_32x-device-drivers.patch driver-pca954x-i2c-mux-deselect-on-exit-config-option.patch -platform-accton-as7712_32x-device-drivers.patch -platform-accton-as5812_54x-device-drivers.patch -platform-accton-as6812_32x-device-drivers.patch -platform-accton-as5812_54t-device-drivers.patch +#platform-accton-as7712_32x-device-drivers.patch +#platform-accton-as5812_54x-device-drivers.patch +#platform-accton-as6812_32x-device-drivers.patch +#platform-accton-as5812_54t-device-drivers.patch driver-mfd-lpc-ich.patch driver-watchdog-itco-wd.patch -platform-accton-as5512_54x-device-drivers.patch -platform-accton-as7716_32x-device-drivers.patch +#platform-accton-as5512_54x-device-drivers.patch +#platform-accton-as7716_32x-device-drivers.patch driver-broadcom-tigon3.patch -mgmt-port-init-config.patch +#mgmt-port-init-config.patch arch-intel-reboot-cf9-cold.patch drivers-hwmon-adm1021-detect.patch drivers-i2c-busses-i2c-isch-timeout.patch From 6a3ce5cad8886f97c92e5051895e402aa97897f8 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 00:12:55 +0000 Subject: [PATCH 246/255] Add kernel module builds for the 3.2 kernel. 
--- .../amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml | 11 +++++++++-- .../kernel-3.2-deb7-x86-64-all/builds/.gitignore | 3 ++- .../kernel-3.2-deb7-x86-64-all/builds/Makefile | 2 +- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml index 5fc62c40..ed3ca8d8 100644 --- a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml +++ b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/PKG.yml @@ -1,3 +1,5 @@ +variables: + basename: onl-kernel-3.2-deb7-x86-64-all common: arch: amd64 @@ -7,7 +9,7 @@ common: support: opennetworklinux@googlegroups.com packages: - - name: onl-kernel-3.2-deb7-x86-64-all + - name: $basename version: 1.0.0 summary: Open Network Linux Kernel 3.2-deb7 for X86_64 Platforms. @@ -17,6 +19,11 @@ packages: changelog: Change changes changes., + - name: $basename-modules + version: 1.0.0 + summary: Open Network Linux 3.2 Kernel Modules for X86_64 Platforms + files: + builds/lib: /lib - + changelog: Change changes changes., diff --git a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/.gitignore b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/.gitignore index b1d21326..63ede980 100644 --- a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/.gitignore +++ b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/.gitignore @@ -1,2 +1,3 @@ linux-3.2.65-1+deb7u2* -kernel-3.2-deb7-x86_64-all \ No newline at end of file +kernel-3.2-deb7-x86_64-all +lib diff --git a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/Makefile index 61fc5d43..918588b3 100644 --- a/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/Makefile +++ b/packages/base/amd64/kernels/kernel-3.2-deb7-x86-64-all/builds/Makefile @@ -14,6 +14,6 @@ include $(ONL)/make/config.mk kernel: $(MAKE) -C $(ONL)/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) - + ARCH=x86_64 $(ONL)/tools/scripts/kmodbuild.sh linux-3.2.65-1+deb7u2-mbuild "$(wildcard $(ONL)/packages/base/any/kernels/modules/*)" onl clean: rm -rf linux-3.2.65-1+deb7u2 linux-3.2.65-1+deb7u2-mbuild From b62b09574898d6e825b5139ba336394daa03a263 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 00:14:34 +0000 Subject: [PATCH 247/255] The 3.2 kernel configuration now uses the platform modules as well. All platforms reverted to 3.2 for initial testing. 
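Building the Accton platform drivers as modules for both kernels means the
CPLD mux sources must cope with the older 3.2 i2c mux API: the class argument
to i2c_add_mux_adapter() is only available on newer kernels (the guard below
keys off 3.7.0), so the call is wrapped in a version check.  In outline, this
is the pattern applied to each CPLD driver in the diff (whitespace differs
per file):

    #include <linux/version.h>

    data->virt_adaps[chan] =
            i2c_add_mux_adapter(adap, &client->dev, client, 0, chan,
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
                                I2C_CLASS_HWMON | I2C_CLASS_SPD,
    #endif
                                accton_i2c_cpld_mux_select_chan,
                                accton_i2c_cpld_mux_deselect_mux);

The module Makefiles additionally list onl-kernel-3.2-deb7-x86-64-all in
KERNELS, the platform-config yml files point back at kernel-3-2, and the
insmod calls drop required=False so a missing module is treated as an error
again.
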
--- .../modules/builds/Makefile | 2 +- .../builds/x86-64-accton-as5712-54x-cpld.c | 13 +- .../src/lib/x86-64-accton-as5712-54x-r0.yml | 2 +- .../x86_64_accton_as5712_54x_r0/__init__.py | 4 +- .../modules/builds/Makefile | 2 +- .../src/lib/x86-64-accton-as5812-54t-r0.yml | 2 +- .../x86_64_accton_as5812_54t_r0/__init__.py | 6 +- .../modules/builds/Makefile | 2 +- .../builds/x86-64-accton-as5812-54x-cpld.c | 13 +- .../src/lib/x86-64-accton-as5812-54x-r0.yml | 2 +- .../x86_64_accton_as5812_54x_r0/__init__.py | 4 +- .../modules/builds/Makefile | 2 +- .../builds/x86-64-accton-as6712-32x-cpld.c | 53 +++--- .../src/lib/x86-64-accton-as6712-32x-r0.yml | 2 +- .../x86_64_accton_as6712_32x_r0/__init__.py | 4 +- .../modules/builds/Makefile | 2 +- .../builds/x86-64-accton-as6812-32x-cpld.c | 49 ++--- .../src/lib/x86-64-accton-as6812-32x-r0.yml | 2 +- .../x86_64_accton_as6812_32x_r0/__init__.py | 5 +- .../modules/builds/Makefile | 2 +- .../builds/x86-64-accton-as7712-32x-fan.c | 47 ++++- .../builds/x86-64-accton-as7712-32x-psu.c | 179 +++++++++++++----- .../src/lib/x86-64-accton-as7712-32x-r0.yml | 2 +- .../x86_64_accton_as7712_32x_r0/__init__.py | 2 +- 24 files changed, 269 insertions(+), 134 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile index 9de6dc2f..20ef0708 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/Makefile @@ -1,4 +1,4 @@ -KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as5712-54x ARCH := x86_64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c index a947fca1..ad09168d 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/modules/builds/x86-64-accton-as5712-54x-cpld.c @@ -33,6 +33,7 @@ #include #include #include +#include static struct dmi_system_id as5712_dmi_table[] = { { @@ -266,7 +267,7 @@ static ssize_t show_cpld_version(struct device *dev, struct device_attribute *at len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); return len; -} +} static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); @@ -313,9 +314,11 @@ static int accton_i2c_cpld_mux_probe(struct i2c_client *client, int idx; #endif data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, - I2C_CLASS_HWMON | I2C_CLASS_SPD, - accton_i2c_cpld_mux_select_chan, - accton_i2c_cpld_mux_deselect_mux); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + I2C_CLASS_HWMON | I2C_CLASS_SPD, +#endif + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); if (data->virt_adaps[chan] == NULL) { ret = -ENODEV; @@ -463,5 +466,3 @@ MODULE_LICENSE("GPL"); module_init(accton_i2c_cpld_mux_init); module_exit(accton_i2c_cpld_mux_exit); - - diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml 
b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml index e03fe786..fa4eea34 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/lib/x86-64-accton-as5712-54x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as5712-54x-r0: --stop=1 kernel: - <<: *kernel-3-16 + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py index 6e9d0aa0..f5999753 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5712-54x/platform-config/r0/src/python/x86_64_accton_as5712_54x_r0/__init__.py @@ -9,9 +9,9 @@ class OnlPlatform_x86_64_accton_as5712_54x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".5712.54" def baseconfig(self): - self.insmod('cpr_4011_4mxx', required=False) + self.insmod('cpr_4011_4mxx') for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: - self.insmod("x86-64-accton-as5712-54x-%s.ko" % m, required=False) + self.insmod("x86-64-accton-as5712-54x-%s.ko" % m) ########### initialize I2C bus 0 ########### # initialize CPLDs diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile index 46ebca16..5385dc37 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/modules/builds/Makefile @@ -1,4 +1,4 @@ -KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as5812-54t ARCH := x86_64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml index 62190b5a..0741e829 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/lib/x86-64-accton-as5812-54t-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as5812-54t-r0: --stop=1 kernel: - <<: *kernel-3-16 + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py index c372a4e7..e5d139c9 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54t/platform-config/r0/src/python/x86_64_accton_as5812_54t_r0/__init__.py @@ -10,10 +10,10 @@ class OnlPlatform_x86_64_accton_as5812_54t_r0(OnlPlatformAccton, def baseconfig(self): ########### initialize I2C bus 0 ########### - self.insmod("accton_i2c_cpld", 
required=False) - self.insmod("cpr_4011_4mxx", required=False) + self.insmod("accton_i2c_cpld") + self.insmod("cpr_4011_4mxx") for m in [ "sfp", "psu", "fan", "leds" ]: - self.insmod("x86-64-accton-as5812-54t-%s" % m, required=False) + self.insmod("x86-64-accton-as5812-54t-%s" % m) # initialize CPLDs self.new_i2c_device('accton_i2c_cpld', 0x60, 0) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile index 9fc1159e..af7d8095 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/Makefile @@ -1,4 +1,4 @@ -KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as5812-54x ARCH := x86_64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c index 710c5202..14e1d860 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/modules/builds/x86-64-accton-as5812-54x-cpld.c @@ -34,6 +34,7 @@ #include #include #include +#include static struct dmi_system_id as5812_54x_dmi_table[] = { { @@ -228,7 +229,7 @@ static ssize_t show_cpld_version(struct device *dev, struct device_attribute *at len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); return len; -} +} static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); @@ -262,9 +263,11 @@ static int accton_i2c_cpld_mux_probe(struct i2c_client *client, /* Now create an adapter for each channel */ for (chan = 0; chan < chips[data->type].nchans; chan++) { data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, - I2C_CLASS_HWMON | I2C_CLASS_SPD, - accton_i2c_cpld_mux_select_chan, - accton_i2c_cpld_mux_deselect_mux); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + I2C_CLASS_HWMON | I2C_CLASS_SPD, +#endif + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); if (data->virt_adaps[chan] == NULL) { ret = -ENODEV; @@ -391,5 +394,3 @@ MODULE_LICENSE("GPL"); module_init(accton_i2c_cpld_mux_init); module_exit(accton_i2c_cpld_mux_exit); - - diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml index 014340dc..d0f26744 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/lib/x86-64-accton-as5812-54x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as5812-54x-r0: --stop=1 kernel: - <<: *kernel-3-16 + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py index f6c28147..b71c2674 100644 --- 
a/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5812-54x/platform-config/r0/src/python/x86_64_accton_as5812_54x_r0/__init__.py @@ -9,9 +9,9 @@ class OnlPlatform_x86_64_accton_as5812_54x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".5812.54.1" def baseconfig(self): - self.insmod('cpr_4011_4mxx', required=False) + self.insmod('cpr_4011_4mxx') for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: - self.insmod("x86-64-accton-as5812-54x-%s.ko" % m, required=False) + self.insmod("x86-64-accton-as5812-54x-%s.ko" % m) ########### initialize I2C bus 0 ########### diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile index 7a8c22ac..b1af96e3 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/Makefile @@ -1,4 +1,4 @@ -KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as6712-32x ARCH := x86_64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c index e37d06bc..d1c1892e 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/modules/builds/x86-64-accton-as6712-32x-cpld.c @@ -33,6 +33,7 @@ #include #include #include +#include static struct dmi_system_id as6712_dmi_table[] = { { @@ -132,7 +133,7 @@ static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, buf[1] = val; msg.buf = buf; ret = adap->algo->master_xfer(adap, &msg, 1); - } + } else { union i2c_smbus_data data; ret = adap->algo->smbus_xfer(adap, client->addr, @@ -157,7 +158,7 @@ static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; for (res = 0, try = 0; try <= adap->retries; try++) { - res = adap->algo->smbus_xfer(adap, client->addr, flags, + res = adap->algo->smbus_xfer(adap, client->addr, flags, I2C_SMBUS_WRITE, 0x2, I2C_SMBUS_BYTE_DATA, &data); if (res != -EAGAIN) @@ -196,21 +197,21 @@ static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, /* Deselect active channel */ data->last_chan = chips[data->type].deselectChan; - + return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); } static void accton_i2c_cpld_add_client(struct i2c_client *client) { struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); - + if (!node) { dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); return; } - + node->client = client; - + mutex_lock(&list_lock); list_add(&node->list, &cpld_client_list); mutex_unlock(&list_lock); @@ -221,24 +222,24 @@ static void accton_i2c_cpld_remove_client(struct i2c_client *client) struct list_head *list_node = NULL; struct cpld_client_node *cpld_node = NULL; int found = 0; - + mutex_lock(&list_lock); list_for_each(list_node, &cpld_client_list) { cpld_node = list_entry(list_node, struct cpld_client_node, list); - + if (cpld_node->client == client) { found = 1; 
break; } } - + if (found) { list_del(list_node); kfree(cpld_node); } - + mutex_unlock(&list_lock); } @@ -252,7 +253,7 @@ static ssize_t show_cpld_version(struct device *dev, struct device_attribute *at len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); return len; -} +} static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); @@ -294,17 +295,19 @@ static int accton_i2c_cpld_mux_probe(struct i2c_client *client, data->last_chan = chips[data->type].deselectChan; /* force the first selection */ /* Now create an adapter for each channel */ - for (chan = 0; chan < chips[data->type].nchans; chan++) { + for (chan = 0; chan < chips[data->type].nchans; chan++) { data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, - I2C_CLASS_HWMON | I2C_CLASS_SPD, - accton_i2c_cpld_mux_select_chan, - accton_i2c_cpld_mux_deselect_mux); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + I2C_CLASS_HWMON | I2C_CLASS_SPD, +#endif + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); if (data->virt_adaps[chan] == NULL) { ret = -ENODEV; dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); goto virt_reg_failed; - } + } } dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", @@ -338,14 +341,14 @@ static int accton_i2c_cpld_mux_remove(struct i2c_client *client) for (chan = 0; chan < chip->nchans; ++chan) { if (data->virt_adaps[chan]) { - i2c_del_mux_adapter(data->virt_adaps[chan]); + i2c_del_mux_adapter(data->virt_adaps[chan]); data->virt_adaps[chan] = NULL; } } kfree(data); accton_i2c_cpld_remove_client(client); - + return 0; } @@ -354,19 +357,19 @@ int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) struct list_head *list_node = NULL; struct cpld_client_node *cpld_node = NULL; int ret = -EPERM; - + mutex_lock(&list_lock); list_for_each(list_node, &cpld_client_list) { cpld_node = list_entry(list_node, struct cpld_client_node, list); - + if (cpld_node->client->addr == cpld_addr) { ret = i2c_smbus_read_byte_data(cpld_node->client, reg); break; } } - + mutex_unlock(&list_lock); return ret; @@ -378,19 +381,19 @@ int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) struct list_head *list_node = NULL; struct cpld_client_node *cpld_node = NULL; int ret = -EIO; - + mutex_lock(&list_lock); list_for_each(list_node, &cpld_client_list) { cpld_node = list_entry(list_node, struct cpld_client_node, list); - + if (cpld_node->client->addr == cpld_addr) { ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); break; } } - + mutex_unlock(&list_lock); return ret; @@ -424,5 +427,3 @@ MODULE_LICENSE("GPL"); module_init(accton_i2c_cpld_mux_init); module_exit(accton_i2c_cpld_mux_exit); - - diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml index f685f030..beb9db4c 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/lib/x86-64-accton-as6712-32x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as6712-32x-r0: --stop=1 kernel: - <<: *kernel-3-16 + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py 
b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py index 9ac98c66..a27d74e5 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6712-32x/platform-config/r0/src/python/x86_64_accton_as6712_32x_r0/__init__.py @@ -8,9 +8,9 @@ class OnlPlatform_x86_64_accton_as6712_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".6712.32" def baseconfig(self): - self.insmod('cpr_4011_4mxx', required=False) + self.insmod('cpr_4011_4mxx') for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: - self.insmod("x86-64-accton-as6712-32x-%s.ko" % m, required=False) + self.insmod("x86-64-accton-as6712-32x-%s.ko" % m) ########### initialize I2C bus 0 ########### # initialize CPLD diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile index 81d692dd..0ee5e509 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/Makefile @@ -1,4 +1,4 @@ -KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as6812-32x ARCH := x86_64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c index dafa5e1b..6c146767 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/modules/builds/x86-64-accton-as6812-32x-cpld.c @@ -34,6 +34,7 @@ #include #include #include +#include static struct dmi_system_id as6812_dmi_table[] = { { @@ -132,7 +133,7 @@ static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, /* Retry automatically on arbitration loss */ orig_jiffies = jiffies; for (res = 0, try = 0; try <= adap->retries; try++) { - res = adap->algo->smbus_xfer(adap, client->addr, flags, + res = adap->algo->smbus_xfer(adap, client->addr, flags, I2C_SMBUS_WRITE, 0x2, I2C_SMBUS_BYTE_DATA, &data); if (res != -EAGAIN) @@ -170,21 +171,21 @@ static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, /* Deselect active channel */ data->last_chan = chips[data->type].deselectChan; - + return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); } static void accton_i2c_cpld_add_client(struct i2c_client *client) { struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); - + if (!node) { dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); return; } - + node->client = client; - + mutex_lock(&list_lock); list_add(&node->list, &cpld_client_list); mutex_unlock(&list_lock); @@ -195,24 +196,24 @@ static void accton_i2c_cpld_remove_client(struct i2c_client *client) struct list_head *list_node = NULL; struct cpld_client_node *cpld_node = NULL; int found = 0; - + mutex_lock(&list_lock); list_for_each(list_node, &cpld_client_list) { cpld_node = list_entry(list_node, struct cpld_client_node, list); - + if (cpld_node->client == client) { found = 1; break; } } - + if (found) { list_del(list_node); kfree(cpld_node); } - + 
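/*
 * This module's Makefile now lists both the 3.16 and 3.2 kernel packages in
 * KERNELS, and the LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) guard added
 * around the I2C_CLASS_HWMON | I2C_CLASS_SPD argument in the probe hunk
 * below keeps the i2c_add_mux_adapter() call compatible with the older 3.2
 * mux API, which does not take that class argument.
 */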
mutex_unlock(&list_lock); } @@ -226,7 +227,7 @@ static ssize_t show_cpld_version(struct device *dev, struct device_attribute *at len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); return len; -} +} static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); @@ -258,17 +259,19 @@ static int accton_i2c_cpld_mux_probe(struct i2c_client *client, data->last_chan = chips[data->type].deselectChan; /* force the first selection */ /* Now create an adapter for each channel */ - for (chan = 0; chan < chips[data->type].nchans; chan++) { + for (chan = 0; chan < chips[data->type].nchans; chan++) { data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, - I2C_CLASS_HWMON | I2C_CLASS_SPD, - accton_i2c_cpld_mux_select_chan, - accton_i2c_cpld_mux_deselect_mux); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + I2C_CLASS_HWMON | I2C_CLASS_SPD, +#endif + accton_i2c_cpld_mux_select_chan, + accton_i2c_cpld_mux_deselect_mux); if (data->virt_adaps[chan] == NULL) { ret = -ENODEV; dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); goto virt_reg_failed; - } + } } dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", @@ -302,14 +305,14 @@ static int accton_i2c_cpld_mux_remove(struct i2c_client *client) for (chan = 0; chan < chip->nchans; ++chan) { if (data->virt_adaps[chan]) { - i2c_del_mux_adapter(data->virt_adaps[chan]); + i2c_del_mux_adapter(data->virt_adaps[chan]); data->virt_adaps[chan] = NULL; } } kfree(data); accton_i2c_cpld_remove_client(client); - + return 0; } @@ -318,19 +321,19 @@ int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) struct list_head *list_node = NULL; struct cpld_client_node *cpld_node = NULL; int ret = -EPERM; - + mutex_lock(&list_lock); list_for_each(list_node, &cpld_client_list) { cpld_node = list_entry(list_node, struct cpld_client_node, list); - + if (cpld_node->client->addr == cpld_addr) { ret = i2c_smbus_read_byte_data(cpld_node->client, reg); break; } } - + mutex_unlock(&list_lock); return ret; @@ -342,19 +345,19 @@ int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) struct list_head *list_node = NULL; struct cpld_client_node *cpld_node = NULL; int ret = -EIO; - + mutex_lock(&list_lock); list_for_each(list_node, &cpld_client_list) { cpld_node = list_entry(list_node, struct cpld_client_node, list); - + if (cpld_node->client->addr == cpld_addr) { ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); break; } } - + mutex_unlock(&list_lock); return ret; diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml index 18dda3ba..97f6b35a 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/lib/x86-64-accton-as6812-32x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as6812-32x-r0: --stop=1 kernel: - <<: *kernel-3-16 + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py index 2dd65d5d..edc5c26a 100644 --- 
a/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as6812-32x/platform-config/r0/src/python/x86_64_accton_as6812_32x_r0/__init__.py @@ -8,10 +8,9 @@ class OnlPlatform_x86_64_accton_as6812_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".6812.32" def baseconfig(self): - - self.insmod('cpr_4011_4mxx', required=False) + self.insmod('cpr_4011_4mxx') for m in [ 'cpld', 'fan', 'psu', 'leds', 'sfp' ]: - self.insmod("x86-64-accton-as6812-32x-%s.ko" % m, required=False) + self.insmod("x86-64-accton-as6812-32x-%s.ko" % m) ########### initialize I2C bus 0 ########### # initialize CPLD diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile index 3be04999..35af35dc 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/Makefile @@ -1,4 +1,4 @@ -KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 KMODULES := $(wildcard *.c) PLATFORM := x86-64-accton-as7712-32x ARCH := x86_64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c index dae9a9e7..74c577d4 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-fan.c @@ -43,6 +43,7 @@ extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); */ static const u8 fan_reg[] = { 0x0F, /* fan 1-6 present status */ + 0x10, /* fan 1-6 direction(0:B2F 1:F2B) */ 0x11, /* fan PWM(for all fan) */ 0x12, /* front fan 1 speed(rpm) */ 0x13, /* front fan 2 speed(rpm) */ @@ -78,6 +79,7 @@ enum fan_id { enum sysfs_fan_attributes { FAN_PRESENT_REG, + FAN_DIRECTION_REG, FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ FAN1_FRONT_SPEED_RPM, FAN2_FRONT_SPEED_RPM, @@ -91,6 +93,12 @@ enum sysfs_fan_attributes { FAN4_REAR_SPEED_RPM, FAN5_REAR_SPEED_RPM, FAN6_REAR_SPEED_RPM, + FAN1_DIRECTION, + FAN2_DIRECTION, + FAN3_DIRECTION, + FAN4_DIRECTION, + FAN5_DIRECTION, + FAN6_DIRECTION, FAN1_PRESENT, FAN2_PRESENT, FAN3_PRESENT, @@ -150,6 +158,13 @@ DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); +/* 6 fan direction attribute in this platform */ +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(1); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(2); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(3); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(4); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(5); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(6); /* 1 fan duty cycle attribute in this platform */ DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); @@ -173,6 +188,12 @@ static struct attribute *as7712_32x_fan_attributes[] = { DECLARE_FAN_PRESENT_ATTR(4), DECLARE_FAN_PRESENT_ATTR(5), DECLARE_FAN_PRESENT_ATTR(6), + DECLARE_FAN_DIRECTION_ATTR(1), + DECLARE_FAN_DIRECTION_ATTR(2), + DECLARE_FAN_DIRECTION_ATTR(3), + DECLARE_FAN_DIRECTION_ATTR(4), + DECLARE_FAN_DIRECTION_ATTR(5), + DECLARE_FAN_DIRECTION_ATTR(6), 
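/*
 * The six fan direction attributes registered above are new in this patch:
 * CPLD register 0x10 (FAN_DIRECTION_REG) carries one bit per fan, 0 meaning
 * back-to-front and 1 front-to-back airflow, and reg_val_to_direction()
 * below extracts the bit for the requested fan before fan_show_value()
 * reports it.
 */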
DECLARE_FAN_DUTY_CYCLE_ATTR(), NULL }; @@ -209,6 +230,14 @@ static u32 reg_val_to_speed_rpm(u8 reg_val) return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; } +static u8 reg_val_to_direction(u8 reg_val, enum fan_id id) +{ + u8 mask = (1 << id); + + reg_val &= mask; + + return reg_val ? 1 : 0; +} static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) { u8 mask = (1 << id); @@ -299,6 +328,16 @@ static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, case FAN6_FAULT: ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); break; + case FAN1_DIRECTION: + case FAN2_DIRECTION: + case FAN3_DIRECTION: + case FAN4_DIRECTION: + case FAN5_DIRECTION: + case FAN6_DIRECTION: + ret = sprintf(buf, "%d\n", + reg_val_to_direction(data->reg_val[FAN_DIRECTION_REG], + attr->index - FAN1_DIRECTION)); + break; default: break; } @@ -430,10 +469,10 @@ static struct i2c_driver as7712_32x_fan_driver = { static int __init as7712_32x_fan_init(void) { - extern int platform_accton_as7712_32x(void); - if (!platform_accton_as7712_32x()) { - return -ENODEV; - } + extern int platform_accton_as7712_32x(void); + if (!platform_accton_as7712_32x()) { + return -ENODEV; + } return i2c_add_driver(&as7712_32x_fan_driver); } diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c index 4de2db3a..65f7a16a 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/modules/builds/x86-64-accton-as7712-32x-psu.c @@ -34,14 +34,18 @@ #include #include +#define MAX_MODEL_NAME 16 + +#define DC12V_FAN_DIR_OFFSET 0x34 +#define DC12V_FAN_DIR_LEN 3 + static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); /* Addresses scanned */ -static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; /* Each client has this additional data */ @@ -52,27 +56,32 @@ struct as7712_32x_psu_data { unsigned long last_updated; /* In jiffies */ u8 index; /* PSU index */ u8 status; /* Status(present/power_good) register read from CPLD */ - char model_name[9]; /* Model name, read from eeprom */ + char model_name[MAX_MODEL_NAME]; /* Model name, read from eeprom */ + char fan_dir[DC12V_FAN_DIR_LEN+1]; /* DC12V fan direction */ }; +static ssize_t show_string(struct device *dev, struct device_attribute *da, char *buf); static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev); enum as7712_32x_psu_sysfs_attributes { PSU_PRESENT, PSU_MODEL_NAME, - PSU_POWER_GOOD + PSU_POWER_GOOD, + PSU_FAN_DIR /* For DC12V only */ }; /* sysfs attributes for hwmon */ -static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_string, NULL, 
PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); +static SENSOR_DEVICE_ATTR(psu_fan_dir, S_IRUGO, show_string, NULL, PSU_FAN_DIR); static struct attribute *as7712_32x_psu_attributes[] = { &sensor_dev_attr_psu_present.dev_attr.attr, &sensor_dev_attr_psu_model_name.dev_attr.attr, &sensor_dev_attr_psu_power_good.dev_attr.attr, + &sensor_dev_attr_psu_fan_dir.dev_attr.attr, NULL }; @@ -83,6 +92,10 @@ static ssize_t show_status(struct device *dev, struct device_attribute *da, struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); u8 status = 0; + if (!data->valid) { + return -EIO; + } + if (attr->index == PSU_PRESENT) { status = !(data->status >> (1-data->index) & 0x1); } @@ -93,12 +106,25 @@ static ssize_t show_status(struct device *dev, struct device_attribute *da, return sprintf(buf, "%d\n", status); } -static ssize_t show_model_name(struct device *dev, struct device_attribute *da, +static ssize_t show_string(struct device *dev, struct device_attribute *da, char *buf) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); - - return sprintf(buf, "%s\n", data->model_name); + char *ptr = NULL; + + if (!data->valid) { + return -EIO; + } + + if (attr->index == PSU_MODEL_NAME) { + ptr = data->model_name; + } + else { /* PSU_FAN_DIR */ + ptr = data->fan_dir; + } + + return sprintf(buf, "%s\n", ptr); } static const struct attribute_group as7712_32x_psu_group = { @@ -195,30 +221,83 @@ static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 * { int result = 0; int retry_count = 5; - - while (retry_count) { - retry_count--; - - result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); - - if (unlikely(result < 0)) { - msleep(10); - continue; - } - - if (unlikely(result != data_len)) { - result = -EIO; - msleep(10); + + while (retry_count) { + retry_count--; + + result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) { + msleep(10); continue; } - - result = 0; - break; - } - + + if (unlikely(result != data_len)) { + result = -EIO; + msleep(10); + continue; + } + + result = 0; + break; + } + return result; } +enum psu_type { + PSU_TYPE_AC_110V, + PSU_TYPE_DC_48V, + PSU_TYPE_DC_12V +}; + +struct model_name_info { + enum psu_type type; + u8 offset; + u8 length; + char* model_name; +}; + +struct model_name_info models[] = { +{PSU_TYPE_AC_110V, 0x20, 8, "YM-2651Y"}, +{PSU_TYPE_DC_48V, 0x20, 8, "YM-2651V"}, +{PSU_TYPE_DC_12V, 0x00, 11, "PSU-12V-750"}, +}; + +static int as7712_32x_psu_model_name_get(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7712_32x_psu_data *data = i2c_get_clientdata(client); + int i, status; + + for (i = 0; i < ARRAY_SIZE(models); i++) { + memset(data->model_name, 0, sizeof(data->model_name)); + + status = as7712_32x_psu_read_block(client, models[i].offset, + data->model_name, models[i].length); + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x) offset(0x%x)\n", + client->addr, models[i].offset); + return status; + } + else { + data->model_name[models[i].length] = '\0'; + } + + /* Determine if the model name is known, if not, read next index + */ + if (strncmp(data->model_name, models[i].model_name, models[i].length) == 0) { + return 0; + } + else { + data->model_name[0] = '\0'; + } + } + + return -ENODATA; +} + static struct as7712_32x_psu_data 
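/*
 * Model detection is now table driven: models[] above pairs each supported
 * PSU type with the EEPROM offset, length and expected model string
 * (YM-2651Y, YM-2651V, PSU-12V-750), and as7712_32x_psu_model_name_get()
 * tries the entries in order until one matches.  The update routine that
 * follows reads the DC12V fan-direction block (offset 0x34, 3 bytes) only
 * after the model has matched the PSU-12V-750 entry.
 */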
*as7712_32x_psu_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -229,8 +308,9 @@ static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *d if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { int status; - int power_good = 0; + int power_good = 0; + data->valid = 0; dev_dbg(&client->dev, "Starting as7712_32x update\n"); /* Read psu status */ @@ -238,25 +318,35 @@ static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *d if (status < 0) { dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + goto exit; } else { data->status = status; } - + /* Read model name */ memset(data->model_name, 0, sizeof(data->model_name)); + memset(data->fan_dir, 0, sizeof(data->fan_dir)); power_good = (data->status >> (3-data->index) & 0x1); - - if (power_good) { - status = as7712_32x_psu_read_block(client, 0x20, data->model_name, - ARRAY_SIZE(data->model_name)-1); - if (status < 0) { - data->model_name[0] = '\0'; - dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + if (power_good) { + if (as7712_32x_psu_model_name_get(dev) < 0) { + goto exit; } - else { - data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + + if (strncmp(data->model_name, + models[PSU_TYPE_DC_12V].model_name, + models[PSU_TYPE_DC_12V].length) == 0) { + /* Read fan direction */ + status = as7712_32x_psu_read_block(client, DC12V_FAN_DIR_OFFSET, + data->fan_dir, DC12V_FAN_DIR_LEN); + + if (status < 0) { + data->fan_dir[0] = '\0'; + dev_dbg(&client->dev, "unable to read fan direction from (0x%x) offset(0x%x)\n", + client->addr, DC12V_FAN_DIR_OFFSET); + goto exit; + } } } @@ -264,6 +354,7 @@ static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *d data->valid = 1; } +exit: mutex_unlock(&data->update_lock); return data; @@ -271,10 +362,10 @@ static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *d static int __init as7712_32x_psu_init(void) { - extern int platform_accton_as7712_32x(void); - if (!platform_accton_as7712_32x()) { - return -ENODEV; - } + extern int platform_accton_as7712_32x(void); + if (!platform_accton_as7712_32x()) { + return -ENODEV; + } return i2c_add_driver(&as7712_32x_psu_driver); } diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml index 1aef0594..2352f103 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/x86-64-accton-as7712-32x-r0.yml @@ -18,7 +18,7 @@ x86-64-accton-as7712-32x-r0: --stop=1 kernel: - <<: *kernel-3-16 + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py index 4040d2a7..e460484f 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/python/x86_64_accton_as7712_32x_r0/__init__.py @@ -11,7 +11,7 @@ class 
OnlPlatform_x86_64_accton_as7712_32x_r0(OnlPlatformAccton, self.insmod('ym2651y') self.insmod('accton_i2c_cpld') for m in [ 'fan', 'psu', 'leds', 'sfp' ]: - self.insmod("x86-64-accton-as7712-32x-%s.ko" % m, required=False) + self.insmod("x86-64-accton-as7712-32x-%s.ko" % m) ########### initialize I2C bus 0 ########### self.new_i2c_devices([ From 993dd3e9cdc9e13b9638271f44c1f37b1b45ac71 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 00:15:53 +0000 Subject: [PATCH 248/255] Add 3.2 modules package. --- builds/any/rootfs/jessie/common/amd64-base-packages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/builds/any/rootfs/jessie/common/amd64-base-packages.yml b/builds/any/rootfs/jessie/common/amd64-base-packages.yml index a07863f3..d72ff6e5 100644 --- a/builds/any/rootfs/jessie/common/amd64-base-packages.yml +++ b/builds/any/rootfs/jessie/common/amd64-base-packages.yml @@ -11,3 +11,4 @@ - hw-management - sx-kernel - onl-kernel-3.16-lts-x86-64-all-modules +- onl-kernel-3.2-deb7-x86-64-all-modules From adbf5fb47e6a6d0d0131b0085c23abfe5a23a5a2 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 19:45:14 +0000 Subject: [PATCH 249/255] AS5512 Kernel Modules --- .../x86-64-accton-as5512-54x/modules/Makefile | 1 + .../x86-64-accton-as5512-54x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as5512-54x-fan.c | 454 ++++++ .../builds/x86-64-accton-as5512-54x-leds.c | 463 ++++++ .../builds/x86-64-accton-as5512-54x-psu.c | 295 ++++ .../builds/x86-64-accton-as5512-54x-sfp.c | 1237 +++++++++++++++++ 8 files changed, 2457 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/PKG.yml new file mode 100644 index 00000000..5f84b1ad --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as5512-54x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/.gitignore 
new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/Makefile new file mode 100644 index 00000000..fd78b4d0 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as5512-54x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-fan.c new file mode 100644 index 00000000..67e3dd66 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-fan.c @@ -0,0 +1,454 @@ +/* + * A hwmon driver for the Accton as5512 54x fan control + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAN_MAX_NUMBER 5 +#define FAN_SPEED_CPLD_TO_RPM_STEP 150 +#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 +#define FAN_DUTY_CYCLE_MIN 0 +#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ + +#define CPLD_REG_FAN_STATUS_OFFSET 0x0C +#define CPLD_REG_FANR_STATUS_OFFSET 0x1E +#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1D + +#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 +#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 +#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 +#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 +#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 + +#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 +#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 +#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A +#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B +#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C + +#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0x0D + +#define CPLD_FAN1_INFO_BIT_MASK 0x01 +#define CPLD_FAN2_INFO_BIT_MASK 0x02 +#define CPLD_FAN3_INFO_BIT_MASK 0x04 +#define CPLD_FAN4_INFO_BIT_MASK 0x08 +#define CPLD_FAN5_INFO_BIT_MASK 0x10 + +#define PROJECT_NAME + +#define LOCAL_DEBUG 0 + +static struct accton_as5512_54x_fan *fan_data = NULL; + +struct accton_as5512_54x_fan { + struct platform_device *pdev; + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ + u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ + u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ + u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ + u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ + u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ +}; + +/*******************/ +#define MAKE_FAN_MASK_OR_REG(name,type) \ + CPLD_FAN##type##1_##name, \ + CPLD_FAN##type##2_##name, \ + CPLD_FAN##type##3_##name, \ + CPLD_FAN##type##4_##name, \ + CPLD_FAN##type##5_##name, + +/* fan related data + */ +static const u8 fan_info_mask[] = { + MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) +}; + +static const u8 fan_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) +}; + +static const u8 fanr_speed_reg[] = { + MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) +}; + +/*******************/ +#define DEF_FAN_SET(id) \ + FAN##id##_FAULT, \ + FAN##id##_SPEED, \ + FAN##id##_DUTY_CYCLE, \ + FAN##id##_DIRECTION, \ + FANR##id##_FAULT, \ + FANR##id##_SPEED, + +enum sysfs_fan_attributes { + DEF_FAN_SET(1) + DEF_FAN_SET(2) + DEF_FAN_SET(3) + DEF_FAN_SET(4) + DEF_FAN_SET(5) +}; +/*******************/ +static void accton_as5512_54x_fan_update_device(struct device *dev); +static int accton_as5512_54x_fan_read_value(u8 reg); +static int accton_as5512_54x_fan_write_value(u8 reg, u8 value); + +static ssize_t fan_set_duty_cycle(struct device *dev, + struct device_attribute *da,const char *buf, size_t count); +static ssize_t fan_show_value(struct device *dev, + struct device_attribute *da, char *buf); + +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + + +/*******************/ +#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ + static 
SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ + static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); + +#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) + +#define _MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) \ + static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ + fan_set_duty_cycle, FAN1_DUTY_CYCLE); + +#define MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) _MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) + + +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) +MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) +MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(PROJECT_NAME,) +/*******************/ + +#define _MAKE_FAN_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ + &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, + +#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) + +#define _MAKE_FAN_DUTY_ATTR(prj, id) \ + &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr, + +#define MAKE_FAN_DUTY_ATTR(prj, id) _MAKE_FAN_DUTY_ATTR(prj, id) + +static struct attribute *accton_as5512_54x_fan_attributes[] = { + /* fan related attributes */ + MAKE_FAN_ATTR(PROJECT_NAME,1) + MAKE_FAN_ATTR(PROJECT_NAME,2) + MAKE_FAN_ATTR(PROJECT_NAME,3) + MAKE_FAN_ATTR(PROJECT_NAME,4) + MAKE_FAN_ATTR(PROJECT_NAME,5) + MAKE_FAN_DUTY_ATTR(PROJECT_NAME,) + NULL +}; +/*******************/ + +/* fan related functions + */ +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + ssize_t ret = 0; + int data_index, type_index; + + accton_as5512_54x_fan_update_device(dev); + + if (fan_data->valid == 0) { + return ret; + } + + type_index = attr->index%FAN2_FAULT; + data_index = attr->index/FAN2_FAULT; + + switch (type_index) { + case FAN1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_SPEED: + ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DUTY_CYCLE: + ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FAN1_DIRECTION: + ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_FAULT: + ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + case FANR1_SPEED: + ret = sprintf(buf, "%d\n", 
fan_data->r_speed[data_index]); + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); + break; + default: + if (LOCAL_DEBUG) + printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); + break; + } + + return ret; +} +/*******************/ +static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) { + + int error, value; + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) + return -EINVAL; + + accton_as5512_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); + + fan_data->valid = 0; + + return count; +} + +static const struct attribute_group accton_as5512_54x_fan_group = { + .attrs = accton_as5512_54x_fan_attributes, +}; + +static int accton_as5512_54x_fan_read_value(u8 reg) +{ + return accton_i2c_cpld_read(0x60, reg); +} + +static int accton_as5512_54x_fan_write_value(u8 reg, u8 value) +{ + return accton_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5512_54x_fan_update_device(struct device *dev) +{ + int speed, r_speed, fault, r_fault, ctrl_speed, direction; + int i; + + mutex_lock(&fan_data->update_lock); + + if (LOCAL_DEBUG) + printk ("Starting accton_as5512_54x_fan update \n"); + + if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { + /* do nothing */ + goto _exit; + } + + fan_data->valid = 0; + + if (LOCAL_DEBUG) + printk ("Starting accton_as5512_54x_fan update 2 \n"); + + fault = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); + r_fault = accton_as5512_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); + direction = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); + ctrl_speed = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); + + if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); + + for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; + if (LOCAL_DEBUG) + printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); + + fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; + fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; + fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; + + /* fan speed + */ + speed = accton_as5512_54x_fan_read_value(fan_speed_reg[i]); + r_speed = accton_as5512_54x_fan_read_value(fanr_speed_reg[i]); + if ( (speed < 0) || (r_speed < 0) ) + { + if (LOCAL_DEBUG) + printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); + goto _exit; /* error */ + } + + if (LOCAL_DEBUG) + printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); + + fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; + fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; + } + + /* finish to update */ + fan_data->last_updated = jiffies; + fan_data->valid = 1; + +_exit: + mutex_unlock(&fan_data->update_lock); +} + +static int accton_as5512_54x_fan_probe(struct platform_device *pdev) +{ + int status = -1; + + /* Register sysfs hooks */ + status = sysfs_create_group(&pdev->dev.kobj, &accton_as5512_54x_fan_group); + if (status) { + goto exit; + + } + + fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); + if 
(IS_ERR(fan_data->hwmon_dev)) { + status = PTR_ERR(fan_data->hwmon_dev); + goto exit_remove; + } + + dev_info(&pdev->dev, "accton_as5512_54x_fan\n"); + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &accton_as5512_54x_fan_group); +exit: + return status; +} + +static int accton_as5512_54x_fan_remove(struct platform_device *pdev) +{ + hwmon_device_unregister(fan_data->hwmon_dev); + sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5512_54x_fan_group); + + return 0; +} + +#define DRVNAME "as5512_54x_fan" + +static struct platform_driver accton_as5512_54x_fan_driver = { + .probe = accton_as5512_54x_fan_probe, + .remove = accton_as5512_54x_fan_remove, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5512_54x_fan_init(void) +{ + int ret; + + extern int platform_accton_as5512_54x(void); + if(!platform_accton_as5512_54x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as5512_54x_fan_driver); + if (ret < 0) { + goto exit; + } + + fan_data = kzalloc(sizeof(struct accton_as5512_54x_fan), GFP_KERNEL); + if (!fan_data) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5512_54x_fan_driver); + goto exit; + } + + mutex_init(&fan_data->update_lock); + fan_data->valid = 0; + + fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(fan_data->pdev)) { + ret = PTR_ERR(fan_data->pdev); + platform_driver_unregister(&accton_as5512_54x_fan_driver); + kfree(fan_data); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5512_54x_fan_exit(void) +{ + platform_device_unregister(fan_data->pdev); + platform_driver_unregister(&accton_as5512_54x_fan_driver); + kfree(fan_data); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5512_54x_fan driver"); +MODULE_LICENSE("GPL"); + +module_init(accton_as5512_54x_fan_init); +module_exit(accton_as5512_54x_fan_exit); + + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-leds.c new file mode 100644 index 00000000..761483a9 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-leds.c @@ -0,0 +1,463 @@ +/* + * A LED driver for the accton_as5512_54x_led + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include + +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "as5512_54x_led" + +struct accton_as5512_54x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[2]; /* Register value, 0 = LOC/DIAG/FAN LED + 1 = PSU1/PSU2 LED */ +}; + +static struct accton_as5512_54x_led_data *ledctl = NULL; + +/* LED related data + */ +#define LED_TYPE_PSU1_REG_MASK 0x03 +#define LED_MODE_PSU1_GREEN_MASK 0x02 +#define LED_MODE_PSU1_AMBER_MASK 0x01 +#define LED_MODE_PSU1_OFF_MASK 0x03 +#define LED_MODE_PSU1_AUTO_MASK 0x00 + +#define LED_TYPE_PSU2_REG_MASK 0x0C +#define LED_MODE_PSU2_GREEN_MASK 0x08 +#define LED_MODE_PSU2_AMBER_MASK 0x04 +#define LED_MODE_PSU2_OFF_MASK 0x0C +#define LED_MODE_PSU2_AUTO_MASK 0x00 + +#define LED_TYPE_DIAG_REG_MASK 0x0C +#define LED_MODE_DIAG_GREEN_MASK 0x08 +#define LED_MODE_DIAG_AMBER_MASK 0x04 +#define LED_MODE_DIAG_OFF_MASK 0x0C + +#define LED_TYPE_FAN_REG_MASK 0x03 +#define LED_MODE_FAN_GREEN_MASK 0x02 +#define LED_MODE_FAN_AMBER_MASK 0x01 +#define LED_MODE_FAN_OFF_MASK 0x03 +#define LED_MODE_FAN_AUTO_MASK 0x00 + +#define LED_TYPE_LOC_REG_MASK 0x30 +#define LED_MODE_LOC_ON_MASK 0x00 +#define LED_MODE_LOC_OFF_MASK 0x10 +#define LED_MODE_LOC_BLINK_MASK 0x20 + +static const u8 led_reg[] = { + 0xA, /* LOC/DIAG/FAN LED*/ + 0xB, /* PSU1/PSU2 LED */ +}; + +enum led_type { + LED_TYPE_PSU1, + LED_TYPE_PSU2, + LED_TYPE_DIAG, + LED_TYPE_FAN, + LED_TYPE_LOC +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER, + LED_MODE_AMBER_BLINK, + LED_MODE_RED, + LED_MODE_RED_BLINK, + LED_MODE_BLUE, + LED_MODE_BLUE_BLINK, + LED_MODE_AUTO, + LED_MODE_UNKNOWN +}; + +struct led_type_mode { + enum led_type type; + int type_mask; + enum led_light_mode mode; + int mode_mask; +}; + +static struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, +{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, +{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, +{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, 
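/*
 * led_type_mode_data[] maps each LED type's register mask to the bit
 * pattern for every supported light mode.  led_reg_val_to_light_mode() and
 * led_light_mode_to_reg_val() below walk this table in both directions, so
 * the CPLD encoding lives in one place and the per-LED callbacks stay
 * generic.
 */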
LED_MODE_DIAG_AMBER_MASK}, +{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, +{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} +}; + +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if ((led_type_mode_data[i].type_mask & reg_val) == + led_type_mode_data[i].mode_mask) + { + return led_type_mode_data[i].mode; + } + } + + return 0; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) { + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + reg_val = led_type_mode_data[i].mode_mask | + (reg_val & (~led_type_mode_data[i].type_mask)); + } + + return reg_val; +} + +static int accton_as5512_54x_led_read_value(u8 reg) +{ + return accton_i2c_cpld_read(0x60, reg); +} + +static int accton_as5512_54x_led_write_value(u8 reg, u8 value) +{ + return accton_i2c_cpld_write(0x60, reg, value); +} + +static void accton_as5512_54x_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as5512_54x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as5512_54x_led_read_value(led_reg[i]); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5512_54x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as5512_54x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as5512_54x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as5512_54x_led_psu_1_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); +} + +static enum led_brightness accton_as5512_54x_led_psu_1_get(struct led_classdev *cdev) +{ + accton_as5512_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); +} + +static void accton_as5512_54x_led_psu_2_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); +} + +static enum led_brightness accton_as5512_54x_led_psu_2_get(struct led_classdev *cdev) +{ + accton_as5512_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); +} + +static void accton_as5512_54x_led_fan_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + 
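/*
 * Each per-LED brightness_set/brightness_get callback is a thin wrapper
 * like this one: it passes the CPLD register that holds the LED
 * (led_reg[0] for LOC/DIAG/FAN, led_reg[1] for PSU1/PSU2) and its led_type
 * to the shared accton_as5512_54x_led_set() and
 * led_reg_val_to_light_mode() helpers.
 */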
accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); +} + +static enum led_brightness accton_as5512_54x_led_fan_get(struct led_classdev *cdev) +{ + accton_as5512_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); +} + +static void accton_as5512_54x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); +} + +static enum led_brightness accton_as5512_54x_led_diag_get(struct led_classdev *cdev) +{ + accton_as5512_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as5512_54x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); +} + +static enum led_brightness accton_as5512_54x_led_loc_get(struct led_classdev *cdev) +{ + accton_as5512_54x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static struct led_classdev accton_as5512_54x_leds[] = { + [LED_TYPE_PSU1] = { + .name = "accton_as5512_54x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as5512_54x_led_psu_1_set, + .brightness_get = accton_as5512_54x_led_psu_1_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as5512_54x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as5512_54x_led_psu_2_set, + .brightness_get = accton_as5512_54x_led_psu_2_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_FAN] = { + .name = "accton_as5512_54x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as5512_54x_led_fan_set, + .brightness_get = accton_as5512_54x_led_fan_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_DIAG] = { + .name = "accton_as5512_54x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as5512_54x_led_diag_set, + .brightness_get = accton_as5512_54x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_LOC] = { + .name = "accton_as5512_54x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as5512_54x_led_loc_set, + .brightness_get = accton_as5512_54x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as5512_54x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { + led_classdev_suspend(&accton_as5512_54x_leds[i]); + } + + return 0; +} + +static int accton_as5512_54x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { + led_classdev_resume(&accton_as5512_54x_leds[i]); + } + + return 0; +} + +static int accton_as5512_54x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as5512_54x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as5512_54x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as5512_54x_leds[i]); + } + } + + return ret; +} + +static int 
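/*
 * The probe above registers the classdevs one at a time and, if any
 * registration fails, tries to unregister only the ones that succeeded.
 * Note that its cleanup loop indexes the array with 'i' rather than the
 * loop variable 'j', so it unregisters the same (failed) entry repeatedly;
 * led_classdev_unregister(&accton_as5512_54x_leds[j]) is presumably what
 * was intended.
 */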
accton_as5512_54x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { + led_classdev_unregister(&accton_as5512_54x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as5512_54x_led_driver = { + .probe = accton_as5512_54x_led_probe, + .remove = accton_as5512_54x_led_remove, + .suspend = accton_as5512_54x_led_suspend, + .resume = accton_as5512_54x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as5512_54x_led_init(void) +{ + int ret; + + extern int platform_accton_as5512_54x(void); + if(!platform_accton_as5512_54x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as5512_54x_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as5512_54x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as5512_54x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as5512_54x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as5512_54x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as5512_54x_led_driver); + kfree(ledctl); +} + +module_init(accton_as5512_54x_led_init); +module_exit(accton_as5512_54x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as5512_54x_led driver"); +MODULE_LICENSE("GPL"); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-psu.c new file mode 100644 index 00000000..66d61f3c --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-psu.c @@ -0,0 +1,295 @@ +/* + * An hwmon driver for accton as5512_54x Power Module + * + * Copyright (C) 2015 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as5512_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as5512_54x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[14]; /* Model name, read from eeprom */ +}; + +static struct as5512_54x_psu_data *as5512_54x_psu_update_device(struct device *dev); + +enum as5512_54x_psu_sysfs_attributes { + PSU_INDEX, + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as5512_54x_psu_attributes[] = { + &sensor_dev_attr_psu_index.dev_attr.attr, + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_index(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5512_54x_psu_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->index); +} + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as5512_54x_psu_data *data = as5512_54x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((data->index - 1) * 4) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as5512_54x_psu_data *data = as5512_54x_psu_update_device(dev); + + return sprintf(buf, "%s", data->model_name); +} + +static const struct attribute_group as5512_54x_psu_group = { + .attrs = as5512_54x_psu_attributes, +}; + +static int as5512_54x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as5512_54x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as5512_54x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, 
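/*
 * On this platform the CPLD packs the two PSU status fields into one
 * register (0x2), four bits per supply: show_status() above treats bit 0
 * of each nibble as present (active low) and bit 1 as power-good, and the
 * probe below derives data->index from the client address (0x38/0x50 for
 * PSU1, 0x3b/0x53 for PSU2).
 */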
&as5512_54x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x38 || client->addr == 0x50) { + data->index = 1; + } + else if (client->addr == 0x3b || client->addr == 0x53) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as5512_54x_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as5512_54x_psu_remove(struct i2c_client *client) +{ + struct as5512_54x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as5512_54x_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as5512_54x_psu_id[] = { + { "as5512_54x_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as5512_54x_psu_id); + +static struct i2c_driver as5512_54x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as5512_54x_psu", + }, + .probe = as5512_54x_psu_probe, + .remove = as5512_54x_psu_remove, + .id_table = as5512_54x_psu_id, + .address_list = normal_i2c, +}; + +static int as5512_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as5512_54x_psu_data *as5512_54x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as5512_54x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + + dev_dbg(&client->dev, "Starting as5512_54x update\n"); + + /* Read model name */ + if (client->addr == 0x38 || client->addr == 0x3b) { + /* AC power */ + status = as5512_54x_psu_read_block(client, 0x26, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + else { + /* DC power */ + status = as5512_54x_psu_read_block(client, 0x50, data->model_name, + ARRAY_SIZE(data->model_name)-1); + } + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + + /* Read psu status */ + status = accton_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as5512_54x_psu_init(void) +{ + extern int platform_accton_as5512_54x(void); + if(!platform_accton_as5512_54x()) { + return -ENODEV; + } + + return i2c_add_driver(&as5512_54x_psu_driver); +} + +static void __exit as5512_54x_psu_exit(void) +{ + i2c_del_driver(&as5512_54x_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5512_54x_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as5512_54x_psu_init); +module_exit(as5512_54x_psu_exit); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-sfp.c 
b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-sfp.c new file mode 100644 index 00000000..d89e71d0 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/modules/builds/x86-64-accton-as5512-54x-sfp.c @@ -0,0 +1,1237 @@ +/* + * SFP driver for accton as5512_54x sfp + * + * Copyright (C) Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "as5512_54x_sfp" + +#define DEBUG_MODE 0 + +#if (DEBUG_MODE == 1) + #define DEBUG_PRINT(fmt, args...) \ + printk (KERN_INFO "%s:%s[%d]: " fmt "\r\n", __FILE__, __FUNCTION__, __LINE__, ##args) +#else + #define DEBUG_PRINT(fmt, args...) +#endif + +#define NUM_OF_SFP_PORT 54 +#define EEPROM_NAME "sfp_eeprom" +#define EEPROM_SIZE 256 /* 256 byte eeprom */ +#define BIT_INDEX(i) (1ULL << (i)) +#define USE_I2C_BLOCK_READ 1 +#define I2C_RW_RETRY_COUNT 3 +#define I2C_RW_RETRY_INTERVAL 100 /* ms */ + +#define SFP_EEPROM_A0_I2C_ADDR (0xA0 >> 1) +#define SFP_EEPROM_A2_I2C_ADDR (0xA2 >> 1) + +#define SFF8024_PHYSICAL_DEVICE_ID_ADDR 0x0 +#define SFF8024_DEVICE_ID_SFP 0x3 +#define SFF8024_DEVICE_ID_QSFP 0xC +#define SFF8024_DEVICE_ID_QSFP_PLUS 0xD +#define SFF8024_DEVICE_ID_QSFP28 0x11 + +#define SFF8472_DIAG_MON_TYPE_ADDR 92 +#define SFF8472_DIAG_MON_TYPE_DDM_MASK 0x40 +#define SFF8472_10G_ETH_COMPLIANCE_ADDR 0x3 +#define SFF8472_10G_BASE_MASK 0xF0 + +#define SFF8436_RX_LOS_ADDR 3 +#define SFF8436_TX_FAULT_ADDR 4 +#define SFF8436_TX_DISABLE_ADDR 86 + +static ssize_t sfp_eeprom_read(struct i2c_client *, u8, u8 *,int); +static ssize_t sfp_eeprom_write(struct i2c_client *, u8 , const char *,int); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_I2C_ADDR, SFP_EEPROM_A2_I2C_ADDR, I2C_CLIENT_END }; + +#define CPLD_PORT_TO_FRONT_PORT(port) (port+1) + +enum port_numbers { +sfp1, sfp2, sfp3, sfp4, sfp5, sfp6, sfp7, sfp8, +sfp9, sfp10, sfp11, sfp12, sfp13, sfp14, sfp15, sfp16, +sfp17, sfp18, sfp19, sfp20, sfp21, sfp22, sfp23, sfp24, +sfp25, sfp26, sfp27, sfp28, sfp29, sfp30, sfp31, sfp32, +sfp33, sfp34, sfp35, sfp36, sfp37, sfp38, sfp39, sfp40, +sfp41, sfp42, sfp43, sfp44, sfp45, sfp46, sfp47, sfp48, +sfp49, sfp50, sfp51, sfp52, sfp53, sfp54 +}; + +static const struct i2c_device_id sfp_device_id[] = { +{ "sfp1", sfp1 }, { "sfp2", sfp2 }, { "sfp3", sfp3 }, { "sfp4", sfp4 }, +{ "sfp5", sfp5 }, { "sfp6", sfp6 }, { "sfp7", sfp7 }, { "sfp8", sfp8 }, +{ "sfp9", sfp9 }, { "sfp10", sfp10 }, { "sfp11", sfp11 }, { "sfp12", sfp12 }, +{ "sfp13", sfp13 }, { 
"sfp14", sfp14 }, { "sfp15", sfp15 }, { "sfp16", sfp16 }, +{ "sfp17", sfp17 }, { "sfp18", sfp18 }, { "sfp19", sfp19 }, { "sfp20", sfp20 }, +{ "sfp21", sfp21 }, { "sfp22", sfp22 }, { "sfp23", sfp23 }, { "sfp24", sfp24 }, +{ "sfp25", sfp25 }, { "sfp26", sfp26 }, { "sfp27", sfp27 }, { "sfp28", sfp28 }, +{ "sfp29", sfp29 }, { "sfp30", sfp30 }, { "sfp31", sfp31 }, { "sfp32", sfp32 }, +{ "sfp33", sfp33 }, { "sfp34", sfp34 }, { "sfp35", sfp35 }, { "sfp36", sfp36 }, +{ "sfp37", sfp37 }, { "sfp38", sfp38 }, { "sfp39", sfp39 }, { "sfp40", sfp40 }, +{ "sfp41", sfp41 }, { "sfp42", sfp42 }, { "sfp43", sfp43 }, { "sfp44", sfp44 }, +{ "sfp45", sfp45 }, { "sfp46", sfp46 }, { "sfp47", sfp47 }, { "sfp48", sfp48 }, +{ "sfp49", sfp49 }, { "sfp50", sfp50 }, { "sfp51", sfp51 }, { "sfp52", sfp52 }, +{ "sfp53", sfp53 }, { "sfp54", sfp54 }, +{ /* LIST END */ } +}; +MODULE_DEVICE_TABLE(i2c, sfp_device_id); + +/* + * list of valid port types + * note OOM_PORT_TYPE_NOT_PRESENT to indicate no + * module is present in this port + */ +typedef enum oom_driver_port_type_e { + OOM_DRIVER_PORT_TYPE_INVALID, + OOM_DRIVER_PORT_TYPE_NOT_PRESENT, + OOM_DRIVER_PORT_TYPE_SFP, + OOM_DRIVER_PORT_TYPE_SFP_PLUS, + OOM_DRIVER_PORT_TYPE_QSFP, + OOM_DRIVER_PORT_TYPE_QSFP_PLUS, + OOM_DRIVER_PORT_TYPE_QSFP28 +} oom_driver_port_type_t; + +enum driver_type_e { + DRIVER_TYPE_SFP_MSA, + DRIVER_TYPE_SFP_DDM, + DRIVER_TYPE_QSFP +}; + +/* Each client has this additional data + */ +struct eeprom_data { + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + struct bin_attribute bin; /* eeprom data */ +}; + +struct sfp_msa_data { + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u64 status[6]; /* bit0:port0, bit1:port1 and so on */ + /* index 0 => tx_fail + 1 => tx_disable + 2 => rx_loss + 3 => device id + 4 => 10G Ethernet Compliance Codes + to distinguish SFP or SFP+ + 5 => DIAGNOSTIC MONITORING TYPE */ + struct eeprom_data eeprom; +}; + +struct sfp_ddm_data { + struct eeprom_data eeprom; +}; + +struct qsfp_data { + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 status[3]; /* bit0:port0, bit1:port1 and so on */ + /* index 0 => tx_fail + 1 => tx_disable + 2 => rx_loss */ + + u8 device_id; + struct eeprom_data eeprom; +}; + +struct sfp_port_data { + struct mutex update_lock; + enum driver_type_e driver_type; + int port; /* CPLD port index */ + oom_driver_port_type_t port_type; + u64 present; /* present status, bit0:port0, bit1:port1 and so on */ + + struct sfp_msa_data *msa; + struct sfp_ddm_data *ddm; + struct qsfp_data *qsfp; + + struct i2c_client *client; +}; + +enum sfp_sysfs_attributes { + PRESENT, + PRESENT_ALL, + PORT_NUMBER, + PORT_TYPE, + DDM_IMPLEMENTED, + TX_FAULT, + TX_FAULT1, + TX_FAULT2, + TX_FAULT3, + TX_FAULT4, + TX_DISABLE, + TX_DISABLE1, + TX_DISABLE2, + TX_DISABLE3, + TX_DISABLE4, + RX_LOS, + RX_LOS1, + RX_LOS2, + RX_LOS3, + RX_LOS4, + RX_LOS_ALL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); +} + +static struct sfp_port_data* sfp_update_present(struct i2c_client *client) +{ + int i = 0, j = 0, status = -1; + u8 reg; + unsigned short cpld_addr; + struct sfp_port_data *data = i2c_get_clientdata(client); + + DEBUG_PRINT("Starting sfp present status update"); + 
mutex_lock(&data->update_lock); + data->present = 0; + + /* Read present status of port 1~48(SFP port) */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 3; j++) { + cpld_addr = 0x61+i; + reg = 0x6+j; + status = accton_i2c_cpld_read(cpld_addr, reg); + + if (unlikely(status < 0)) { + data = ERR_PTR(status); + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); + goto exit; + } + + DEBUG_PRINT("Present status = 0x%lx\r\n", data->present); + data->present |= (u64)status << ((i*24) + (j%3)*8); + } + } + + /* Read present status of port 49-54(QSFP port) */ + cpld_addr = 0x62; + reg = 0x14; + status = accton_i2c_cpld_read(cpld_addr, reg); + + if (unlikely(status < 0)) { + data = ERR_PTR(status); + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); + goto exit; + } + else { + data->present |= (u64)status << 48; + } + + DEBUG_PRINT("Present status = 0x%lx", data->present); +exit: + mutex_unlock(&data->update_lock); + return data; +} + +static struct sfp_port_data* sfp_update_tx_rx_status(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + int i = 0, j = 0; + int status = -1; + + if (time_before(jiffies, data->msa->last_updated + HZ + HZ / 2) && data->msa->valid) { + return data; + } + + DEBUG_PRINT("Starting as5512_54x sfp tx rx status update"); + mutex_lock(&data->update_lock); + data->msa->valid = 0; + memset(data->msa->status, 0, sizeof(data->msa->status)); + + /* Read status of port 1~48(SFP port) */ + for (i = 0; i < 2; i++) { + for (j = 0; j < 9; j++) { + u8 reg; + unsigned short cpld_addr; + reg = 0x9+j; + cpld_addr = 0x61+i; + + status = accton_i2c_cpld_read(cpld_addr, reg); + if (unlikely(status < 0)) { + data = ERR_PTR(status); + dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); + goto exit; + } + + data->msa->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); + } + } + + data->msa->valid = 1; + data->msa->last_updated = jiffies; + +exit: + mutex_unlock(&data->update_lock); + return data; +} + +static ssize_t sfp_set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + unsigned short cpld_addr = 0; + u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; + long disable; + int error; + + error = kstrtol(buf, 10, &disable); + if (error) { + return error; + } + + mutex_lock(&data->update_lock); + + if(data->port < 24) { + cpld_addr = 0x61; + cpld_reg = 0xC + data->port / 8; + cpld_bit = 1 << (data->port % 8); + } + else { /* port 24 ~ 48 */ + cpld_addr = 0x62; + cpld_reg = 0xC + (data->port - 24) / 8; + cpld_bit = 1 << (data->port % 8); + } + + /* Read current status */ + cpld_val = accton_i2c_cpld_read(cpld_addr, cpld_reg); + + /* Update tx_disable status */ + if (disable) { + data->msa->status[1] |= BIT_INDEX(data->port); + cpld_val |= cpld_bit; + } + else { + data->msa->status[1] &= ~BIT_INDEX(data->port); + cpld_val &= ~cpld_bit; + } + + accton_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); + mutex_unlock(&data->update_lock); + return count; +} + +static int sfp_is_port_present(struct i2c_client *client, int port) +{ + struct sfp_port_data *data = i2c_get_clientdata(client); + + data = sfp_update_present(client); + if (IS_ERR(data)) { + return PTR_ERR(data); + } + + return (data->present & BIT_INDEX(data->port)) ? 
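+ /* data->present keeps the raw CPLD sense: bits 0-47 cover SFP ports
+    1-48 (CPLDs 0x61/0x62, regs 0x06-0x08) and bits 48-53 cover QSFP
+    ports 49-54 (CPLD 0x62, reg 0x14); a set bit means the module is
+    absent, hence the inverted result here. */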
0 : 1; +} + +static ssize_t show_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + + if (PRESENT_ALL == attr->index) { + int i; + u8 values[7] = {0}; + struct sfp_port_data *data = sfp_update_present(client); + + if (IS_ERR(data)) { + return PTR_ERR(data); + } + + for (i = 0; i < ARRAY_SIZE(values); i++) { + values[i] = ~(u8)(data->present >> (i * 8)); + } + + /* Return values 1 -> 54 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], + values[3], values[4], values[5], + values[6] & 0x3F); + } + else { + struct sfp_port_data *data = i2c_get_clientdata(client); + int present = sfp_is_port_present(client, data->port); + + if (IS_ERR_VALUE(present)) { + return present; + } + + /* PRESENT */ + return sprintf(buf, "%d\n", present); + } +} + +static struct sfp_port_data *sfp_update_port_type(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + u8 buf = 0; + int status; + + mutex_lock(&data->update_lock); + + switch (data->driver_type) { + case DRIVER_TYPE_SFP_MSA: + { + status = sfp_eeprom_read(client, SFF8024_PHYSICAL_DEVICE_ID_ADDR, &buf, sizeof(buf)); + if (unlikely(status < 0)) { + data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; + break; + } + + if (buf != SFF8024_DEVICE_ID_SFP) { + data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; + break; + } + + status = sfp_eeprom_read(client, SFF8472_10G_ETH_COMPLIANCE_ADDR, &buf, sizeof(buf)); + if (unlikely(status < 0)) { + data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; + break; + } + + DEBUG_PRINT("sfp port type (0x3) data = (0x%x)", buf); + data->port_type = buf & SFF8472_10G_BASE_MASK ? 
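+ /* SFF-8472 byte 3: any 10GBASE-* compliance bit in the upper nibble
+    marks the module as SFP+, otherwise it is treated as plain SFP. */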
OOM_DRIVER_PORT_TYPE_SFP_PLUS : OOM_DRIVER_PORT_TYPE_SFP; + break; + } + case DRIVER_TYPE_QSFP: + { + status = sfp_eeprom_read(client, SFF8024_PHYSICAL_DEVICE_ID_ADDR, &buf, sizeof(buf)); + if (unlikely(status < 0)) { + data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; + break; + } + + DEBUG_PRINT("qsfp port type (0x0) buf = (0x%x)", buf); + switch (buf) { + case SFF8024_DEVICE_ID_QSFP: + data->port_type = OOM_DRIVER_PORT_TYPE_QSFP; + break; + case SFF8024_DEVICE_ID_QSFP_PLUS: + data->port_type = OOM_DRIVER_PORT_TYPE_QSFP_PLUS; + break; + case SFF8024_DEVICE_ID_QSFP28: + data->port_type = OOM_DRIVER_PORT_TYPE_QSFP_PLUS; + break; + default: + data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; + break; + } + + break; + } + default: + break; + } + + mutex_unlock(&data->update_lock); + return data; +} + +static ssize_t show_port_type(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + int present = sfp_is_port_present(client, data->port); + + if (IS_ERR_VALUE(present)) { + return present; + } + + if (!present) { + return sprintf(buf, "%d\n", OOM_DRIVER_PORT_TYPE_NOT_PRESENT); + } + + sfp_update_port_type(dev); + return sprintf(buf, "%d\n", data->port_type); +} + +static struct sfp_port_data* qsfp_update_tx_rx_status(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + int i, status = -1; + u8 buf = 0; + u8 reg[] = {SFF8436_TX_FAULT_ADDR, SFF8436_TX_DISABLE_ADDR, SFF8436_RX_LOS_ADDR}; + + if (time_before(jiffies, data->qsfp->last_updated + HZ + HZ / 2) && data->qsfp->valid) { + return data; + } + + DEBUG_PRINT("Starting sfp tx rx status update"); + mutex_lock(&data->update_lock); + data->qsfp->valid = 0; + memset(data->qsfp->status, 0, sizeof(data->qsfp->status)); + + /* Notify device to update tx fault/ tx disable/ rx los status */ + for (i = 0; i < ARRAY_SIZE(reg); i++) { + status = sfp_eeprom_read(client, reg[i], &buf, sizeof(buf)); + if (unlikely(status < 0)) { + data = ERR_PTR(status); + goto exit; + } + } + msleep(200); + + /* Read actual tx fault/ tx disable/ rx los status */ + for (i = 0; i < ARRAY_SIZE(reg); i++) { + status = sfp_eeprom_read(client, reg[i], &buf, sizeof(buf)); + if (unlikely(status < 0)) { + data = ERR_PTR(status); + goto exit; + } + + DEBUG_PRINT("qsfp reg(0x%x) status = (0x%x)", reg[i], data->qsfp->status[i]); + data->qsfp->status[i] = (buf & 0xF); + } + + data->qsfp->valid = 1; + data->qsfp->last_updated = jiffies; + +exit: + mutex_unlock(&data->update_lock); + return data; +} + +static ssize_t qsfp_show_tx_rx_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + int status; + u8 val = 0; + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + + status = sfp_is_port_present(client, data->port); + if (IS_ERR_VALUE(status)) { + return status; + } + + data = qsfp_update_tx_rx_status(dev); + if (IS_ERR(data)) { + return PTR_ERR(data); + } + + switch (attr->index) { + case TX_FAULT1: + case TX_FAULT2: + case TX_FAULT3: + case TX_FAULT4: + val = (data->qsfp->status[2] & BIT_INDEX(attr->index - TX_FAULT1)) ? 1 : 0; + break; + case TX_DISABLE1: + case TX_DISABLE2: + case TX_DISABLE3: + case TX_DISABLE4: + val = (data->qsfp->status[1] & BIT_INDEX(attr->index - TX_DISABLE1)) ? 
1 : 0; + break; + case RX_LOS1: + case RX_LOS2: + case RX_LOS3: + case RX_LOS4: + val = (data->qsfp->status[0] & BIT_INDEX(attr->index - RX_LOS1)) ? 1 : 0; + break; + default: + break; + } + + return sprintf(buf, "%d\n", val); +} + +static ssize_t qsfp_set_tx_disable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + long disable; + int status; + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct sfp_port_data *data = NULL; + + status = kstrtol(buf, 10, &disable); + if (status) { + return status; + } + + data = qsfp_update_tx_rx_status(dev); + if (IS_ERR(data)) { + return PTR_ERR(data); + } + + mutex_lock(&data->update_lock); + + if (disable) { + data->qsfp->status[1] |= (1 << (attr->index - TX_DISABLE1)); + } + else { + data->qsfp->status[1] &= ~(1 << (attr->index - TX_DISABLE1)); + } + + DEBUG_PRINT("index = (%d), status = (0x%x)", attr->index, data->qsfp->status[1]); + status = sfp_eeprom_write(data->client, SFF8436_TX_DISABLE_ADDR, &data->qsfp->status[1], sizeof(data->qsfp->status[1])); + if (unlikely(status < 0)) { + count = status; + } + + mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t sfp_show_ddm_implemented(struct device *dev, struct device_attribute *da, + char *buf) +{ + int status; + char ddm; + struct i2c_client *client = to_i2c_client(dev); + struct sfp_port_data *data = i2c_get_clientdata(client); + + status = sfp_is_port_present(client, data->port); + if (IS_ERR_VALUE(status)) { + return status; + } + + status = sfp_eeprom_read(client, SFF8472_DIAG_MON_TYPE_ADDR, &ddm, sizeof(ddm)); + if (unlikely(status < 0)) { + return status; + } + + return sprintf(buf, "%d\n", (ddm & SFF8472_DIAG_MON_TYPE_DDM_MASK) ? 1 : 0); +} + +static ssize_t sfp_show_tx_rx_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + u8 val = 0, index = 0; + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct sfp_port_data *data = sfp_update_tx_rx_status(dev); + + if (IS_ERR(data)) { + return PTR_ERR(data); + } + + if(attr->index == RX_LOS_ALL) { + int i = 0; + u8 values[6] = {0}; + + for (i = 0; i < ARRAY_SIZE(values); i++) { + values[i] = (u8)(data->msa->status[2] >> (i * 8)); + } + + /** Return values 1 -> 48 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], + values[3], values[4], values[5]); + } + + switch (attr->index) { + case TX_FAULT: + index = 0; + break; + case TX_DISABLE: + index = 1; + break; + case RX_LOS: + index = 2; + break; + default: + break; + } + + val = (data->msa->status[index] & BIT_INDEX(data->port)) ? 
1 : 0; + return sprintf(buf, "%d\n", val); +} + +/* SFP/QSFP common attributes for sysfs */ +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_port_type, S_IRUGO, show_port_type, NULL, PORT_TYPE); +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, PRESENT); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, PRESENT_ALL); + +/* QSFP attributes for sysfs */ +static SENSOR_DEVICE_ATTR(sfp_rx_los1, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS1); +static SENSOR_DEVICE_ATTR(sfp_rx_los2, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS2); +static SENSOR_DEVICE_ATTR(sfp_rx_los3, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS3); +static SENSOR_DEVICE_ATTR(sfp_rx_los4, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS4); +static SENSOR_DEVICE_ATTR(sfp_tx_disable1, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE1); +static SENSOR_DEVICE_ATTR(sfp_tx_disable2, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE2); +static SENSOR_DEVICE_ATTR(sfp_tx_disable3, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE3); +static SENSOR_DEVICE_ATTR(sfp_tx_disable4, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE4); +static SENSOR_DEVICE_ATTR(sfp_tx_fault1, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT1); +static SENSOR_DEVICE_ATTR(sfp_tx_fault2, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT2); +static SENSOR_DEVICE_ATTR(sfp_tx_fault3, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT3); +static SENSOR_DEVICE_ATTR(sfp_tx_fault4, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT4); +static struct attribute *qsfp_attributes[] = { + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_port_type.dev_attr.attr, + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los1.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los2.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los3.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los4.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable1.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable2.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable3.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable4.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault1.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault2.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault3.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault4.dev_attr.attr, + NULL +}; + +/* SFP msa attributes for sysfs */ +static SENSOR_DEVICE_ATTR(sfp_ddm_implemented, S_IRUGO, sfp_show_ddm_implemented, NULL, DDM_IMPLEMENTED); +static SENSOR_DEVICE_ATTR(sfp_rx_los, S_IRUGO, sfp_show_tx_rx_status, NULL, RX_LOS); +static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, sfp_show_tx_rx_status, NULL, RX_LOS_ALL); +static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, sfp_show_tx_rx_status, sfp_set_tx_disable, TX_DISABLE); +static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, sfp_show_tx_rx_status, NULL, TX_FAULT); +static struct attribute *sfp_msa_attributes[] = { + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_port_type.dev_attr.attr, + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + &sensor_dev_attr_sfp_ddm_implemented.dev_attr.attr, + &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los.dev_attr.attr, + &sensor_dev_attr_sfp_rx_los_all.dev_attr.attr, + &sensor_dev_attr_sfp_tx_disable.dev_attr.attr, 
+ NULL +}; + +/* SFP ddm attributes for sysfs */ +static struct attribute *sfp_ddm_attributes[] = { + NULL +}; + +static ssize_t sfp_eeprom_write(struct i2c_client *client, u8 command, const char *data, + int data_len) +{ +#if USE_I2C_BLOCK_READ + int status, retry = I2C_RW_RETRY_COUNT; + + if (data_len > I2C_SMBUS_BLOCK_MAX) { + data_len = I2C_SMBUS_BLOCK_MAX; + } + + while (retry) { + status = i2c_smbus_write_i2c_block_data(client, command, data_len, data); + if (unlikely(status < 0)) { + msleep(I2C_RW_RETRY_INTERVAL); + retry--; + continue; + } + + break; + } + + if (unlikely(status < 0)) { + return status; + } + + return data_len; +#else + int status, retry = I2C_RW_RETRY_COUNT; + + while (retry) { + status = i2c_smbus_write_byte_data(client, command, *data); + if (unlikely(status < 0)) { + msleep(I2C_RW_RETRY_INTERVAL); + retry--; + continue; + } + + break; + } + + if (unlikely(status < 0)) { + return status; + } + + return 1; +#endif + + +} + +static ssize_t sfp_port_write(struct sfp_port_data *data, + const char *buf, loff_t off, size_t count) +{ + ssize_t retval = 0; + + if (unlikely(!count)) { + return count; + } + + /* + * Write data to chip, protecting against concurrent updates + * from this host, but not from other I2C masters. + */ + mutex_lock(&data->update_lock); + + while (count) { + ssize_t status; + + status = sfp_eeprom_write(data->client, off, buf, count); + if (status <= 0) { + if (retval == 0) { + retval = status; + } + break; + } + buf += status; + off += status; + count -= status; + retval += status; + } + + mutex_unlock(&data->update_lock); + return retval; +} + + +static ssize_t sfp_bin_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct sfp_port_data *data; + DEBUG_PRINT("%s(%d) offset = (%d), count = (%d)", off, count); + data = dev_get_drvdata(container_of(kobj, struct device, kobj)); + return sfp_port_write(data, buf, off, count); +} + +static ssize_t sfp_eeprom_read(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ +#if USE_I2C_BLOCK_READ + int status, retry = I2C_RW_RETRY_COUNT; + + if (data_len > I2C_SMBUS_BLOCK_MAX) { + data_len = I2C_SMBUS_BLOCK_MAX; + } + + while (retry) { + status = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + if (unlikely(status < 0)) { + msleep(I2C_RW_RETRY_INTERVAL); + retry--; + continue; + } + + break; + } + + if (unlikely(status < 0)) { + goto abort; + } + if (unlikely(status != data_len)) { + status = -EIO; + goto abort; + } + + //result = data_len; + +abort: + return status; +#else + int status, retry = I2C_RW_RETRY_COUNT; + + while (retry) { + status = i2c_smbus_read_byte_data(client, command); + if (unlikely(status < 0)) { + msleep(I2C_RW_RETRY_INTERVAL); + retry--; + continue; + } + + break; + } + + if (unlikely(status < 0)) { + dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); + goto abort; + } + + *data = (u8)status; + status = 1; + +abort: + return status; +#endif +} + +static ssize_t sfp_port_read(struct sfp_port_data *data, + char *buf, loff_t off, size_t count) +{ + ssize_t retval = 0; + + if (unlikely(!count)) { + DEBUG_PRINT("Count = 0, return"); + return count; + } + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. 
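+ * Reads are chunked: sfp_eeprom_read() caps each transfer at
+ * I2C_SMBUS_BLOCK_MAX bytes, so this loop keeps advancing buf/off
+ * by the number of bytes actually returned until count is drained.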
+ */ + mutex_lock(&data->update_lock); + + while (count) { + ssize_t status; + + status = sfp_eeprom_read(data->client, off, buf, count); + if (status <= 0) { + if (retval == 0) { + retval = status; + } + break; + } + + buf += status; + off += status; + count -= status; + retval += status; + } + + mutex_unlock(&data->update_lock); + return retval; + +} + +static ssize_t sfp_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct sfp_port_data *data; + DEBUG_PRINT("offset = (%d), count = (%d)", off, count); + data = dev_get_drvdata(container_of(kobj, struct device, kobj)); + return sfp_port_read(data, buf, off, count); +} + +static int sfp_sysfs_eeprom_init(struct kobject *kobj, struct bin_attribute *eeprom) +{ + int err; + + sysfs_bin_attr_init(eeprom); + eeprom->attr.name = EEPROM_NAME; + eeprom->attr.mode = S_IWUSR | S_IRUGO; + eeprom->read = sfp_bin_read; + eeprom->write = sfp_bin_write; + eeprom->size = EEPROM_SIZE; + + /* Create eeprom file */ + err = sysfs_create_bin_file(kobj, eeprom); + if (err) { + return err; + } + + return 0; +} + +static int sfp_sysfs_eeprom_cleanup(struct kobject *kobj, struct bin_attribute *eeprom) +{ + sysfs_remove_bin_file(kobj, eeprom); + return 0; +} + +static const struct attribute_group sfp_msa_group = { + .attrs = sfp_msa_attributes, +}; + +static int sfp_i2c_check_functionality(struct i2c_client *client) +{ +#if USE_I2C_BLOCK_READ + return i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK); +#else + return i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA); +#endif +} + +static int sfp_msa_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, + struct sfp_msa_data **data) +{ + int status; + struct sfp_msa_data *msa; + + if (!sfp_i2c_check_functionality(client)) { + status = -EIO; + goto exit; + } + + msa = kzalloc(sizeof(struct sfp_msa_data), GFP_KERNEL); + if (!msa) { + status = -ENOMEM; + goto exit; + } + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &sfp_msa_group); + if (status) { + goto exit_free; + } + + /* init eeprom */ + status = sfp_sysfs_eeprom_init(&client->dev.kobj, &msa->eeprom.bin); + if (status) { + goto exit_remove; + } + + *data = msa; + dev_info(&client->dev, "sfp msa '%s'\n", client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &sfp_msa_group); +exit_free: + kfree(msa); +exit: + + return status; +} + +static const struct attribute_group sfp_ddm_group = { + .attrs = sfp_ddm_attributes, +}; + +static int sfp_ddm_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, + struct sfp_ddm_data **data) +{ + int status; + struct sfp_ddm_data *ddm; + + if (!sfp_i2c_check_functionality(client)) { + status = -EIO; + goto exit; + } + + ddm = kzalloc(sizeof(struct sfp_ddm_data), GFP_KERNEL); + if (!ddm) { + status = -ENOMEM; + goto exit; + } + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &sfp_ddm_group); + if (status) { + goto exit_free; + } + + /* init eeprom */ + status = sfp_sysfs_eeprom_init(&client->dev.kobj, &ddm->eeprom.bin); + if (status) { + goto exit_remove; + } + + *data = ddm; + dev_info(&client->dev, "sfp ddm '%s'\n", client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &sfp_ddm_group); +exit_free: + kfree(ddm); +exit: + + return status; +} + +static const struct attribute_group qsfp_group = { + .attrs = qsfp_attributes, +}; + +static int qsfp_probe(struct i2c_client 
*client, const struct i2c_device_id *dev_id, + struct qsfp_data **data) +{ + int status; + struct qsfp_data *qsfp; + + if (!sfp_i2c_check_functionality(client)) { + status = -EIO; + goto exit; + } + + qsfp = kzalloc(sizeof(struct qsfp_data), GFP_KERNEL); + if (!qsfp) { + status = -ENOMEM; + goto exit; + } + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &qsfp_group); + if (status) { + goto exit_free; + } + + /* init eeprom */ + status = sfp_sysfs_eeprom_init(&client->dev.kobj, &qsfp->eeprom.bin); + if (status) { + goto exit_remove; + } + + /* Bring QSFPs out of reset */ + accton_i2c_cpld_write(0x62, 0x15, 0x3F); + + *data = qsfp; + dev_info(&client->dev, "qsfp '%s'\n", client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &qsfp_group); +exit_free: + kfree(qsfp); +exit: + + return status; +} + +static int sfp_device_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct sfp_port_data *data = NULL; + + data = kzalloc(sizeof(struct sfp_port_data), GFP_KERNEL); + if (!data) { + return -ENOMEM; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + data->client = client; + + if (dev_id->driver_data >= sfp1 && dev_id->driver_data <= sfp48) { + if (client->addr == SFP_EEPROM_A0_I2C_ADDR) { + data->driver_type = DRIVER_TYPE_SFP_MSA; + return sfp_msa_probe(client, dev_id, &data->msa); + } + else if (client->addr == SFP_EEPROM_A2_I2C_ADDR) { + data->driver_type = DRIVER_TYPE_SFP_DDM; + return sfp_ddm_probe(client, dev_id, &data->ddm); + } + } + else { /* sfp49 ~ sfp54 */ + if (client->addr == SFP_EEPROM_A0_I2C_ADDR) { + data->driver_type = DRIVER_TYPE_QSFP; + return qsfp_probe(client, dev_id, &data->qsfp); + } + } + + return -ENODEV; +} + +static int sfp_msa_remove(struct i2c_client *client, struct sfp_msa_data *data) +{ + sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); + sysfs_remove_group(&client->dev.kobj, &sfp_msa_group); + kfree(data); + return 0; +} + +static int sfp_ddm_remove(struct i2c_client *client, struct sfp_ddm_data *data) +{ + sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); + sysfs_remove_group(&client->dev.kobj, &sfp_ddm_group); + kfree(data); + return 0; +} + +static int qfp_remove(struct i2c_client *client, struct qsfp_data *data) +{ + sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); + sysfs_remove_group(&client->dev.kobj, &qsfp_group); + kfree(data); + return 0; +} + +static int sfp_device_remove(struct i2c_client *client) +{ + struct sfp_port_data *data = i2c_get_clientdata(client); + + switch (data->driver_type) { + case DRIVER_TYPE_SFP_MSA: + return sfp_msa_remove(client, data->msa); + case DRIVER_TYPE_SFP_DDM: + return sfp_ddm_remove(client, data->ddm); + case DRIVER_TYPE_QSFP: + return qfp_remove(client, data->qsfp); + } + + return 0; +} + +static struct i2c_driver sfp_driver = { + .driver = { + .name = DRIVER_NAME, + }, + .probe = sfp_device_probe, + .remove = sfp_device_remove, + .id_table = sfp_device_id, + .address_list = normal_i2c, +}; + +static int __init sfp_init(void) +{ + extern int platform_accton_as5512_54x(void); + if(!platform_accton_as5512_54x()) { + return -ENODEV; + } + + return i2c_add_driver(&sfp_driver); +} + +static void __exit sfp_exit(void) +{ + i2c_del_driver(&sfp_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as5512_54x_sfp driver"); +MODULE_LICENSE("GPL"); + +module_init(sfp_init); +module_exit(sfp_exit); + + From 
cfdaa56ad5c526b089f170f6d4fd3ae9d1992297 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 19:45:30 +0000 Subject: [PATCH 250/255] AS7512 Kernel Modules. --- .../x86-64-accton-as7512-32x/modules/Makefile | 1 + .../x86-64-accton-as7512-32x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as7512-32x-fan.c | 510 ++++++++++++++++++ .../builds/x86-64-accton-as7512-32x-leds.c | 503 +++++++++++++++++ .../builds/x86-64-accton-as7512-32x-psu.c | 291 ++++++++++ .../builds/x86-64-accton-as7512-32x-sfp.c | 356 ++++++++++++ 8 files changed, 1668 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/PKG.yml new file mode 100644 index 00000000..31a58650 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as7512-32x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/Makefile new file mode 100644 index 00000000..81a91d58 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as7512-32x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-fan.c new file mode 100644 index 00000000..6dfa74f8 --- /dev/null +++ 
b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-fan.c @@ -0,0 +1,510 @@ +/* + * A hwmon driver for the Accton as7512 32x fan + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "as7512_32x_fan" + +static struct as7512_32x_fan_data *as7512_32x_fan_update_device(struct device *dev); +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +/* fan related data, the index should match sysfs_fan_attributes + */ +static const u8 fan_reg[] = { + 0x0F, /* fan 1-6 present status */ + 0x10, /* fan 1-6 direction(0:B2F 1:F2B) */ + 0x11, /* fan PWM(for all fan) */ + 0x12, /* front fan 1 speed(rpm) */ + 0x13, /* front fan 2 speed(rpm) */ + 0x14, /* front fan 3 speed(rpm) */ + 0x15, /* front fan 4 speed(rpm) */ + 0x16, /* front fan 5 speed(rpm) */ + 0x17, /* front fan 6 speed(rpm) */ + 0x22, /* rear fan 1 speed(rpm) */ + 0x23, /* rear fan 2 speed(rpm) */ + 0x24, /* rear fan 3 speed(rpm) */ + 0x25, /* rear fan 4 speed(rpm) */ + 0x26, /* rear fan 5 speed(rpm) */ + 0x27, /* rear fan 6 speed(rpm) */ +}; + +/* Each client has this additional data */ +struct as7512_32x_fan_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */ +}; + +enum fan_id { + FAN1_ID, + FAN2_ID, + FAN3_ID, + FAN4_ID, + FAN5_ID, + FAN6_ID +}; + +enum sysfs_fan_attributes { + FAN_PRESENT_REG, + FAN_DIRECTION_REG, + FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ + FAN1_FRONT_SPEED_RPM, + FAN2_FRONT_SPEED_RPM, + FAN3_FRONT_SPEED_RPM, + FAN4_FRONT_SPEED_RPM, + FAN5_FRONT_SPEED_RPM, + FAN6_FRONT_SPEED_RPM, + FAN1_REAR_SPEED_RPM, + FAN2_REAR_SPEED_RPM, + FAN3_REAR_SPEED_RPM, + FAN4_REAR_SPEED_RPM, + FAN5_REAR_SPEED_RPM, + FAN6_REAR_SPEED_RPM, + FAN1_DIRECTION, + FAN2_DIRECTION, + FAN3_DIRECTION, + FAN4_DIRECTION, + FAN5_DIRECTION, + FAN6_DIRECTION, + FAN1_PRESENT, + FAN2_PRESENT, + FAN3_PRESENT, + FAN4_PRESENT, + FAN5_PRESENT, + FAN6_PRESENT, + FAN1_FAULT, + FAN2_FAULT, + FAN3_FAULT, + FAN4_FAULT, + FAN5_FAULT, + FAN6_FAULT +}; + +/* Define attributes + */ +#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT) +#define DECLARE_FAN_FAULT_ATTR(index) 
&sensor_dev_attr_fan##index##_fault.dev_attr.attr + +#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION) +#define DECLARE_FAN_DIRECTION_ATTR(index) &sensor_dev_attr_fan##index##_direction.dev_attr.attr + +#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE) +#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr + +#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT) +#define DECLARE_FAN_PRESENT_ATTR(index) &sensor_dev_attr_fan##index##_present.dev_attr.attr + +#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_FRONT_SPEED_RPM);\ + static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_REAR_SPEED_RPM) +#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr + +/* 6 fan fault attributes in this platform */ +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); + +#if 0 +/* 6 fan direction attribute in this platform */ +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(1); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(2); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(3); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(4); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(5); +DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(6); +#endif + +/* 6 fan speed(rpm) attributes in this platform */ +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); +/* 6 fan present attributes in this platform */ +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); +/* 1 fan duty cycle attribute in this platform */ +DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); + +static struct attribute *as7512_32x_fan_attributes[] = { + /* fan related attributes */ + DECLARE_FAN_FAULT_ATTR(1), + DECLARE_FAN_FAULT_ATTR(2), + DECLARE_FAN_FAULT_ATTR(3), + DECLARE_FAN_FAULT_ATTR(4), + DECLARE_FAN_FAULT_ATTR(5), + DECLARE_FAN_FAULT_ATTR(6), +#if 0 + DECLARE_FAN_DIRECTION_ATTR(1), + DECLARE_FAN_DIRECTION_ATTR(2), + DECLARE_FAN_DIRECTION_ATTR(3), + DECLARE_FAN_DIRECTION_ATTR(4), + DECLARE_FAN_DIRECTION_ATTR(5), + DECLARE_FAN_DIRECTION_ATTR(6), +#endif + DECLARE_FAN_SPEED_RPM_ATTR(1), + DECLARE_FAN_SPEED_RPM_ATTR(2), + DECLARE_FAN_SPEED_RPM_ATTR(3), + DECLARE_FAN_SPEED_RPM_ATTR(4), + DECLARE_FAN_SPEED_RPM_ATTR(5), + DECLARE_FAN_SPEED_RPM_ATTR(6), + DECLARE_FAN_PRESENT_ATTR(1), + DECLARE_FAN_PRESENT_ATTR(2), + DECLARE_FAN_PRESENT_ATTR(3), + DECLARE_FAN_PRESENT_ATTR(4), + DECLARE_FAN_PRESENT_ATTR(5), + DECLARE_FAN_PRESENT_ATTR(6), + DECLARE_FAN_DUTY_CYCLE_ATTR(), + 
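+ /* Declared with an empty index on purpose: the macro pair expands to a
+    single fan_duty_cycle_percentage attribute (FAN_DUTY_CYCLE_PERCENTAGE),
+    because one CPLD register (0x11) drives the PWM for all six fans.
+    Its low 4 bits map to a duty cycle of (val + 1) * 6.25%, e.g.
+    0x7 -> 50%, 0xF -> 100%. */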
NULL +}; + +#define FAN_DUTY_CYCLE_REG_MASK 0x0F +#define FAN_MAX_DUTY_CYCLE 100 +#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 + +static int as7512_32x_fan_read_value(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int as7512_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + +/* fan utility functions + */ +static u32 reg_val_to_duty_cycle(u8 reg_val) +{ + reg_val &= FAN_DUTY_CYCLE_REG_MASK; + return (u32)(reg_val+1) * 625 / 100; +} + +static u8 duty_cycle_to_reg_val(u8 duty_cycle) +{ + return ((u32)duty_cycle * 100 / 625) - 1; +} + +static u32 reg_val_to_speed_rpm(u8 reg_val) +{ + return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; +} + +static u8 reg_val_to_direction(u8 reg_val, enum fan_id id) +{ + u8 mask = (1 << id); + + reg_val &= mask; + + return reg_val ? 1 : 0; +} + +static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) +{ + u8 mask = (1 << id); + + reg_val &= mask; + + return reg_val ? 0 : 1; +} + +static u8 is_fan_fault(struct as7512_32x_fan_data *data, enum fan_id id) +{ + u8 ret = 1; + int front_fan_index = FAN1_FRONT_SPEED_RPM + id; + int rear_fan_index = FAN1_REAR_SPEED_RPM + id; + + /* Check if the speed of front or rear fan is ZERO, + */ + if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && + reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { + ret = 0; + } + + return ret; +} + +static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + int error, value; + struct i2c_client *client = to_i2c_client(dev); + struct as7512_32x_fan_data *data = i2c_get_clientdata(client); + + error = kstrtoint(buf, 10, &value); + if (error) { + return error; + } + + if (value < 0 || value > FAN_MAX_DUTY_CYCLE) { + return -EINVAL; + } + + /* Disable the watchdog timer + */ + error = as7512_32x_fan_write_value(client, 0x33, 0); + + if (error != 0) { + dev_dbg(&client->dev, "Unable to disable the watchdog timer\n"); + return error; + } + + as7512_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); + data->valid = 0; + + return count; +} + +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7512_32x_fan_data *data = as7512_32x_fan_update_device(dev); + ssize_t ret = 0; + + if (data->valid) { + switch (attr->index) { + case FAN_DUTY_CYCLE_PERCENTAGE: + { + u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); + ret = sprintf(buf, "%u\n", duty_cycle); + break; + } + case FAN1_FRONT_SPEED_RPM: + case FAN2_FRONT_SPEED_RPM: + case FAN3_FRONT_SPEED_RPM: + case FAN4_FRONT_SPEED_RPM: + case FAN5_FRONT_SPEED_RPM: + case FAN6_FRONT_SPEED_RPM: + case FAN1_REAR_SPEED_RPM: + case FAN2_REAR_SPEED_RPM: + case FAN3_REAR_SPEED_RPM: + case FAN4_REAR_SPEED_RPM: + case FAN5_REAR_SPEED_RPM: + case FAN6_REAR_SPEED_RPM: + ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); + break; + case FAN1_DIRECTION: + case FAN2_DIRECTION: + case FAN3_DIRECTION: + case FAN4_DIRECTION: + case FAN5_DIRECTION: + case FAN6_DIRECTION: + ret = sprintf(buf, "%d\n", + reg_val_to_direction(data->reg_val[FAN_DIRECTION_REG], + attr->index - FAN1_DIRECTION)); + break; + case FAN1_PRESENT: + case FAN2_PRESENT: + case FAN3_PRESENT: + case FAN4_PRESENT: + case FAN5_PRESENT: + case FAN6_PRESENT: + ret = sprintf(buf, "%d\n", + 
reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], + attr->index - FAN1_PRESENT)); + break; + case FAN1_FAULT: + case FAN2_FAULT: + case FAN3_FAULT: + case FAN4_FAULT: + case FAN5_FAULT: + case FAN6_FAULT: + ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); + break; + default: + break; + } + } + + return ret; +} + +static const struct attribute_group as7512_32x_fan_group = { + .attrs = as7512_32x_fan_attributes, +}; + +static struct as7512_32x_fan_data *as7512_32x_fan_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7512_32x_fan_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || + !data->valid) { + int i; + + dev_dbg(&client->dev, "Starting as7512_32x_fan update\n"); + data->valid = 0; + + /* Update fan data + */ + for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { + int status = as7512_32x_fan_read_value(client, fan_reg[i]); + + if (status < 0) { + data->valid = 0; + mutex_unlock(&data->update_lock); + dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); + return data; + } + else { + data->reg_val[i] = status; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int as7512_32x_fan_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7512_32x_fan_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7512_32x_fan_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7512_32x_fan_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: fan '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7512_32x_fan_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7512_32x_fan_remove(struct i2c_client *client) +{ + struct as7512_32x_fan_data *data = i2c_get_clientdata(client); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7512_32x_fan_group); + + return 0; +} + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; + +static const struct i2c_device_id as7512_32x_fan_id[] = { + { "as7512_32x_fan", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as7512_32x_fan_id); + +static struct i2c_driver as7512_32x_fan_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = DRVNAME, + }, + .probe = as7512_32x_fan_probe, + .remove = as7512_32x_fan_remove, + .id_table = as7512_32x_fan_id, + .address_list = normal_i2c, +}; + +static int __init as7512_32x_fan_init(void) +{ + extern int platform_accton_as7512_32x(void); + if (!platform_accton_as7512_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7512_32x_fan_driver); +} + +static void __exit as7512_32x_fan_exit(void) +{ + i2c_del_driver(&as7512_32x_fan_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as7512_32x_fan driver"); +MODULE_LICENSE("GPL"); + 
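+ /*
+  * as7512_32x_fan_init(), registered below via module_init(), first asks
+  * platform_accton_as7512_32x() (provided elsewhere in the platform
+  * package) whether this really is an AS7512-32x and returns -ENODEV
+  * otherwise, so the I2C driver is only registered on that platform.
+  */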
+module_init(as7512_32x_fan_init); +module_exit(as7512_32x_fan_exit); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-leds.c new file mode 100644 index 00000000..3dc5def5 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-leds.c @@ -0,0 +1,503 @@ +/* + * A LED driver for the accton_as7512_32x_led + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +#define DRVNAME "as7512_32x_led" +#define NUM_OF_LED_REG 5 + +struct accton_as7512_32x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[NUM_OF_LED_REG]; /* 5 LED registers */ +}; + +static struct accton_as7512_32x_led_data *ledctl = NULL; + +/* LED related data + */ + +#define LED_CNTRLER_I2C_ADDRESS (0x60) +/* +#define LED_TYPE_DIAG_REG_MASK (0x3) +#define LED_MODE_DIAG_GREEN_VALUE (0x02) +#define LED_MODE_DIAG_RED_VALUE (0x01) +#define LED_MODE_DIAG_AMBER_VALUE (0x00) +#define LED_MODE_DIAG_OFF_VALUE (0x03) +#define LED_TYPE_DIAG_REG_MASK 0xFF +#define LED_MODE_DIAG_GREEN_VALUE 0xFF +#define LED_MODE_DIAG_RED_VALUE 0xFF +#define LED_MODE_DIAG_OFF_VALUE 0 + +#define LED_TYPE_LOC_REG_MASK 0xFF +#define LED_MODE_LOC_ON_VALUE 0x0 +#define LED_MODE_LOC_OFF_VALUE 0xFF +*/ +/* +#define LED_TYPE_FAN_REG_MASK 0xFF +#define LED_MODE_FAN_GREEN_VALUE 0xFF +#define LED_MODE_FAN_RED_VALUE 0xFF +#define LED_MODE_FAN_OFF_VALUE 0 +*/ +#define LED_BRIGHTNESS_ON_VALUE 0x0 +#define LED_BRIGHTNESS_OFF_VALUE 0xFF + +static const u8 led_reg[NUM_OF_LED_REG] = +{ + 0x41, /* Diag LED-Green. */ + 0x42, /* Diag LED-Red. */ + 0x43, /* FAN LED-Green. */ + 0x44, /* FAN LED-Red. */ + 0x45, /* LOC LED. 
*/ + //0x1C, /* FAN 1-4 LED */ + //0x1D /* FAN 5-6 LED */ +}; + +enum led_type { + LED_TYPE_DIAG_GREEN, + LED_TYPE_DIAG_RED, + LED_TYPE_LOC, + LED_TYPE_FAN_GREEN, + LED_TYPE_FAN_RED +}; + +struct led_reg { + u32 types; + u8 reg_addr; +}; + +enum led_light_mode { + LED_MODE_OFF = 0, + LED_MODE_GREEN, + LED_MODE_AMBER, + LED_MODE_RED, + LED_MODE_BLUE, + LED_MODE_GREEN_BLINK, + LED_MODE_AMBER_BLINK, + LED_MODE_RED_BLINK, + LED_MODE_BLUE_BLINK, + LED_MODE_AUTO, + LED_MODE_UNKNOWN +}; + +#if 0 +struct led_type_mode { + enum led_type type; + enum led_light_mode mode; + int reg_bit_mask; + int mode_value; +}; + +struct led_type_mode led_type_mode_data[] = { +{LED_TYPE_LOC, LED_MODE_OFF, LED_TYPE_LOC_REG_MASK, LED_MODE_LOC_OFF_VALUE}, +{LED_TYPE_LOC, LED_MODE_BLUE, LED_TYPE_LOC_REG_MASK, LED_MODE_LOC_ON_VALUE}, +{LED_TYPE_DIAG_GREEN, LED_MODE_OFF, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_OFF_VALUE}, +{LED_TYPE_DIAG_GREEN, LED_MODE_GREEN, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_GREEN_VALUE}, +{LED_TYPE_DIAG_RED, LED_MODE_OFF, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_OFF_VALUE}, +{LED_TYPE_DIAG_RED, LED_MODE_RED, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_RED_VALUE}, +{LED_TYPE_FAN_GREEN, LED_MODE_OFF, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_OFF_VALUE}, +{LED_TYPE_FAN_GREEN, LED_MODE_GREEN, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_GREEN_VALUE}, +{LED_TYPE_FAN_RED, LED_MODE_OFF, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_OFF_VALUE}, +{LED_TYPE_FAN_RED, LED_MODE_RED, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_RED_VALUE}, +}; +#endif + +/* +static int accton_getLedReg(enum led_type type, u8 *reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(led_reg_map); i++) { + if(led_reg_map[i].types & (type<<1)){ + *reg = led_reg_map[i].reg_addr; + return 0; + } + } + return 1; +} +*/ + +#if 0 +static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + + if (type != led_type_mode_data[i].type) + continue; + + if ((led_type_mode_data[i].reg_bit_mask & reg_val) == + led_type_mode_data[i].mode_value) + { + return led_type_mode_data[i].mode; + } + } + + return LED_MODE_UNKNOWN; +} + +static u8 led_light_mode_to_reg_val(enum led_type type, + enum led_light_mode mode, u8 reg_val) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { + if (type != led_type_mode_data[i].type) + continue; + + if (mode != led_type_mode_data[i].mode) + continue; + + reg_val = led_type_mode_data[i].mode_value | + (reg_val & (~led_type_mode_data[i].reg_bit_mask)); + break; + } + + return reg_val; +} +#endif + +static int accton_as7512_32x_led_read_value(u8 reg) +{ + return accton_i2c_cpld_read(LED_CNTRLER_I2C_ADDRESS, reg); +} + +static int accton_as7512_32x_led_write_value(u8 reg, u8 value) +{ + return accton_i2c_cpld_write(LED_CNTRLER_I2C_ADDRESS, reg, value); +} + +static void accton_as7512_32x_led_update(void) +{ + mutex_lock(&ledctl->update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as7512_32x_led update\n"); + ledctl->valid = 0; + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as7512_32x_led_read_value(led_reg[i]); + + if (status < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); + goto exit; + } + else { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +#if 0 +static void 
accton_as7512_32x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + u8 reg, enum led_type type) +{ + int reg_val; + + mutex_lock(&ledctl->update_lock); + + reg_val = accton_as7512_32x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as7512_32x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} +#endif + +static void accton_as7512_32x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + if (LED_MODE_OFF == (enum led_light_mode)led_light_mode) { + accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_OFF_VALUE); + accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_OFF_VALUE); + return; + } + + if (LED_MODE_GREEN == (enum led_light_mode)led_light_mode) { + accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_ON_VALUE); + accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_OFF_VALUE); + return; + } + + if (LED_MODE_RED == (enum led_light_mode)led_light_mode) { + accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_OFF_VALUE); + accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_ON_VALUE); + return; + } + + if (LED_MODE_AMBER == (enum led_light_mode)led_light_mode) { + accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_ON_VALUE); + accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_ON_VALUE); + return; + } +} + +static enum led_brightness accton_as7512_32x_led_diag_get(struct led_classdev *cdev) +{ + u8 is_green_reg_on, is_red_reg_on; + + accton_as7512_32x_led_update(); + + is_green_reg_on = (ledctl->reg_val[0] == LED_BRIGHTNESS_OFF_VALUE) ? 0 : 1; + is_red_reg_on = (ledctl->reg_val[1] == LED_BRIGHTNESS_OFF_VALUE) ? 
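+                     /* a register holding 0xFF means that colour is driven
+                        off; green and red asserted together read back as
+                        amber */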
0 : 1; + + if (is_green_reg_on && is_red_reg_on) { + return LED_MODE_AMBER; + } + + if (is_green_reg_on) { + return LED_MODE_GREEN; + } + + if (is_red_reg_on) { + return LED_MODE_RED; + } + + return LED_MODE_OFF; +} + +static void accton_as7512_32x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + if (LED_MODE_OFF == (enum led_light_mode)led_light_mode) { + accton_as7512_32x_led_write_value(led_reg[4], LED_BRIGHTNESS_OFF_VALUE); + return; + } + + if (LED_MODE_BLUE == (enum led_light_mode)led_light_mode) { + accton_as7512_32x_led_write_value(led_reg[4], LED_BRIGHTNESS_ON_VALUE); + return; + } +} + +static enum led_brightness accton_as7512_32x_led_loc_get(struct led_classdev *cdev) +{ + accton_as7512_32x_led_update(); + + if (ledctl->reg_val[0] == LED_BRIGHTNESS_OFF_VALUE) { + return LED_MODE_OFF; + } + + return LED_MODE_BLUE; +} + +static enum led_brightness accton_as7512_32x_led_auto_get(struct led_classdev *cdev) +{ + return LED_MODE_AUTO; +} + +static struct led_classdev accton_as7512_32x_leds[] = { + [0] = { + .name = "accton_as7512_32x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as7512_32x_led_diag_set, + .brightness_get = accton_as7512_32x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_RED, + }, + [1] = { + .name = "accton_as7512_32x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as7512_32x_led_loc_set, + .brightness_get = accton_as7512_32x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_BLUE, + }, + [2] = { + .name = "accton_as7512_32x_led::fan", + .default_trigger = "unused", + .brightness_set = NULL, + .brightness_get = accton_as7512_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [3] = { + .name = "accton_as7512_32x_led::psu1", + .default_trigger = "unused", + .brightness_set = NULL, + .brightness_get = accton_as7512_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [4] = { + .name = "accton_as7512_32x_led::psu2", + .default_trigger = "unused", + .brightness_set = NULL, + .brightness_get = accton_as7512_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as7512_32x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { + led_classdev_suspend(&accton_as7512_32x_leds[i]); + } + + return 0; +} + +static int accton_as7512_32x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { + led_classdev_resume(&accton_as7512_32x_leds[i]); + } + + return 0; +} + +static int accton_as7512_32x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as7512_32x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as7512_32x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as7512_32x_leds[i]); + } + } + + return ret; +} + +static int accton_as7512_32x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { + led_classdev_unregister(&accton_as7512_32x_leds[i]); + } + + return 0; +} + +static struct platform_driver 
accton_as7512_32x_led_driver = { + .probe = accton_as7512_32x_led_probe, + .remove = accton_as7512_32x_led_remove, + .suspend = accton_as7512_32x_led_suspend, + .resume = accton_as7512_32x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as7512_32x_led_init(void) +{ + int ret; + + extern int platform_accton_as7512_32x(void); + if (!platform_accton_as7512_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as7512_32x_led_driver); + if (ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as7512_32x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as7512_32x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as7512_32x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as7512_32x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as7512_32x_led_driver); + kfree(ledctl); +} + +module_init(accton_as7512_32x_led_init); +module_exit(accton_as7512_32x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as7512_32x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-psu.c new file mode 100644 index 00000000..8b8ae238 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-psu.c @@ -0,0 +1,291 @@ +/* + * An hwmon driver for accton as7512_32x Power Module + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as7512_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as7512_32x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[9]; /* Model name, read from eeprom */ +}; + +static struct as7512_32x_psu_data *as7512_32x_psu_update_device(struct device *dev); + +enum as7512_32x_psu_sysfs_attributes { + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as7512_32x_psu_attributes[] = { + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7512_32x_psu_data *data = as7512_32x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> ((2 - data->index) + 2) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = (data->status >> (2 - data->index)) & 0x1; + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as7512_32x_psu_data *data = as7512_32x_psu_update_device(dev); + + return sprintf(buf, "%s\n", data->model_name); +} + +static const struct attribute_group as7512_32x_psu_group = { + .attrs = as7512_32x_psu_attributes, +}; + +static int as7512_32x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7512_32x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7512_32x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7512_32x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + /* Update PSU index */ + if (client->addr == 0x50) { + data->index = 1; + } + else if (client->addr == 0x53) { + data->index = 2; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + 
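+    /* error unwind: undo only the steps that completed, in reverse
+     * order (sysfs group first, then the allocated client data),
+     * before returning the error code */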
sysfs_remove_group(&client->dev.kobj, &as7512_32x_psu_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7512_32x_psu_remove(struct i2c_client *client) +{ + struct as7512_32x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7512_32x_psu_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id as7512_32x_psu_id[] = { + { "as7512_32x_psu", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as7512_32x_psu_id); + +static struct i2c_driver as7512_32x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as7512_32x_psu", + }, + .probe = as7512_32x_psu_probe, + .remove = as7512_32x_psu_remove, + .id_table = as7512_32x_psu_id, + .address_list = normal_i2c, +}; + +static int as7512_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = 0; + int retry_count = 5; + + while (retry_count) { + retry_count--; + + result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) { + msleep(10); + continue; + } + + if (unlikely(result != data_len)) { + result = -EIO; + msleep(10); + continue; + } + + result = 0; + break; + } + + return result; +} + +static struct as7512_32x_psu_data *as7512_32x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7512_32x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status; + int power_good = 0; + + dev_dbg(&client->dev, "Starting as7512_32x update\n"); + + /* Read psu status */ + status = accton_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + /* Read model name */ + memset(data->model_name, 0, sizeof(data->model_name)); + power_good = (data->status >> (2 - data->index)) & 0x1; + + if (power_good) { + status = as7512_32x_psu_read_block(client, 0x20, data->model_name, + ARRAY_SIZE(data->model_name)-1); + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as7512_32x_psu_init(void) +{ + extern int platform_accton_as7512_32x(void); + if (!platform_accton_as7512_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7512_32x_psu_driver); +} + +static void __exit as7512_32x_psu_exit(void) +{ + i2c_del_driver(&as7512_32x_psu_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as7512_32x_psu driver"); +MODULE_LICENSE("GPL"); + +module_init(as7512_32x_psu_init); +module_exit(as7512_32x_psu_exit); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-sfp.c new file mode 100644 index 00000000..f7560fb6 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/modules/builds/x86-64-accton-as7512-32x-sfp.c @@ -0,0 +1,356 @@ +/* + * An hwmon driver for accton as7512_32x sfp + * + * Copyright (C) 2014 Accton Technology Corporation. 
+ * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT_INDEX(i) (1UL << (i)) + + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as7512_32x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u32 is_present; /* present status */ +}; + +static struct as7512_32x_sfp_data *as7512_32x_sfp_update_device(struct device *dev); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +enum as7512_32x_sfp_sysfs_attributes { + SFP_PORT_NUMBER, + SFP_IS_PRESENT, + SFP_IS_PRESENT_ALL, + SFP_EEPROM +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); + +static struct attribute *as7512_32x_sfp_attributes[] = { + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7512_32x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->port+1); +} + +/* Error-check the CPLD read results. */ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ +do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ +} while(0) + +static ssize_t show_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + + if(attr->index == SFP_IS_PRESENT_ALL) { + int values[4]; + /* + * Report the SFP_PRESENCE status for all ports. 
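+	 * Registers 0x30-0x33 each hold one presence bit per port for a
+	 * group of eight ports (1-8, 9-16, 17-24, 25-32); the bits are
+	 * active-low in hardware, so each value is inverted and masked
+	 * to the low byte before being reported.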
+ */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); + + /* Return values 1 -> 32 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], values[3]); + } + else { /* SFP_IS_PRESENT */ + struct as7512_32x_sfp_data *data = as7512_32x_sfp_update_device(dev); + + if (!data->valid) { + return -EIO; + } + + return sprintf(buf, "%d\n", data->is_present); + } +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as7512_32x_sfp_data *data = as7512_32x_sfp_update_device(dev); + + if (!data->valid) { + return 0; + } + + if (!data->is_present) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as7512_32x_sfp_group = { + .attrs = as7512_32x_sfp_attributes, +}; + +static int as7512_32x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7512_32x_sfp_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7512_32x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7512_32x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7512_32x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7512_32x_sfp_remove(struct i2c_client *client) +{ + struct as7512_32x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7512_32x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as7512_32x_sfp1, as7512_32x_sfp2, as7512_32x_sfp3, as7512_32x_sfp4, +as7512_32x_sfp5, as7512_32x_sfp6, as7512_32x_sfp7, as7512_32x_sfp8, +as7512_32x_sfp9, as7512_32x_sfp10,as7512_32x_sfp11,as7512_32x_sfp12, +as7512_32x_sfp13,as7512_32x_sfp14,as7512_32x_sfp15,as7512_32x_sfp16, +as7512_32x_sfp17,as7512_32x_sfp18,as7512_32x_sfp19,as7512_32x_sfp20, +as7512_32x_sfp21,as7512_32x_sfp22,as7512_32x_sfp23,as7512_32x_sfp24, +as7512_32x_sfp25,as7512_32x_sfp26,as7512_32x_sfp27,as7512_32x_sfp28, +as7512_32x_sfp29,as7512_32x_sfp30,as7512_32x_sfp31,as7512_32x_sfp32 +}; + +static const struct i2c_device_id as7512_32x_sfp_id[] = { +{ "as7512_32x_sfp1", as7512_32x_sfp1 }, { "as7512_32x_sfp2", as7512_32x_sfp2 }, +{ "as7512_32x_sfp3", as7512_32x_sfp3 }, { "as7512_32x_sfp4", as7512_32x_sfp4 }, +{ "as7512_32x_sfp5", as7512_32x_sfp5 }, { "as7512_32x_sfp6", as7512_32x_sfp6 }, +{ "as7512_32x_sfp7", as7512_32x_sfp7 }, { "as7512_32x_sfp8", as7512_32x_sfp8 }, +{ "as7512_32x_sfp9", 
as7512_32x_sfp9 }, { "as7512_32x_sfp10", as7512_32x_sfp10 }, +{ "as7512_32x_sfp11", as7512_32x_sfp11 }, { "as7512_32x_sfp12", as7512_32x_sfp12 }, +{ "as7512_32x_sfp13", as7512_32x_sfp13 }, { "as7512_32x_sfp14", as7512_32x_sfp14 }, +{ "as7512_32x_sfp15", as7512_32x_sfp15 }, { "as7512_32x_sfp16", as7512_32x_sfp16 }, +{ "as7512_32x_sfp17", as7512_32x_sfp17 }, { "as7512_32x_sfp18", as7512_32x_sfp18 }, +{ "as7512_32x_sfp19", as7512_32x_sfp19 }, { "as7512_32x_sfp20", as7512_32x_sfp20 }, +{ "as7512_32x_sfp21", as7512_32x_sfp21 }, { "as7512_32x_sfp22", as7512_32x_sfp22 }, +{ "as7512_32x_sfp23", as7512_32x_sfp23 }, { "as7512_32x_sfp24", as7512_32x_sfp24 }, +{ "as7512_32x_sfp25", as7512_32x_sfp25 }, { "as7512_32x_sfp26", as7512_32x_sfp26 }, +{ "as7512_32x_sfp27", as7512_32x_sfp27 }, { "as7512_32x_sfp28", as7512_32x_sfp28 }, +{ "as7512_32x_sfp29", as7512_32x_sfp29 }, { "as7512_32x_sfp30", as7512_32x_sfp30 }, +{ "as7512_32x_sfp31", as7512_32x_sfp31 }, { "as7512_32x_sfp32", as7512_32x_sfp32 }, +{} +}; +MODULE_DEVICE_TABLE(i2c, as7512_32x_sfp_id); + +static struct i2c_driver as7512_32x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as7512_32x_sfp", + }, + .probe = as7512_32x_sfp_probe, + .remove = as7512_32x_sfp_remove, + .id_table = as7512_32x_sfp_id, + .address_list = normal_i2c, +}; + +static int as7512_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as7512_32x_sfp_data *as7512_32x_sfp_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7512_32x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0; + u8 cpld_reg = 0x30 + (data->port/8); + + data->valid = 0; + + /* Read present status of the specified port number */ + data->is_present = 0; + status = accton_i2c_cpld_read(0x60, cpld_reg); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); + goto exit; + } + + data->is_present = (status & (1 << (data->port % 8))) ? 
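+        /* the CPLD presence bit is active-low: a set bit means no
+           module is fitted, so record 0; a clear bit records 1 */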
0 : 1; + + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if (data->is_present) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { + status = as7512_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, + data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), + I2C_SMBUS_BLOCK_MAX); + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); + goto exit; + } + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as7512_32x_sfp_init(void) +{ + extern int platform_accton_as7512_32x(void); + if (!platform_accton_as7512_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7512_32x_sfp_driver); +} + +static void __exit as7512_32x_sfp_exit(void) +{ + i2c_del_driver(&as7512_32x_sfp_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as7512_32x_sfp driver"); +MODULE_LICENSE("GPL"); + +module_init(as7512_32x_sfp_init); +module_exit(as7512_32x_sfp_exit); From d043d6a1e16a7de36426317795292fe1282e553b Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 19:45:49 +0000 Subject: [PATCH 251/255] AS7716 Kernel Modules. --- .../x86-64-accton-as7716-32x/modules/Makefile | 1 + .../x86-64-accton-as7716-32x/modules/PKG.yml | 1 + .../modules/builds/.gitignore | 1 + .../modules/builds/Makefile | 5 + .../builds/x86-64-accton-as7716-32x-fan.c | 452 ++++++++++++++++++ .../builds/x86-64-accton-as7716-32x-leds.c | 443 +++++++++++++++++ .../builds/x86-64-accton-as7716-32x-psu.c | 293 ++++++++++++ .../builds/x86-64-accton-as7716-32x-sfp.c | 356 ++++++++++++++ 8 files changed, 1552 insertions(+) create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/PKG.yml create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/.gitignore create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/Makefile create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-fan.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-leds.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-psu.c create mode 100644 packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-sfp.c diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/PKG.yml new file mode 100644 index 00000000..b28c8b94 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml PLATFORM=x86-64-accton-as7716-32x ARCH=amd64 KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git 
a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/.gitignore b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/Makefile b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/Makefile new file mode 100644 index 00000000..c618e942 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/Makefile @@ -0,0 +1,5 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 onl-kernel-3.2-deb7-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +PLATFORM := x86-64-accton-as7716-32x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-fan.c b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-fan.c new file mode 100644 index 00000000..924374c6 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-fan.c @@ -0,0 +1,452 @@ +/* + * A hwmon driver for the Accton as7716 32x fan + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "as7716_32x_fan" + +static struct as7716_32x_fan_data *as7716_32x_fan_update_device(struct device *dev); +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +/* fan related data, the index should match sysfs_fan_attributes + */ +static const u8 fan_reg[] = { + 0x0F, /* fan 1-6 present status */ + 0x11, /* fan PWM(for all fan) */ + 0x12, /* front fan 1 speed(rpm) */ + 0x13, /* front fan 2 speed(rpm) */ + 0x14, /* front fan 3 speed(rpm) */ + 0x15, /* front fan 4 speed(rpm) */ + 0x16, /* front fan 5 speed(rpm) */ + 0x17, /* front fan 6 speed(rpm) */ + 0x22, /* rear fan 1 speed(rpm) */ + 0x23, /* rear fan 2 speed(rpm) */ + 0x24, /* rear fan 3 speed(rpm) */ + 0x25, /* rear fan 4 speed(rpm) */ + 0x26, /* rear fan 5 speed(rpm) */ + 0x27, /* rear fan 6 speed(rpm) */ +}; + +/* Each client has this additional data */ +struct as7716_32x_fan_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */ +}; + +enum fan_id { + FAN1_ID, + FAN2_ID, + FAN3_ID, + FAN4_ID, + FAN5_ID, + FAN6_ID +}; + +enum sysfs_fan_attributes { + FAN_PRESENT_REG, + FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ + FAN1_FRONT_SPEED_RPM, + FAN2_FRONT_SPEED_RPM, + FAN3_FRONT_SPEED_RPM, + FAN4_FRONT_SPEED_RPM, + FAN5_FRONT_SPEED_RPM, + FAN6_FRONT_SPEED_RPM, + FAN1_REAR_SPEED_RPM, + FAN2_REAR_SPEED_RPM, + FAN3_REAR_SPEED_RPM, + FAN4_REAR_SPEED_RPM, + FAN5_REAR_SPEED_RPM, + FAN6_REAR_SPEED_RPM, + FAN1_PRESENT, + FAN2_PRESENT, + FAN3_PRESENT, + FAN4_PRESENT, + FAN5_PRESENT, + FAN6_PRESENT, + FAN1_FAULT, + FAN2_FAULT, + FAN3_FAULT, + FAN4_FAULT, + FAN5_FAULT, + FAN6_FAULT +}; + +/* Define attributes + */ +#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT) +#define DECLARE_FAN_FAULT_ATTR(index) &sensor_dev_attr_fan##index##_fault.dev_attr.attr + +#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION) +#define DECLARE_FAN_DIRECTION_ATTR(index) &sensor_dev_attr_fan##index##_direction.dev_attr.attr + +#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE) +#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr + +#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT) +#define DECLARE_FAN_PRESENT_ATTR(index) &sensor_dev_attr_fan##index##_present.dev_attr.attr + +#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \ + static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_FRONT_SPEED_RPM);\ + static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, 
FAN##index##_REAR_SPEED_RPM) +#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ + &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr + +/* 6 fan fault attributes in this platform */ +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); +/* 6 fan speed(rpm) attributes in this platform */ +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); +DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); +/* 6 fan present attributes in this platform */ +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); +DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); +/* 1 fan duty cycle attribute in this platform */ +DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); + +static struct attribute *as7716_32x_fan_attributes[] = { + /* fan related attributes */ + DECLARE_FAN_FAULT_ATTR(1), + DECLARE_FAN_FAULT_ATTR(2), + DECLARE_FAN_FAULT_ATTR(3), + DECLARE_FAN_FAULT_ATTR(4), + DECLARE_FAN_FAULT_ATTR(5), + DECLARE_FAN_FAULT_ATTR(6), + DECLARE_FAN_SPEED_RPM_ATTR(1), + DECLARE_FAN_SPEED_RPM_ATTR(2), + DECLARE_FAN_SPEED_RPM_ATTR(3), + DECLARE_FAN_SPEED_RPM_ATTR(4), + DECLARE_FAN_SPEED_RPM_ATTR(5), + DECLARE_FAN_SPEED_RPM_ATTR(6), + DECLARE_FAN_PRESENT_ATTR(1), + DECLARE_FAN_PRESENT_ATTR(2), + DECLARE_FAN_PRESENT_ATTR(3), + DECLARE_FAN_PRESENT_ATTR(4), + DECLARE_FAN_PRESENT_ATTR(5), + DECLARE_FAN_PRESENT_ATTR(6), + DECLARE_FAN_DUTY_CYCLE_ATTR(), + NULL +}; + +#define FAN_DUTY_CYCLE_REG_MASK 0xF +#define FAN_MAX_DUTY_CYCLE 100 +#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 + +static int as7716_32x_fan_read_value(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int as7716_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + +/* fan utility functions + */ +static u32 reg_val_to_duty_cycle(u8 reg_val) +{ + reg_val &= FAN_DUTY_CYCLE_REG_MASK; + return ((u32)(reg_val+1) * 625 + 75)/ 100; +} + +static u8 duty_cycle_to_reg_val(u8 duty_cycle) +{ + return ((u32)duty_cycle * 100 / 625) - 1; +} + +static u32 reg_val_to_speed_rpm(u8 reg_val) +{ + return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; +} + +static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) +{ + u8 mask = (1 << id); + + reg_val &= mask; + + return reg_val ? 
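+    /* the presence bit is active-low: a set bit means the fan tray
+       is absent (0), a clear bit means it is present (1) */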
0 : 1; +} + +static u8 is_fan_fault(struct as7716_32x_fan_data *data, enum fan_id id) +{ + u8 ret = 1; + int front_fan_index = FAN1_FRONT_SPEED_RPM + id; + int rear_fan_index = FAN1_REAR_SPEED_RPM + id; + + /* Check if the speed of front or rear fan is ZERO, + */ + if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && + reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { + ret = 0; + } + + return ret; +} + +static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + int error, value; + struct i2c_client *client = to_i2c_client(dev); + + error = kstrtoint(buf, 10, &value); + if (error) + return error; + + if (value < 0 || value > FAN_MAX_DUTY_CYCLE) + return -EINVAL; + + as7716_32x_fan_write_value(client, 0x33, 0); /* Disable fan speed watch dog */ + as7716_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); + return count; +} + +static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7716_32x_fan_data *data = as7716_32x_fan_update_device(dev); + ssize_t ret = 0; + + if (data->valid) { + switch (attr->index) { + case FAN_DUTY_CYCLE_PERCENTAGE: + { + u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); + ret = sprintf(buf, "%u\n", duty_cycle); + break; + } + case FAN1_FRONT_SPEED_RPM: + case FAN2_FRONT_SPEED_RPM: + case FAN3_FRONT_SPEED_RPM: + case FAN4_FRONT_SPEED_RPM: + case FAN5_FRONT_SPEED_RPM: + case FAN6_FRONT_SPEED_RPM: + case FAN1_REAR_SPEED_RPM: + case FAN2_REAR_SPEED_RPM: + case FAN3_REAR_SPEED_RPM: + case FAN4_REAR_SPEED_RPM: + case FAN5_REAR_SPEED_RPM: + case FAN6_REAR_SPEED_RPM: + ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); + break; + case FAN1_PRESENT: + case FAN2_PRESENT: + case FAN3_PRESENT: + case FAN4_PRESENT: + case FAN5_PRESENT: + case FAN6_PRESENT: + ret = sprintf(buf, "%d\n", + reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], + attr->index - FAN1_PRESENT)); + break; + case FAN1_FAULT: + case FAN2_FAULT: + case FAN3_FAULT: + case FAN4_FAULT: + case FAN5_FAULT: + case FAN6_FAULT: + ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); + break; + default: + break; + } + } + + return ret; +} + +static const struct attribute_group as7716_32x_fan_group = { + .attrs = as7716_32x_fan_attributes, +}; + +static struct as7716_32x_fan_data *as7716_32x_fan_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7716_32x_fan_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || + !data->valid) { + int i; + + dev_dbg(&client->dev, "Starting as7716_32x_fan update\n"); + data->valid = 0; + + /* Update fan data + */ + for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { + int status = as7716_32x_fan_read_value(client, fan_reg[i]); + + if (status < 0) { + data->valid = 0; + mutex_unlock(&data->update_lock); + dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); + return data; + } + else { + data->reg_val[i] = status; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int as7716_32x_fan_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7716_32x_fan_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, 
I2C_FUNC_SMBUS_BYTE_DATA)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7716_32x_fan_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7716_32x_fan_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: fan '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7716_32x_fan_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7716_32x_fan_remove(struct i2c_client *client) +{ + struct as7716_32x_fan_data *data = i2c_get_clientdata(client); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7716_32x_fan_group); + + return 0; +} + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; + +static const struct i2c_device_id as7716_32x_fan_id[] = { + { "as7716_32x_fan", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as7716_32x_fan_id); + +static struct i2c_driver as7716_32x_fan_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = DRVNAME, + }, + .probe = as7716_32x_fan_probe, + .remove = as7716_32x_fan_remove, + .id_table = as7716_32x_fan_id, + .address_list = normal_i2c, +}; + +static int __init as7716_32x_fan_init(void) +{ + extern int platform_accton_as7716_32x(void); + if (!platform_accton_as7716_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7716_32x_fan_driver); +} + +static void __exit as7716_32x_fan_exit(void) +{ + i2c_del_driver(&as7716_32x_fan_driver); +} + +module_init(as7716_32x_fan_init); +module_exit(as7716_32x_fan_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as7716_32x_fan driver"); +MODULE_LICENSE("GPL"); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-leds.c b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-leds.c new file mode 100644 index 00000000..5a848972 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-leds.c @@ -0,0 +1,443 @@ +/* + * A LED driver for the accton_as7716_32x_led + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); + +#define DRVNAME "accton_as7716_32x_led" + +struct accton_as7716_32x_led_data { + struct platform_device *pdev; + struct mutex update_lock; + char valid; /* != 0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 reg_val[1]; /* only 1 register*/ +}; + +static struct accton_as7716_32x_led_data *ledctl = NULL; + +/* LED related data + */ + +#define LED_CNTRLER_I2C_ADDRESS (0x60) + +#define LED_TYPE_DIAG_REG_MASK (0x3) +#define LED_MODE_DIAG_GREEN_VALUE (0x02) +#define LED_MODE_DIAG_RED_VALUE (0x01) +#define LED_MODE_DIAG_AMBER_VALUE (0x00) /*It's yellow actually. Green+Red=Yellow*/ +#define LED_MODE_DIAG_OFF_VALUE (0x03) + + +#define LED_TYPE_LOC_REG_MASK (0x80) +#define LED_MODE_LOC_ON_VALUE (0) +#define LED_MODE_LOC_OFF_VALUE (0x80) + +enum led_type { + LED_TYPE_DIAG, + LED_TYPE_LOC, + LED_TYPE_FAN, + LED_TYPE_PSU1, + LED_TYPE_PSU2 +}; + +struct led_reg { + u32 types; + u8 reg_addr; +}; + +static const struct led_reg led_reg_map[] = { + {(1<update_lock); + + if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) + || !ledctl->valid) { + int i; + + dev_dbg(&ledctl->pdev->dev, "Starting accton_as7716_32x_led update\n"); + + /* Update LED data + */ + for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { + int status = accton_as7716_32x_led_read_value(led_reg_map[i].reg_addr); + + if (status < 0) { + ledctl->valid = 0; + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg_map[i].reg_addr, status); + goto exit; + } + else + { + ledctl->reg_val[i] = status; + } + } + + ledctl->last_updated = jiffies; + ledctl->valid = 1; + } + +exit: + mutex_unlock(&ledctl->update_lock); +} + +static void accton_as7716_32x_led_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode, + enum led_type type) +{ + int reg_val; + u8 reg ; + mutex_lock(&ledctl->update_lock); + + if( !accton_getLedReg(type, ®)) + { + dev_dbg(&ledctl->pdev->dev, "Not match item for %d.\n", type); + } + + reg_val = accton_as7716_32x_led_read_value(reg); + + if (reg_val < 0) { + dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); + goto exit; + } + reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); + accton_as7716_32x_led_write_value(reg, reg_val); + + /* to prevent the slow-update issue */ + ledctl->valid = 0; + +exit: + mutex_unlock(&ledctl->update_lock); +} + + +static void accton_as7716_32x_led_diag_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as7716_32x_led_set(led_cdev, led_light_mode, LED_TYPE_DIAG); +} + +static enum led_brightness accton_as7716_32x_led_diag_get(struct led_classdev *cdev) +{ + accton_as7716_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); +} + +static void accton_as7716_32x_led_loc_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ + accton_as7716_32x_led_set(led_cdev, led_light_mode, LED_TYPE_LOC); +} + +static enum led_brightness accton_as7716_32x_led_loc_get(struct led_classdev *cdev) +{ + 
accton_as7716_32x_led_update(); + return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); +} + +static void accton_as7716_32x_led_auto_set(struct led_classdev *led_cdev, + enum led_brightness led_light_mode) +{ +} + +static enum led_brightness accton_as7716_32x_led_auto_get(struct led_classdev *cdev) +{ + return LED_MODE_AUTO; +} + +static struct led_classdev accton_as7716_32x_leds[] = { + [LED_TYPE_DIAG] = { + .name = "accton_as7716_32x_led::diag", + .default_trigger = "unused", + .brightness_set = accton_as7716_32x_led_diag_set, + .brightness_get = accton_as7716_32x_led_diag_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_RED, + }, + [LED_TYPE_LOC] = { + .name = "accton_as7716_32x_led::loc", + .default_trigger = "unused", + .brightness_set = accton_as7716_32x_led_loc_set, + .brightness_get = accton_as7716_32x_led_loc_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_BLUE, + }, + [LED_TYPE_FAN] = { + .name = "accton_as7716_32x_led::fan", + .default_trigger = "unused", + .brightness_set = accton_as7716_32x_led_auto_set, + .brightness_get = accton_as7716_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU1] = { + .name = "accton_as7716_32x_led::psu1", + .default_trigger = "unused", + .brightness_set = accton_as7716_32x_led_auto_set, + .brightness_get = accton_as7716_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, + [LED_TYPE_PSU2] = { + .name = "accton_as7716_32x_led::psu2", + .default_trigger = "unused", + .brightness_set = accton_as7716_32x_led_auto_set, + .brightness_get = accton_as7716_32x_led_auto_get, + .flags = LED_CORE_SUSPENDRESUME, + .max_brightness = LED_MODE_AUTO, + }, +}; + +static int accton_as7716_32x_led_suspend(struct platform_device *dev, + pm_message_t state) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { + led_classdev_suspend(&accton_as7716_32x_leds[i]); + } + + return 0; +} + +static int accton_as7716_32x_led_resume(struct platform_device *dev) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { + led_classdev_resume(&accton_as7716_32x_leds[i]); + } + + return 0; +} + +static int accton_as7716_32x_led_probe(struct platform_device *pdev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { + ret = led_classdev_register(&pdev->dev, &accton_as7716_32x_leds[i]); + + if (ret < 0) + break; + } + + /* Check if all LEDs were successfully registered */ + if (i != ARRAY_SIZE(accton_as7716_32x_leds)){ + int j; + + /* only unregister the LEDs that were successfully registered */ + for (j = 0; j < i; j++) { + led_classdev_unregister(&accton_as7716_32x_leds[i]); + } + } + + return ret; +} + +static int accton_as7716_32x_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { + led_classdev_unregister(&accton_as7716_32x_leds[i]); + } + + return 0; +} + +static struct platform_driver accton_as7716_32x_led_driver = { + .probe = accton_as7716_32x_led_probe, + .remove = accton_as7716_32x_led_remove, + .suspend = accton_as7716_32x_led_suspend, + .resume = accton_as7716_32x_led_resume, + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init accton_as7716_32x_led_init(void) +{ + int ret; + + extern int platform_accton_as7716_32x(void); + if (!platform_accton_as7716_32x()) { + return -ENODEV; + } + + ret = platform_driver_register(&accton_as7716_32x_led_driver); + if 
(ret < 0) { + goto exit; + } + + ledctl = kzalloc(sizeof(struct accton_as7716_32x_led_data), GFP_KERNEL); + if (!ledctl) { + ret = -ENOMEM; + platform_driver_unregister(&accton_as7716_32x_led_driver); + goto exit; + } + + mutex_init(&ledctl->update_lock); + + ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); + if (IS_ERR(ledctl->pdev)) { + ret = PTR_ERR(ledctl->pdev); + platform_driver_unregister(&accton_as7716_32x_led_driver); + kfree(ledctl); + goto exit; + } + +exit: + return ret; +} + +static void __exit accton_as7716_32x_led_exit(void) +{ + platform_device_unregister(ledctl->pdev); + platform_driver_unregister(&accton_as7716_32x_led_driver); + kfree(ledctl); +} + +module_init(accton_as7716_32x_led_init); +module_exit(accton_as7716_32x_led_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton_as7716_32x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-psu.c b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-psu.c new file mode 100644 index 00000000..4fd15ae1 --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-psu.c @@ -0,0 +1,293 @@ +/* + * An hwmon driver for accton as7716_32x Power Module + * + * Copyright (C) 2014 Accton Technology Corporation. + * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); +static int as7716_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as7716_32x_psu_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + u8 status; /* Status(present/power_good) register read from CPLD */ + char model_name[9]; /* Model name, read from eeprom */ +}; + +static struct as7716_32x_psu_data *as7716_32x_psu_update_device(struct device *dev); + +enum as7716_32x_psu_sysfs_attributes { + PSU_PRESENT, + PSU_MODEL_NAME, + PSU_POWER_GOOD +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); +static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); +static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); + +static struct attribute *as7716_32x_psu_attributes[] = { + &sensor_dev_attr_psu_present.dev_attr.attr, + &sensor_dev_attr_psu_model_name.dev_attr.attr, + &sensor_dev_attr_psu_power_good.dev_attr.attr, + NULL +}; + +static ssize_t show_status(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct as7716_32x_psu_data *data = as7716_32x_psu_update_device(dev); + u8 status = 0; + + if (attr->index == PSU_PRESENT) { + status = !(data->status >> (1-data->index) & 0x1); + } + else { /* PSU_POWER_GOOD */ + status = (data->status >> (3-data->index) & 0x1); + } + + return sprintf(buf, "%d\n", status); +} + +static ssize_t show_model_name(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as7716_32x_psu_data *data = as7716_32x_psu_update_device(dev); + + return sprintf(buf, "%s\n", data->model_name); +} + +static const struct attribute_group as7716_32x_psu_group = { + .attrs = as7716_32x_psu_attributes, +}; + +static int as7716_32x_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7716_32x_psu_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7716_32x_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->valid = 0; + data->index = dev_id->driver_data; + mutex_init(&data->update_lock); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7716_32x_psu_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: psu '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7716_32x_psu_group); +exit_free: + kfree(data); +exit: + + return status; 
+} + +static int as7716_32x_psu_remove(struct i2c_client *client) +{ + struct as7716_32x_psu_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7716_32x_psu_group); + kfree(data); + + return 0; +} + +enum psu_index +{ + as7716_32x_psu1, + as7716_32x_psu2 +}; + +static const struct i2c_device_id as7716_32x_psu_id[] = { + { "as7716_32x_psu1", as7716_32x_psu1 }, + { "as7716_32x_psu2", as7716_32x_psu2 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, as7716_32x_psu_id); + +static struct i2c_driver as7716_32x_psu_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as7716_32x_psu", + }, + .probe = as7716_32x_psu_probe, + .remove = as7716_32x_psu_remove, + .id_table = as7716_32x_psu_id, + .address_list = normal_i2c, +}; + +static int as7716_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = 0; + int retry_count = 5; + + while (retry_count) { + retry_count--; + + result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) { + msleep(10); + continue; + } + + if (unlikely(result != data_len)) { + result = -EIO; + msleep(10); + continue; + } + + result = 0; + break; + } + + return result; +} + +static struct as7716_32x_psu_data *as7716_32x_psu_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7716_32x_psu_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status; + int power_good = 0; + + dev_dbg(&client->dev, "Starting as7716_32x update\n"); + + /* Read psu status */ + status = accton_i2c_cpld_read(0x60, 0x2); + + if (status < 0) { + dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); + } + else { + data->status = status; + } + + /* Read model name */ + memset(data->model_name, 0, sizeof(data->model_name)); + power_good = (data->status >> (3-data->index) & 0x1); + + if (power_good) { + status = as7716_32x_psu_read_block(client, 0x20, data->model_name, + ARRAY_SIZE(data->model_name)-1); + + if (status < 0) { + data->model_name[0] = '\0'; + dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); + } + else { + data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as7716_32x_psu_init(void) +{ + extern int platform_accton_as7716_32x(void); + if (!platform_accton_as7716_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7716_32x_psu_driver); +} + +static void __exit as7716_32x_psu_exit(void) +{ + i2c_del_driver(&as7716_32x_psu_driver); +} + +module_init(as7716_32x_psu_init); +module_exit(as7716_32x_psu_exit); + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("as7716_32x_psu driver"); +MODULE_LICENSE("GPL"); + diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-sfp.c b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-sfp.c new file mode 100644 index 00000000..432e9b7d --- /dev/null +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/modules/builds/x86-64-accton-as7716-32x-sfp.c @@ -0,0 +1,356 @@ +/* + * An hwmon driver for accton as7716_32x sfp + * + * Copyright (C) 2014 Accton Technology Corporation. 
+ * Brandon Chuang + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT_INDEX(i) (1UL << (i)) + + +/* Addresses scanned + */ +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* Each client has this additional data + */ +struct as7716_32x_sfp_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if registers are valid */ + unsigned long last_updated; /* In jiffies */ + int port; /* Front port index */ + char eeprom[256]; /* eeprom data */ + u32 is_present; /* present status */ +}; + +static struct as7716_32x_sfp_data *as7716_32x_sfp_update_device(struct device *dev); +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); +static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); +extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); +extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); + +enum as7716_32x_sfp_sysfs_attributes { + SFP_PORT_NUMBER, + SFP_IS_PRESENT, + SFP_IS_PRESENT_ALL, + SFP_EEPROM +}; + +/* sysfs attributes for hwmon + */ +static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); +static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); +static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); +static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); + +static struct attribute *as7716_32x_sfp_attributes[] = { + &sensor_dev_attr_sfp_port_number.dev_attr.attr, + &sensor_dev_attr_sfp_is_present.dev_attr.attr, + &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, + &sensor_dev_attr_sfp_eeprom.dev_attr.attr, + NULL +}; + +static ssize_t show_port_number(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%d\n", data->port+1); +} + +/* Error-check the CPLD read results. */ +#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ +do { \ + _rv = (_read_expr); \ + if(_rv < 0) { \ + return sprintf(_buf, "READ ERROR\n"); \ + } \ + if(_invert) { \ + _rv = ~_rv; \ + } \ + _rv &= 0xFF; \ +} while(0) + +static ssize_t show_present(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + + if(attr->index == SFP_IS_PRESENT_ALL) { + int values[4]; + /* + * Report the SFP_PRESENCE status for all ports. 
+ */ + + /* SFP_PRESENT Ports 1-8 */ + VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); + /* SFP_PRESENT Ports 9-16 */ + VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); + /* SFP_PRESENT Ports 17-24 */ + VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); + /* SFP_PRESENT Ports 25-32 */ + VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); + + /* Return values 1 -> 32 in order */ + return sprintf(buf, "%.2x %.2x %.2x %.2x\n", + values[0], values[1], values[2], values[3]); + } + else { /* SFP_IS_PRESENT */ + struct as7716_32x_sfp_data *data = as7716_32x_sfp_update_device(dev); + + if (!data->valid) { + return -EIO; + } + + return sprintf(buf, "%d\n", data->is_present); + } +} + +static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct as7716_32x_sfp_data *data = as7716_32x_sfp_update_device(dev); + + if (!data->valid) { + return 0; + } + + if (!data->is_present) { + return 0; + } + + memcpy(buf, data->eeprom, sizeof(data->eeprom)); + + return sizeof(data->eeprom); +} + +static const struct attribute_group as7716_32x_sfp_group = { + .attrs = as7716_32x_sfp_attributes, +}; + +static int as7716_32x_sfp_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct as7716_32x_sfp_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct as7716_32x_sfp_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + + mutex_init(&data->update_lock); + data->port = dev_id->driver_data; + i2c_set_clientdata(client, data); + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &as7716_32x_sfp_group); + if (status) { + goto exit_free; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + dev_info(&client->dev, "%s: sfp '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove: + sysfs_remove_group(&client->dev.kobj, &as7716_32x_sfp_group); +exit_free: + kfree(data); +exit: + + return status; +} + +static int as7716_32x_sfp_remove(struct i2c_client *client) +{ + struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &as7716_32x_sfp_group); + kfree(data); + + return 0; +} + +enum port_numbers { +as7716_32x_sfp1, as7716_32x_sfp2, as7716_32x_sfp3, as7716_32x_sfp4, +as7716_32x_sfp5, as7716_32x_sfp6, as7716_32x_sfp7, as7716_32x_sfp8, +as7716_32x_sfp9, as7716_32x_sfp10,as7716_32x_sfp11,as7716_32x_sfp12, +as7716_32x_sfp13,as7716_32x_sfp14,as7716_32x_sfp15,as7716_32x_sfp16, +as7716_32x_sfp17,as7716_32x_sfp18,as7716_32x_sfp19,as7716_32x_sfp20, +as7716_32x_sfp21,as7716_32x_sfp22,as7716_32x_sfp23,as7716_32x_sfp24, +as7716_32x_sfp25,as7716_32x_sfp26,as7716_32x_sfp27,as7716_32x_sfp28, +as7716_32x_sfp29,as7716_32x_sfp30,as7716_32x_sfp31,as7716_32x_sfp32 +}; + +static const struct i2c_device_id as7716_32x_sfp_id[] = { +{ "as7716_32x_sfp1", as7716_32x_sfp1 }, { "as7716_32x_sfp2", as7716_32x_sfp2 }, +{ "as7716_32x_sfp3", as7716_32x_sfp3 }, { "as7716_32x_sfp4", as7716_32x_sfp4 }, +{ "as7716_32x_sfp5", as7716_32x_sfp5 }, { "as7716_32x_sfp6", as7716_32x_sfp6 }, +{ "as7716_32x_sfp7", as7716_32x_sfp7 }, { "as7716_32x_sfp8", as7716_32x_sfp8 }, +{ "as7716_32x_sfp9", 
as7716_32x_sfp9 }, { "as7716_32x_sfp10", as7716_32x_sfp10 }, +{ "as7716_32x_sfp11", as7716_32x_sfp11 }, { "as7716_32x_sfp12", as7716_32x_sfp12 }, +{ "as7716_32x_sfp13", as7716_32x_sfp13 }, { "as7716_32x_sfp14", as7716_32x_sfp14 }, +{ "as7716_32x_sfp15", as7716_32x_sfp15 }, { "as7716_32x_sfp16", as7716_32x_sfp16 }, +{ "as7716_32x_sfp17", as7716_32x_sfp17 }, { "as7716_32x_sfp18", as7716_32x_sfp18 }, +{ "as7716_32x_sfp19", as7716_32x_sfp19 }, { "as7716_32x_sfp20", as7716_32x_sfp20 }, +{ "as7716_32x_sfp21", as7716_32x_sfp21 }, { "as7716_32x_sfp22", as7716_32x_sfp22 }, +{ "as7716_32x_sfp23", as7716_32x_sfp23 }, { "as7716_32x_sfp24", as7716_32x_sfp24 }, +{ "as7716_32x_sfp25", as7716_32x_sfp25 }, { "as7716_32x_sfp26", as7716_32x_sfp26 }, +{ "as7716_32x_sfp27", as7716_32x_sfp27 }, { "as7716_32x_sfp28", as7716_32x_sfp28 }, +{ "as7716_32x_sfp29", as7716_32x_sfp29 }, { "as7716_32x_sfp30", as7716_32x_sfp30 }, +{ "as7716_32x_sfp31", as7716_32x_sfp31 }, { "as7716_32x_sfp32", as7716_32x_sfp32 }, +{} +}; +MODULE_DEVICE_TABLE(i2c, as7716_32x_sfp_id); + +static struct i2c_driver as7716_32x_sfp_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "as7716_32x_sfp", + }, + .probe = as7716_32x_sfp_probe, + .remove = as7716_32x_sfp_remove, + .id_table = as7716_32x_sfp_id, + .address_list = normal_i2c, +}; + +static int as7716_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); + + if (unlikely(result < 0)) + goto abort; + if (unlikely(result != data_len)) { + result = -EIO; + goto abort; + } + + result = 0; + +abort: + return result; +} + +static struct as7716_32x_sfp_data *as7716_32x_sfp_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int status = -1; + int i = 0; + u8 cpld_reg = 0x30 + (data->port/8); + + data->valid = 0; + + /* Read present status of the specified port number */ + data->is_present = 0; + status = accton_i2c_cpld_read(0x60, cpld_reg); + + if (status < 0) { + dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); + goto exit; + } + + data->is_present = (status & (1 << (data->port % 8))) ? 
0 : 1; + + /* Read eeprom data based on port number */ + memset(data->eeprom, 0, sizeof(data->eeprom)); + + /* Check if the port is present */ + if (data->is_present) { + /* read eeprom */ + for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { + status = as7716_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, + data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), + I2C_SMBUS_BLOCK_MAX); + if (status < 0) { + dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); + goto exit; + } + } + } + + data->last_updated = jiffies; + data->valid = 1; + } + +exit: + mutex_unlock(&data->update_lock); + + return data; +} + +static int __init as7716_32x_sfp_init(void) +{ + extern int platform_accton_as7716_32x(void); + if (!platform_accton_as7716_32x()) { + return -ENODEV; + } + + return i2c_add_driver(&as7716_32x_sfp_driver); +} + +static void __exit as7716_32x_sfp_exit(void) +{ + i2c_del_driver(&as7716_32x_sfp_driver); +} + +MODULE_AUTHOR("Brandon Chuang "); +MODULE_DESCRIPTION("accton as7716_32x_sfp driver"); +MODULE_LICENSE("GPL"); + +module_init(as7716_32x_sfp_init); +module_exit(as7716_32x_sfp_exit); From b9df55d8b8fa7b034b89d20572ad17df06b453ac Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 19:47:12 +0000 Subject: [PATCH 252/255] Use kernel modules. --- .../x86-64-accton-as5512-54x/platform-config/r0/PKG.yml | 2 +- .../r0/src/lib/x86-64-accton-as5512-54x-r0.yml | 4 ++-- .../r0/src/python/x86_64_accton_as5512_54x_r0/__init__.py | 5 +++++ .../x86-64-accton-as7512-32x/platform-config/r0/PKG.yml | 2 +- .../r0/src/python/x86_64_accton_as7512_32x_r0/__init__.py | 5 +++++ .../x86-64-accton-as7716-32x/platform-config/r0/PKG.yml | 2 +- .../r0/src/lib/x86-64-accton-as7716-32x-r0.yml | 4 +++- .../r0/src/python/x86_64_accton_as7716_32x_r0/__init__.py | 5 +++++ 8 files changed, 23 insertions(+), 6 deletions(-) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/PKG.yml index e13eaf29..699ce795 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5512-54x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as5512-54x-r0 MODULES=onl-platform-modules-x86-64-accton-as5512-54x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/lib/x86-64-accton-as5512-54x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/lib/x86-64-accton-as5512-54x-r0.yml index 218d540c..d1eafdd2 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/lib/x86-64-accton-as5512-54x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/lib/x86-64-accton-as5512-54x-r0.yml @@ -17,8 +17,8 @@ x86-64-accton-as5512-54x-r0: --parity=no --stop=1 - kernel: - <<: *kernel-3-18 + kernel: + <<: *kernel-3-2 args: >- nopat diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/python/x86_64_accton_as5512_54x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/python/x86_64_accton_as5512_54x_r0/__init__.py index 972a3b28..5f00ee18 100644 --- 
a/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/python/x86_64_accton_as5512_54x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as5512-54x/platform-config/r0/src/python/x86_64_accton_as5512_54x_r0/__init__.py @@ -8,6 +8,11 @@ class OnlPlatform_x86_64_accton_as5512_54x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".5512.54.1" def baseconfig(self): + + self.insmod('cpr_4011_4mxx') + self.insmod('accton_i2c_cpld') + self.insmod_platform() + ########### initialize I2C bus 0 ########### # initialize multiplexer (PCA9548) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/PKG.yml index e7c30ba8..300b6bcf 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as7512-32x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as7512-32x-r0 MODULES=onl-platform-modules-x86-64-accton-as7512-32x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/src/python/x86_64_accton_as7512_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/src/python/x86_64_accton_as7512_32x_r0/__init__.py index 69cd67c3..1e883db8 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/src/python/x86_64_accton_as7512_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7512-32x/platform-config/r0/src/python/x86_64_accton_as7512_32x_r0/__init__.py @@ -8,6 +8,11 @@ class OnlPlatform_x86_64_accton_as7512_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".7512.32" def baseconfig(self): + + self.insmod("ym2651y") + self.insmod("accton_i2c_cpld") + self.insmod_platform() + ########### initialize I2C bus 0 ########### # initialize multiplexer (PCA9548) self.new_i2c_device('pca9548', 0x76, 0) diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/PKG.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/PKG.yml index 017e10d7..5273bfdd 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/PKG.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/PKG.yml @@ -1 +1 @@ -!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as7716-32x-r0 +!include $ONL_TEMPLATES/platform-config-with-modules.yml ARCH=amd64 VENDOR=accton PLATFORM=x86-64-accton-as7716-32x-r0 MODULES=onl-platform-modules-x86-64-accton-as7716-32x:amd64 diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/x86-64-accton-as7716-32x-r0.yml b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/x86-64-accton-as7716-32x-r0.yml index f88f0e06..691b33b1 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/x86-64-accton-as7716-32x-r0.yml +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/x86-64-accton-as7716-32x-r0.yml @@ -17,12 +17,14 @@ x86-64-accton-as7716-32x-r0: --parity=no --stop=1 - kernel: + kernel: <<: *kernel-3-2 args: >- nopat 
console=ttyS0,115200n8 + tg3.short_preamble=1 + tg3.bcm5718s_reset=1 ##network: ## interfaces: diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/python/x86_64_accton_as7716_32x_r0/__init__.py b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/python/x86_64_accton_as7716_32x_r0/__init__.py index 76f0da9e..df5a257a 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/python/x86_64_accton_as7716_32x_r0/__init__.py +++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/python/x86_64_accton_as7716_32x_r0/__init__.py @@ -8,6 +8,11 @@ class OnlPlatform_x86_64_accton_as7716_32x_r0(OnlPlatformAccton, SYS_OBJECT_ID=".7716.32" def baseconfig(self): + + self.insmod("ym2651y") + self.insmod('accton_i2c_cpld') + self.insmod_platform() + ########### initialize I2C bus 0 ########### self.new_i2c_devices([ # initialize multiplexer (PCA9548) From ada99bfaf2a1b0b5778cea6bb62602a1f585f9be Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 19:47:54 +0000 Subject: [PATCH 253/255] Convenience method for inserting all platform-specific modules. --- .../src/python/onl/platform/base.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py index fd442256..ae309002 100644 --- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py +++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py @@ -213,6 +213,19 @@ class OnlPlatformBase(object): else: return False + def insmod_platform(self): + kv = os.uname()[2] + # Insert all modules in the platform module directories + directories = [ self.PLATFORM, + '-'.join(self.PLATFORM.split('-')[:-1]) ] + + for subdir in directories: + d = "/lib/modules/%s/%s" % (kv, subdir) + if os.path.isdir(d): + for f in os.listdir(d): + if f.endswith(".ko"): + self.insmod(f) + def onie_machine_get(self): mc = self.basedir_onl("etc/onie/machine.json") if not os.path.exists(mc): From f04f6e605f1f40b52a146c75ea9add14a0d2947e Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 11:52:00 -0800 Subject: [PATCH 254/255] All platform patches have been moved to kernel modules. 
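Each platform's device drivers now ship as out-of-tree kernel modules and are
loaded at boot from baseconfig(), using the insmod_platform() helper added in
the previous patch together with the existing insmod(); the per-platform
driver patches under packages/base/any/kernels/3.2.65-1+deb7u2/patches are
therefore no longer applied and can be removed. A minimal sketch of the
resulting boot-time flow, assuming the onl.platform.accton import path and
reusing the module names shown in the earlier hunks (illustrative only, not a
literal excerpt from any platform package):

    # Hypothetical example of the new module-loading convention.
    from onl.platform.accton import OnlPlatformAccton   # import path assumed

    class OnlPlatform_example_r0(OnlPlatformAccton):
        def baseconfig(self):
            # Named modules shared across platforms are loaded first ...
            self.insmod('ym2651y')           # PSU driver for this platform
            self.insmod('accton_i2c_cpld')   # CPLD access used by the PSU/SFP/fan drivers
            # ... then every .ko packaged under /lib/modules/<kver>/<platform>/
            self.insmod_platform()
            # I2C mux and device instantiation continues as before.
            return True

insmod_platform() searches both the full platform directory and the directory
named after the platform with its trailing revision stripped, so modules
shared by all revisions of a board only need to be packaged once.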
--- .../patches/driver-hwmon-cpr-4011-4mxx.patch | 439 --- .../patches/mgmt-port-init-config.patch | 50 - ...orm-accton-as5512_54x-device-drivers.patch | 2615 ---------------- ...orm-accton-as5712_54x-device-drivers.patch | 2639 ---------------- ...orm-accton-as5812_54t-device-drivers.patch | 1861 ------------ ...orm-accton-as5812_54x-device-drivers.patch | 2401 --------------- ...orm-accton-as6712_32x-device-drivers.patch | 2334 -------------- ...orm-accton-as6812_32x-device-drivers.patch | 2300 -------------- ...orm-accton-as7512_32x-device-drivers.patch | 2675 ----------------- ...orm-accton-as7712_32x-device-drivers.patch | 1830 ----------- ...orm-accton-as7716_32x-device-drivers.patch | 1707 ----------- .../kernels/3.2.65-1+deb7u2/patches/series | 11 - 12 files changed, 20862 deletions(-) delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-hwmon-cpr-4011-4mxx.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch delete mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-hwmon-cpr-4011-4mxx.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-hwmon-cpr-4011-4mxx.patch deleted file mode 100644 index 9cf1aa88..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-hwmon-cpr-4011-4mxx.patch +++ /dev/null @@ -1,439 +0,0 @@ -Patch for COMPUWARE CPR-4011-4M11 and CPR-4011-4M21 power module. - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 5c984a6..b3abc7b 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1412,6 +1412,17 @@ config SENSORS_MC13783_ADC - help - Support for the A/D converter on MC13783 PMIC. - -+config SENSORS_CPR_4011_4MXX -+ tristate "Compuware CPR_4011_4MXX Redundant Power Module" -+ depends on I2C -+ help -+ If you say yes here you get support for Compuware CPR_4011_4MXX -+ Redundant Power Module. -+ -+ This driver can also be built as a module. If so, the module will -+ be called cpr_4011_4mxx. 
-+ -+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index ff3a18e..1e90738 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -42,6 +42,7 @@ obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o - obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o - obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o - obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o -+obj-$(CONFIG_SENSORS_CPR_4011_4MXX) += cpr_4011_4mxx.o - obj-$(CONFIG_SENSORS_CY8CXX) += cy8cxx.o - obj-$(CONFIG_SENSORS_CY8C3245R1) += cy8c3245r1.o - obj-$(CONFIG_SENSORS_DME1737) += dme1737.o -diff --git a/drivers/hwmon/cpr_4011_4mxx.c b/drivers/hwmon/cpr_4011_4mxx.c -new file mode 100644 -index 0000000..c14c733 ---- /dev/null -+++ b/drivers/hwmon/cpr_4011_4mxx.c -@@ -0,0 +1,397 @@ -+/* -+ * An hwmon driver for the CPR-4011-4Mxx Redundant Power Module -+ * -+ * Copyright (C) Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MAX_FAN_DUTY_CYCLE 100 -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x3c, 0x3d, 0x3e, 0x3f, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct cpr_4011_4mxx_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 vout_mode; /* Register value */ -+ u16 v_in; /* Register value */ -+ u16 v_out; /* Register value */ -+ u16 i_in; /* Register value */ -+ u16 i_out; /* Register value */ -+ u16 p_in; /* Register value */ -+ u16 p_out; /* Register value */ -+ u16 temp_input[2]; /* Register value */ -+ u8 fan_fault; /* Register value */ -+ u16 fan_duty_cycle[2]; /* Register value */ -+ u16 fan_speed[2]; /* Register value */ -+}; -+ -+static ssize_t show_linear(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_vout(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, const char *buf, size_t count); -+static int cpr_4011_4mxx_write_word(struct i2c_client *client, u8 reg, u16 value); -+static struct cpr_4011_4mxx_data *cpr_4011_4mxx_update_device(struct device *dev); -+ -+enum cpr_4011_4mxx_sysfs_attributes { -+ PSU_V_IN, -+ PSU_V_OUT, -+ PSU_I_IN, -+ PSU_I_OUT, -+ PSU_P_IN, -+ PSU_P_OUT, -+ PSU_TEMP1_INPUT, -+ PSU_FAN1_FAULT, -+ PSU_FAN1_DUTY_CYCLE, -+ PSU_FAN1_SPEED, -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_v_in, S_IRUGO, show_linear, NULL, PSU_V_IN); -+static 
SENSOR_DEVICE_ATTR(psu_v_out, S_IRUGO, show_vout, NULL, PSU_V_OUT); -+static SENSOR_DEVICE_ATTR(psu_i_in, S_IRUGO, show_linear, NULL, PSU_I_IN); -+static SENSOR_DEVICE_ATTR(psu_i_out, S_IRUGO, show_linear, NULL, PSU_I_OUT); -+static SENSOR_DEVICE_ATTR(psu_p_in, S_IRUGO, show_linear, NULL, PSU_P_IN); -+static SENSOR_DEVICE_ATTR(psu_p_out, S_IRUGO, show_linear, NULL, PSU_P_OUT); -+static SENSOR_DEVICE_ATTR(psu_temp1_input, S_IRUGO, show_linear, NULL, PSU_TEMP1_INPUT); -+static SENSOR_DEVICE_ATTR(psu_fan1_fault, S_IRUGO, show_fan_fault, NULL, PSU_FAN1_FAULT); -+static SENSOR_DEVICE_ATTR(psu_fan1_duty_cycle_percentage, S_IWUSR | S_IRUGO, show_linear, set_fan_duty_cycle, PSU_FAN1_DUTY_CYCLE); -+static SENSOR_DEVICE_ATTR(psu_fan1_speed_rpm, S_IRUGO, show_linear, NULL, PSU_FAN1_SPEED); -+ -+static struct attribute *cpr_4011_4mxx_attributes[] = { -+ &sensor_dev_attr_psu_v_in.dev_attr.attr, -+ &sensor_dev_attr_psu_v_out.dev_attr.attr, -+ &sensor_dev_attr_psu_i_in.dev_attr.attr, -+ &sensor_dev_attr_psu_i_out.dev_attr.attr, -+ &sensor_dev_attr_psu_p_in.dev_attr.attr, -+ &sensor_dev_attr_psu_p_out.dev_attr.attr, -+ &sensor_dev_attr_psu_temp1_input.dev_attr.attr, -+ &sensor_dev_attr_psu_fan1_fault.dev_attr.attr, -+ &sensor_dev_attr_psu_fan1_duty_cycle_percentage.dev_attr.attr, -+ &sensor_dev_attr_psu_fan1_speed_rpm.dev_attr.attr, -+ NULL -+}; -+ -+static int two_complement_to_int(u16 data, u8 valid_bit, int mask) -+{ -+ u16 valid_data = data & mask; -+ bool is_negative = valid_data >> (valid_bit - 1); -+ -+ return is_negative ? (-(((~valid_data) & mask) + 1)) : valid_data; -+} -+ -+static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct i2c_client *client = to_i2c_client(dev); -+ struct cpr_4011_4mxx_data *data = i2c_get_clientdata(client); -+ int nr = (attr->index == PSU_FAN1_DUTY_CYCLE) ? 0 : 1; -+ long speed; -+ int error; -+ -+ error = kstrtol(buf, 10, &speed); -+ if (error) -+ return error; -+ -+ if (speed < 0 || speed > MAX_FAN_DUTY_CYCLE) -+ return -EINVAL; -+ -+ mutex_lock(&data->update_lock); -+ data->fan_duty_cycle[nr] = speed; -+ cpr_4011_4mxx_write_word(client, 0x3B + nr, data->fan_duty_cycle[nr]); -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+static ssize_t show_linear(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct cpr_4011_4mxx_data *data = cpr_4011_4mxx_update_device(dev); -+ -+ u16 value = 0; -+ int exponent, mantissa; -+ int multiplier = 1000; -+ -+ switch (attr->index) { -+ case PSU_V_IN: -+ value = data->v_in; -+ break; -+ case PSU_I_IN: -+ value = data->i_in; -+ break; -+ case PSU_I_OUT: -+ value = data->i_out; -+ break; -+ case PSU_P_IN: -+ value = data->p_in; -+ break; -+ case PSU_P_OUT: -+ value = data->p_out; -+ break; -+ case PSU_TEMP1_INPUT: -+ value = data->temp_input[0]; -+ break; -+ case PSU_FAN1_DUTY_CYCLE: -+ multiplier = 1; -+ value = data->fan_duty_cycle[0]; -+ break; -+ case PSU_FAN1_SPEED: -+ multiplier = 1; -+ value = data->fan_speed[0]; -+ break; -+ default: -+ break; -+ } -+ -+ exponent = two_complement_to_int(value >> 11, 5, 0x1f); -+ mantissa = two_complement_to_int(value & 0x7ff, 11, 0x7ff); -+ -+ return (exponent >= 0) ? 
sprintf(buf, "%d\n", (mantissa << exponent) * multiplier) : -+ sprintf(buf, "%d\n", (mantissa * multiplier) / (1 << -exponent)); -+} -+ -+static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct cpr_4011_4mxx_data *data = cpr_4011_4mxx_update_device(dev); -+ -+ u8 shift = (attr->index == PSU_FAN1_FAULT) ? 7 : 6; -+ -+ return sprintf(buf, "%d\n", data->fan_fault >> shift); -+} -+ -+static ssize_t show_vout(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct cpr_4011_4mxx_data *data = cpr_4011_4mxx_update_device(dev); -+ int exponent, mantissa; -+ int multiplier = 1000; -+ -+ exponent = two_complement_to_int(data->vout_mode, 5, 0x1f); -+ mantissa = data->v_out; -+ -+ return (exponent > 0) ? sprintf(buf, "%d\n", (mantissa << exponent) * multiplier) : -+ sprintf(buf, "%d\n", (mantissa * multiplier) / (1 << -exponent)); -+} -+ -+static const struct attribute_group cpr_4011_4mxx_group = { -+ .attrs = cpr_4011_4mxx_attributes, -+}; -+ -+static int cpr_4011_4mxx_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct cpr_4011_4mxx_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct cpr_4011_4mxx_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &cpr_4011_4mxx_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &cpr_4011_4mxx_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int cpr_4011_4mxx_remove(struct i2c_client *client) -+{ -+ struct cpr_4011_4mxx_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &cpr_4011_4mxx_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id cpr_4011_4mxx_id[] = { -+ { "cpr_4011_4mxx", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, cpr_4011_4mxx_id); -+ -+static struct i2c_driver cpr_4011_4mxx_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "cpr_4011_4mxx", -+ }, -+ .probe = cpr_4011_4mxx_probe, -+ .remove = cpr_4011_4mxx_remove, -+ .id_table = cpr_4011_4mxx_id, -+ .address_list = normal_i2c, -+}; -+ -+static int cpr_4011_4mxx_read_byte(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_byte_data(client, reg); -+} -+ -+static int cpr_4011_4mxx_read_word(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_word_data(client, reg); -+} -+ -+static int cpr_4011_4mxx_write_word(struct i2c_client *client, u8 reg, u16 value) -+{ -+ return i2c_smbus_write_word_data(client, reg, value); -+} -+ -+struct reg_data_byte { -+ u8 reg; -+ u8 *value; -+}; -+ -+struct reg_data_word { -+ u8 reg; -+ u16 *value; -+}; -+ -+static struct cpr_4011_4mxx_data *cpr_4011_4mxx_update_device(struct device *dev) -+{ -+ struct i2c_client 
*client = to_i2c_client(dev); -+ struct cpr_4011_4mxx_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int i, status; -+ struct reg_data_byte regs_byte[] = { {0x20, &data->vout_mode}, -+ {0x81, &data->fan_fault}}; -+ struct reg_data_word regs_word[] = { {0x88, &data->v_in}, -+ {0x8b, &data->v_out}, -+ {0x89, &data->i_in}, -+ {0x8c, &data->i_out}, -+ {0x96, &data->p_out}, -+ {0x97, &data->p_in}, -+ {0x8d, &(data->temp_input[0])}, -+ {0x8e, &(data->temp_input[1])}, -+ {0x3b, &(data->fan_duty_cycle[0])}, -+ {0x3c, &(data->fan_duty_cycle[1])}, -+ {0x90, &(data->fan_speed[0])}, -+ {0x91, &(data->fan_speed[1])}}; -+ -+ dev_dbg(&client->dev, "Starting cpr_4011_4mxx update\n"); -+ -+ /* Read byte data */ -+ for (i = 0; i < ARRAY_SIZE(regs_byte); i++) { -+ status = cpr_4011_4mxx_read_byte(client, regs_byte[i].reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", -+ regs_byte[i].reg, status); -+ } -+ else { -+ *(regs_byte[i].value) = status; -+ } -+ } -+ -+ /* Read word data */ -+ for (i = 0; i < ARRAY_SIZE(regs_word); i++) { -+ status = cpr_4011_4mxx_read_word(client, regs_word[i].reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", -+ regs_word[i].reg, status); -+ } -+ else { -+ *(regs_word[i].value) = status; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init cpr_4011_4mxx_init(void) -+{ -+ return i2c_add_driver(&cpr_4011_4mxx_driver); -+} -+ -+static void __exit cpr_4011_4mxx_exit(void) -+{ -+ i2c_del_driver(&cpr_4011_4mxx_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("CPR_4011_4MXX driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(cpr_4011_4mxx_init); -+module_exit(cpr_4011_4mxx_exit); diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch deleted file mode 100644 index 23546620..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch +++ /dev/null @@ -1,50 +0,0 @@ -diff --git a/drivers/net/ethernet/broadcom/tg3/tg3.c b/drivers/net/ethernet/broadcom/tg3/tg3.c -index 4894a11..9b7b7b4 100644 ---- a/drivers/net/ethernet/broadcom/tg3/tg3.c -+++ b/drivers/net/ethernet/broadcom/tg3/tg3.c -@@ -561,6 +561,7 @@ static const struct { - - #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) - -+static int as7716 = -1; /* as7716=1: as7716 switch is used, it needs as7716 specific patch */ - - static void tg3_write32(struct tg3 *tp, u32 off, u32 val) - { -@@ -1628,6 +1629,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp) - static void tg3_mdio_start(struct tg3 *tp) - { - tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; -+ -+ if (as7716 == 1) -+ tp->mi_mode |= MAC_MI_MODE_SHORT_PREAMBLE; /* as7716: for accessing external PHY(0x1F) BCM54616S */ -+ - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - -@@ -2899,6 +2904,11 @@ static int tg3_phy_reset(struct tg3 *tp) - } - } - -+ if (as7716 == 1 && tp->phy_id == TG3_PHY_ID_BCM5718S) { -+ __tg3_writephy(tp, 0x8, 0x10, 0x1d0); /* as7716: set internal phy 0x8 to make linkup */ -+ __tg3_writephy(tp, 0x1f, 0x4, 0x5e1); /* as7716 enable 10/100 cability of external phy BCM 54616S*/ -+ } -+ - if (tg3_flag(tp, 5717_PLUS) && - (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) - return 0; -@@ -19874,6 +19884,14 @@ static struct pci_driver tg3_driver 
= { - - static int __init tg3_init(void) - { -+ extern int platform_accton_as7716_32x(void); -+ if (platform_accton_as7716_32x()) { -+ as7716 = 1; -+ printk_once(KERN_INFO "\nAS7716-32X\n"); -+ } -+ else -+ as7716 = 0; -+ - #ifdef TG3_VMWARE_NETQ_ENABLE - int i; - for (i = 0; i < TG3_MAX_NIC; i++) { diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch deleted file mode 100644 index af063811..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch +++ /dev/null @@ -1,2615 +0,0 @@ -Device driver patches for accton as5512 (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 89c619d..968bd5f 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1574,6 +1574,24 @@ config SENSORS_ACCTON_AS5812_54t_PSU - This driver can also be built as a module. If so, the module will - be called accton_as5812_54t_psu. - -+config SENSORS_ACCTON_AS5512_54X_PSU -+ tristate "Accton as5512 54x psu" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as5512 54x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5512_54x_psu. -+ -+config SENSORS_ACCTON_AS5512_54X_FAN -+ tristate "Accton as5512 54x fan" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as5512 54x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5512_54x_fan. -+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index de922bc..b8ee7b0 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -36,6 +36,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_FAN) += accton_as6812_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_PSU) += accton_as6812_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_PSU) += accton_as5812_54t_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_PSU) += accton_as5512_54x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_FAN) += accton_as5512_54x_fan.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as5512_54x_fan.c b/drivers/hwmon/accton_as5512_54x_fan.c -new file mode 100644 -index 0000000..67e3dd6 ---- /dev/null -+++ b/drivers/hwmon/accton_as5512_54x_fan.c -@@ -0,0 +1,454 @@ -+/* -+ * A hwmon driver for the Accton as5512 54x fan control -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FAN_MAX_NUMBER 5 -+#define FAN_SPEED_CPLD_TO_RPM_STEP 150 -+#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 -+#define FAN_DUTY_CYCLE_MIN 0 -+#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ -+ -+#define CPLD_REG_FAN_STATUS_OFFSET 0x0C -+#define CPLD_REG_FANR_STATUS_OFFSET 0x1E -+#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1D -+ -+#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 -+#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 -+#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 -+#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 -+#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 -+ -+#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 -+#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 -+#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A -+#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B -+#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C -+ -+#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0x0D -+ -+#define CPLD_FAN1_INFO_BIT_MASK 0x01 -+#define CPLD_FAN2_INFO_BIT_MASK 0x02 -+#define CPLD_FAN3_INFO_BIT_MASK 0x04 -+#define CPLD_FAN4_INFO_BIT_MASK 0x08 -+#define CPLD_FAN5_INFO_BIT_MASK 0x10 -+ -+#define PROJECT_NAME -+ -+#define LOCAL_DEBUG 0 -+ -+static struct accton_as5512_54x_fan *fan_data = NULL; -+ -+struct accton_as5512_54x_fan { -+ struct platform_device *pdev; -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ -+ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ -+ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ -+ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ -+ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ -+ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ -+}; -+ -+/*******************/ -+#define MAKE_FAN_MASK_OR_REG(name,type) \ -+ CPLD_FAN##type##1_##name, \ -+ CPLD_FAN##type##2_##name, \ -+ CPLD_FAN##type##3_##name, \ -+ CPLD_FAN##type##4_##name, \ -+ CPLD_FAN##type##5_##name, -+ -+/* fan related data -+ */ -+static const u8 fan_info_mask[] = { -+ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) -+}; -+ -+static const u8 fan_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) -+}; -+ -+static const u8 fanr_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) -+}; -+ -+/*******************/ -+#define DEF_FAN_SET(id) \ -+ FAN##id##_FAULT, \ -+ FAN##id##_SPEED, \ -+ FAN##id##_DUTY_CYCLE, \ -+ FAN##id##_DIRECTION, \ -+ FANR##id##_FAULT, \ -+ FANR##id##_SPEED, -+ -+enum sysfs_fan_attributes { -+ DEF_FAN_SET(1) -+ DEF_FAN_SET(2) -+ DEF_FAN_SET(3) -+ DEF_FAN_SET(4) -+ DEF_FAN_SET(5) -+}; -+/*******************/ -+static void accton_as5512_54x_fan_update_device(struct device *dev); -+static int accton_as5512_54x_fan_read_value(u8 reg); -+static int accton_as5512_54x_fan_write_value(u8 reg, u8 value); -+ -+static ssize_t fan_set_duty_cycle(struct device *dev, -+ struct device_attribute *da,const char *buf, size_t count); -+static ssize_t fan_show_value(struct device *dev, -+ struct device_attribute *da, char *buf); -+ -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+ -+/*******************/ -+#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ -+ static 
SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); -+ -+#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) -+ -+#define _MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ -+ fan_set_duty_cycle, FAN1_DUTY_CYCLE); -+ -+#define MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) _MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) -+ -+ -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) -+MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(PROJECT_NAME,) -+/*******************/ -+ -+#define _MAKE_FAN_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, -+ -+#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) -+ -+#define _MAKE_FAN_DUTY_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr, -+ -+#define MAKE_FAN_DUTY_ATTR(prj, id) _MAKE_FAN_DUTY_ATTR(prj, id) -+ -+static struct attribute *accton_as5512_54x_fan_attributes[] = { -+ /* fan related attributes */ -+ MAKE_FAN_ATTR(PROJECT_NAME,1) -+ MAKE_FAN_ATTR(PROJECT_NAME,2) -+ MAKE_FAN_ATTR(PROJECT_NAME,3) -+ MAKE_FAN_ATTR(PROJECT_NAME,4) -+ MAKE_FAN_ATTR(PROJECT_NAME,5) -+ MAKE_FAN_DUTY_ATTR(PROJECT_NAME,) -+ NULL -+}; -+/*******************/ -+ -+/* fan related functions -+ */ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ ssize_t ret = 0; -+ int data_index, type_index; -+ -+ accton_as5512_54x_fan_update_device(dev); -+ -+ if (fan_data->valid == 0) { -+ return ret; -+ } -+ -+ type_index = attr->index%FAN2_FAULT; -+ data_index = attr->index/FAN2_FAULT; -+ -+ switch (type_index) { -+ case FAN1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DUTY_CYCLE: -+ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DIRECTION: -+ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); -+ if (LOCAL_DEBUG) -+ 
printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ default: -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); -+ break; -+ } -+ -+ return ret; -+} -+/*******************/ -+static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) { -+ -+ int error, value; -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) -+ return -EINVAL; -+ -+ accton_as5512_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); -+ -+ fan_data->valid = 0; -+ -+ return count; -+} -+ -+static const struct attribute_group accton_as5512_54x_fan_group = { -+ .attrs = accton_as5512_54x_fan_attributes, -+}; -+ -+static int accton_as5512_54x_fan_read_value(u8 reg) -+{ -+ return accton_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5512_54x_fan_write_value(u8 reg, u8 value) -+{ -+ return accton_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5512_54x_fan_update_device(struct device *dev) -+{ -+ int speed, r_speed, fault, r_fault, ctrl_speed, direction; -+ int i; -+ -+ mutex_lock(&fan_data->update_lock); -+ -+ if (LOCAL_DEBUG) -+ printk ("Starting accton_as5512_54x_fan update \n"); -+ -+ if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { -+ /* do nothing */ -+ goto _exit; -+ } -+ -+ fan_data->valid = 0; -+ -+ if (LOCAL_DEBUG) -+ printk ("Starting accton_as5512_54x_fan update 2 \n"); -+ -+ fault = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); -+ r_fault = accton_as5512_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); -+ direction = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); -+ ctrl_speed = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); -+ -+ if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) -+ { -+ if (LOCAL_DEBUG) -+ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ if (LOCAL_DEBUG) -+ printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); -+ -+ for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; -+ if (LOCAL_DEBUG) -+ printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); -+ -+ fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; -+ fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; -+ fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; -+ -+ /* fan speed -+ */ -+ speed = accton_as5512_54x_fan_read_value(fan_speed_reg[i]); -+ r_speed = accton_as5512_54x_fan_read_value(fanr_speed_reg[i]); -+ if ( (speed < 0) || (r_speed < 0) ) -+ { -+ if (LOCAL_DEBUG) -+ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ if (LOCAL_DEBUG) -+ printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); -+ -+ fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ } -+ -+ /* finish to update */ -+ fan_data->last_updated = jiffies; -+ fan_data->valid = 1; -+ -+_exit: -+ mutex_unlock(&fan_data->update_lock); -+} -+ -+static int 
accton_as5512_54x_fan_probe(struct platform_device *pdev) -+{ -+ int status = -1; -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&pdev->dev.kobj, &accton_as5512_54x_fan_group); -+ if (status) { -+ goto exit; -+ -+ } -+ -+ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); -+ if (IS_ERR(fan_data->hwmon_dev)) { -+ status = PTR_ERR(fan_data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&pdev->dev, "accton_as5512_54x_fan\n"); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&pdev->dev.kobj, &accton_as5512_54x_fan_group); -+exit: -+ return status; -+} -+ -+static int accton_as5512_54x_fan_remove(struct platform_device *pdev) -+{ -+ hwmon_device_unregister(fan_data->hwmon_dev); -+ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5512_54x_fan_group); -+ -+ return 0; -+} -+ -+#define DRVNAME "as5512_54x_fan" -+ -+static struct platform_driver accton_as5512_54x_fan_driver = { -+ .probe = accton_as5512_54x_fan_probe, -+ .remove = accton_as5512_54x_fan_remove, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5512_54x_fan_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5512_54x(void); -+ if(!platform_accton_as5512_54x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as5512_54x_fan_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ fan_data = kzalloc(sizeof(struct accton_as5512_54x_fan), GFP_KERNEL); -+ if (!fan_data) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5512_54x_fan_driver); -+ goto exit; -+ } -+ -+ mutex_init(&fan_data->update_lock); -+ fan_data->valid = 0; -+ -+ fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(fan_data->pdev)) { -+ ret = PTR_ERR(fan_data->pdev); -+ platform_driver_unregister(&accton_as5512_54x_fan_driver); -+ kfree(fan_data); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5512_54x_fan_exit(void) -+{ -+ platform_device_unregister(fan_data->pdev); -+ platform_driver_unregister(&accton_as5512_54x_fan_driver); -+ kfree(fan_data); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5512_54x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_as5512_54x_fan_init); -+module_exit(accton_as5512_54x_fan_exit); -+ -+ -diff --git a/drivers/hwmon/accton_as5512_54x_psu.c b/drivers/hwmon/accton_as5512_54x_psu.c -new file mode 100644 -index 0000000..66d61f3 ---- /dev/null -+++ b/drivers/hwmon/accton_as5512_54x_psu.c -@@ -0,0 +1,295 @@ -+/* -+ * An hwmon driver for accton as5512_54x Power Module -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as5512_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5512_54x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[14]; /* Model name, read from eeprom */ -+}; -+ -+static struct as5512_54x_psu_data *as5512_54x_psu_update_device(struct device *dev); -+ -+enum as5512_54x_psu_sysfs_attributes { -+ PSU_INDEX, -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as5512_54x_psu_attributes[] = { -+ &sensor_dev_attr_psu_index.dev_attr.attr, -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5512_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->index); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5512_54x_psu_data *data = as5512_54x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((data->index - 1) * 4) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5512_54x_psu_data *data = as5512_54x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s", data->model_name); -+} -+ -+static const struct attribute_group as5512_54x_psu_group = { -+ .attrs = as5512_54x_psu_attributes, -+}; -+ -+static int as5512_54x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5512_54x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5512_54x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ 
dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5512_54x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x38 || client->addr == 0x50) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x3b || client->addr == 0x53) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5512_54x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5512_54x_psu_remove(struct i2c_client *client) -+{ -+ struct as5512_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5512_54x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as5512_54x_psu_id[] = { -+ { "as5512_54x_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as5512_54x_psu_id); -+ -+static struct i2c_driver as5512_54x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5512_54x_psu", -+ }, -+ .probe = as5512_54x_psu_probe, -+ .remove = as5512_54x_psu_remove, -+ .id_table = as5512_54x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as5512_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as5512_54x_psu_data *as5512_54x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5512_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ -+ dev_dbg(&client->dev, "Starting as5512_54x update\n"); -+ -+ /* Read model name */ -+ if (client->addr == 0x38 || client->addr == 0x3b) { -+ /* AC power */ -+ status = as5512_54x_psu_read_block(client, 0x26, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ else { -+ /* DC power */ -+ status = as5512_54x_psu_read_block(client, 0x50, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; -+ } -+ -+ /* Read psu status */ -+ status = accton_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as5512_54x_psu_init(void) -+{ -+ extern int platform_accton_as5512_54x(void); -+ if(!platform_accton_as5512_54x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as5512_54x_psu_driver); -+} -+ -+static void __exit as5512_54x_psu_exit(void) -+{ -+ i2c_del_driver(&as5512_54x_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); 
-+MODULE_DESCRIPTION("accton as5512_54x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as5512_54x_psu_init); -+module_exit(as5512_54x_psu_exit); -+ -diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c -index acf88c9..e50c599 100644 ---- a/drivers/hwmon/accton_i2c_cpld.c -+++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -255,6 +255,29 @@ int platform_accton_as5812_54t(void) - } - EXPORT_SYMBOL(platform_accton_as5812_54t); - -+static struct dmi_system_id as5512_54x_dmi_table[] = { -+ { -+ .ident = "Accton AS5512", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), -+ }, -+ }, -+ { -+ .ident = "Accton AS5512", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), -+ }, -+ }, -+}; -+ -+int platform_accton_as5512_54x(void) -+{ -+ return dmi_check_system(as5512_54x_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as5512_54x); -+ - MODULE_AUTHOR("Brandon Chuang "); - MODULE_DESCRIPTION("accton_i2c_cpld driver"); - MODULE_LICENSE("GPL"); -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 599b97b..9ba4a1b 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -88,7 +88,14 @@ config LEDS_ACCTON_AS5812_54t - help - This option enables support for the LEDs on the Accton as5812 54t. - Say Y to enable LEDs on the Accton as5812 54t. -- -+ -+config LEDS_ACCTON_AS5512_54X -+ tristate "LED support for the Accton as5512 54x" -+ depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD -+ help -+ This option enables support for the LEDs on the Accton as5512 54x. -+ Say Y to enable LEDs on the Accton as5512 54x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index bd20baa..ff3be6c 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -50,6 +50,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o - obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS5812_54t) += leds-accton_as5812_54t.o -+obj-$(CONFIG_LEDS_ACCTON_AS5512_54X) += leds-accton_as5512_54x.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as5512_54x.c b/drivers/leds/leds-accton_as5512_54x.c -new file mode 100644 -index 0000000..761483a ---- /dev/null -+++ b/drivers/leds/leds-accton_as5512_54x.c -@@ -0,0 +1,463 @@ -+/* -+ * A LED driver for the accton_as5512_54x_led -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "as5512_54x_led" -+ -+struct accton_as5512_54x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[2]; /* Register value, 0 = LOC/DIAG/FAN LED -+ 1 = PSU1/PSU2 LED */ -+}; -+ -+static struct accton_as5512_54x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+#define LED_TYPE_PSU1_REG_MASK 0x03 -+#define LED_MODE_PSU1_GREEN_MASK 0x02 -+#define LED_MODE_PSU1_AMBER_MASK 0x01 -+#define LED_MODE_PSU1_OFF_MASK 0x03 -+#define LED_MODE_PSU1_AUTO_MASK 0x00 -+ -+#define LED_TYPE_PSU2_REG_MASK 0x0C -+#define LED_MODE_PSU2_GREEN_MASK 0x08 -+#define LED_MODE_PSU2_AMBER_MASK 0x04 -+#define LED_MODE_PSU2_OFF_MASK 0x0C -+#define LED_MODE_PSU2_AUTO_MASK 0x00 -+ -+#define LED_TYPE_DIAG_REG_MASK 0x0C -+#define LED_MODE_DIAG_GREEN_MASK 0x08 -+#define LED_MODE_DIAG_AMBER_MASK 0x04 -+#define LED_MODE_DIAG_OFF_MASK 0x0C -+ -+#define LED_TYPE_FAN_REG_MASK 0x03 -+#define LED_MODE_FAN_GREEN_MASK 0x02 -+#define LED_MODE_FAN_AMBER_MASK 0x01 -+#define LED_MODE_FAN_OFF_MASK 0x03 -+#define LED_MODE_FAN_AUTO_MASK 0x00 -+ -+#define LED_TYPE_LOC_REG_MASK 0x30 -+#define LED_MODE_LOC_ON_MASK 0x00 -+#define LED_MODE_LOC_OFF_MASK 0x10 -+#define LED_MODE_LOC_BLINK_MASK 0x20 -+ -+static const u8 led_reg[] = { -+ 0xA, /* LOC/DIAG/FAN LED*/ -+ 0xB, /* PSU1/PSU2 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2, -+ LED_TYPE_DIAG, -+ LED_TYPE_FAN, -+ LED_TYPE_LOC -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED, -+ LED_MODE_RED_BLINK, -+ LED_MODE_BLUE, -+ LED_MODE_BLUE_BLINK, -+ LED_MODE_AUTO, -+ LED_MODE_UNKNOWN -+}; -+ -+struct led_type_mode { -+ enum led_type type; -+ int type_mask; -+ enum led_light_mode mode; -+ int mode_mask; -+}; -+ -+static struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, 
LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} -+}; -+ -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if ((led_type_mode_data[i].type_mask & reg_val) == -+ led_type_mode_data[i].mode_mask) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return 0; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ reg_val = led_type_mode_data[i].mode_mask | -+ (reg_val & (~led_type_mode_data[i].type_mask)); -+ } -+ -+ return reg_val; -+} -+ -+static int accton_as5512_54x_led_read_value(u8 reg) -+{ -+ return accton_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5512_54x_led_write_value(u8 reg, u8 value) -+{ -+ return accton_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5512_54x_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as5512_54x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as5512_54x_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5512_54x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as5512_54x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as5512_54x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5512_54x_led_psu_1_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); -+} -+ -+static enum led_brightness accton_as5512_54x_led_psu_1_get(struct led_classdev *cdev) -+{ -+ accton_as5512_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); -+} -+ -+static void accton_as5512_54x_led_psu_2_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); -+} -+ -+static enum led_brightness accton_as5512_54x_led_psu_2_get(struct led_classdev *cdev) -+{ 
-+ accton_as5512_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); -+} -+ -+static void accton_as5512_54x_led_fan_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); -+} -+ -+static enum led_brightness accton_as5512_54x_led_fan_get(struct led_classdev *cdev) -+{ -+ accton_as5512_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); -+} -+ -+static void accton_as5512_54x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as5512_54x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as5512_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as5512_54x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as5512_54x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as5512_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static struct led_classdev accton_as5512_54x_leds[] = { -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as5512_54x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5512_54x_led_psu_1_set, -+ .brightness_get = accton_as5512_54x_led_psu_1_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as5512_54x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5512_54x_led_psu_2_set, -+ .brightness_get = accton_as5512_54x_led_psu_2_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as5512_54x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5512_54x_led_fan_set, -+ .brightness_get = accton_as5512_54x_led_fan_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as5512_54x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5512_54x_led_diag_set, -+ .brightness_get = accton_as5512_54x_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as5512_54x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5512_54x_led_loc_set, -+ .brightness_get = accton_as5512_54x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as5512_54x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { -+ led_classdev_suspend(&accton_as5512_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5512_54x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { -+ led_classdev_resume(&accton_as5512_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5512_54x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as5512_54x_leds[i]); -+ -+ if (ret < 0) 
-+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as5512_54x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as5512_54x_leds[j]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as5512_54x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { -+ led_classdev_unregister(&accton_as5512_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as5512_54x_led_driver = { -+ .probe = accton_as5512_54x_led_probe, -+ .remove = accton_as5512_54x_led_remove, -+ .suspend = accton_as5512_54x_led_suspend, -+ .resume = accton_as5512_54x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5512_54x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5512_54x(void); -+ if(!platform_accton_as5512_54x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as5512_54x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as5512_54x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5512_54x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as5512_54x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5512_54x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as5512_54x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as5512_54x_led_init); -+module_exit(accton_as5512_54x_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5512_54x_led driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index c75227b..d90ebe2 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -135,7 +135,16 @@ config EEPROM_ACCTON_AS5812_54t_SFP - - This driver can also be built as a module. If so, the module will - be called accton_as5812_54t_sfp. --+ -+config EEPROM_ACCTON_AS5512_54X_SFP -+ tristate "Accton as5512_54x sfp" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as5512_54x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5512_54x_sfp.
-+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 152a8bc..907f836 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -13,4 +13,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5812_54t_SFP) += accton_as5812_54t_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS5512_54X_SFP) += accton_as5512_54x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as5512_54x_sfp.c b/drivers/misc/eeprom/accton_as5512_54x_sfp.c -new file mode 100644 -index 0000000..d89e71d ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as5512_54x_sfp.c -@@ -0,0 +1,1237 @@ -+/* -+ * SFP driver for accton as5512_54x sfp -+ * -+ * Copyright (C) Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRIVER_NAME "as5512_54x_sfp" -+ -+#define DEBUG_MODE 0 -+ -+#if (DEBUG_MODE == 1) -+ #define DEBUG_PRINT(fmt, args...) \ -+ printk (KERN_INFO "%s:%s[%d]: " fmt "\r\n", __FILE__, __FUNCTION__, __LINE__, ##args) -+#else -+ #define DEBUG_PRINT(fmt, args...) 
-+#endif -+ -+#define NUM_OF_SFP_PORT 54 -+#define EEPROM_NAME "sfp_eeprom" -+#define EEPROM_SIZE 256 /* 256 byte eeprom */ -+#define BIT_INDEX(i) (1ULL << (i)) -+#define USE_I2C_BLOCK_READ 1 -+#define I2C_RW_RETRY_COUNT 3 -+#define I2C_RW_RETRY_INTERVAL 100 /* ms */ -+ -+#define SFP_EEPROM_A0_I2C_ADDR (0xA0 >> 1) -+#define SFP_EEPROM_A2_I2C_ADDR (0xA2 >> 1) -+ -+#define SFF8024_PHYSICAL_DEVICE_ID_ADDR 0x0 -+#define SFF8024_DEVICE_ID_SFP 0x3 -+#define SFF8024_DEVICE_ID_QSFP 0xC -+#define SFF8024_DEVICE_ID_QSFP_PLUS 0xD -+#define SFF8024_DEVICE_ID_QSFP28 0x11 -+ -+#define SFF8472_DIAG_MON_TYPE_ADDR 92 -+#define SFF8472_DIAG_MON_TYPE_DDM_MASK 0x40 -+#define SFF8472_10G_ETH_COMPLIANCE_ADDR 0x3 -+#define SFF8472_10G_BASE_MASK 0xF0 -+ -+#define SFF8436_RX_LOS_ADDR 3 -+#define SFF8436_TX_FAULT_ADDR 4 -+#define SFF8436_TX_DISABLE_ADDR 86 -+ -+static ssize_t sfp_eeprom_read(struct i2c_client *, u8, u8 *,int); -+static ssize_t sfp_eeprom_write(struct i2c_client *, u8 , const char *,int); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_I2C_ADDR, SFP_EEPROM_A2_I2C_ADDR, I2C_CLIENT_END }; -+ -+#define CPLD_PORT_TO_FRONT_PORT(port) (port+1) -+ -+enum port_numbers { -+sfp1, sfp2, sfp3, sfp4, sfp5, sfp6, sfp7, sfp8, -+sfp9, sfp10, sfp11, sfp12, sfp13, sfp14, sfp15, sfp16, -+sfp17, sfp18, sfp19, sfp20, sfp21, sfp22, sfp23, sfp24, -+sfp25, sfp26, sfp27, sfp28, sfp29, sfp30, sfp31, sfp32, -+sfp33, sfp34, sfp35, sfp36, sfp37, sfp38, sfp39, sfp40, -+sfp41, sfp42, sfp43, sfp44, sfp45, sfp46, sfp47, sfp48, -+sfp49, sfp50, sfp51, sfp52, sfp53, sfp54 -+}; -+ -+static const struct i2c_device_id sfp_device_id[] = { -+{ "sfp1", sfp1 }, { "sfp2", sfp2 }, { "sfp3", sfp3 }, { "sfp4", sfp4 }, -+{ "sfp5", sfp5 }, { "sfp6", sfp6 }, { "sfp7", sfp7 }, { "sfp8", sfp8 }, -+{ "sfp9", sfp9 }, { "sfp10", sfp10 }, { "sfp11", sfp11 }, { "sfp12", sfp12 }, -+{ "sfp13", sfp13 }, { "sfp14", sfp14 }, { "sfp15", sfp15 }, { "sfp16", sfp16 }, -+{ "sfp17", sfp17 }, { "sfp18", sfp18 }, { "sfp19", sfp19 }, { "sfp20", sfp20 }, -+{ "sfp21", sfp21 }, { "sfp22", sfp22 }, { "sfp23", sfp23 }, { "sfp24", sfp24 }, -+{ "sfp25", sfp25 }, { "sfp26", sfp26 }, { "sfp27", sfp27 }, { "sfp28", sfp28 }, -+{ "sfp29", sfp29 }, { "sfp30", sfp30 }, { "sfp31", sfp31 }, { "sfp32", sfp32 }, -+{ "sfp33", sfp33 }, { "sfp34", sfp34 }, { "sfp35", sfp35 }, { "sfp36", sfp36 }, -+{ "sfp37", sfp37 }, { "sfp38", sfp38 }, { "sfp39", sfp39 }, { "sfp40", sfp40 }, -+{ "sfp41", sfp41 }, { "sfp42", sfp42 }, { "sfp43", sfp43 }, { "sfp44", sfp44 }, -+{ "sfp45", sfp45 }, { "sfp46", sfp46 }, { "sfp47", sfp47 }, { "sfp48", sfp48 }, -+{ "sfp49", sfp49 }, { "sfp50", sfp50 }, { "sfp51", sfp51 }, { "sfp52", sfp52 }, -+{ "sfp53", sfp53 }, { "sfp54", sfp54 }, -+{ /* LIST END */ } -+}; -+MODULE_DEVICE_TABLE(i2c, sfp_device_id); -+ -+/* -+ * list of valid port types -+ * note OOM_PORT_TYPE_NOT_PRESENT to indicate no -+ * module is present in this port -+ */ -+typedef enum oom_driver_port_type_e { -+ OOM_DRIVER_PORT_TYPE_INVALID, -+ OOM_DRIVER_PORT_TYPE_NOT_PRESENT, -+ OOM_DRIVER_PORT_TYPE_SFP, -+ OOM_DRIVER_PORT_TYPE_SFP_PLUS, -+ OOM_DRIVER_PORT_TYPE_QSFP, -+ OOM_DRIVER_PORT_TYPE_QSFP_PLUS, -+ OOM_DRIVER_PORT_TYPE_QSFP28 -+} oom_driver_port_type_t; -+ -+enum driver_type_e { -+ DRIVER_TYPE_SFP_MSA, -+ DRIVER_TYPE_SFP_DDM, -+ DRIVER_TYPE_QSFP -+}; -+ -+/* Each client has this additional data -+ */ 
-+struct eeprom_data { -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ struct bin_attribute bin; /* eeprom data */ -+}; -+ -+struct sfp_msa_data { -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u64 status[6]; /* bit0:port0, bit1:port1 and so on */ -+ /* index 0 => tx_fail -+ 1 => tx_disable -+ 2 => rx_loss -+ 3 => device id -+ 4 => 10G Ethernet Compliance Codes -+ to distinguish SFP or SFP+ -+ 5 => DIAGNOSTIC MONITORING TYPE */ -+ struct eeprom_data eeprom; -+}; -+ -+struct sfp_ddm_data { -+ struct eeprom_data eeprom; -+}; -+ -+struct qsfp_data { -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[3]; /* bit0:port0, bit1:port1 and so on */ -+ /* index 0 => tx_fail -+ 1 => tx_disable -+ 2 => rx_loss */ -+ -+ u8 device_id; -+ struct eeprom_data eeprom; -+}; -+ -+struct sfp_port_data { -+ struct mutex update_lock; -+ enum driver_type_e driver_type; -+ int port; /* CPLD port index */ -+ oom_driver_port_type_t port_type; -+ u64 present; /* present status, bit0:port0, bit1:port1 and so on */ -+ -+ struct sfp_msa_data *msa; -+ struct sfp_ddm_data *ddm; -+ struct qsfp_data *qsfp; -+ -+ struct i2c_client *client; -+}; -+ -+enum sfp_sysfs_attributes { -+ PRESENT, -+ PRESENT_ALL, -+ PORT_NUMBER, -+ PORT_TYPE, -+ DDM_IMPLEMENTED, -+ TX_FAULT, -+ TX_FAULT1, -+ TX_FAULT2, -+ TX_FAULT3, -+ TX_FAULT4, -+ TX_DISABLE, -+ TX_DISABLE1, -+ TX_DISABLE2, -+ TX_DISABLE3, -+ TX_DISABLE4, -+ RX_LOS, -+ RX_LOS1, -+ RX_LOS2, -+ RX_LOS3, -+ RX_LOS4, -+ RX_LOS_ALL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); -+} -+ -+static struct sfp_port_data* sfp_update_present(struct i2c_client *client) -+{ -+ int i = 0, j = 0, status = -1; -+ u8 reg; -+ unsigned short cpld_addr; -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ -+ DEBUG_PRINT("Starting sfp present status update"); -+ mutex_lock(&data->update_lock); -+ data->present = 0; -+ -+ /* Read present status of port 1~48(SFP port) */ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 3; j++) { -+ cpld_addr = 0x61+i; -+ reg = 0x6+j; -+ status = accton_i2c_cpld_read(cpld_addr, reg); -+ -+ if (unlikely(status < 0)) { -+ data = ERR_PTR(status); -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); -+ goto exit; -+ } -+ -+ DEBUG_PRINT("Present status = 0x%lx\r\n", data->present); -+ data->present |= (u64)status << ((i*24) + (j%3)*8); -+ } -+ } -+ -+ /* Read present status of port 49-54(QSFP port) */ -+ cpld_addr = 0x62; -+ reg = 0x14; -+ status = accton_i2c_cpld_read(cpld_addr, reg); -+ -+ if (unlikely(status < 0)) { -+ data = ERR_PTR(status); -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); -+ goto exit; -+ } -+ else { -+ data->present |= (u64)status << 48; -+ } -+ -+ DEBUG_PRINT("Present status = 0x%lx", data->present); -+exit: -+ mutex_unlock(&data->update_lock); -+ return data; -+} -+ -+static struct sfp_port_data* sfp_update_tx_rx_status(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ int i = 0, j = 0; -+ int status = -1; -+ -+ if (time_before(jiffies, data->msa->last_updated + HZ + HZ / 2) && data->msa->valid) { -+ 
return data; -+ } -+ -+ DEBUG_PRINT("Starting as5512_54x sfp tx rx status update"); -+ mutex_lock(&data->update_lock); -+ data->msa->valid = 0; -+ memset(data->msa->status, 0, sizeof(data->msa->status)); -+ -+ /* Read status of port 1~48(SFP port) */ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 9; j++) { -+ u8 reg; -+ unsigned short cpld_addr; -+ reg = 0x9+j; -+ cpld_addr = 0x61+i; -+ -+ status = accton_i2c_cpld_read(cpld_addr, reg); -+ if (unlikely(status < 0)) { -+ data = ERR_PTR(status); -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); -+ goto exit; -+ } -+ -+ data->msa->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); -+ } -+ } -+ -+ data->msa->valid = 1; -+ data->msa->last_updated = jiffies; -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ return data; -+} -+ -+static ssize_t sfp_set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ unsigned short cpld_addr = 0; -+ u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; -+ long disable; -+ int error; -+ -+ error = kstrtol(buf, 10, &disable); -+ if (error) { -+ return error; -+ } -+ -+ mutex_lock(&data->update_lock); -+ -+ if(data->port < 24) { -+ cpld_addr = 0x61; -+ cpld_reg = 0xC + data->port / 8; -+ cpld_bit = 1 << (data->port % 8); -+ } -+ else { /* port 24 ~ 48 */ -+ cpld_addr = 0x62; -+ cpld_reg = 0xC + (data->port - 24) / 8; -+ cpld_bit = 1 << (data->port % 8); -+ } -+ -+ /* Read current status */ -+ cpld_val = accton_i2c_cpld_read(cpld_addr, cpld_reg); -+ -+ /* Update tx_disable status */ -+ if (disable) { -+ data->msa->status[1] |= BIT_INDEX(data->port); -+ cpld_val |= cpld_bit; -+ } -+ else { -+ data->msa->status[1] &= ~BIT_INDEX(data->port); -+ cpld_val &= ~cpld_bit; -+ } -+ -+ accton_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); -+ mutex_unlock(&data->update_lock); -+ return count; -+} -+ -+static int sfp_is_port_present(struct i2c_client *client, int port) -+{ -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ -+ data = sfp_update_present(client); -+ if (IS_ERR(data)) { -+ return PTR_ERR(data); -+ } -+ -+ return (data->present & BIT_INDEX(data->port)) ? 
0 : 1; -+} -+ -+static ssize_t show_present(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct i2c_client *client = to_i2c_client(dev); -+ -+ if (PRESENT_ALL == attr->index) { -+ int i; -+ u8 values[7] = {0}; -+ struct sfp_port_data *data = sfp_update_present(client); -+ -+ if (IS_ERR(data)) { -+ return PTR_ERR(data); -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(values); i++) { -+ values[i] = ~(u8)(data->present >> (i * 8)); -+ } -+ -+ /* Return values 1 -> 54 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], -+ values[3], values[4], values[5], -+ values[6] & 0x3F); -+ } -+ else { -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ int present = sfp_is_port_present(client, data->port); -+ -+ if (IS_ERR_VALUE(present)) { -+ return present; -+ } -+ -+ /* PRESENT */ -+ return sprintf(buf, "%d\n", present); -+ } -+} -+ -+static struct sfp_port_data *sfp_update_port_type(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ u8 buf = 0; -+ int status; -+ -+ mutex_lock(&data->update_lock); -+ -+ switch (data->driver_type) { -+ case DRIVER_TYPE_SFP_MSA: -+ { -+ status = sfp_eeprom_read(client, SFF8024_PHYSICAL_DEVICE_ID_ADDR, &buf, sizeof(buf)); -+ if (unlikely(status < 0)) { -+ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; -+ break; -+ } -+ -+ if (buf != SFF8024_DEVICE_ID_SFP) { -+ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; -+ break; -+ } -+ -+ status = sfp_eeprom_read(client, SFF8472_10G_ETH_COMPLIANCE_ADDR, &buf, sizeof(buf)); -+ if (unlikely(status < 0)) { -+ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; -+ break; -+ } -+ -+ DEBUG_PRINT("sfp port type (0x3) data = (0x%x)", buf); -+ data->port_type = buf & SFF8472_10G_BASE_MASK ? 
OOM_DRIVER_PORT_TYPE_SFP_PLUS : OOM_DRIVER_PORT_TYPE_SFP; -+ break; -+ } -+ case DRIVER_TYPE_QSFP: -+ { -+ status = sfp_eeprom_read(client, SFF8024_PHYSICAL_DEVICE_ID_ADDR, &buf, sizeof(buf)); -+ if (unlikely(status < 0)) { -+ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; -+ break; -+ } -+ -+ DEBUG_PRINT("qsfp port type (0x0) buf = (0x%x)", buf); -+ switch (buf) { -+ case SFF8024_DEVICE_ID_QSFP: -+ data->port_type = OOM_DRIVER_PORT_TYPE_QSFP; -+ break; -+ case SFF8024_DEVICE_ID_QSFP_PLUS: -+ data->port_type = OOM_DRIVER_PORT_TYPE_QSFP_PLUS; -+ break; -+ case SFF8024_DEVICE_ID_QSFP28: -+ data->port_type = OOM_DRIVER_PORT_TYPE_QSFP_PLUS; -+ break; -+ default: -+ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; -+ break; -+ } -+ -+ break; -+ } -+ default: -+ break; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ return data; -+} -+ -+static ssize_t show_port_type(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ int present = sfp_is_port_present(client, data->port); -+ -+ if (IS_ERR_VALUE(present)) { -+ return present; -+ } -+ -+ if (!present) { -+ return sprintf(buf, "%d\n", OOM_DRIVER_PORT_TYPE_NOT_PRESENT); -+ } -+ -+ sfp_update_port_type(dev); -+ return sprintf(buf, "%d\n", data->port_type); -+} -+ -+static struct sfp_port_data* qsfp_update_tx_rx_status(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ int i, status = -1; -+ u8 buf = 0; -+ u8 reg[] = {SFF8436_TX_FAULT_ADDR, SFF8436_TX_DISABLE_ADDR, SFF8436_RX_LOS_ADDR}; -+ -+ if (time_before(jiffies, data->qsfp->last_updated + HZ + HZ / 2) && data->qsfp->valid) { -+ return data; -+ } -+ -+ DEBUG_PRINT("Starting sfp tx rx status update"); -+ mutex_lock(&data->update_lock); -+ data->qsfp->valid = 0; -+ memset(data->qsfp->status, 0, sizeof(data->qsfp->status)); -+ -+ /* Notify device to update tx fault/ tx disable/ rx los status */ -+ for (i = 0; i < ARRAY_SIZE(reg); i++) { -+ status = sfp_eeprom_read(client, reg[i], &buf, sizeof(buf)); -+ if (unlikely(status < 0)) { -+ data = ERR_PTR(status); -+ goto exit; -+ } -+ } -+ msleep(200); -+ -+ /* Read actual tx fault/ tx disable/ rx los status */ -+ for (i = 0; i < ARRAY_SIZE(reg); i++) { -+ status = sfp_eeprom_read(client, reg[i], &buf, sizeof(buf)); -+ if (unlikely(status < 0)) { -+ data = ERR_PTR(status); -+ goto exit; -+ } -+ -+ DEBUG_PRINT("qsfp reg(0x%x) status = (0x%x)", reg[i], data->qsfp->status[i]); -+ data->qsfp->status[i] = (buf & 0xF); -+ } -+ -+ data->qsfp->valid = 1; -+ data->qsfp->last_updated = jiffies; -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ return data; -+} -+ -+static ssize_t qsfp_show_tx_rx_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ int status; -+ u8 val = 0; -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ -+ status = sfp_is_port_present(client, data->port); -+ if (IS_ERR_VALUE(status)) { -+ return status; -+ } -+ -+ data = qsfp_update_tx_rx_status(dev); -+ if (IS_ERR(data)) { -+ return PTR_ERR(data); -+ } -+ -+ switch (attr->index) { -+ case TX_FAULT1: -+ case TX_FAULT2: -+ case TX_FAULT3: -+ case TX_FAULT4: -+ val = (data->qsfp->status[2] & BIT_INDEX(attr->index - TX_FAULT1)) ? 
1 : 0; -+ break; -+ case TX_DISABLE1: -+ case TX_DISABLE2: -+ case TX_DISABLE3: -+ case TX_DISABLE4: -+ val = (data->qsfp->status[1] & BIT_INDEX(attr->index - TX_DISABLE1)) ? 1 : 0; -+ break; -+ case RX_LOS1: -+ case RX_LOS2: -+ case RX_LOS3: -+ case RX_LOS4: -+ val = (data->qsfp->status[0] & BIT_INDEX(attr->index - RX_LOS1)) ? 1 : 0; -+ break; -+ default: -+ break; -+ } -+ -+ return sprintf(buf, "%d\n", val); -+} -+ -+static ssize_t qsfp_set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ long disable; -+ int status; -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct sfp_port_data *data = NULL; -+ -+ status = kstrtol(buf, 10, &disable); -+ if (status) { -+ return status; -+ } -+ -+ data = qsfp_update_tx_rx_status(dev); -+ if (IS_ERR(data)) { -+ return PTR_ERR(data); -+ } -+ -+ mutex_lock(&data->update_lock); -+ -+ if (disable) { -+ data->qsfp->status[1] |= (1 << (attr->index - TX_DISABLE1)); -+ } -+ else { -+ data->qsfp->status[1] &= ~(1 << (attr->index - TX_DISABLE1)); -+ } -+ -+ DEBUG_PRINT("index = (%d), status = (0x%x)", attr->index, data->qsfp->status[1]); -+ status = sfp_eeprom_write(data->client, SFF8436_TX_DISABLE_ADDR, &data->qsfp->status[1], sizeof(data->qsfp->status[1])); -+ if (unlikely(status < 0)) { -+ count = status; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ return count; -+} -+ -+static ssize_t sfp_show_ddm_implemented(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ int status; -+ char ddm; -+ struct i2c_client *client = to_i2c_client(dev); -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ -+ status = sfp_is_port_present(client, data->port); -+ if (IS_ERR_VALUE(status)) { -+ return status; -+ } -+ -+ status = sfp_eeprom_read(client, SFF8472_DIAG_MON_TYPE_ADDR, &ddm, sizeof(ddm)); -+ if (unlikely(status < 0)) { -+ return status; -+ } -+ -+ return sprintf(buf, "%d\n", (ddm & SFF8472_DIAG_MON_TYPE_DDM_MASK) ? 1 : 0); -+} -+ -+static ssize_t sfp_show_tx_rx_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ u8 val = 0, index = 0; -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct sfp_port_data *data = sfp_update_tx_rx_status(dev); -+ -+ if (IS_ERR(data)) { -+ return PTR_ERR(data); -+ } -+ -+ if(attr->index == RX_LOS_ALL) { -+ int i = 0; -+ u8 values[6] = {0}; -+ -+ for (i = 0; i < ARRAY_SIZE(values); i++) { -+ values[i] = (u8)(data->msa->status[2] >> (i * 8)); -+ } -+ -+ /** Return values 1 -> 48 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], -+ values[3], values[4], values[5]); -+ } -+ -+ switch (attr->index) { -+ case TX_FAULT: -+ index = 0; -+ break; -+ case TX_DISABLE: -+ index = 1; -+ break; -+ case RX_LOS: -+ index = 2; -+ break; -+ default: -+ break; -+ } -+ -+ val = (data->msa->status[index] & BIT_INDEX(data->port)) ? 
1 : 0; -+ return sprintf(buf, "%d\n", val); -+} -+ -+/* SFP/QSFP common attributes for sysfs */ -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_port_type, S_IRUGO, show_port_type, NULL, PORT_TYPE); -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, PRESENT_ALL); -+ -+/* QSFP attributes for sysfs */ -+static SENSOR_DEVICE_ATTR(sfp_rx_los1, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS1); -+static SENSOR_DEVICE_ATTR(sfp_rx_los2, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS2); -+static SENSOR_DEVICE_ATTR(sfp_rx_los3, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS3); -+static SENSOR_DEVICE_ATTR(sfp_rx_los4, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS4); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable1, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE1); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable2, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE2); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable3, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE3); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable4, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE4); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault1, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT1); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault2, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT2); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault3, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT3); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault4, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT4); -+static struct attribute *qsfp_attributes[] = { -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_type.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los1.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los2.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los3.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los4.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable1.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable2.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable3.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable4.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault1.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault2.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault3.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault4.dev_attr.attr, -+ NULL -+}; -+ -+/* SFP msa attributes for sysfs */ -+static SENSOR_DEVICE_ATTR(sfp_ddm_implemented, S_IRUGO, sfp_show_ddm_implemented, NULL, DDM_IMPLEMENTED); -+static SENSOR_DEVICE_ATTR(sfp_rx_los, S_IRUGO, sfp_show_tx_rx_status, NULL, RX_LOS); -+static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, sfp_show_tx_rx_status, NULL, RX_LOS_ALL); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, sfp_show_tx_rx_status, sfp_set_tx_disable, TX_DISABLE); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, sfp_show_tx_rx_status, NULL, TX_FAULT); -+static struct attribute *sfp_msa_attributes[] = { -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_type.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_ddm_implemented.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los.dev_attr.attr, -+ 
&sensor_dev_attr_sfp_rx_los_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable.dev_attr.attr, -+ NULL -+}; -+ -+/* SFP ddm attributes for sysfs */ -+static struct attribute *sfp_ddm_attributes[] = { -+ NULL -+}; -+ -+static ssize_t sfp_eeprom_write(struct i2c_client *client, u8 command, const char *data, -+ int data_len) -+{ -+#if USE_I2C_BLOCK_READ -+ int status, retry = I2C_RW_RETRY_COUNT; -+ -+ if (data_len > I2C_SMBUS_BLOCK_MAX) { -+ data_len = I2C_SMBUS_BLOCK_MAX; -+ } -+ -+ while (retry) { -+ status = i2c_smbus_write_i2c_block_data(client, command, data_len, data); -+ if (unlikely(status < 0)) { -+ msleep(I2C_RW_RETRY_INTERVAL); -+ retry--; -+ continue; -+ } -+ -+ break; -+ } -+ -+ if (unlikely(status < 0)) { -+ return status; -+ } -+ -+ return data_len; -+#else -+ int status, retry = I2C_RW_RETRY_COUNT; -+ -+ while (retry) { -+ status = i2c_smbus_write_byte_data(client, command, *data); -+ if (unlikely(status < 0)) { -+ msleep(I2C_RW_RETRY_INTERVAL); -+ retry--; -+ continue; -+ } -+ -+ break; -+ } -+ -+ if (unlikely(status < 0)) { -+ return status; -+ } -+ -+ return 1; -+#endif -+ -+ -+} -+ -+static ssize_t sfp_port_write(struct sfp_port_data *data, -+ const char *buf, loff_t off, size_t count) -+{ -+ ssize_t retval = 0; -+ -+ if (unlikely(!count)) { -+ return count; -+ } -+ -+ /* -+ * Write data to chip, protecting against concurrent updates -+ * from this host, but not from other I2C masters. -+ */ -+ mutex_lock(&data->update_lock); -+ -+ while (count) { -+ ssize_t status; -+ -+ status = sfp_eeprom_write(data->client, off, buf, count); -+ if (status <= 0) { -+ if (retval == 0) { -+ retval = status; -+ } -+ break; -+ } -+ buf += status; -+ off += status; -+ count -= status; -+ retval += status; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ return retval; -+} -+ -+ -+static ssize_t sfp_bin_write(struct file *filp, struct kobject *kobj, -+ struct bin_attribute *attr, -+ char *buf, loff_t off, size_t count) -+{ -+ struct sfp_port_data *data; -+ DEBUG_PRINT("%s(%d) offset = (%d), count = (%d)", off, count); -+ data = dev_get_drvdata(container_of(kobj, struct device, kobj)); -+ return sfp_port_write(data, buf, off, count); -+} -+ -+static ssize_t sfp_eeprom_read(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+#if USE_I2C_BLOCK_READ -+ int status, retry = I2C_RW_RETRY_COUNT; -+ -+ if (data_len > I2C_SMBUS_BLOCK_MAX) { -+ data_len = I2C_SMBUS_BLOCK_MAX; -+ } -+ -+ while (retry) { -+ status = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ if (unlikely(status < 0)) { -+ msleep(I2C_RW_RETRY_INTERVAL); -+ retry--; -+ continue; -+ } -+ -+ break; -+ } -+ -+ if (unlikely(status < 0)) { -+ goto abort; -+ } -+ if (unlikely(status != data_len)) { -+ status = -EIO; -+ goto abort; -+ } -+ -+ //result = data_len; -+ -+abort: -+ return status; -+#else -+ int status, retry = I2C_RW_RETRY_COUNT; -+ -+ while (retry) { -+ status = i2c_smbus_read_byte_data(client, command); -+ if (unlikely(status < 0)) { -+ msleep(I2C_RW_RETRY_INTERVAL); -+ retry--; -+ continue; -+ } -+ -+ break; -+ } -+ -+ if (unlikely(status < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u8)status; -+ status = 1; -+ -+abort: -+ return status; -+#endif -+} -+ -+static ssize_t sfp_port_read(struct sfp_port_data *data, -+ char *buf, loff_t off, size_t count) -+{ -+ ssize_t retval = 0; -+ -+ if (unlikely(!count)) { -+ DEBUG_PRINT("Count = 0, return"); -+ return count; -+ } -+ -+ /* -+ * Read 
data from chip, protecting against concurrent updates -+ * from this host, but not from other I2C masters. -+ */ -+ mutex_lock(&data->update_lock); -+ -+ while (count) { -+ ssize_t status; -+ -+ status = sfp_eeprom_read(data->client, off, buf, count); -+ if (status <= 0) { -+ if (retval == 0) { -+ retval = status; -+ } -+ break; -+ } -+ -+ buf += status; -+ off += status; -+ count -= status; -+ retval += status; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ return retval; -+ -+} -+ -+static ssize_t sfp_bin_read(struct file *filp, struct kobject *kobj, -+ struct bin_attribute *attr, -+ char *buf, loff_t off, size_t count) -+{ -+ struct sfp_port_data *data; -+ DEBUG_PRINT("offset = (%d), count = (%d)", off, count); -+ data = dev_get_drvdata(container_of(kobj, struct device, kobj)); -+ return sfp_port_read(data, buf, off, count); -+} -+ -+static int sfp_sysfs_eeprom_init(struct kobject *kobj, struct bin_attribute *eeprom) -+{ -+ int err; -+ -+ sysfs_bin_attr_init(eeprom); -+ eeprom->attr.name = EEPROM_NAME; -+ eeprom->attr.mode = S_IWUSR | S_IRUGO; -+ eeprom->read = sfp_bin_read; -+ eeprom->write = sfp_bin_write; -+ eeprom->size = EEPROM_SIZE; -+ -+ /* Create eeprom file */ -+ err = sysfs_create_bin_file(kobj, eeprom); -+ if (err) { -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int sfp_sysfs_eeprom_cleanup(struct kobject *kobj, struct bin_attribute *eeprom) -+{ -+ sysfs_remove_bin_file(kobj, eeprom); -+ return 0; -+} -+ -+static const struct attribute_group sfp_msa_group = { -+ .attrs = sfp_msa_attributes, -+}; -+ -+static int sfp_i2c_check_functionality(struct i2c_client *client) -+{ -+#if USE_I2C_BLOCK_READ -+ return i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK); -+#else -+ return i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA); -+#endif -+} -+ -+static int sfp_msa_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, -+ struct sfp_msa_data **data) -+{ -+ int status; -+ struct sfp_msa_data *msa; -+ -+ if (!sfp_i2c_check_functionality(client)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ msa = kzalloc(sizeof(struct sfp_msa_data), GFP_KERNEL); -+ if (!msa) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &sfp_msa_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ /* init eeprom */ -+ status = sfp_sysfs_eeprom_init(&client->dev.kobj, &msa->eeprom.bin); -+ if (status) { -+ goto exit_remove; -+ } -+ -+ *data = msa; -+ dev_info(&client->dev, "sfp msa '%s'\n", client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &sfp_msa_group); -+exit_free: -+ kfree(msa); -+exit: -+ -+ return status; -+} -+ -+static const struct attribute_group sfp_ddm_group = { -+ .attrs = sfp_ddm_attributes, -+}; -+ -+static int sfp_ddm_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, -+ struct sfp_ddm_data **data) -+{ -+ int status; -+ struct sfp_ddm_data *ddm; -+ -+ if (!sfp_i2c_check_functionality(client)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ ddm = kzalloc(sizeof(struct sfp_ddm_data), GFP_KERNEL); -+ if (!ddm) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &sfp_ddm_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ /* init eeprom */ -+ status = sfp_sysfs_eeprom_init(&client->dev.kobj, &ddm->eeprom.bin); -+ if (status) { -+ goto exit_remove; -+ } -+ -+ *data = ddm; -+ dev_info(&client->dev, "sfp ddm '%s'\n", client->name); -+ -+ return 0; -+ 
-+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &sfp_ddm_group); -+exit_free: -+ kfree(ddm); -+exit: -+ -+ return status; -+} -+ -+static const struct attribute_group qsfp_group = { -+ .attrs = qsfp_attributes, -+}; -+ -+static int qsfp_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, -+ struct qsfp_data **data) -+{ -+ int status; -+ struct qsfp_data *qsfp; -+ -+ if (!sfp_i2c_check_functionality(client)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ qsfp = kzalloc(sizeof(struct qsfp_data), GFP_KERNEL); -+ if (!qsfp) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &qsfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ /* init eeprom */ -+ status = sfp_sysfs_eeprom_init(&client->dev.kobj, &qsfp->eeprom.bin); -+ if (status) { -+ goto exit_remove; -+ } -+ -+ /* Bring QSFPs out of reset */ -+ accton_i2c_cpld_write(0x62, 0x15, 0x3F); -+ -+ *data = qsfp; -+ dev_info(&client->dev, "qsfp '%s'\n", client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &qsfp_group); -+exit_free: -+ kfree(qsfp); -+exit: -+ -+ return status; -+} -+ -+static int sfp_device_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct sfp_port_data *data = NULL; -+ -+ data = kzalloc(sizeof(struct sfp_port_data), GFP_KERNEL); -+ if (!data) { -+ return -ENOMEM; -+ } -+ -+ i2c_set_clientdata(client, data); -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ data->client = client; -+ -+ if (dev_id->driver_data >= sfp1 && dev_id->driver_data <= sfp48) { -+ if (client->addr == SFP_EEPROM_A0_I2C_ADDR) { -+ data->driver_type = DRIVER_TYPE_SFP_MSA; -+ return sfp_msa_probe(client, dev_id, &data->msa); -+ } -+ else if (client->addr == SFP_EEPROM_A2_I2C_ADDR) { -+ data->driver_type = DRIVER_TYPE_SFP_DDM; -+ return sfp_ddm_probe(client, dev_id, &data->ddm); -+ } -+ } -+ else { /* sfp49 ~ sfp54 */ -+ if (client->addr == SFP_EEPROM_A0_I2C_ADDR) { -+ data->driver_type = DRIVER_TYPE_QSFP; -+ return qsfp_probe(client, dev_id, &data->qsfp); -+ } -+ } -+ -+ return -ENODEV; -+} -+ -+static int sfp_msa_remove(struct i2c_client *client, struct sfp_msa_data *data) -+{ -+ sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); -+ sysfs_remove_group(&client->dev.kobj, &sfp_msa_group); -+ kfree(data); -+ return 0; -+} -+ -+static int sfp_ddm_remove(struct i2c_client *client, struct sfp_ddm_data *data) -+{ -+ sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); -+ sysfs_remove_group(&client->dev.kobj, &sfp_ddm_group); -+ kfree(data); -+ return 0; -+} -+ -+static int qfp_remove(struct i2c_client *client, struct qsfp_data *data) -+{ -+ sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); -+ sysfs_remove_group(&client->dev.kobj, &qsfp_group); -+ kfree(data); -+ return 0; -+} -+ -+static int sfp_device_remove(struct i2c_client *client) -+{ -+ struct sfp_port_data *data = i2c_get_clientdata(client); -+ -+ switch (data->driver_type) { -+ case DRIVER_TYPE_SFP_MSA: -+ return sfp_msa_remove(client, data->msa); -+ case DRIVER_TYPE_SFP_DDM: -+ return sfp_ddm_remove(client, data->ddm); -+ case DRIVER_TYPE_QSFP: -+ return qfp_remove(client, data->qsfp); -+ } -+ -+ return 0; -+} -+ -+static struct i2c_driver sfp_driver = { -+ .driver = { -+ .name = DRIVER_NAME, -+ }, -+ .probe = sfp_device_probe, -+ .remove = sfp_device_remove, -+ .id_table = sfp_device_id, -+ .address_list = normal_i2c, -+}; -+ -+static int __init sfp_init(void) -+{ -+ extern 
int platform_accton_as5512_54x(void); -+ if(!platform_accton_as5512_54x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&sfp_driver); -+} -+ -+static void __exit sfp_exit(void) -+{ -+ i2c_del_driver(&sfp_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as5512_54x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(sfp_init); -+module_exit(sfp_exit); -+ -+ diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch deleted file mode 100644 index f7a74202..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch +++ /dev/null @@ -1,2639 +0,0 @@ -Device driver patches for accton as5712 (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index b3abc7b..52a68eb 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1423,6 +1423,25 @@ config SENSORS_CPR_4011_4MXX - be called cpr_4011_4mxx. - - -+config SENSORS_ACCTON_AS5712_54x_FAN -+ tristate "Accton as5712 54x fan" -+ depends on I2C && I2C_MUX_ACCTON_AS5712_54x_CPLD -+ help -+ If you say yes here you get support for Accton as5712 54x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5712_54x_fan. -+ -+config SENSORS_ACCTON_AS5712_54x_PSU -+ tristate "Accton as5712 54x psu" -+ depends on I2C && I2C_MUX_ACCTON_AS5712_54x_CPLD -+ help -+ If you say yes here you get support for Accton as5712 54x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5712_54x_psu. -+ -+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index 1e90738..e2f3bce 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -21,6 +21,8 @@ obj-$(CONFIG_SENSORS_W83791D) += w83791d.o - - obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o - obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5712_54x_FAN) += accton_as5712_54x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5712_54x_PSU) += accton_as5712_54x_psu.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as5712_54x_fan.c b/drivers/hwmon/accton_as5712_54x_fan.c -new file mode 100644 -index 0000000..df6c222 ---- /dev/null -+++ b/drivers/hwmon/accton_as5712_54x_fan.c -@@ -0,0 +1,442 @@ -+/* -+ * A hwmon driver for the Accton as5710 54x fan contrl -+ * -+ * Copyright (C) 2013 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FAN_MAX_NUMBER 5 -+#define FAN_SPEED_CPLD_TO_RPM_STEP 150 -+#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 -+#define FAN_DUTY_CYCLE_MIN 0 /* 10% ??*/ -+#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ -+ -+#define CPLD_REG_FAN_STATUS_OFFSET 0xC -+#define CPLD_REG_FANR_STATUS_OFFSET 0x1F -+#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E -+ -+#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 -+#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 -+#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 -+#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 -+#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 -+ -+#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 -+#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 -+#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A -+#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B -+#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C -+ -+#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD -+ -+#define CPLD_FAN1_INFO_BIT_MASK 0x1 -+#define CPLD_FAN2_INFO_BIT_MASK 0x2 -+#define CPLD_FAN3_INFO_BIT_MASK 0x4 -+#define CPLD_FAN4_INFO_BIT_MASK 0x8 -+#define CPLD_FAN5_INFO_BIT_MASK 0x10 -+ -+#define PROJECT_NAME -+ -+#define LOCAL_DEBUG 0 -+ -+static struct accton_as5712_54x_fan *fan_data = NULL; -+ -+struct accton_as5712_54x_fan { -+ struct platform_device *pdev; -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ -+ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ -+ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ -+ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ -+ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ -+ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ -+}; -+ -+/*******************/ -+#define MAKE_FAN_MASK_OR_REG(name,type) \ -+ CPLD_FAN##type##1_##name, \ -+ CPLD_FAN##type##2_##name, \ -+ CPLD_FAN##type##3_##name, \ -+ CPLD_FAN##type##4_##name, \ -+ CPLD_FAN##type##5_##name, -+ -+/* fan related data -+ */ -+static const u8 fan_info_mask[] = { -+ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) -+}; -+ -+static const u8 fan_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) -+}; -+ -+static const u8 fanr_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) -+}; -+ -+/*******************/ -+#define DEF_FAN_SET(id) \ -+ FAN##id##_FAULT, \ -+ FAN##id##_SPEED, \ -+ FAN##id##_DUTY_CYCLE, \ -+ FAN##id##_DIRECTION, \ -+ FANR##id##_FAULT, \ -+ FANR##id##_SPEED, -+ -+enum sysfs_fan_attributes { -+ DEF_FAN_SET(1) -+ DEF_FAN_SET(2) -+ DEF_FAN_SET(3) -+ DEF_FAN_SET(4) -+ DEF_FAN_SET(5) -+}; -+/*******************/ -+static void accton_as5712_54x_fan_update_device(struct device *dev); -+static int accton_as5712_54x_fan_read_value(u8 reg); -+static int accton_as5712_54x_fan_write_value(u8 reg, u8 value); -+ -+static ssize_t fan_set_duty_cycle(struct device *dev, -+ struct device_attribute *da,const char *buf, size_t count); -+static ssize_t fan_show_value(struct device *dev, -+ struct device_attribute *da, char *buf); -+ -+extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+ -+/*******************/ -+#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ -+ static 
SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ -+ fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); -+ -+#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) -+ -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) -+/*******************/ -+ -+#define _MAKE_FAN_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ -+ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, -+ -+#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) -+ -+static struct attribute *accton_as5712_54x_fan_attributes[] = { -+ /* fan related attributes */ -+ MAKE_FAN_ATTR(PROJECT_NAME,1) -+ MAKE_FAN_ATTR(PROJECT_NAME,2) -+ MAKE_FAN_ATTR(PROJECT_NAME,3) -+ MAKE_FAN_ATTR(PROJECT_NAME,4) -+ MAKE_FAN_ATTR(PROJECT_NAME,5) -+ NULL -+}; -+/*******************/ -+ -+/* fan related functions -+ */ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ ssize_t ret = 0; -+ int data_index, type_index; -+ -+ accton_as5712_54x_fan_update_device(dev); -+ -+ if (fan_data->valid == 0) { -+ return ret; -+ } -+ -+ type_index = attr->index%FAN2_FAULT; -+ data_index = attr->index/FAN2_FAULT; -+ -+ switch (type_index) { -+ case FAN1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DUTY_CYCLE: -+ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DIRECTION: -+ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; 
-+ default: -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); -+ break; -+ } -+ -+ return ret; -+} -+/*******************/ -+static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) { -+ -+ int error, value; -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) -+ return -EINVAL; -+ -+ accton_as5712_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); -+ -+ fan_data->valid = 0; -+ -+ return count; -+} -+ -+static const struct attribute_group accton_as5712_54x_fan_group = { -+ .attrs = accton_as5712_54x_fan_attributes, -+}; -+ -+static int accton_as5712_54x_fan_read_value(u8 reg) -+{ -+ return as5712_54x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5712_54x_fan_write_value(u8 reg, u8 value) -+{ -+ return as5712_54x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5712_54x_fan_update_device(struct device *dev) -+{ -+ int speed, r_speed, fault, r_fault, ctrl_speed, direction; -+ int i; -+ -+ mutex_lock(&fan_data->update_lock); -+ -+ if (LOCAL_DEBUG) -+ printk ("Starting accton_as5712_54x_fan update \n"); -+ -+ if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { -+ /* do nothing */ -+ goto _exit; -+ } -+ -+ fan_data->valid = 0; -+ -+ if (LOCAL_DEBUG) -+ printk ("Starting accton_as5712_54x_fan update 2 \n"); -+ -+ fault = accton_as5712_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); -+ r_fault = accton_as5712_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); -+ direction = accton_as5712_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); -+ ctrl_speed = accton_as5712_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); -+ -+ if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) -+ { -+ if (LOCAL_DEBUG) -+ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ if (LOCAL_DEBUG) -+ printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); -+ -+ for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; -+ if (LOCAL_DEBUG) -+ printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); -+ -+ fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; -+ fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; -+ fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; -+ -+ /* fan speed -+ */ -+ speed = accton_as5712_54x_fan_read_value(fan_speed_reg[i]); -+ r_speed = accton_as5712_54x_fan_read_value(fanr_speed_reg[i]); -+ if ( (speed < 0) || (r_speed < 0) ) -+ { -+ if (LOCAL_DEBUG) -+ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ if (LOCAL_DEBUG) -+ printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); -+ -+ fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ } -+ -+ /* finish to update */ -+ fan_data->last_updated = jiffies; -+ fan_data->valid = 1; -+ -+_exit: -+ mutex_unlock(&fan_data->update_lock); -+} -+ -+static int accton_as5712_54x_fan_probe(struct platform_device *pdev) -+{ -+ int status = -1; -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&pdev->dev.kobj, &accton_as5712_54x_fan_group); -+ if (status) { -+ goto exit; -+ -+ } -+ -+ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); -+ if (IS_ERR(fan_data->hwmon_dev)) { -+ status = 
PTR_ERR(fan_data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&pdev->dev, "accton_as5712_54x_fan\n"); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&pdev->dev.kobj, &accton_as5712_54x_fan_group); -+exit: -+ return status; -+} -+ -+static int accton_as5712_54x_fan_remove(struct platform_device *pdev) -+{ -+ hwmon_device_unregister(fan_data->hwmon_dev); -+ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5712_54x_fan_group); -+ -+ return 0; -+} -+ -+#define DRVNAME "as5712_54x_fan" -+ -+static struct platform_driver accton_as5712_54x_fan_driver = { -+ .probe = accton_as5712_54x_fan_probe, -+ .remove = accton_as5712_54x_fan_remove, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5712_54x_fan_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5712_54x(void); -+ if(!platform_accton_as5712_54x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as5712_54x_fan_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ fan_data = kzalloc(sizeof(struct accton_as5712_54x_fan), GFP_KERNEL); -+ if (!fan_data) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5712_54x_fan_driver); -+ goto exit; -+ } -+ -+ mutex_init(&fan_data->update_lock); -+ fan_data->valid = 0; -+ -+ fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(fan_data->pdev)) { -+ ret = PTR_ERR(fan_data->pdev); -+ platform_driver_unregister(&accton_as5712_54x_fan_driver); -+ kfree(fan_data); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5712_54x_fan_exit(void) -+{ -+ platform_device_unregister(fan_data->pdev); -+ platform_driver_unregister(&accton_as5712_54x_fan_driver); -+ kfree(fan_data); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5712_54x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_as5712_54x_fan_init); -+module_exit(accton_as5712_54x_fan_exit); -+ -diff --git a/drivers/hwmon/accton_as5712_54x_psu.c b/drivers/hwmon/accton_as5712_54x_psu.c -new file mode 100644 -index 0000000..0890e92 ---- /dev/null -+++ b/drivers/hwmon/accton_as5712_54x_psu.c -@@ -0,0 +1,293 @@ -+/* -+ * An hwmon driver for accton as5712_54x Power Module -+ * -+ * Copyright (C) Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
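A minimal sketch of how the PSU driver below decodes the single CPLD status byte it reads from register 0x2: each PSU occupies one nibble, with an active-low presence bit and an active-high power-good bit (layout inferred from show_status() in the driver itself).

    /* Sketch only: PSU1 uses the low nibble, PSU2 the high nibble of the CPLD
     * status byte; bit 0 of the nibble is presence (active low), bit 1 is
     * power good, matching show_status() below. */
    static inline int psu_is_present(unsigned char status, int index /* 1 or 2 */)
    {
        return !((status >> ((index - 1) * 4)) & 0x1);
    }

    static inline int psu_power_good(unsigned char status, int index)
    {
        return (status >> ((index - 1) * 4 + 1)) & 0x1;
    }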
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as5712_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5712_54x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[14]; /* Model name, read from eeprom */ -+}; -+ -+static struct as5712_54x_psu_data *as5712_54x_psu_update_device(struct device *dev); -+ -+enum as5712_54x_psu_sysfs_attributes { -+ PSU_INDEX, -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as5712_54x_psu_attributes[] = { -+ &sensor_dev_attr_psu_index.dev_attr.attr, -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5712_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->index); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5712_54x_psu_data *data = as5712_54x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((data->index - 1) * 4) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5712_54x_psu_data *data = as5712_54x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s", data->model_name); -+} -+ -+static const struct attribute_group as5712_54x_psu_group = { -+ .attrs = as5712_54x_psu_attributes, -+}; -+ -+static int as5712_54x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5712_54x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5712_54x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ 
-+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5712_54x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x38 || client->addr == 0x50) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x3b || client->addr == 0x53) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5712_54x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5712_54x_psu_remove(struct i2c_client *client) -+{ -+ struct as5712_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5712_54x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as5712_54x_psu_id[] = { -+ { "as5712_54x_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as5712_54x_psu_id); -+ -+static struct i2c_driver as5712_54x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5712_54x_psu", -+ }, -+ .probe = as5712_54x_psu_probe, -+ .remove = as5712_54x_psu_remove, -+ .id_table = as5712_54x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as5712_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as5712_54x_psu_data *as5712_54x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5712_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ -+ dev_dbg(&client->dev, "Starting as5712_54x update\n"); -+ -+ /* Read model name */ -+ if (client->addr == 0x38 || client->addr == 0x3b) { -+ /* AC power */ -+ status = as5712_54x_psu_read_block(client, 0x26, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ else { -+ /* DC power */ -+ status = as5712_54x_psu_read_block(client, 0x50, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; -+ } -+ -+ /* Read psu status */ -+ status = as5712_54x_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as5712_54x_psu_init(void) -+{ -+ extern int platform_accton_as5712_54x(void); -+ if(!platform_accton_as5712_54x()) { -+ return -ENODEV; -+ } -+ return i2c_add_driver(&as5712_54x_psu_driver); -+} -+ -+static void __exit as5712_54x_psu_exit(void) -+{ -+ i2c_del_driver(&as5712_54x_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); 
-+MODULE_DESCRIPTION("accton as5712_54x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as5712_54x_psu_init); -+module_exit(as5712_54x_psu_exit); -+ -diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig -index 73ed997..339e2b2 100644 ---- a/drivers/i2c/muxes/Kconfig -+++ b/drivers/i2c/muxes/Kconfig -@@ -5,6 +5,16 @@ - menu "Multiplexer I2C Chip support" - depends on I2C_MUX - -+ -+config I2C_MUX_ACCTON_AS5712_54x_CPLD -+ tristate "Accton as5712_54x CPLD I2C multiplexer" -+ help -+ If you say yes here you get support for the Accton CPLD -+ I2C mux devices. -+ -+ This driver can also be built as a module. If so, the module -+ will be called i2c-mux-accton_as5712_54x_cpld. -+ - config I2C_MUX_GPIO - tristate "GPIO-based I2C multiplexer" - depends on GENERIC_GPIO -diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile -index 7fbdd2f..997522c 100644 ---- a/drivers/i2c/muxes/Makefile -+++ b/drivers/i2c/muxes/Makefile -@@ -6,6 +6,7 @@ obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o - obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o - obj-$(CONFIG_I2C_MUX_DNI_6448) += dni_6448_i2c_mux.o - obj-$(CONFIG_I2C_MUX_QUANTA) += quanta-i2cmux.o - obj-$(CONFIG_I2C_MUX_QUANTA_LY2) += quanta-ly2-i2c-mux.o -+obj-$(CONFIG_I2C_MUX_ACCTON_AS5712_54x_CPLD) += i2c-mux-accton_as5712_54x_cpld.o - - ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG -diff --git a/drivers/i2c/muxes/i2c-mux-accton_as5712_54x_cpld.c b/drivers/i2c/muxes/i2c-mux-accton_as5712_54x_cpld.c -new file mode 100644 -index 0000000..6381db5 ---- /dev/null -+++ b/drivers/i2c/muxes/i2c-mux-accton_as5712_54x_cpld.c -@@ -0,0 +1,466 @@ -+/* -+ * I2C multiplexer -+ * -+ * Copyright (C) Brandon Chuang -+ * -+ * This module supports the accton cpld that hold the channel select -+ * mechanism for other i2c slave devices, such as SFP. -+ * This includes the: -+ * Accton as5712_54x CPLD1/CPLD2/CPLD3 -+ * -+ * Based on: -+ * pca954x.c from Kumar Gala -+ * Copyright (C) 2006 -+ * -+ * Based on: -+ * pca954x.c from Ken Harrenstien -+ * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) -+ * -+ * Based on: -+ * i2c-virtual_cb.c from Brian Kuschak -+ * and -+ * pca9540.c from Jean Delvare . -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static struct dmi_system_id as5712_dmi_table[] = { -+ { -+ .ident = "Accton AS5712", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), -+ }, -+ }, -+ { -+ .ident = "Accton AS5712", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), -+ }, -+ }, -+}; -+ -+int platform_accton_as5712_54x(void) -+{ -+ return dmi_check_system(as5712_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as5712_54x); -+ -+#define NUM_OF_CPLD1_CHANS 0x0 -+#define NUM_OF_CPLD2_CHANS 0x18 -+#define NUM_OF_CPLD3_CHANS 0x1E -+#define CPLD_CHANNEL_SELECT_REG 0x2 -+#define CPLD_DESELECT_CHANNEL 0xFF -+ -+#if 0 -+#define NUM_OF_ALL_CPLD_CHANS (NUM_OF_CPLD2_CHANS + NUM_OF_CPLD3_CHANS) -+#endif -+ -+#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS -+ -+static LIST_HEAD(cpld_client_list); -+static struct mutex list_lock; -+ -+struct cpld_client_node { -+ struct i2c_client *client; -+ struct list_head list; -+}; -+ -+enum cpld_mux_type { -+ as5712_54x_cpld2, -+ as5712_54x_cpld3, -+ as5712_54x_cpld1 -+}; -+ -+struct accton_i2c_cpld_mux { -+ enum cpld_mux_type type; -+ struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; -+ u8 last_chan; /* last register value */ -+}; -+ -+#if 0 -+/* The mapping table between mux index and adapter index -+ array index : the mux index -+ the content : adapter index -+ */ -+static int mux_adap_map[NUM_OF_ALL_CPLD_CHANS]; -+#endif -+ -+struct chip_desc { -+ u8 nchans; -+ u8 deselectChan; -+}; -+ -+/* Provide specs for the PCA954x types we know about */ -+static const struct chip_desc chips[] = { -+ [as5712_54x_cpld1] = { -+ .nchans = NUM_OF_CPLD1_CHANS, -+ .deselectChan = CPLD_DESELECT_CHANNEL, -+ }, -+ [as5712_54x_cpld2] = { -+ .nchans = NUM_OF_CPLD2_CHANS, -+ .deselectChan = CPLD_DESELECT_CHANNEL, -+ }, -+ [as5712_54x_cpld3] = { -+ .nchans = NUM_OF_CPLD3_CHANS, -+ .deselectChan = CPLD_DESELECT_CHANNEL, -+ } -+}; -+ -+static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { -+ { "as5712_54x_cpld1", as5712_54x_cpld1 }, -+ { "as5712_54x_cpld2", as5712_54x_cpld2 }, -+ { "as5712_54x_cpld3", as5712_54x_cpld3 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); -+ -+/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() -+ for this as they will try to lock adapter a second time */ -+static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, -+ struct i2c_client *client, u8 val) -+{ -+#if 0 -+ int ret = -ENODEV; -+ -+ //if (adap->algo->master_xfer) { -+ if (0) -+ struct i2c_msg msg; -+ char buf[2]; -+ -+ msg.addr = client->addr; -+ msg.flags = 0; -+ msg.len = 2; -+ buf[0] = 0x2; -+ buf[1] = val; -+ msg.buf = buf; -+ ret = adap->algo->master_xfer(adap, &msg, 1); -+ } -+ else { -+ union i2c_smbus_data data; -+ ret = adap->algo->smbus_xfer(adap, client->addr, -+ client->flags, -+ I2C_SMBUS_WRITE, -+ 0x2, I2C_SMBUS_BYTE, &data); -+ } -+ -+ return ret; -+#else -+ unsigned long orig_jiffies; -+ unsigned short flags; -+ union i2c_smbus_data data; -+ int try; -+ s32 res = -EIO; -+ -+ data.byte = val; -+ flags = client->flags; -+ flags &= I2C_M_TEN | I2C_CLIENT_PEC; -+ -+ if (adap->algo->smbus_xfer) { -+ /* Retry automatically on arbitration loss */ -+ orig_jiffies = jiffies; -+ for (res = 0, try = 0; try <= adap->retries; try++) { -+ res = adap->algo->smbus_xfer(adap, client->addr, flags, -+ I2C_SMBUS_WRITE, CPLD_CHANNEL_SELECT_REG, -+ I2C_SMBUS_BYTE_DATA, &data); -+ if (res != -EAGAIN) -+ break; -+ if (time_after(jiffies, -+ orig_jiffies + adap->timeout)) -+ break; -+ } -+ } -+ -+ return res; -+#endif -+} -+ -+static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ u8 regval; -+ int ret = 0; -+ regval = chan; -+ -+ /* Only select the channel if its different from the last channel */ -+ if (data->last_chan != regval) { -+ ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); -+ data->last_chan = regval; -+ } -+ -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ -+ /* Deselect active channel */ -+ data->last_chan = chips[data->type].deselectChan; -+ -+ return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); -+} -+ -+static void accton_i2c_cpld_add_client(struct i2c_client *client) -+{ -+ struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); -+ -+ if (!node) { -+ dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); -+ return; -+ } -+ -+ node->client = client; -+ -+ mutex_lock(&list_lock); -+ list_add(&node->list, &cpld_client_list); -+ mutex_unlock(&list_lock); -+} -+ -+static void accton_i2c_cpld_remove_client(struct i2c_client *client) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int found = 0; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client == client) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) { -+ list_del(list_node); -+ kfree(cpld_node); -+ } -+ -+ mutex_unlock(&list_lock); -+} -+ -+static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ u8 reg = 0x1; -+ struct i2c_client *client; -+ int len; -+ -+ client = to_i2c_client(dev); -+ len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); -+ -+ return len; -+} -+ -+static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); -+ -+/* -+ * I2C init/probing/exit functions -+ */ -+static int accton_i2c_cpld_mux_probe(struct 
i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); -+ int chan=0; -+ struct accton_i2c_cpld_mux *data; -+ int ret = -ENODEV; -+ -+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) -+ goto err; -+ -+ data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); -+ if (!data) { -+ ret = -ENOMEM; -+ goto err; -+ } -+ -+ i2c_set_clientdata(client, data); -+ -+#if 0 -+ /* Write the mux register at addr to verify -+ * that the mux is in fact present. -+ */ -+ if (i2c_smbus_write_byte(client, 0) < 0) { -+ dev_warn(&client->dev, "probe failed\n"); -+ goto exit_free; -+ } -+#endif -+ -+ data->type = id->driver_data; -+ -+ if (data->type == as5712_54x_cpld2 || data->type == as5712_54x_cpld3) { -+ data->last_chan = chips[data->type].deselectChan; /* force the first selection */ -+ -+ /* Now create an adapter for each channel */ -+ for (chan = 0; chan < chips[data->type].nchans; chan++) { -+#if 0 -+ int idx; -+#endif -+ data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, -+ accton_i2c_cpld_mux_select_chan, -+ accton_i2c_cpld_mux_deselect_mux); -+ -+ if (data->virt_adaps[chan] == NULL) { -+ ret = -ENODEV; -+ dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); -+ goto virt_reg_failed; -+ } -+ -+#if 0 -+ idx = (data->type - as5712_54x_cpld2) * NUM_OF_CPLD2_CHANS + chan; -+ mux_adap_map[idx] = data->virt_adaps[chan]->nr; -+#endif -+ } -+ -+ dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", -+ chan, client->name); -+ } -+ -+ accton_i2c_cpld_add_client(client); -+ -+ ret = sysfs_create_file(&client->dev.kobj, &ver.attr); -+ if (ret) -+ goto virt_reg_failed; -+ -+ return 0; -+ -+virt_reg_failed: -+ for (chan--; chan >= 0; chan--) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ } -+ -+ kfree(data); -+err: -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_remove(struct i2c_client *client) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ const struct chip_desc *chip = &chips[data->type]; -+ int chan; -+ -+ sysfs_remove_file(&client->dev.kobj, &ver.attr); -+ -+ for (chan = 0; chan < chip->nchans; ++chan) { -+ if (data->virt_adaps[chan]) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ data->virt_adaps[chan] = NULL; -+ } -+ } -+ -+ kfree(data); -+ accton_i2c_cpld_remove_client(client); -+ -+ return 0; -+} -+ -+int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EPERM; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_read_byte_data(cpld_node->client, reg); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as5712_54x_i2c_cpld_read); -+ -+int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EIO; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} 
-+EXPORT_SYMBOL(as5712_54x_i2c_cpld_write); -+ -+#if 0 -+int accton_i2c_cpld_mux_get_index(int adap_index) -+{ -+ int i; -+ -+ for (i = 0; i < NUM_OF_ALL_CPLD_CHANS; i++) { -+ if (mux_adap_map[i] == adap_index) { -+ return i; -+ } -+ } -+ -+ return -EINVAL; -+} -+EXPORT_SYMBOL(accton_i2c_cpld_mux_get_index); -+#endif -+ -+static struct i2c_driver accton_i2c_cpld_mux_driver = { -+ .driver = { -+ .name = "as5712_54x_cpld", -+ .owner = THIS_MODULE, -+ }, -+ .probe = accton_i2c_cpld_mux_probe, -+ .remove = accton_i2c_cpld_mux_remove, -+ .id_table = accton_i2c_cpld_mux_id, -+}; -+ -+static int __init accton_i2c_cpld_mux_init(void) -+{ -+ mutex_init(&list_lock); -+ return i2c_add_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+static void __exit accton_i2c_cpld_mux_exit(void) -+{ -+ i2c_del_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_i2c_cpld_mux_init); -+module_exit(accton_i2c_cpld_mux_exit); -+ -+ -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index ff203a4..361ef45 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -40,6 +40,13 @@ config LEDS_ATMEL_PWM - This option enables support for LEDs driven using outputs - of the dedicated PWM controller found on newer Atmel SOCs. - -+config LEDS_ACCTON_AS5712_54x -+ tristate "LED support for the Accton as5712 54x" -+ depends on LEDS_CLASS && I2C_MUX_ACCTON_AS5712_54x_CPLD -+ help -+ This option enables support for the LEDs on the Accton as5712 54x. -+ Say Y to enable LEDs on the Accton as5712 54x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index e4f6bf5..db2d096 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -43,6 +43,7 @@ obj-$(CONFIG_LEDS_NS2) += leds-ns2.o - obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o - obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o - obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o -+obj-$(CONFIG_LEDS_ACCTON_AS5712_54x) += leds-accton_as5712_54x.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as5712_54x.c b/drivers/leds/leds-accton_as5712_54x.c -new file mode 100644 -index 0000000..5e346b2 ---- /dev/null -+++ b/drivers/leds/leds-accton_as5712_54x.c -@@ -0,0 +1,597 @@ -+/* -+ * A LED driver for the accton_as5712_54x_led -+ * -+ * Copyright (C) 2013 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
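A minimal sketch of the read-modify-write scheme the LED driver below relies on: each LED type owns a small bit-field in a CPLD register (its type mask), and a mode is programmed by clearing that field and OR-ing in the mode's mask, leaving the other LEDs sharing the register untouched.

    /* Sketch only: update one LED's bit-field without disturbing its
     * neighbours, mirroring led_light_mode_to_reg_val() below. */
    static inline unsigned char led_apply_mode(unsigned char reg_val,
                                               unsigned char type_mask,
                                               unsigned char mode_mask)
    {
        return (reg_val & ~type_mask) | mode_mask;
    }

    /* Example: setting PSU1 (type mask 0x03) to green (mode mask 0x02) in a
     * register that currently reads 0x0C leaves the PSU2 field alone and
     * yields 0x0E. */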
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int as5712_54x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "as5712_54x_led" -+ -+struct accton_as5712_54x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED -+ 1 = PSU1/PSU2 LED -+ 2 = FAN1-4 LED -+ 3 = FAN5-6 LED */ -+}; -+ -+static struct accton_as5712_54x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+#define LED_TYPE_PSU1_REG_MASK 0x03 -+#define LED_MODE_PSU1_GREEN_MASK 0x02 -+#define LED_MODE_PSU1_AMBER_MASK 0x01 -+#define LED_MODE_PSU1_OFF_MASK 0x03 -+#define LED_MODE_PSU1_AUTO_MASK 0x00 -+ -+#define LED_TYPE_PSU2_REG_MASK 0x0C -+#define LED_MODE_PSU2_GREEN_MASK 0x08 -+#define LED_MODE_PSU2_AMBER_MASK 0x04 -+#define LED_MODE_PSU2_OFF_MASK 0x0C -+#define LED_MODE_PSU2_AUTO_MASK 0x00 -+ -+#define LED_TYPE_DIAG_REG_MASK 0x0C -+#define LED_MODE_DIAG_GREEN_MASK 0x08 -+#define LED_MODE_DIAG_AMBER_MASK 0x04 -+#define LED_MODE_DIAG_OFF_MASK 0x0C -+ -+#define LED_TYPE_FAN_REG_MASK 0x03 -+#define LED_MODE_FAN_GREEN_MASK 0x02 -+#define LED_MODE_FAN_AMBER_MASK 0x01 -+#define LED_MODE_FAN_OFF_MASK 0x03 -+#define LED_MODE_FAN_AUTO_MASK 0x00 -+ -+#define LED_TYPE_FAN1_REG_MASK 0x03 -+#define LED_TYPE_FAN2_REG_MASK 0x0C -+#define LED_TYPE_FAN3_REG_MASK 0x30 -+#define LED_TYPE_FAN4_REG_MASK 0xC0 -+#define LED_TYPE_FAN5_REG_MASK 0x03 -+#define LED_TYPE_FAN6_REG_MASK 0x0C -+ -+#define LED_MODE_FANX_GREEN_MASK 0x01 -+#define LED_MODE_FANX_RED_MASK 0x02 -+#define LED_MODE_FANX_OFF_MASK 0x00 -+ -+#define LED_TYPE_LOC_REG_MASK 0x30 -+#define LED_MODE_LOC_ON_MASK 0x00 -+#define LED_MODE_LOC_OFF_MASK 0x10 -+#define LED_MODE_LOC_BLINK_MASK 0x20 -+ -+static const u8 led_reg[] = { -+ 0xA, /* LOC/DIAG/FAN LED*/ -+ 0xB, /* PSU1/PSU2 LED */ -+ 0x16, /* FAN1-4 LED */ -+ 0x17, /* FAN4-6 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2, -+ LED_TYPE_DIAG, -+ LED_TYPE_FAN, -+ LED_TYPE_FAN1, -+ LED_TYPE_FAN2, -+ LED_TYPE_FAN3, -+ LED_TYPE_FAN4, -+ LED_TYPE_FAN5, -+ LED_TYPE_LOC -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_AMBER, -+ LED_MODE_RED, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED_BLINK, -+ LED_MODE_AUTO, -+}; -+ -+struct led_type_mode { -+ enum led_type type; -+ int type_mask; -+ enum led_light_mode mode; -+ int mode_mask; -+}; -+ -+static struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, 
LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} -+}; -+ -+ -+struct fanx_info_s { -+ u8 cname; /* device name */ -+ enum led_type type; -+ u8 reg_id; /* map to led_reg & reg_val */ -+}; -+ -+static struct fanx_info_s fanx_info[] = { -+ {'1', LED_TYPE_FAN1, 2}, -+ {'2', LED_TYPE_FAN2, 2}, -+ {'3', LED_TYPE_FAN3, 2}, -+ {'4', LED_TYPE_FAN4, 2}, -+ {'5', LED_TYPE_FAN5, 3} -+}; -+ -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if ((led_type_mode_data[i].type_mask & reg_val) == -+ led_type_mode_data[i].mode_mask) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return 0; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ reg_val = led_type_mode_data[i].mode_mask | -+ (reg_val & (~led_type_mode_data[i].type_mask)); -+ } -+ -+ return reg_val; -+} -+ -+static int accton_as5712_54x_led_read_value(u8 reg) -+{ -+ return as5712_54x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5712_54x_led_write_value(u8 reg, u8 value) 
-+{ -+ return as5712_54x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5712_54x_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as5712_54x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as5712_54x_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5712_54x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as5712_54x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as5712_54x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5712_54x_led_psu_1_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); -+} -+ -+static enum led_brightness accton_as5712_54x_led_psu_1_get(struct led_classdev *cdev) -+{ -+ accton_as5712_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); -+} -+ -+static void accton_as5712_54x_led_psu_2_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); -+} -+ -+static enum led_brightness accton_as5712_54x_led_psu_2_get(struct led_classdev *cdev) -+{ -+ accton_as5712_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); -+} -+ -+static void accton_as5712_54x_led_fan_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); -+} -+ -+static enum led_brightness accton_as5712_54x_led_fan_get(struct led_classdev *cdev) -+{ -+ accton_as5712_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); -+} -+ -+ -+static void accton_as5712_54x_led_fanx_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;iname); -+ -+ if (led_cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = fanx_info[i].reg_id; -+ accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); -+ return; -+ } -+ } -+} -+ -+ -+static enum led_brightness accton_as5712_54x_led_fanx_get(struct led_classdev *cdev) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;iname); -+ -+ if (cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = fanx_info[i].reg_id; -+ 
accton_as5712_54x_led_update(); -+ return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); -+ } -+ } -+ -+ -+ return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); -+} -+ -+ -+static void accton_as5712_54x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as5712_54x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as5712_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as5712_54x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5712_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as5712_54x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as5712_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static struct led_classdev accton_as5712_54x_leds[] = { -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as5712_54x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_psu_1_set, -+ .brightness_get = accton_as5712_54x_led_psu_1_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as5712_54x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_psu_2_set, -+ .brightness_get = accton_as5712_54x_led_psu_2_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as5712_54x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_fan_set, -+ .brightness_get = accton_as5712_54x_led_fan_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN1] = { -+ .name = "accton_as5712_54x_led::fan1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_fanx_set, -+ .brightness_get = accton_as5712_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN2] = { -+ .name = "accton_as5712_54x_led::fan2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_fanx_set, -+ .brightness_get = accton_as5712_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN3] = { -+ .name = "accton_as5712_54x_led::fan3", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_fanx_set, -+ .brightness_get = accton_as5712_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN4] = { -+ .name = "accton_as5712_54x_led::fan4", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_fanx_set, -+ .brightness_get = accton_as5712_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN5] = { -+ .name = "accton_as5712_54x_led::fan5", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_fanx_set, -+ .brightness_get = accton_as5712_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as5712_54x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_diag_set, -+ .brightness_get = accton_as5712_54x_led_diag_get, -+ .flags = 
LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as5712_54x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5712_54x_led_loc_set, -+ .brightness_get = accton_as5712_54x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as5712_54x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { -+ led_classdev_suspend(&accton_as5712_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5712_54x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { -+ led_classdev_resume(&accton_as5712_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5712_54x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as5712_54x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as5712_54x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as5712_54x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as5712_54x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5712_54x_leds); i++) { -+ led_classdev_unregister(&accton_as5712_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as5712_54x_led_driver = { -+ .probe = accton_as5712_54x_led_probe, -+ .remove = accton_as5712_54x_led_remove, -+ .suspend = accton_as5712_54x_led_suspend, -+ .resume = accton_as5712_54x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5712_54x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5712_54x(void); -+ if(!platform_accton_as5712_54x()) { -+ return -ENODEV; -+ } -+ ret = platform_driver_register(&accton_as5712_54x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as5712_54x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5712_54x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as5712_54x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5712_54x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as5712_54x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as5712_54x_led_init); -+module_exit(accton_as5712_54x_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5712_54x_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 7c7b208..4e5d6dc 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -73,6 +73,16 @@ config EEPROM_MAX6875 - will be called max6875. 
- - -+config EEPROM_ACCTON_AS5712_54x_SFP -+ tristate "Accton as5712 54x sfp" -+ depends on I2C && I2C_MUX_ACCTON_AS5712_54x_CPLD -+ help -+ If you say yes here you get support for Accton as5712 54x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5712_54x_sfp. -+ -+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 9edd559..807158a 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -6,4 +6,5 @@ obj-$(CONFIG_EEPROM_MAX6875) += max6875.o - obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o - obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o - obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o -+obj-$(CONFIG_EEPROM_ACCTON_AS5712_54x_SFP) += accton_as5712_54x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as5712_54x_sfp.c b/drivers/misc/eeprom/accton_as5712_54x_sfp.c -new file mode 100644 -index 0000000..54d885e ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as5712_54x_sfp.c -@@ -0,0 +1,672 @@ -+/* -+ * An hwmon driver for accton as5712_54x sfp -+ * -+ * Copyright (C) Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define NUM_OF_SFP_PORT 54 -+#define BIT_INDEX(i) (1ULL << (i)) -+ -+#if 0 -+static ssize_t show_status(struct device *dev, struct device_attribute *da,char *buf); -+static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+static int as5712_54x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+#endif -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5712_54x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u64 status[4]; /* bit0:port0, bit1:port1 and so on */ -+ /* index 0 => is_present -+ 1 => tx_fail -+ 2 => tx_disable -+ 3 => rx_loss */ -+}; -+ -+/* The table maps active port to cpld port. 
-+ * Array index 0 is for active port 1, -+ * index 1 for active port 2, and so on. -+ * The array content implies cpld port index. -+ */ -+static const u8 cpld_to_front_port_table[] = -+{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, -+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, -+ 49, 52, 50, 53, 51, 54}; -+ -+#define CPLD_PORT_TO_FRONT_PORT(port) (cpld_to_front_port_table[port]) -+ -+static struct as5712_54x_sfp_data *as5712_54x_sfp_update_device(struct device *dev, int update_eeprom); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+extern int as5712_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as5712_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+enum as5712_54x_sfp_sysfs_attributes { -+ SFP_IS_PRESENT, -+ SFP_TX_FAULT, -+ SFP_TX_DISABLE, -+ SFP_RX_LOSS, -+ SFP_PORT_NUMBER, -+ SFP_EEPROM, -+ SFP_RX_LOS_ALL, -+ SFP_IS_PRESENT_ALL, -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_status, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, show_status, NULL, SFP_TX_FAULT); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, show_status, set_tx_disable, SFP_TX_DISABLE); -+static SENSOR_DEVICE_ATTR(sfp_rx_loss, S_IRUGO, show_status,NULL, SFP_RX_LOSS); -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, show_status,NULL, SFP_RX_LOS_ALL); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_status,NULL, SFP_IS_PRESENT_ALL); -+ -+static struct attribute *as5712_54x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_loss.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5712_54x_sfp_data *data; -+ u8 val; -+ int values[7]; -+ -+ /* Error-check the CPLD read results. */ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+ do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+ } while(0) -+ -+ if(attr->index == SFP_RX_LOS_ALL) { -+ /* -+ * Report the RX_LOS status for all ports. -+ * This does not depend on the currently active SFP selector. 
-+ */ -+ -+ /* RX_LOS Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], as5712_54x_i2c_cpld_read(0x61, 0x0F), 0); -+ /* RX_LOS Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], as5712_54x_i2c_cpld_read(0x61, 0x10), 0); -+ /* RX_LOS Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], as5712_54x_i2c_cpld_read(0x61, 0x11), 0); -+ /* RX_LOS Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], as5712_54x_i2c_cpld_read(0x62, 0x0F), 0); -+ /* RX_LOS Ports 33-40 */ -+ VALIDATED_READ(buf, values[4], as5712_54x_i2c_cpld_read(0x62, 0x10), 0); -+ /* RX_LOS Ports 41-48 */ -+ VALIDATED_READ(buf, values[5], as5712_54x_i2c_cpld_read(0x62, 0x11), 0); -+ -+ /** Return values 1 -> 48 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], -+ values[3], values[4], values[5]); -+ } -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ /* -+ * Report the SFP_PRESENCE status for all ports. -+ * This does not depend on the currently active SFP selector. -+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], as5712_54x_i2c_cpld_read(0x61, 0x6), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], as5712_54x_i2c_cpld_read(0x61, 0x7), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], as5712_54x_i2c_cpld_read(0x61, 0x8), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], as5712_54x_i2c_cpld_read(0x62, 0x6), 1); -+ /* SFP_PRESENT Ports 33-40 */ -+ VALIDATED_READ(buf, values[4], as5712_54x_i2c_cpld_read(0x62, 0x7), 1); -+ /* SFP_PRESENT Ports 41-48 */ -+ VALIDATED_READ(buf, values[5], as5712_54x_i2c_cpld_read(0x62, 0x8), 1); -+ /* QSFP_PRESENT Ports 49-54 */ -+ VALIDATED_READ(buf, values[6], as5712_54x_i2c_cpld_read(0x62, 0x14), 1); -+ -+ /* Return values 1 -> 54 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], -+ values[3], values[4], values[5], -+ values[6] & 0x3F); -+ } -+ /* -+ * The remaining attributes are gathered on a per-selected-sfp basis. -+ */ -+ data = as5712_54x_sfp_update_device(dev, 0); -+ if (attr->index == SFP_IS_PRESENT) { -+ val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 0 : 1; -+ } -+ else { -+ val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 
1 : 0; -+ } -+ -+ return sprintf(buf, "%d", val); -+} -+ -+static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); -+ unsigned short cpld_addr = 0; -+ u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; -+ long disable; -+ int error; -+ -+ /* Tx disable is not supported for QSFP ports(49-54) */ -+ if (data->port >= 48) { -+ return -EINVAL; -+ } -+ -+ error = kstrtol(buf, 10, &disable); -+ if (error) { -+ return error; -+ } -+ -+ mutex_lock(&data->update_lock); -+ -+ if(data->port < 24) { -+ cpld_addr = 0x61; -+ cpld_reg = 0xC + data->port / 8; -+ cpld_bit = 1 << (data->port % 8); -+ } -+ else { -+ cpld_addr = 0x62; -+ cpld_reg = 0xC + (data->port - 24) / 8; -+ cpld_bit = 1 << (data->port % 8); -+ } -+ -+ cpld_val = as5712_54x_i2c_cpld_read(cpld_addr, cpld_reg); -+ -+ /* Update tx_disable status */ -+ if (disable) { -+ data->status[SFP_TX_DISABLE] |= BIT_INDEX(data->port); -+ cpld_val |= cpld_bit; -+ } -+ else { -+ data->status[SFP_TX_DISABLE] &= ~BIT_INDEX(data->port); -+ cpld_val &= ~cpld_bit; -+ } -+ -+ as5712_54x_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); -+ -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5712_54x_sfp_data *data = as5712_54x_sfp_update_device(dev, 1); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) != 0) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as5712_54x_sfp_group = { -+ .attrs = as5712_54x_sfp_attributes, -+}; -+ -+static int as5712_54x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5712_54x_sfp_data *data; -+ int status; -+ -+ extern int platform_accton_as5712_54x(void); -+ if(!platform_accton_as5712_54x()) { -+ return -ENODEV; -+ } -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5712_54x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5712_54x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5712_54x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5712_54x_sfp_remove(struct i2c_client *client) -+{ -+ struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5712_54x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as5712_54x_sfp1, as5712_54x_sfp2, as5712_54x_sfp3, as5712_54x_sfp4, -+as5712_54x_sfp5, as5712_54x_sfp6, as5712_54x_sfp7, as5712_54x_sfp8, -+as5712_54x_sfp9, as5712_54x_sfp10, 
as5712_54x_sfp11,as5712_54x_sfp12, -+as5712_54x_sfp13, as5712_54x_sfp14, as5712_54x_sfp15,as5712_54x_sfp16, -+as5712_54x_sfp17, as5712_54x_sfp18, as5712_54x_sfp19,as5712_54x_sfp20, -+as5712_54x_sfp21, as5712_54x_sfp22, as5712_54x_sfp23,as5712_54x_sfp24, -+as5712_54x_sfp25, as5712_54x_sfp26, as5712_54x_sfp27,as5712_54x_sfp28, -+as5712_54x_sfp29, as5712_54x_sfp30, as5712_54x_sfp31,as5712_54x_sfp32, -+as5712_54x_sfp33, as5712_54x_sfp34, as5712_54x_sfp35,as5712_54x_sfp36, -+as5712_54x_sfp37, as5712_54x_sfp38, as5712_54x_sfp39,as5712_54x_sfp40, -+as5712_54x_sfp41, as5712_54x_sfp42, as5712_54x_sfp43,as5712_54x_sfp44, -+as5712_54x_sfp45, as5712_54x_sfp46, as5712_54x_sfp47,as5712_54x_sfp48, -+as5712_54x_sfp49, as5712_54x_sfp52, as5712_54x_sfp50,as5712_54x_sfp53, -+as5712_54x_sfp51, as5712_54x_sfp54 -+}; -+ -+static const struct i2c_device_id as5712_54x_sfp_id[] = { -+{ "as5712_54x_sfp1", as5712_54x_sfp1 }, { "as5712_54x_sfp2", as5712_54x_sfp2 }, -+{ "as5712_54x_sfp3", as5712_54x_sfp3 }, { "as5712_54x_sfp4", as5712_54x_sfp4 }, -+{ "as5712_54x_sfp5", as5712_54x_sfp5 }, { "as5712_54x_sfp6", as5712_54x_sfp6 }, -+{ "as5712_54x_sfp7", as5712_54x_sfp7 }, { "as5712_54x_sfp8", as5712_54x_sfp8 }, -+{ "as5712_54x_sfp9", as5712_54x_sfp9 }, { "as5712_54x_sfp10", as5712_54x_sfp10 }, -+{ "as5712_54x_sfp11", as5712_54x_sfp11 }, { "as5712_54x_sfp12", as5712_54x_sfp12 }, -+{ "as5712_54x_sfp13", as5712_54x_sfp13 }, { "as5712_54x_sfp14", as5712_54x_sfp14 }, -+{ "as5712_54x_sfp15", as5712_54x_sfp15 }, { "as5712_54x_sfp16", as5712_54x_sfp16 }, -+{ "as5712_54x_sfp17", as5712_54x_sfp17 }, { "as5712_54x_sfp18", as5712_54x_sfp18 }, -+{ "as5712_54x_sfp19", as5712_54x_sfp19 }, { "as5712_54x_sfp20", as5712_54x_sfp20 }, -+{ "as5712_54x_sfp21", as5712_54x_sfp21 }, { "as5712_54x_sfp22", as5712_54x_sfp22 }, -+{ "as5712_54x_sfp23", as5712_54x_sfp23 }, { "as5712_54x_sfp24", as5712_54x_sfp24 }, -+{ "as5712_54x_sfp25", as5712_54x_sfp25 }, { "as5712_54x_sfp26", as5712_54x_sfp26 }, -+{ "as5712_54x_sfp27", as5712_54x_sfp27 }, { "as5712_54x_sfp28", as5712_54x_sfp28 }, -+{ "as5712_54x_sfp29", as5712_54x_sfp29 }, { "as5712_54x_sfp30", as5712_54x_sfp30 }, -+{ "as5712_54x_sfp31", as5712_54x_sfp31 }, { "as5712_54x_sfp32", as5712_54x_sfp32 }, -+{ "as5712_54x_sfp33", as5712_54x_sfp33 }, { "as5712_54x_sfp34", as5712_54x_sfp34 }, -+{ "as5712_54x_sfp35", as5712_54x_sfp35 }, { "as5712_54x_sfp36", as5712_54x_sfp36 }, -+{ "as5712_54x_sfp37", as5712_54x_sfp37 }, { "as5712_54x_sfp38", as5712_54x_sfp38 }, -+{ "as5712_54x_sfp39", as5712_54x_sfp39 }, { "as5712_54x_sfp40", as5712_54x_sfp40 }, -+{ "as5712_54x_sfp41", as5712_54x_sfp41 }, { "as5712_54x_sfp42", as5712_54x_sfp42 }, -+{ "as5712_54x_sfp43", as5712_54x_sfp43 }, { "as5712_54x_sfp44", as5712_54x_sfp44 }, -+{ "as5712_54x_sfp45", as5712_54x_sfp45 }, { "as5712_54x_sfp46", as5712_54x_sfp46 }, -+{ "as5712_54x_sfp47", as5712_54x_sfp47 }, { "as5712_54x_sfp48", as5712_54x_sfp48 }, -+{ "as5712_54x_sfp49", as5712_54x_sfp49 }, { "as5712_54x_sfp50", as5712_54x_sfp50 }, -+{ "as5712_54x_sfp51", as5712_54x_sfp51 }, { "as5712_54x_sfp52", as5712_54x_sfp52 }, -+{ "as5712_54x_sfp53", as5712_54x_sfp53 }, { "as5712_54x_sfp54", as5712_54x_sfp54 }, -+ -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as5712_54x_sfp_id); -+ -+static struct i2c_driver as5712_54x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5712_54x_sfp", -+ }, -+ .probe = as5712_54x_sfp_probe, -+ .remove = as5712_54x_sfp_remove, -+ .id_table = as5712_54x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+static int 
as5712_54x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) -+{ -+ int result = i2c_smbus_read_byte_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u8)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+#define ALWAYS_UPDATE_DEVICE 1 -+ -+static struct as5712_54x_sfp_data *as5712_54x_sfp_update_device(struct device *dev, int update_eeprom) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (ALWAYS_UPDATE_DEVICE || time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0, j = 0; -+ -+ data->valid = 0; -+ //dev_dbg(&client->dev, "Starting as5712_54x sfp status update\n"); -+ memset(data->status, 0, sizeof(data->status)); -+ -+ /* Read status of port 1~48(SFP port) */ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 12; j++) { -+ status = as5712_54x_i2c_cpld_read(0x61+i, 0x6+j); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ goto exit; -+ } -+ -+ data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); -+ } -+ } -+ -+ /* -+ * Bring QSFPs out of reset, -+ * This is a temporary fix until the QSFP+_MOD_RST register -+ * can be exposed through the driver. -+ */ -+ as5712_54x_i2c_cpld_write(0x62, 0x15, 0x3F); -+ -+ /* Read present status of port 49-54(QSFP port) */ -+ status = as5712_54x_i2c_cpld_read(0x62, 0x14); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ } -+ else { -+ data->status[SFP_IS_PRESENT] |= (u64)status << 48; -+ } -+ -+ if (update_eeprom) { -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) == 0) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom); i++) { -+ status = as5712_54x_sfp_read_byte(client, i, data->eeprom + i); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", -+ CPLD_PORT_TO_FRONT_PORT(data->port)); -+ goto exit; -+ } -+ } -+ } -+ } -+ -+ data->valid = 1; -+ data->last_updated = jiffies; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+module_i2c_driver(as5712_54x_sfp_driver); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as5712_54x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+#if 0 -+ int i = 0, j = 0; -+ -+ data->valid = 0; -+ //dev_dbg(&client->dev, "Starting as5712_54x sfp update\n"); -+ memset(data->status, 0, sizeof(data->status)); -+ -+ /* Read status of port 1~48(SFP port) */ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 12; j++) { -+ status = as5712_54x_i2c_cpld_read(0x61+i, 0x6+j); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ continue; -+ } -+ -+ data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); -+ } -+ } -+ -+ /* Read present status of port 49-54(QSFP port) */ -+ status = as5712_54x_i2c_cpld_read(0x62, 0x14); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ } -+ else { -+ data->status[SFP_IS_PRESENT] |= (u64)status << 48; -+ } -+#endif -+ -+/* Reserver to prevent from CPLD port mapping is changed -+ */ -+#if 0 
-+BIT_INDEX(port_present_index[data->port]) -+/* The bit index of is_present field read from CPLD -+ * Array index 0 is for as5712_54x_sfp1, -+ * index 1 is for as5712_54x_sfp2, and so on. -+ */ -+static const int port_present_index[] = { -+ 4, 5, 6, 7, 9, 8, 11, 10, -+ 0, 1, 2, 3, 12, 13, 14, 15, -+16, 17, 18, 19, 28, 29, 30, 31, -+20, 21, 22, 23, 24, 25, 26, 27 -+}; -+#endif -+ -+#if 0 -+static struct as5712_54x_sfp_data *as5712_54x_sfp_update_status(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5712_54x_sfp_data *data = i2c_get_clientdata(client); -+ int status = -1; -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->status_last_updated + HZ + HZ / 2) -+ || !data->status_valid) { -+ int status = -1; -+ int i = 0, j = 0; -+ -+ data->status_valid = 0; -+ //dev_dbg(&client->dev, "Starting as5712_54x sfp status update\n"); -+ memset(data->status, 0, sizeof(data->status)); -+ -+ /* Read status of port 1~48(SFP port) */ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 12; j++) { -+ status = as5712_54x_i2c_cpld_read(0x61+i, 0x6+j); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ goto exit; -+ } -+ -+ data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); -+ } -+ } -+ -+ /* -+ * Bring QSFPs out of reset, -+ * This is a temporary fix until the QSFP+_MOD_RST register -+ * can be exposed through the driver. -+ */ -+ as5712_54x_i2c_cpld_write(0x62, 0x15, 0x3F); -+ -+ /* Read present status of port 49-54(QSFP port) */ -+ status = as5712_54x_i2c_cpld_read(0x62, 0x14); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ } -+ else { -+ data->status[SFP_IS_PRESENT] |= (u64)status << 48; -+ } -+ -+ data->status_valid = 1; -+ data->status_last_updated = jiffies; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static struct as5712_54x_sfp_data *as5712_54x_sfp_update_eeprom(struct device *dev) -+{ -+ struct as5712_54x_sfp_data *data = NULL; -+ -+ data = as5712_54x_sfp_update_status(dev); -+ -+ if (data == NULL || data->status_valid == 0) { -+ data->eeprom_valid = 0; -+ return data; -+ } -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->eeprom_last_updated + HZ + HZ / 2) -+ || !data->eeprom_valid) { -+ int status = -1; -+ int i = 0; -+ -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) == 0) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { -+ status = as5712_54x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, -+ data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), -+ I2C_SMBUS_BLOCK_MAX); -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", -+ CPLD_PORT_TO_FRONT_PORT(data->port)); -+ goto exit; -+ } -+ } -+ } -+ -+ data->eeprom_last_updated = jiffies; -+ data->eeprom_valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+#endif diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch deleted file mode 100644 index 17db04c3..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch +++ /dev/null @@ -1,1861 +0,0 @@ -Device driver 
patches for accton as5812_54t (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 73ee085..89c619d 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1555,6 +1555,24 @@ config SENSORS_ACCTON_AS6812_32x_PSU - - This driver can also be built as a module. If so, the module will - be called accton_as6812_32x_psu. -+ -+config SENSORS_ACCTON_AS5812_54t_FAN -+ tristate "Accton as5812 54t fan" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as5812 54t fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5812_54t_fan. -+ -+config SENSORS_ACCTON_AS5812_54t_PSU -+ tristate "Accton as5812 54t psu" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as5812 54t psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5812_54t_psu. - - if ACPI - -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index 7700250..b8cf2ef 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -34,6 +34,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS5812_54x_FAN) += accton_as5812_54x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54x_PSU) += accton_as5812_54x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_FAN) += accton_as6812_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_PSU) += accton_as6812_32x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_PSU) += accton_as5812_54t_psu.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as5812_54t_fan.c b/drivers/hwmon/accton_as5812_54t_fan.c -new file mode 100644 -index 0000000..bad9245 ---- /dev/null -+++ b/drivers/hwmon/accton_as5812_54t_fan.c -@@ -0,0 +1,442 @@ -+/* -+ * A hwmon driver for the Accton as5812 54t fan -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FAN_MAX_NUMBER 5 -+#define FAN_SPEED_CPLD_TO_RPM_STEP 150 -+#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 -+#define FAN_DUTY_CYCLE_MIN 0 -+#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ -+ -+#define CPLD_REG_FAN_STATUS_OFFSET 0xC -+#define CPLD_REG_FANR_STATUS_OFFSET 0x1F -+#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E -+ -+#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 -+#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 -+#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 -+#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 -+#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 -+ -+#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 -+#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 -+#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A -+#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B -+#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C -+ -+#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD -+ -+#define CPLD_FAN1_INFO_BIT_MASK 0x1 -+#define CPLD_FAN2_INFO_BIT_MASK 0x2 -+#define CPLD_FAN3_INFO_BIT_MASK 0x4 -+#define CPLD_FAN4_INFO_BIT_MASK 0x8 -+#define CPLD_FAN5_INFO_BIT_MASK 0x10 -+ -+#define PROJECT_NAME -+ -+#define LOCAL_DEBUG 0 -+ -+static struct accton_as5812_54t_fan *fan_data = NULL; -+ -+struct accton_as5812_54t_fan { -+ struct platform_device *pdev; -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ -+ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ -+ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ -+ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ -+ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ -+ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ -+}; -+ -+/*******************/ -+#define MAKE_FAN_MASK_OR_REG(name,type) \ -+ CPLD_FAN##type##1_##name, \ -+ CPLD_FAN##type##2_##name, \ -+ CPLD_FAN##type##3_##name, \ -+ CPLD_FAN##type##4_##name, \ -+ CPLD_FAN##type##5_##name, -+ -+/* fan related data -+ */ -+static const u8 fan_info_mask[] = { -+ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) -+}; -+ -+static const u8 fan_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) -+}; -+ -+static const u8 fanr_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) -+}; -+ -+/*******************/ -+#define DEF_FAN_SET(id) \ -+ FAN##id##_FAULT, \ -+ FAN##id##_SPEED, \ -+ FAN##id##_DUTY_CYCLE, \ -+ FAN##id##_DIRECTION, \ -+ FANR##id##_FAULT, \ -+ FANR##id##_SPEED, -+ -+enum sysfs_fan_attributes { -+ DEF_FAN_SET(1) -+ DEF_FAN_SET(2) -+ DEF_FAN_SET(3) -+ DEF_FAN_SET(4) -+ DEF_FAN_SET(5) -+}; -+/*******************/ -+static void accton_as5812_54t_fan_update_device(struct device *dev); -+static int accton_as5812_54t_fan_read_value(u8 reg); -+static int accton_as5812_54t_fan_write_value(u8 reg, u8 value); -+ -+static ssize_t fan_set_duty_cycle(struct device *dev, -+ struct device_attribute *da,const char *buf, size_t count); -+static ssize_t fan_show_value(struct device *dev, -+ struct device_attribute *da, char *buf); -+ -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+ -+/*******************/ -+#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ -+ static 
SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ -+ fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); -+ -+#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) -+ -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) -+/*******************/ -+ -+#define _MAKE_FAN_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ -+ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, -+ -+#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) -+ -+static struct attribute *accton_as5812_54t_fan_attributes[] = { -+ /* fan related attributes */ -+ MAKE_FAN_ATTR(PROJECT_NAME,1) -+ MAKE_FAN_ATTR(PROJECT_NAME,2) -+ MAKE_FAN_ATTR(PROJECT_NAME,3) -+ MAKE_FAN_ATTR(PROJECT_NAME,4) -+ MAKE_FAN_ATTR(PROJECT_NAME,5) -+ NULL -+}; -+/*******************/ -+ -+/* fan related functions -+ */ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ ssize_t ret = 0; -+ int data_index, type_index; -+ -+ accton_as5812_54t_fan_update_device(dev); -+ -+ if (fan_data->valid == 0) { -+ return ret; -+ } -+ -+ type_index = attr->index%FAN2_FAULT; -+ data_index = attr->index/FAN2_FAULT; -+ -+ switch (type_index) { -+ case FAN1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DUTY_CYCLE: -+ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DIRECTION: -+ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; 
-+ default: -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); -+ break; -+ } -+ -+ return ret; -+} -+/*******************/ -+static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) { -+ -+ int error, value; -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) -+ return -EINVAL; -+ -+ accton_as5812_54t_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); -+ -+ fan_data->valid = 0; -+ -+ return count; -+} -+ -+static const struct attribute_group accton_as5812_54t_fan_group = { -+ .attrs = accton_as5812_54t_fan_attributes, -+}; -+ -+static int accton_as5812_54t_fan_read_value(u8 reg) -+{ -+ return accton_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5812_54t_fan_write_value(u8 reg, u8 value) -+{ -+ return accton_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5812_54t_fan_update_device(struct device *dev) -+{ -+ int speed, r_speed, fault, r_fault, ctrl_speed, direction; -+ int i; -+ -+ mutex_lock(&fan_data->update_lock); -+ -+ if (LOCAL_DEBUG) -+ printk ("Starting accton_as5812_54t_fan update \n"); -+ -+ if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { -+ /* do nothing */ -+ goto _exit; -+ } -+ -+ fan_data->valid = 0; -+ -+ if (LOCAL_DEBUG) -+ printk ("Starting accton_as5812_54t_fan update 2 \n"); -+ -+ fault = accton_as5812_54t_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); -+ r_fault = accton_as5812_54t_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); -+ direction = accton_as5812_54t_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); -+ ctrl_speed = accton_as5812_54t_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); -+ -+ if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) -+ { -+ if (LOCAL_DEBUG) -+ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ if (LOCAL_DEBUG) -+ printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); -+ -+ for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; -+ if (LOCAL_DEBUG) -+ printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); -+ -+ fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; -+ fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; -+ fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; -+ -+ /* fan speed -+ */ -+ speed = accton_as5812_54t_fan_read_value(fan_speed_reg[i]); -+ r_speed = accton_as5812_54t_fan_read_value(fanr_speed_reg[i]); -+ if ( (speed < 0) || (r_speed < 0) ) -+ { -+ if (LOCAL_DEBUG) -+ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ if (LOCAL_DEBUG) -+ printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); -+ -+ fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ } -+ -+ /* finish to update */ -+ fan_data->last_updated = jiffies; -+ fan_data->valid = 1; -+ -+_exit: -+ mutex_unlock(&fan_data->update_lock); -+} -+ -+static int accton_as5812_54t_fan_probe(struct platform_device *pdev) -+{ -+ int status = -1; -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&pdev->dev.kobj, &accton_as5812_54t_fan_group); -+ if (status) { -+ goto exit; -+ -+ } -+ -+ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); -+ if (IS_ERR(fan_data->hwmon_dev)) { -+ status = PTR_ERR(fan_data->hwmon_dev); 
-+ goto exit_remove; -+ } -+ -+ dev_info(&pdev->dev, "accton_as5812_54t_fan\n"); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&pdev->dev.kobj, &accton_as5812_54t_fan_group); -+exit: -+ return status; -+} -+ -+static int accton_as5812_54t_fan_remove(struct platform_device *pdev) -+{ -+ hwmon_device_unregister(fan_data->hwmon_dev); -+ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5812_54t_fan_group); -+ -+ return 0; -+} -+ -+#define DRVNAME "as5812_54t_fan" -+ -+static struct platform_driver accton_as5812_54t_fan_driver = { -+ .probe = accton_as5812_54t_fan_probe, -+ .remove = accton_as5812_54t_fan_remove, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5812_54t_fan_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5812_54t(void); -+ if (!platform_accton_as5812_54t()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as5812_54t_fan_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ fan_data = kzalloc(sizeof(struct accton_as5812_54t_fan), GFP_KERNEL); -+ if (!fan_data) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5812_54t_fan_driver); -+ goto exit; -+ } -+ -+ mutex_init(&fan_data->update_lock); -+ fan_data->valid = 0; -+ -+ fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(fan_data->pdev)) { -+ ret = PTR_ERR(fan_data->pdev); -+ platform_driver_unregister(&accton_as5812_54t_fan_driver); -+ kfree(fan_data); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5812_54t_fan_exit(void) -+{ -+ platform_device_unregister(fan_data->pdev); -+ platform_driver_unregister(&accton_as5812_54t_fan_driver); -+ kfree(fan_data); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5812_54t_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_as5812_54t_fan_init); -+module_exit(accton_as5812_54t_fan_exit); -+ -diff --git a/drivers/hwmon/accton_as5812_54t_psu.c b/drivers/hwmon/accton_as5812_54t_psu.c -new file mode 100644 -index 0000000..bf1b79e ---- /dev/null -+++ b/drivers/hwmon/accton_as5812_54t_psu.c -@@ -0,0 +1,295 @@ -+/* -+ * An hwmon driver for accton as5812_54t Power Module -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as5812_54t_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5812_54t_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[14]; /* Model name, read from eeprom */ -+}; -+ -+static struct as5812_54t_psu_data *as5812_54t_psu_update_device(struct device *dev); -+ -+enum as5812_54t_psu_sysfs_attributes { -+ PSU_INDEX, -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as5812_54t_psu_attributes[] = { -+ &sensor_dev_attr_psu_index.dev_attr.attr, -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54t_psu_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->index); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5812_54t_psu_data *data = as5812_54t_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((data->index - 1) * 4) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5812_54t_psu_data *data = as5812_54t_psu_update_device(dev); -+ -+ return sprintf(buf, "%s", data->model_name); -+} -+ -+static const struct attribute_group as5812_54t_psu_group = { -+ .attrs = as5812_54t_psu_attributes, -+}; -+ -+static int as5812_54t_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5812_54t_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5812_54t_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ 
dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5812_54t_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x38 || client->addr == 0x50) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x3b || client->addr == 0x53) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5812_54t_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5812_54t_psu_remove(struct i2c_client *client) -+{ -+ struct as5812_54t_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5812_54t_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as5812_54t_psu_id[] = { -+ { "as5812_54t_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as5812_54t_psu_id); -+ -+static struct i2c_driver as5812_54t_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5812_54t_psu", -+ }, -+ .probe = as5812_54t_psu_probe, -+ .remove = as5812_54t_psu_remove, -+ .id_table = as5812_54t_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as5812_54t_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as5812_54t_psu_data *as5812_54t_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54t_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ -+ dev_dbg(&client->dev, "Starting as5812_54t update\n"); -+ -+ /* Read model name */ -+ if (client->addr == 0x38 || client->addr == 0x3b) { -+ /* AC power */ -+ status = as5812_54t_psu_read_block(client, 0x26, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ else { -+ /* DC power */ -+ status = as5812_54t_psu_read_block(client, 0x50, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; -+ } -+ -+ /* Read psu status */ -+ status = accton_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as5812_54t_psu_init(void) -+{ -+ extern int platform_accton_as5812_54t(void); -+ if (!platform_accton_as5812_54t()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as5812_54t_psu_driver); -+} -+ -+static void __exit as5812_54t_psu_exit(void) -+{ -+ i2c_del_driver(&as5812_54t_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); 
-+MODULE_DESCRIPTION("accton as5812_54t_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as5812_54t_psu_init); -+module_exit(as5812_54t_psu_exit); -+ -diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c -index 3aeb08d..acf88c9 100644 ---- a/drivers/hwmon/accton_i2c_cpld.c -+++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -40,6 +40,22 @@ struct cpld_client_node { - */ - static const unsigned short normal_i2c[] = { 0x31, 0x35, 0x60, 0x61, 0x62, I2C_CLIENT_END }; - -+static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ int val = 0; -+ struct i2c_client *client = to_i2c_client(dev); -+ -+ val = i2c_smbus_read_byte_data(client, 0x1); -+ -+ if (val < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x1) err %d\n", client->addr, val); -+ } -+ -+ return sprintf(buf, "%d", val); -+} -+ -+static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); -+ - static void accton_i2c_cpld_add_client(struct i2c_client *client) - { - struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); -@@ -93,6 +109,11 @@ static int accton_i2c_cpld_probe(struct i2c_client *client, - goto exit; - } - -+ status = sysfs_create_file(&client->dev.kobj, &ver.attr); -+ if (status) { -+ goto exit; -+ } -+ - dev_info(&client->dev, "chip found\n"); - accton_i2c_cpld_add_client(client); - -@@ -104,6 +125,7 @@ exit: - - static int accton_i2c_cpld_remove(struct i2c_client *client) - { -+ sysfs_remove_file(&client->dev.kobj, &ver.attr); - accton_i2c_cpld_remove_client(client); - - return 0; -@@ -217,6 +239,29 @@ int platform_accton_as7712_32x(void) - } - EXPORT_SYMBOL(platform_accton_as7712_32x); - -+static struct dmi_system_id as5812_54t_dmi_table[] = { -+ { -+ .ident = "Accton AS5812 54t", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), -+ }, -+ }, -+ { -+ .ident = "Accton AS5812 54t", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), -+ }, -+ }, -+}; -+ -+int platform_accton_as5812_54t(void) -+{ -+ return dmi_check_system(as5812_54t_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as5812_54t); -+ - MODULE_AUTHOR("Brandon Chuang "); - MODULE_DESCRIPTION("accton_i2c_cpld driver"); - MODULE_LICENSE("GPL"); -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index cb0c17f..599b97b 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -81,6 +81,13 @@ config LEDS_ACCTON_AS6812_32x - help - This option enables support for the LEDs on the Accton as6812 32x. - Say Y to enable LEDs on the Accton as6812 32x. -+ -+config LEDS_ACCTON_AS5812_54t -+ tristate "LED support for the Accton as5812 54t" -+ depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD -+ help -+ This option enables support for the LEDs on the Accton as5812 54t. -+ Say Y to enable LEDs on the Accton as5812 54t. 
- - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index 8db7a43..bd20baa 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -49,6 +49,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS7512_32x) += leds-accton_as7512_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o - obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o -+obj-$(CONFIG_LEDS_ACCTON_AS5812_54t) += leds-accton_as5812_54t.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as5812_54t.c b/drivers/leds/leds-accton_as5812_54t.c -new file mode 100644 -index 0000000..011f62e ---- /dev/null -+++ b/drivers/leds/leds-accton_as5812_54t.c -@@ -0,0 +1,601 @@ -+/* -+ * A LED driver for the accton_as5812_54t_led -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "as5812_54t_led" -+ -+struct accton_as5812_54t_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED -+ 1 = PSU1/PSU2 LED -+ 2 = FAN1-4 LED -+ 3 = FAN5-6 LED */ -+}; -+ -+static struct accton_as5812_54t_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+#define LED_TYPE_PSU1_REG_MASK 0x03 -+#define LED_MODE_PSU1_GREEN_MASK 0x02 -+#define LED_MODE_PSU1_AMBER_MASK 0x01 -+#define LED_MODE_PSU1_OFF_MASK 0x03 -+#define LED_MODE_PSU1_AUTO_MASK 0x00 -+ -+#define LED_TYPE_PSU2_REG_MASK 0x0C -+#define LED_MODE_PSU2_GREEN_MASK 0x08 -+#define LED_MODE_PSU2_AMBER_MASK 0x04 -+#define LED_MODE_PSU2_OFF_MASK 0x0C -+#define LED_MODE_PSU2_AUTO_MASK 0x00 -+ -+#define LED_TYPE_DIAG_REG_MASK 0x0C -+#define LED_MODE_DIAG_GREEN_MASK 0x08 -+#define LED_MODE_DIAG_AMBER_MASK 0x04 -+#define LED_MODE_DIAG_OFF_MASK 0x0C -+ -+#define LED_TYPE_FAN_REG_MASK 0x03 -+#define LED_MODE_FAN_GREEN_MASK 0x02 -+#define LED_MODE_FAN_AMBER_MASK 0x01 -+#define LED_MODE_FAN_OFF_MASK 0x03 -+#define LED_MODE_FAN_AUTO_MASK 0x00 -+ -+#define LED_TYPE_FAN1_REG_MASK 0x03 -+#define LED_TYPE_FAN2_REG_MASK 
0x0C -+#define LED_TYPE_FAN3_REG_MASK 0x30 -+#define LED_TYPE_FAN4_REG_MASK 0xC0 -+#define LED_TYPE_FAN5_REG_MASK 0x03 -+#define LED_TYPE_FAN6_REG_MASK 0x0C -+ -+#define LED_MODE_FANX_GREEN_MASK 0x01 -+#define LED_MODE_FANX_RED_MASK 0x02 -+#define LED_MODE_FANX_OFF_MASK 0x00 -+ -+#define LED_TYPE_LOC_REG_MASK 0x30 -+#define LED_MODE_LOC_ON_MASK 0x00 -+#define LED_MODE_LOC_OFF_MASK 0x10 -+#define LED_MODE_LOC_BLINK_MASK 0x20 -+ -+static const u8 led_reg[] = { -+ 0xA, /* LOC/DIAG/FAN LED*/ -+ 0xB, /* PSU1/PSU2 LED */ -+ 0x16, /* FAN1-4 LED */ -+ 0x17, /* FAN4-6 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2, -+ LED_TYPE_DIAG, -+ LED_TYPE_FAN, -+ LED_TYPE_FAN1, -+ LED_TYPE_FAN2, -+ LED_TYPE_FAN3, -+ LED_TYPE_FAN4, -+ LED_TYPE_FAN5, -+ LED_TYPE_LOC -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED, -+ LED_MODE_RED_BLINK, -+ LED_MODE_BLUE, -+ LED_MODE_BLUE_BLINK, -+ LED_MODE_AUTO, -+ LED_MODE_UNKNOWN -+}; -+ -+struct led_type_mode { -+ enum led_type type; -+ int type_mask; -+ enum led_light_mode mode; -+ int mode_mask; -+}; -+ -+static struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_DIAG, 
LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} -+}; -+ -+ -+struct fanx_info_s { -+ u8 cname; /* device name */ -+ enum led_type type; -+ u8 reg_id; /* map to led_reg & reg_val */ -+}; -+ -+static struct fanx_info_s fanx_info[] = { -+ {'1', LED_TYPE_FAN1, 2}, -+ {'2', LED_TYPE_FAN2, 2}, -+ {'3', LED_TYPE_FAN3, 2}, -+ {'4', LED_TYPE_FAN4, 2}, -+ {'5', LED_TYPE_FAN5, 3} -+}; -+ -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if ((led_type_mode_data[i].type_mask & reg_val) == -+ led_type_mode_data[i].mode_mask) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return LED_MODE_UNKNOWN; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ reg_val = led_type_mode_data[i].mode_mask | -+ (reg_val & (~led_type_mode_data[i].type_mask)); -+ } -+ -+ return reg_val; -+} -+ -+static int accton_as5812_54t_led_read_value(u8 reg) -+{ -+ return accton_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5812_54t_led_write_value(u8 reg, u8 value) -+{ -+ return accton_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5812_54t_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as5812_54t_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as5812_54t_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5812_54t_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as5812_54t_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as5812_54t_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5812_54t_led_psu_1_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); -+} -+ -+static enum led_brightness accton_as5812_54t_led_psu_1_get(struct led_classdev *cdev) -+{ -+ accton_as5812_54t_led_update(); -+ return 
led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]);
-+}
-+
-+static void accton_as5812_54t_led_psu_2_set(struct led_classdev *led_cdev,
-+    enum led_brightness led_light_mode)
-+{
-+    accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2);
-+}
-+
-+static enum led_brightness accton_as5812_54t_led_psu_2_get(struct led_classdev *cdev)
-+{
-+    accton_as5812_54t_led_update();
-+    return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]);
-+}
-+
-+static void accton_as5812_54t_led_fan_set(struct led_classdev *led_cdev,
-+    enum led_brightness led_light_mode)
-+{
-+    accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN);
-+}
-+
-+static enum led_brightness accton_as5812_54t_led_fan_get(struct led_classdev *cdev)
-+{
-+    accton_as5812_54t_led_update();
-+    return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]);
-+}
-+
-+static void accton_as5812_54t_led_fanx_set(struct led_classdev *led_cdev,
-+    enum led_brightness led_light_mode)
-+{
-+    enum led_type led_type1;
-+    int reg_id;
-+    int i, nsize;
-+    int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s);
-+
-+    /* match the trailing digit of the LED name against fanx_info */
-+    for (i = 0; i < ncount; i++) {
-+        nsize = strlen(led_cdev->name);
-+
-+        if (led_cdev->name[nsize-1] == fanx_info[i].cname)
-+        {
-+            led_type1 = fanx_info[i].type;
-+            reg_id = fanx_info[i].reg_id;
-+            accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1);
-+            return;
-+        }
-+    }
-+}
-+
-+static enum led_brightness accton_as5812_54t_led_fanx_get(struct led_classdev *cdev)
-+{
-+    enum led_type led_type1;
-+    int reg_id;
-+    int i, nsize;
-+    int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s);
-+
-+    for (i = 0; i < ncount; i++) {
-+        nsize = strlen(cdev->name);
-+
-+        if (cdev->name[nsize-1] == fanx_info[i].cname)
-+        {
-+            led_type1 = fanx_info[i].type;
-+            reg_id = fanx_info[i].reg_id;
-+            accton_as5812_54t_led_update();
-+            return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]);
-+        }
-+    }
-+
-+    return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]);
-+}
-+
-+static void accton_as5812_54t_led_diag_set(struct led_classdev *led_cdev,
-+    enum led_brightness led_light_mode)
-+{
-+    accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG);
-+}
-+
-+static enum led_brightness accton_as5812_54t_led_diag_get(struct led_classdev *cdev)
-+{
-+    accton_as5812_54t_led_update();
-+    return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]);
-+}
-+
-+static void accton_as5812_54t_led_loc_set(struct led_classdev *led_cdev,
-+    enum led_brightness led_light_mode)
-+{
-+    accton_as5812_54t_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC);
-+}
-+
-+static enum led_brightness accton_as5812_54t_led_loc_get(struct led_classdev *cdev)
-+{
-+    accton_as5812_54t_led_update();
-+    return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]);
-+}
-+
-+static struct led_classdev accton_as5812_54t_leds[] = {
-+    [LED_TYPE_PSU1] = {
-+        .name = "accton_as5812_54t_led::psu1",
-+        .default_trigger = "unused",
-+        .brightness_set = accton_as5812_54t_led_psu_1_set,
-+        .brightness_get = accton_as5812_54t_led_psu_1_get,
-+        .flags = LED_CORE_SUSPENDRESUME,
-+        .max_brightness = LED_MODE_AUTO,
-+    },
-+    [LED_TYPE_PSU2] = {
-+        .name = "accton_as5812_54t_led::psu2",
-+        .default_trigger = "unused",
-+        .brightness_set = accton_as5812_54t_led_psu_2_set,
-+        .brightness_get = accton_as5812_54t_led_psu_2_get,
-+        .flags = LED_CORE_SUSPENDRESUME,
-+        .max_brightness = LED_MODE_AUTO,
-+    },
-+    [LED_TYPE_FAN] = {
-+        .name = "accton_as5812_54t_led::fan",
-+        .default_trigger = "unused",
-+        .brightness_set =
accton_as5812_54t_led_fan_set, -+ .brightness_get = accton_as5812_54t_led_fan_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN1] = { -+ .name = "accton_as5812_54t_led::fan1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_fanx_set, -+ .brightness_get = accton_as5812_54t_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN2] = { -+ .name = "accton_as5812_54t_led::fan2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_fanx_set, -+ .brightness_get = accton_as5812_54t_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN3] = { -+ .name = "accton_as5812_54t_led::fan3", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_fanx_set, -+ .brightness_get = accton_as5812_54t_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN4] = { -+ .name = "accton_as5812_54t_led::fan4", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_fanx_set, -+ .brightness_get = accton_as5812_54t_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN5] = { -+ .name = "accton_as5812_54t_led::fan5", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_fanx_set, -+ .brightness_get = accton_as5812_54t_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as5812_54t_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_diag_set, -+ .brightness_get = accton_as5812_54t_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as5812_54t_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54t_led_loc_set, -+ .brightness_get = accton_as5812_54t_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as5812_54t_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { -+ led_classdev_suspend(&accton_as5812_54t_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5812_54t_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { -+ led_classdev_resume(&accton_as5812_54t_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5812_54t_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as5812_54t_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as5812_54t_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as5812_54t_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as5812_54t_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54t_leds); i++) { -+ led_classdev_unregister(&accton_as5812_54t_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as5812_54t_led_driver = { -+ .probe = accton_as5812_54t_led_probe, -+ .remove = 
accton_as5812_54t_led_remove, -+ .suspend = accton_as5812_54t_led_suspend, -+ .resume = accton_as5812_54t_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5812_54t_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5812_54t(void); -+ if (!platform_accton_as5812_54t()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as5812_54t_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as5812_54t_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5812_54t_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as5812_54t_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5812_54t_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as5812_54t_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as5812_54t_led_init); -+module_exit(accton_as5812_54t_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5812_54t_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index ff68df7..c75227b 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -126,6 +126,15 @@ config EEPROM_ACCTON_AS6812_32x_SFP - - This driver can also be built as a module. If so, the module will - be called accton_as6812_32x_sfp. -+ -+config EEPROM_ACCTON_AS5812_54t_SFP -+ tristate "Accton as5812 54t sfp" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as5812 54t sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5812_54t_sfp. - - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 4b682a1..152a8bc 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -12,4 +12,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS7512_32x_SFP) += accton_as7512_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS5812_54t_SFP) += accton_as5812_54t_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as5812_54t_sfp.c b/drivers/misc/eeprom/accton_as5812_54t_sfp.c -new file mode 100644 -index 0000000..0985c80 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as5812_54t_sfp.c -@@ -0,0 +1,318 @@ -+/* -+ * An hwmon driver for accton as5812_54t sfp -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define QSFP_PORT_START_INDEX 49 -+#define BIT_INDEX(i) (1ULL << (i)) -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5812_54t_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u8 status; /* bit0:port49, bit1:port50 and so on */ -+}; -+ -+static struct as5812_54t_sfp_data *as5812_54t_sfp_update_device(struct device *dev, int update_eeprom); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+enum as5812_54t_sfp_sysfs_attributes { -+ SFP_IS_PRESENT, -+ SFP_PORT_NUMBER, -+ SFP_EEPROM, -+ SFP_IS_PRESENT_ALL, -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_status, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_status,NULL, SFP_IS_PRESENT_ALL); -+ -+static struct attribute *as5812_54t_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54t_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n",data->port); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5812_54t_sfp_data *data = as5812_54t_sfp_update_device(dev, 0); -+ -+ if (attr->index == SFP_IS_PRESENT) { -+ u8 val; -+ -+ val = (data->status & BIT_INDEX(data->port - QSFP_PORT_START_INDEX)) ? 
0 : 1; -+ return sprintf(buf, "%d", val); -+ } -+ else { /* SFP_IS_PRESENT_ALL */ -+ return sprintf(buf, "%.2x\n", ~data->status); -+ } -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5812_54t_sfp_data *data = as5812_54t_sfp_update_device(dev, 1); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if ((data->status & BIT_INDEX(data->port - QSFP_PORT_START_INDEX)) != 0) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as5812_54t_sfp_group = { -+ .attrs = as5812_54t_sfp_attributes, -+}; -+ -+static int as5812_54t_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5812_54t_sfp_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5812_54t_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5812_54t_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5812_54t_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5812_54t_sfp_remove(struct i2c_client *client) -+{ -+ struct as5812_54t_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5812_54t_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as5812_54t_qsfp49 = 49, -+as5812_54t_qsfp50, -+as5812_54t_qsfp51, -+as5812_54t_qsfp52, -+as5812_54t_qsfp53, -+as5812_54t_qsfp54 -+}; -+ -+static const struct i2c_device_id as5812_54t_sfp_id[] = { -+{ "as5812_54t_qsfp49", as5812_54t_qsfp49 }, { "as5812_54t_qsfp50", as5812_54t_qsfp50 }, -+{ "as5812_54t_qsfp51", as5812_54t_qsfp51 }, { "as5812_54t_qsfp52", as5812_54t_qsfp52 }, -+{ "as5812_54t_qsfp53", as5812_54t_qsfp53 }, { "as5812_54t_qsfp54", as5812_54t_qsfp54 }, -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as5812_54t_sfp_id); -+ -+static struct i2c_driver as5812_54t_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5812_54t_sfp", -+ }, -+ .probe = as5812_54t_sfp_probe, -+ .remove = as5812_54t_sfp_remove, -+ .id_table = as5812_54t_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as5812_54t_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) -+{ -+ int result = i2c_smbus_read_byte_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u8)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as5812_54t_sfp_data *as5812_54t_sfp_update_device(struct device *dev, int update_eeprom) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54t_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if 
(time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid || update_eeprom) { -+ int status = -1; -+ int i = 0; -+ -+ data->valid = 0; -+ //dev_dbg(&client->dev, "Starting as5812_54t sfp status update\n"); -+ data->status = 0xFF; -+ -+ /* -+ * Bring QSFPs out of reset, -+ * This is a temporary fix until the QSFP+_MOD_RST register -+ * can be exposed through the driver. -+ */ -+ accton_i2c_cpld_write(0x60, 0x23, 0x3F); -+ -+ /* Read present status of port 49-54(QSFP port) */ -+ status = accton_i2c_cpld_read(0x60, 0x22); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x60) reg(0x22) err %d\n", status); -+ } -+ else { -+ data->status = status & 0x3F; /* (u32)status */ -+ } -+ -+ if (update_eeprom) { -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if ((data->status & BIT_INDEX(data->port - QSFP_PORT_START_INDEX)) == 0) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom); i++) { -+ status = as5812_54t_sfp_read_byte(client, i, data->eeprom + i); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", -+ data->port); -+ goto exit; -+ } -+ } -+ } -+ } -+ -+ data->valid = 1; -+ data->last_updated = jiffies; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as5812_54t_sfp_init(void) -+{ -+ extern int platform_accton_as5812_54t(void); -+ if (!platform_accton_as5812_54t()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as5812_54t_sfp_driver); -+} -+ -+static void __exit as5812_54t_sfp_exit(void) -+{ -+ i2c_del_driver(&as5812_54t_sfp_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as5812_54t_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as5812_54t_sfp_init); -+module_exit(as5812_54t_sfp_exit); -+ diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch deleted file mode 100644 index 0b25bf3c..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch +++ /dev/null @@ -1,2401 +0,0 @@ -Device driver patches for accton as5812-54x (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 2d4a7fb..4d9fb22 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1520,6 +1520,24 @@ config SENSORS_ACCTON_AS7712_32x_PSU - This driver can also be built as a module. If so, the module will - be called accton_as7712_32x_psu. - -+config SENSORS_ACCTON_AS5812_54x_FAN -+ tristate "Accton as5812 54x fan" -+ depends on I2C && I2C_MUX_ACCTON_AS5812_54x_CPLD -+ help -+ If you say yes here you get support for Accton as5812 54x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5812_54x_fan. -+ -+config SENSORS_ACCTON_AS5812_54x_PSU -+ tristate "Accton as5812 54x psu" -+ depends on I2C && I2C_MUX_ACCTON_AS5812_54x_CPLD -+ help -+ If you say yes here you get support for Accton as5812 54x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5812_54x_psu. 
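[Editor's note on the as5812_54t SFP presence logic above: the CPLD at 0x60 packs the presence bits for QSFP ports 49-54 into bits 0-5 of register 0x22, active low, which is why the driver tests BIT_INDEX(port - QSFP_PORT_START_INDEX) against zero for a single port and reports ~status for sfp_is_present_all. A minimal user-space sketch of that decoding follows; the helper name and sample register value are illustrative only and are not part of the patch.]

#include <stdio.h>

#define QSFP_PORT_START_INDEX 49
#define BIT_INDEX(i) (1ULL << (i))

/* A 0 bit in the CPLD presence register means "module present". */
static int qsfp_is_present(unsigned char status, int port)
{
    return (status & BIT_INDEX(port - QSFP_PORT_START_INDEX)) ? 0 : 1;
}

int main(void)
{
    unsigned char status = 0x3E;   /* example readout: only bit 0 (port 49) clear */
    int port;

    for (port = 49; port <= 54; port++)
        printf("port %d present=%d\n", port, qsfp_is_present(status, port));

    return 0;
}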
-+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index ea97f4a..818dd01 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -30,6 +30,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS7512_32x_PSU) += accton_as7512_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS7712_32x_FAN) += accton_as7712_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS7712_32x_PSU) += accton_as7712_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_I2C_CPLD) += accton_i2c_cpld.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5812_54x_FAN) += accton_as5812_54x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS5812_54x_PSU) += accton_as5812_54x_psu.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as5812_54x_fan.c b/drivers/hwmon/accton_as5812_54x_fan.c -new file mode 100644 -index 0000000..3e25db1 ---- /dev/null -+++ b/drivers/hwmon/accton_as5812_54x_fan.c -@@ -0,0 +1,442 @@ -+/* -+ * A hwmon driver for the Accton as5812 54x fan -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FAN_MAX_NUMBER 5 -+#define FAN_SPEED_CPLD_TO_RPM_STEP 150 -+#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 -+#define FAN_DUTY_CYCLE_MIN 0 -+#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ -+ -+#define CPLD_REG_FAN_STATUS_OFFSET 0xC -+#define CPLD_REG_FANR_STATUS_OFFSET 0x1F -+#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E -+ -+#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 -+#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 -+#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 -+#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 -+#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 -+ -+#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 -+#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 -+#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A -+#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B -+#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C -+ -+#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD -+ -+#define CPLD_FAN1_INFO_BIT_MASK 0x1 -+#define CPLD_FAN2_INFO_BIT_MASK 0x2 -+#define CPLD_FAN3_INFO_BIT_MASK 0x4 -+#define CPLD_FAN4_INFO_BIT_MASK 0x8 -+#define CPLD_FAN5_INFO_BIT_MASK 0x10 -+ -+#define PROJECT_NAME -+ -+#define LOCAL_DEBUG 0 -+ -+static struct accton_as5812_54x_fan *fan_data = NULL; -+ -+struct accton_as5812_54x_fan { -+ struct platform_device *pdev; -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ -+ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ -+ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ -+ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ -+ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ -+ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ -+}; -+ -+/*******************/ -+#define MAKE_FAN_MASK_OR_REG(name,type) \ -+ CPLD_FAN##type##1_##name, \ -+ CPLD_FAN##type##2_##name, \ -+ CPLD_FAN##type##3_##name, \ -+ CPLD_FAN##type##4_##name, \ -+ CPLD_FAN##type##5_##name, -+ -+/* fan related data -+ */ -+static const u8 fan_info_mask[] = { -+ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) -+}; -+ -+static const u8 fan_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) -+}; -+ -+static const u8 fanr_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) -+}; -+ -+/*******************/ -+#define DEF_FAN_SET(id) \ -+ FAN##id##_FAULT, \ -+ FAN##id##_SPEED, \ -+ FAN##id##_DUTY_CYCLE, \ -+ FAN##id##_DIRECTION, \ -+ FANR##id##_FAULT, \ -+ FANR##id##_SPEED, -+ -+enum sysfs_fan_attributes { -+ DEF_FAN_SET(1) -+ DEF_FAN_SET(2) -+ DEF_FAN_SET(3) -+ DEF_FAN_SET(4) -+ DEF_FAN_SET(5) -+}; -+/*******************/ -+static void accton_as5812_54x_fan_update_device(struct device *dev); -+static int accton_as5812_54x_fan_read_value(u8 reg); -+static int accton_as5812_54x_fan_write_value(u8 reg, u8 value); -+ -+static ssize_t fan_set_duty_cycle(struct device *dev, -+ struct device_attribute *da,const char *buf, size_t count); -+static ssize_t fan_show_value(struct device *dev, -+ struct device_attribute *da, char *buf); -+ -+extern int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+ -+/*******************/ -+#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ -+ static 
SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ -+ fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); -+ -+#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) -+ -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) -+/*******************/ -+ -+#define _MAKE_FAN_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ -+ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, -+ -+#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) -+ -+static struct attribute *accton_as5812_54x_fan_attributes[] = { -+ /* fan related attributes */ -+ MAKE_FAN_ATTR(PROJECT_NAME,1) -+ MAKE_FAN_ATTR(PROJECT_NAME,2) -+ MAKE_FAN_ATTR(PROJECT_NAME,3) -+ MAKE_FAN_ATTR(PROJECT_NAME,4) -+ MAKE_FAN_ATTR(PROJECT_NAME,5) -+ NULL -+}; -+/*******************/ -+ -+/* fan related functions -+ */ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ ssize_t ret = 0; -+ int data_index, type_index; -+ -+ accton_as5812_54x_fan_update_device(dev); -+ -+ if (fan_data->valid == 0) { -+ return ret; -+ } -+ -+ type_index = attr->index%FAN2_FAULT; -+ data_index = attr->index/FAN2_FAULT; -+ -+ switch (type_index) { -+ case FAN1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DUTY_CYCLE: -+ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DIRECTION: -+ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); -+ if (LOCAL_DEBUG) -+ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; 
-+    default:
-+        if (LOCAL_DEBUG)
-+            printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__);
-+        break;
-+    }
-+
-+    return ret;
-+}
-+/*******************/
-+static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da,
-+    const char *buf, size_t count) {
-+
-+    int error, value;
-+
-+    error = kstrtoint(buf, 10, &value);
-+    if (error)
-+        return error;
-+
-+    if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX)
-+        return -EINVAL;
-+
-+    accton_as5812_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP);
-+
-+    fan_data->valid = 0;
-+
-+    return count;
-+}
-+
-+static const struct attribute_group accton_as5812_54x_fan_group = {
-+    .attrs = accton_as5812_54x_fan_attributes,
-+};
-+
-+static int accton_as5812_54x_fan_read_value(u8 reg)
-+{
-+    return as5812_54x_i2c_cpld_read(0x60, reg);
-+}
-+
-+static int accton_as5812_54x_fan_write_value(u8 reg, u8 value)
-+{
-+    return as5812_54x_i2c_cpld_write(0x60, reg, value);
-+}
-+
-+static void accton_as5812_54x_fan_update_device(struct device *dev)
-+{
-+    int speed, r_speed, fault, r_fault, ctrl_speed, direction;
-+    int i;
-+
-+    mutex_lock(&fan_data->update_lock);
-+
-+    if (LOCAL_DEBUG)
-+        printk ("Starting accton_as5812_54x_fan update \n");
-+
-+    if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) {
-+        /* do nothing */
-+        goto _exit;
-+    }
-+
-+    fan_data->valid = 0;
-+
-+    if (LOCAL_DEBUG)
-+        printk ("Starting accton_as5812_54x_fan update 2 \n");
-+
-+    fault = accton_as5812_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET);
-+    r_fault = accton_as5812_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET);
-+    direction = accton_as5812_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET);
-+    ctrl_speed = accton_as5812_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET);
-+
-+    if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) )
-+    {
-+        if (LOCAL_DEBUG)
-+            printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__);
-+        goto _exit; /* error */
-+    }
-+
-+    if (LOCAL_DEBUG)
-+        printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed);
-+
-+    for (i = 0; i < FAN_MAX_NUMBER; i++) {
-+        fan_data->status[i] = (fault & fan_info_mask[i]) >> i;
-+        if (LOCAL_DEBUG)
-+            printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]);
-+
-+        fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i;
-+        fan_data->direction[i] = (direction & fan_info_mask[i]) >> i;
-+        fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP;
-+
-+        /* fan speed
-+         */
-+        speed = accton_as5812_54x_fan_read_value(fan_speed_reg[i]);
-+        r_speed = accton_as5812_54x_fan_read_value(fanr_speed_reg[i]);
-+        if ( (speed < 0) || (r_speed < 0) )
-+        {
-+            if (LOCAL_DEBUG)
-+                printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__);
-+            goto _exit; /* error */
-+        }
-+
-+        if (LOCAL_DEBUG)
-+            printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed);
-+
-+        fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP;
-+        fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP;
-+    }
-+
-+    /* finish to update */
-+    fan_data->last_updated = jiffies;
-+    fan_data->valid = 1;
-+
-+_exit:
-+    mutex_unlock(&fan_data->update_lock);
-+}
-+
-+static int accton_as5812_54x_fan_probe(struct platform_device *pdev)
-+{
-+    int status = -1;
-+
-+    /* Register sysfs hooks */
-+    status = sysfs_create_group(&pdev->dev.kobj, &accton_as5812_54x_fan_group);
-+    if (status) {
-+        goto exit;
-+
-+    }
-+
-+    fan_data->hwmon_dev = hwmon_device_register(&pdev->dev);
-+    if (IS_ERR(fan_data->hwmon_dev)) {
-+        status =
PTR_ERR(fan_data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&pdev->dev, "accton_as5812_54x_fan\n"); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&pdev->dev.kobj, &accton_as5812_54x_fan_group); -+exit: -+ return status; -+} -+ -+static int accton_as5812_54x_fan_remove(struct platform_device *pdev) -+{ -+ hwmon_device_unregister(fan_data->hwmon_dev); -+ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5812_54x_fan_group); -+ -+ return 0; -+} -+ -+#define DRVNAME "as5812_54x_fan" -+ -+static struct platform_driver accton_as5812_54x_fan_driver = { -+ .probe = accton_as5812_54x_fan_probe, -+ .remove = accton_as5812_54x_fan_remove, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5812_54x_fan_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5812_54x(void); -+ if(!platform_accton_as5812_54x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as5812_54x_fan_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ fan_data = kzalloc(sizeof(struct accton_as5812_54x_fan), GFP_KERNEL); -+ if (!fan_data) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5812_54x_fan_driver); -+ goto exit; -+ } -+ -+ mutex_init(&fan_data->update_lock); -+ fan_data->valid = 0; -+ -+ fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(fan_data->pdev)) { -+ ret = PTR_ERR(fan_data->pdev); -+ platform_driver_unregister(&accton_as5812_54x_fan_driver); -+ kfree(fan_data); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5812_54x_fan_exit(void) -+{ -+ platform_device_unregister(fan_data->pdev); -+ platform_driver_unregister(&accton_as5812_54x_fan_driver); -+ kfree(fan_data); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5812_54x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_as5812_54x_fan_init); -+module_exit(accton_as5812_54x_fan_exit); -+ -diff --git a/drivers/hwmon/accton_as5812_54x_psu.c b/drivers/hwmon/accton_as5812_54x_psu.c -new file mode 100644 -index 0000000..0d29980 ---- /dev/null -+++ b/drivers/hwmon/accton_as5812_54x_psu.c -@@ -0,0 +1,294 @@ -+/* -+ * An hwmon driver for accton as5812_54x Power Module -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as5812_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5812_54x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[14]; /* Model name, read from eeprom */ -+}; -+ -+static struct as5812_54x_psu_data *as5812_54x_psu_update_device(struct device *dev); -+ -+enum as5812_54x_psu_sysfs_attributes { -+ PSU_INDEX, -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as5812_54x_psu_attributes[] = { -+ &sensor_dev_attr_psu_index.dev_attr.attr, -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_index(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->index); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5812_54x_psu_data *data = as5812_54x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((data->index - 1) * 4) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5812_54x_psu_data *data = as5812_54x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s", data->model_name); -+} -+ -+static const struct attribute_group as5812_54x_psu_group = { -+ .attrs = as5812_54x_psu_attributes, -+}; -+ -+static int as5812_54x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5812_54x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5812_54x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ 
-+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5812_54x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x38 || client->addr == 0x50) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x3b || client->addr == 0x53) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5812_54x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5812_54x_psu_remove(struct i2c_client *client) -+{ -+ struct as5812_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5812_54x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as5812_54x_psu_id[] = { -+ { "as5812_54x_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as5812_54x_psu_id); -+ -+static struct i2c_driver as5812_54x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5812_54x_psu", -+ }, -+ .probe = as5812_54x_psu_probe, -+ .remove = as5812_54x_psu_remove, -+ .id_table = as5812_54x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as5812_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as5812_54x_psu_data *as5812_54x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ -+ dev_dbg(&client->dev, "Starting as5812_54x update\n"); -+ -+ /* Read model name */ -+ if (client->addr == 0x38 || client->addr == 0x3b) { -+ /* AC power */ -+ status = as5812_54x_psu_read_block(client, 0x26, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ else { -+ /* DC power */ -+ status = as5812_54x_psu_read_block(client, 0x50, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ } -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; -+ } -+ -+ /* Read psu status */ -+ status = as5812_54x_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as5812_54x_psu_init(void) -+{ -+ extern int platform_accton_as5812_54x(void); -+ if(!platform_accton_as5812_54x()) { -+ return -ENODEV; -+ } -+ return i2c_add_driver(&as5812_54x_psu_driver); -+} -+ -+static void __exit as5812_54x_psu_exit(void) -+{ -+ i2c_del_driver(&as5812_54x_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); 
-+MODULE_DESCRIPTION("accton as5812_54x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as5812_54x_psu_init); -+module_exit(as5812_54x_psu_exit); -+ -diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig -index 4429dd9..8ac67ef 100644 ---- a/drivers/i2c/muxes/Kconfig -+++ b/drivers/i2c/muxes/Kconfig -@@ -24,6 +24,15 @@ config I2C_MUX_ACCTON_AS6712_32x_CPLD - This driver can also be built as a module. If so, the module - will be called i2c-mux-accton_as6712_32x_cpld. - -+config I2C_MUX_ACCTON_AS5812_54x_CPLD -+ tristate "Accton as5812_54x CPLD I2C multiplexer" -+ help -+ If you say yes here you get support for the Accton CPLD -+ I2C mux devices. -+ -+ This driver can also be built as a module. If so, the module -+ will be called i2c-mux-accton_as5812_54x_cpld. -+ - config I2C_MUX_GPIO - tristate "GPIO-based I2C multiplexer" - depends on GENERIC_GPIO -diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile -index cab3174..7769d29 100644 ---- a/drivers/i2c/muxes/Makefile -+++ b/drivers/i2c/muxes/Makefile -@@ -9,5 +9,6 @@ obj-$(CONFIG_I2C_MUX_QUANTA) += quanta-i2cmux.o - obj-$(CONFIG_I2C_MUX_QUANTA_LY2) += quanta-ly2-i2c-mux.o - obj-$(CONFIG_I2C_MUX_ACCTON_AS5712_54x_CPLD) += i2c-mux-accton_as5712_54x_cpld.o - obj-$(CONFIG_I2C_MUX_ACCTON_AS6712_32x_CPLD) += i2c-mux-accton_as6712_32x_cpld.o -+obj-$(CONFIG_I2C_MUX_ACCTON_AS5812_54x_CPLD) += i2c-mux-accton_as5812_54x_cpld.o - - ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG -diff --git a/drivers/i2c/muxes/i2c-mux-accton_as5812_54x_cpld.c b/drivers/i2c/muxes/i2c-mux-accton_as5812_54x_cpld.c -new file mode 100644 -index 0000000..e01e557 ---- /dev/null -+++ b/drivers/i2c/muxes/i2c-mux-accton_as5812_54x_cpld.c -@@ -0,0 +1,394 @@ -+/* -+ * An I2C multiplexer dirver for accton as5812 CPLD -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This module supports the accton cpld that hold the channel select -+ * mechanism for other i2c slave devices, such as SFP. -+ * This includes the: -+ * Accton as5812_54x CPLD1/CPLD2/CPLD3 -+ * -+ * Based on: -+ * pca954x.c from Kumar Gala -+ * Copyright (C) 2006 -+ * -+ * Based on: -+ * pca954x.c from Ken Harrenstien -+ * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) -+ * -+ * Based on: -+ * i2c-virtual_cb.c from Brian Kuschak -+ * and -+ * pca9540.c from Jean Delvare . -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static struct dmi_system_id as5812_54x_dmi_table[] = { -+ { -+ .ident = "Accton AS5812-54X", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), -+ }, -+ }, -+ { -+ .ident = "Accton AS5812-54X", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), -+ }, -+ }, -+}; -+ -+int platform_accton_as5812_54x(void) -+{ -+ return dmi_check_system(as5812_54x_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as5812_54x); -+ -+#define NUM_OF_CPLD1_CHANS 0x0 -+#define NUM_OF_CPLD2_CHANS 0x18 -+#define NUM_OF_CPLD3_CHANS 0x1E -+#define CPLD_CHANNEL_SELECT_REG 0x2 -+#define CPLD_DESELECT_CHANNEL 0xFF -+ -+#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS -+ -+static LIST_HEAD(cpld_client_list); -+static struct mutex list_lock; -+ -+struct cpld_client_node { -+ struct i2c_client *client; -+ struct list_head list; -+}; -+ -+enum cpld_mux_type { -+ as5812_54x_cpld2, -+ as5812_54x_cpld3, -+ as5812_54x_cpld1 -+}; -+ -+struct accton_i2c_cpld_mux { -+ enum cpld_mux_type type; -+ struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; -+ u8 last_chan; /* last register value */ -+}; -+ -+struct chip_desc { -+ u8 nchans; -+ u8 deselectChan; -+}; -+ -+/* Provide specs for the PCA954x types we know about */ -+static const struct chip_desc chips[] = { -+ [as5812_54x_cpld1] = { -+ .nchans = NUM_OF_CPLD1_CHANS, -+ .deselectChan = CPLD_DESELECT_CHANNEL, -+ }, -+ [as5812_54x_cpld2] = { -+ .nchans = NUM_OF_CPLD2_CHANS, -+ .deselectChan = CPLD_DESELECT_CHANNEL, -+ }, -+ [as5812_54x_cpld3] = { -+ .nchans = NUM_OF_CPLD3_CHANS, -+ .deselectChan = CPLD_DESELECT_CHANNEL, -+ } -+}; -+ -+static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { -+ { "as5812_54x_cpld1", as5812_54x_cpld1 }, -+ { "as5812_54x_cpld2", as5812_54x_cpld2 }, -+ { "as5812_54x_cpld3", as5812_54x_cpld3 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); -+ -+/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() -+ for this as they will try to lock adapter a second time */ -+static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, -+ struct i2c_client *client, u8 val) -+{ -+ unsigned long orig_jiffies; -+ unsigned short flags; -+ union i2c_smbus_data data; -+ int try; -+ s32 res = -EIO; -+ -+ data.byte = val; -+ flags = client->flags; -+ flags &= I2C_M_TEN | I2C_CLIENT_PEC; -+ -+ if (adap->algo->smbus_xfer) { -+ /* Retry automatically on arbitration loss */ -+ orig_jiffies = jiffies; -+ for (res = 0, try = 0; try <= adap->retries; try++) { -+ res = adap->algo->smbus_xfer(adap, client->addr, flags, -+ I2C_SMBUS_WRITE, CPLD_CHANNEL_SELECT_REG, -+ I2C_SMBUS_BYTE_DATA, &data); -+ if (res != -EAGAIN) -+ break; -+ if (time_after(jiffies, -+ orig_jiffies + adap->timeout)) -+ break; -+ } -+ } -+ -+ return res; -+} -+ -+static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ u8 regval; -+ int ret = 0; -+ regval = chan; -+ -+ /* Only select the channel if its different from the last channel */ -+ if (data->last_chan != regval) { -+ ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); -+ data->last_chan = regval; -+ } -+ -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ -+ /* Deselect active channel */ -+ data->last_chan = chips[data->type].deselectChan; -+ -+ return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); -+} -+ -+static void accton_i2c_cpld_add_client(struct i2c_client *client) -+{ -+ struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); -+ -+ if (!node) { -+ dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); -+ return; -+ } -+ -+ node->client = client; -+ -+ mutex_lock(&list_lock); -+ list_add(&node->list, &cpld_client_list); -+ mutex_unlock(&list_lock); -+} -+ -+static void accton_i2c_cpld_remove_client(struct i2c_client *client) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int found = 0; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client == client) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) { -+ list_del(list_node); -+ kfree(cpld_node); -+ } -+ -+ mutex_unlock(&list_lock); -+} -+ -+static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ u8 reg = 0x1; -+ struct i2c_client *client; -+ int len; -+ -+ client = to_i2c_client(dev); -+ len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); -+ -+ return len; -+} -+ -+static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); -+ -+/* -+ * I2C init/probing/exit functions -+ */ -+static int accton_i2c_cpld_mux_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); -+ int chan=0; -+ struct accton_i2c_cpld_mux *data; -+ int ret = -ENODEV; -+ -+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) -+ goto err; -+ -+ data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); -+ if (!data) { -+ ret = -ENOMEM; -+ goto err; -+ } -+ -+ i2c_set_clientdata(client, data); -+ -+ data->type = id->driver_data; -+ -+ if (data->type 
== as5812_54x_cpld2 || data->type == as5812_54x_cpld3) { -+ data->last_chan = chips[data->type].deselectChan; /* force the first selection */ -+ -+ /* Now create an adapter for each channel */ -+ for (chan = 0; chan < chips[data->type].nchans; chan++) { -+ data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, -+ accton_i2c_cpld_mux_select_chan, -+ accton_i2c_cpld_mux_deselect_mux); -+ -+ if (data->virt_adaps[chan] == NULL) { -+ ret = -ENODEV; -+ dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); -+ goto virt_reg_failed; -+ } -+ } -+ -+ dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", -+ chan, client->name); -+ } -+ -+ accton_i2c_cpld_add_client(client); -+ -+ ret = sysfs_create_file(&client->dev.kobj, &ver.attr); -+ if (ret) -+ goto virt_reg_failed; -+ -+ return 0; -+ -+virt_reg_failed: -+ for (chan--; chan >= 0; chan--) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ } -+ -+ kfree(data); -+err: -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_remove(struct i2c_client *client) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ const struct chip_desc *chip = &chips[data->type]; -+ int chan; -+ -+ sysfs_remove_file(&client->dev.kobj, &ver.attr); -+ -+ for (chan = 0; chan < chip->nchans; ++chan) { -+ if (data->virt_adaps[chan]) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ data->virt_adaps[chan] = NULL; -+ } -+ } -+ -+ kfree(data); -+ accton_i2c_cpld_remove_client(client); -+ -+ return 0; -+} -+ -+int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EPERM; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_read_byte_data(cpld_node->client, reg); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as5812_54x_i2c_cpld_read); -+ -+int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EIO; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as5812_54x_i2c_cpld_write); -+ -+static struct i2c_driver accton_i2c_cpld_mux_driver = { -+ .driver = { -+ .name = "as5812_54x_cpld", -+ .owner = THIS_MODULE, -+ }, -+ .probe = accton_i2c_cpld_mux_probe, -+ .remove = accton_i2c_cpld_mux_remove, -+ .id_table = accton_i2c_cpld_mux_id, -+}; -+ -+static int __init accton_i2c_cpld_mux_init(void) -+{ -+ mutex_init(&list_lock); -+ return i2c_add_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+static void __exit accton_i2c_cpld_mux_exit(void) -+{ -+ i2c_del_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_i2c_cpld_mux_init); -+module_exit(accton_i2c_cpld_mux_exit); -+ -+ -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 48106f2..514f978 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -68,6 +68,13 @@ 
config LEDS_ACCTON_AS7712_32x - This option enables support for the LEDs on the Accton as7712 32x. - Say Y to enable LEDs on the Accton as7712 32x. - -+config LEDS_ACCTON_AS5812_54x -+ tristate "LED support for the Accton as5812 54x" -+ depends on LEDS_CLASS && I2C_MUX_ACCTON_AS5812_54x_CPLD -+ help -+ This option enables support for the LEDs on the Accton as5812 54x. -+ Say Y to enable LEDs on the Accton as5812 54x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index c4ea931..379c448 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -47,6 +47,8 @@ obj-$(CONFIG_LEDS_ACCTON_AS5712_54x) += leds-accton_as5712_54x.o - obj-$(CONFIG_LEDS_ACCTON_AS6712_32x) += leds-accton_as6712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS7512_32x) += leds-accton_as7512_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o -+obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o -+ - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o - -diff --git a/drivers/leds/leds-accton_as5812_54x.c b/drivers/leds/leds-accton_as5812_54x.c -new file mode 100644 -index 0000000..b701868 ---- /dev/null -+++ b/drivers/leds/leds-accton_as5812_54x.c -@@ -0,0 +1,597 @@ -+/* -+ * A LED driver for the accton_as5812_54x_led -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int as5812_54x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "as5812_54x_led" -+ -+struct accton_as5812_54x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED -+ 1 = PSU1/PSU2 LED -+ 2 = FAN1-4 LED -+ 3 = FAN5-6 LED */ -+}; -+ -+static struct accton_as5812_54x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+#define LED_TYPE_PSU1_REG_MASK 0x03 -+#define LED_MODE_PSU1_GREEN_MASK 0x02 -+#define LED_MODE_PSU1_AMBER_MASK 0x01 -+#define LED_MODE_PSU1_OFF_MASK 0x03 -+#define LED_MODE_PSU1_AUTO_MASK 0x00 -+ -+#define LED_TYPE_PSU2_REG_MASK 0x0C -+#define LED_MODE_PSU2_GREEN_MASK 0x08 -+#define LED_MODE_PSU2_AMBER_MASK 0x04 -+#define LED_MODE_PSU2_OFF_MASK 0x0C -+#define LED_MODE_PSU2_AUTO_MASK 0x00 -+ -+#define LED_TYPE_DIAG_REG_MASK 0x0C -+#define LED_MODE_DIAG_GREEN_MASK 0x08 -+#define LED_MODE_DIAG_AMBER_MASK 0x04 -+#define LED_MODE_DIAG_OFF_MASK 0x0C -+ -+#define LED_TYPE_FAN_REG_MASK 0x03 -+#define LED_MODE_FAN_GREEN_MASK 0x02 -+#define LED_MODE_FAN_AMBER_MASK 0x01 -+#define LED_MODE_FAN_OFF_MASK 0x03 -+#define LED_MODE_FAN_AUTO_MASK 0x00 -+ -+#define LED_TYPE_FAN1_REG_MASK 0x03 -+#define LED_TYPE_FAN2_REG_MASK 0x0C -+#define LED_TYPE_FAN3_REG_MASK 0x30 -+#define LED_TYPE_FAN4_REG_MASK 0xC0 -+#define LED_TYPE_FAN5_REG_MASK 0x03 -+#define LED_TYPE_FAN6_REG_MASK 0x0C -+ -+#define LED_MODE_FANX_GREEN_MASK 0x01 -+#define LED_MODE_FANX_RED_MASK 0x02 -+#define LED_MODE_FANX_OFF_MASK 0x00 -+ -+#define LED_TYPE_LOC_REG_MASK 0x30 -+#define LED_MODE_LOC_ON_MASK 0x00 -+#define LED_MODE_LOC_OFF_MASK 0x10 -+#define LED_MODE_LOC_BLINK_MASK 0x20 -+ -+static const u8 led_reg[] = { -+ 0xA, /* LOC/DIAG/FAN LED*/ -+ 0xB, /* PSU1/PSU2 LED */ -+ 0x16, /* FAN1-4 LED */ -+ 0x17, /* FAN4-6 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2, -+ LED_TYPE_DIAG, -+ LED_TYPE_FAN, -+ LED_TYPE_FAN1, -+ LED_TYPE_FAN2, -+ LED_TYPE_FAN3, -+ LED_TYPE_FAN4, -+ LED_TYPE_FAN5, -+ LED_TYPE_LOC -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_AMBER, -+ LED_MODE_RED, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED_BLINK, -+ LED_MODE_AUTO, -+}; -+ -+struct led_type_mode { -+ enum led_type type; -+ int type_mask; -+ enum led_light_mode mode; -+ int mode_mask; -+}; -+ -+static struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, 
LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} -+}; -+ -+ -+struct fanx_info_s { -+ u8 cname; /* device name */ -+ enum led_type type; -+ u8 reg_id; /* map to led_reg & reg_val */ -+}; -+ -+static struct fanx_info_s fanx_info[] = { -+ {'1', LED_TYPE_FAN1, 2}, -+ {'2', LED_TYPE_FAN2, 2}, -+ {'3', LED_TYPE_FAN3, 2}, -+ {'4', LED_TYPE_FAN4, 2}, -+ {'5', LED_TYPE_FAN5, 3} -+}; -+ -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if ((led_type_mode_data[i].type_mask & reg_val) == -+ led_type_mode_data[i].mode_mask) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return 0; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ reg_val = led_type_mode_data[i].mode_mask | -+ (reg_val & (~led_type_mode_data[i].type_mask)); -+ } -+ -+ return reg_val; -+} -+ -+static int accton_as5812_54x_led_read_value(u8 reg) -+{ -+ return as5812_54x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as5812_54x_led_write_value(u8 reg, u8 value) 
-+{ -+ return as5812_54x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as5812_54x_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as5812_54x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as5812_54x_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5812_54x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as5812_54x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as5812_54x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as5812_54x_led_psu_1_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); -+} -+ -+static enum led_brightness accton_as5812_54x_led_psu_1_get(struct led_classdev *cdev) -+{ -+ accton_as5812_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); -+} -+ -+static void accton_as5812_54x_led_psu_2_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); -+} -+ -+static enum led_brightness accton_as5812_54x_led_psu_2_get(struct led_classdev *cdev) -+{ -+ accton_as5812_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); -+} -+ -+static void accton_as5812_54x_led_fan_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); -+} -+ -+static enum led_brightness accton_as5812_54x_led_fan_get(struct led_classdev *cdev) -+{ -+ accton_as5812_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); -+} -+ -+ -+static void accton_as5812_54x_led_fanx_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;iname); -+ -+ if (led_cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = fanx_info[i].reg_id; -+ accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); -+ return; -+ } -+ } -+} -+ -+ -+static enum led_brightness accton_as5812_54x_led_fanx_get(struct led_classdev *cdev) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;iname); -+ -+ if (cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = fanx_info[i].reg_id; -+ 
accton_as5812_54x_led_update(); -+ return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); -+ } -+ } -+ -+ -+ return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); -+} -+ -+ -+static void accton_as5812_54x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as5812_54x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as5812_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as5812_54x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as5812_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as5812_54x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as5812_54x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static struct led_classdev accton_as5812_54x_leds[] = { -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as5812_54x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_psu_1_set, -+ .brightness_get = accton_as5812_54x_led_psu_1_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as5812_54x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_psu_2_set, -+ .brightness_get = accton_as5812_54x_led_psu_2_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as5812_54x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_fan_set, -+ .brightness_get = accton_as5812_54x_led_fan_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN1] = { -+ .name = "accton_as5812_54x_led::fan1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_fanx_set, -+ .brightness_get = accton_as5812_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN2] = { -+ .name = "accton_as5812_54x_led::fan2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_fanx_set, -+ .brightness_get = accton_as5812_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN3] = { -+ .name = "accton_as5812_54x_led::fan3", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_fanx_set, -+ .brightness_get = accton_as5812_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN4] = { -+ .name = "accton_as5812_54x_led::fan4", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_fanx_set, -+ .brightness_get = accton_as5812_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN5] = { -+ .name = "accton_as5812_54x_led::fan5", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_fanx_set, -+ .brightness_get = accton_as5812_54x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as5812_54x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_diag_set, -+ .brightness_get = accton_as5812_54x_led_diag_get, -+ .flags = 
LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as5812_54x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as5812_54x_led_loc_set, -+ .brightness_get = accton_as5812_54x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as5812_54x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { -+ led_classdev_suspend(&accton_as5812_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5812_54x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { -+ led_classdev_resume(&accton_as5812_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as5812_54x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as5812_54x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as5812_54x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as5812_54x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as5812_54x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) { -+ led_classdev_unregister(&accton_as5812_54x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as5812_54x_led_driver = { -+ .probe = accton_as5812_54x_led_probe, -+ .remove = accton_as5812_54x_led_remove, -+ .suspend = accton_as5812_54x_led_suspend, -+ .resume = accton_as5812_54x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as5812_54x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as5812_54x(void); -+ if(!platform_accton_as5812_54x()) { -+ return -ENODEV; -+ } -+ ret = platform_driver_register(&accton_as5812_54x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as5812_54x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as5812_54x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as5812_54x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as5812_54x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as5812_54x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as5812_54x_led_init); -+module_exit(accton_as5812_54x_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as5812_54x_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index bd435a0..7c8d3b8 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -109,6 +109,15 @@ config EEPROM_ACCTON_AS7712_32x_SFP - This driver can also be built as a module. If so, the module will - be called accton_as7712_32x_sfp. 
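[Editor's note, not part of the patch] One detail worth flagging in the LED driver above: in accton_as5812_54x_led_probe(), the error-path cleanup loop iterates with j but unregisters accton_as5812_54x_leds[i], so a mid-loop failure would repeatedly unregister the entry that failed rather than the ones that had already registered. A sketch of the unwind as it appears to be intended, reusing the driver's own classdev array (illustrative only):

	static int example_led_probe(struct platform_device *pdev)
	{
		int ret = 0, i;

		for (i = 0; i < ARRAY_SIZE(accton_as5812_54x_leds); i++) {
			ret = led_classdev_register(&pdev->dev, &accton_as5812_54x_leds[i]);
			if (ret < 0)
				break;
		}

		/* unwind only the classdevs that registered successfully */
		if (i != ARRAY_SIZE(accton_as5812_54x_leds)) {
			while (--i >= 0)
				led_classdev_unregister(&accton_as5812_54x_leds[i]);
		}

		return ret;
	}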
- -+config EEPROM_ACCTON_AS5812_54x_SFP -+ tristate "Accton as5812 54x sfp" -+ depends on I2C && I2C_MUX_ACCTON_AS5812_54x_CPLD -+ help -+ If you say yes here you get support for Accton as5812 54x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as5812_54x_sfp. -+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 4ad6540..e11d273 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -10,4 +10,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS5712_54x_SFP) += accton_as5712_54x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS6712_32x_SFP) += accton_as6712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS7512_32x_SFP) += accton_as7512_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as5812_54x_sfp.c b/drivers/misc/eeprom/accton_as5812_54x_sfp.c -new file mode 100644 -index 0000000..44727e2 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as5812_54x_sfp.c -@@ -0,0 +1,508 @@ -+/* -+ * An hwmon driver for accton as5812_54x sfp -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define NUM_OF_SFP_PORT 54 -+#define BIT_INDEX(i) (1ULL << (i)) -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as5812_54x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u64 status[4]; /* bit0:port0, bit1:port1 and so on */ -+ /* index 0 => is_present -+ 1 => tx_fail -+ 2 => tx_disable -+ 3 => rx_loss */ -+}; -+ -+/* The table maps active port to cpld port. -+ * Array index 0 is for active port 1, -+ * index 1 for active port 2, and so on. -+ * The array content implies cpld port index. 
-+ */ -+static const u8 cpld_to_front_port_table[] = -+{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, -+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, -+ 49, 52, 50, 53, 51, 54}; -+ -+#define CPLD_PORT_TO_FRONT_PORT(port) (cpld_to_front_port_table[port]) -+ -+static struct as5812_54x_sfp_data *as5812_54x_sfp_update_device(struct device *dev, int update_eeprom); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+extern int as5812_54x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as5812_54x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+enum as5812_54x_sfp_sysfs_attributes { -+ SFP_IS_PRESENT, -+ SFP_TX_FAULT, -+ SFP_TX_DISABLE, -+ SFP_RX_LOSS, -+ SFP_PORT_NUMBER, -+ SFP_EEPROM, -+ SFP_RX_LOS_ALL, -+ SFP_IS_PRESENT_ALL, -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_status, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, show_status, NULL, SFP_TX_FAULT); -+static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, show_status, set_tx_disable, SFP_TX_DISABLE); -+static SENSOR_DEVICE_ATTR(sfp_rx_loss, S_IRUGO, show_status,NULL, SFP_RX_LOSS); -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, show_status,NULL, SFP_RX_LOS_ALL); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_status,NULL, SFP_IS_PRESENT_ALL); -+ -+static struct attribute *as5812_54x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_loss.dev_attr.attr, -+ &sensor_dev_attr_sfp_tx_disable.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_rx_los_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); -+} -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as5812_54x_sfp_data *data; -+ u8 val; -+ int values[7]; -+ -+ /* Error-check the CPLD read results. */ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+ do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+ } while(0) -+ -+ if(attr->index == SFP_RX_LOS_ALL) { -+ /* -+ * Report the RX_LOS status for all ports. -+ * This does not depend on the currently active SFP selector. 
-+ */ -+ -+ /* RX_LOS Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], as5812_54x_i2c_cpld_read(0x61, 0x0F), 0); -+ /* RX_LOS Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], as5812_54x_i2c_cpld_read(0x61, 0x10), 0); -+ /* RX_LOS Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], as5812_54x_i2c_cpld_read(0x61, 0x11), 0); -+ /* RX_LOS Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], as5812_54x_i2c_cpld_read(0x62, 0x0F), 0); -+ /* RX_LOS Ports 33-40 */ -+ VALIDATED_READ(buf, values[4], as5812_54x_i2c_cpld_read(0x62, 0x10), 0); -+ /* RX_LOS Ports 41-48 */ -+ VALIDATED_READ(buf, values[5], as5812_54x_i2c_cpld_read(0x62, 0x11), 0); -+ -+ /** Return values 1 -> 48 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], -+ values[3], values[4], values[5]); -+ } -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ /* -+ * Report the SFP_PRESENCE status for all ports. -+ * This does not depend on the currently active SFP selector. -+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], as5812_54x_i2c_cpld_read(0x61, 0x6), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], as5812_54x_i2c_cpld_read(0x61, 0x7), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], as5812_54x_i2c_cpld_read(0x61, 0x8), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], as5812_54x_i2c_cpld_read(0x62, 0x6), 1); -+ /* SFP_PRESENT Ports 33-40 */ -+ VALIDATED_READ(buf, values[4], as5812_54x_i2c_cpld_read(0x62, 0x7), 1); -+ /* SFP_PRESENT Ports 41-48 */ -+ VALIDATED_READ(buf, values[5], as5812_54x_i2c_cpld_read(0x62, 0x8), 1); -+ /* QSFP_PRESENT Ports 49-54 */ -+ VALIDATED_READ(buf, values[6], as5812_54x_i2c_cpld_read(0x62, 0x14), 1); -+ -+ /* Return values 1 -> 54 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], -+ values[3], values[4], values[5], -+ values[6] & 0x3F); -+ } -+ /* -+ * The remaining attributes are gathered on a per-selected-sfp basis. -+ */ -+ data = as5812_54x_sfp_update_device(dev, 0); -+ if (attr->index == SFP_IS_PRESENT) { -+ val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 0 : 1; -+ } -+ else { -+ val = (data->status[attr->index] & BIT_INDEX(data->port)) ? 
1 : 0; -+ } -+ -+ return sprintf(buf, "%d", val); -+} -+ -+static ssize_t set_tx_disable(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); -+ unsigned short cpld_addr = 0; -+ u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; -+ long disable; -+ int error; -+ -+ /* Tx disable is not supported for QSFP ports(49-54) */ -+ if (data->port >= 48) { -+ return -EINVAL; -+ } -+ -+ error = kstrtol(buf, 10, &disable); -+ if (error) { -+ return error; -+ } -+ -+ mutex_lock(&data->update_lock); -+ -+ if(data->port < 24) { -+ cpld_addr = 0x61; -+ cpld_reg = 0xC + data->port / 8; -+ cpld_bit = 1 << (data->port % 8); -+ } -+ else { -+ cpld_addr = 0x62; -+ cpld_reg = 0xC + (data->port - 24) / 8; -+ cpld_bit = 1 << (data->port % 8); -+ } -+ -+ cpld_val = as5812_54x_i2c_cpld_read(cpld_addr, cpld_reg); -+ -+ /* Update tx_disable status */ -+ if (disable) { -+ data->status[SFP_TX_DISABLE] |= BIT_INDEX(data->port); -+ cpld_val |= cpld_bit; -+ } -+ else { -+ data->status[SFP_TX_DISABLE] &= ~BIT_INDEX(data->port); -+ cpld_val &= ~cpld_bit; -+ } -+ -+ as5812_54x_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); -+ -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as5812_54x_sfp_data *data = as5812_54x_sfp_update_device(dev, 1); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) != 0) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as5812_54x_sfp_group = { -+ .attrs = as5812_54x_sfp_attributes, -+}; -+ -+static int as5812_54x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as5812_54x_sfp_data *data; -+ int status; -+ -+ extern int platform_accton_as5812_54x(void); -+ if(!platform_accton_as5812_54x()) { -+ return -ENODEV; -+ } -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as5812_54x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as5812_54x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as5812_54x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as5812_54x_sfp_remove(struct i2c_client *client) -+{ -+ struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as5812_54x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as5812_54x_sfp1, as5812_54x_sfp2, as5812_54x_sfp3, as5812_54x_sfp4, -+as5812_54x_sfp5, as5812_54x_sfp6, as5812_54x_sfp7, as5812_54x_sfp8, -+as5812_54x_sfp9, as5812_54x_sfp10, 
as5812_54x_sfp11,as5812_54x_sfp12, -+as5812_54x_sfp13, as5812_54x_sfp14, as5812_54x_sfp15,as5812_54x_sfp16, -+as5812_54x_sfp17, as5812_54x_sfp18, as5812_54x_sfp19,as5812_54x_sfp20, -+as5812_54x_sfp21, as5812_54x_sfp22, as5812_54x_sfp23,as5812_54x_sfp24, -+as5812_54x_sfp25, as5812_54x_sfp26, as5812_54x_sfp27,as5812_54x_sfp28, -+as5812_54x_sfp29, as5812_54x_sfp30, as5812_54x_sfp31,as5812_54x_sfp32, -+as5812_54x_sfp33, as5812_54x_sfp34, as5812_54x_sfp35,as5812_54x_sfp36, -+as5812_54x_sfp37, as5812_54x_sfp38, as5812_54x_sfp39,as5812_54x_sfp40, -+as5812_54x_sfp41, as5812_54x_sfp42, as5812_54x_sfp43,as5812_54x_sfp44, -+as5812_54x_sfp45, as5812_54x_sfp46, as5812_54x_sfp47,as5812_54x_sfp48, -+as5812_54x_sfp49, as5812_54x_sfp52, as5812_54x_sfp50,as5812_54x_sfp53, -+as5812_54x_sfp51, as5812_54x_sfp54 -+}; -+ -+static const struct i2c_device_id as5812_54x_sfp_id[] = { -+{ "as5812_54x_sfp1", as5812_54x_sfp1 }, { "as5812_54x_sfp2", as5812_54x_sfp2 }, -+{ "as5812_54x_sfp3", as5812_54x_sfp3 }, { "as5812_54x_sfp4", as5812_54x_sfp4 }, -+{ "as5812_54x_sfp5", as5812_54x_sfp5 }, { "as5812_54x_sfp6", as5812_54x_sfp6 }, -+{ "as5812_54x_sfp7", as5812_54x_sfp7 }, { "as5812_54x_sfp8", as5812_54x_sfp8 }, -+{ "as5812_54x_sfp9", as5812_54x_sfp9 }, { "as5812_54x_sfp10", as5812_54x_sfp10 }, -+{ "as5812_54x_sfp11", as5812_54x_sfp11 }, { "as5812_54x_sfp12", as5812_54x_sfp12 }, -+{ "as5812_54x_sfp13", as5812_54x_sfp13 }, { "as5812_54x_sfp14", as5812_54x_sfp14 }, -+{ "as5812_54x_sfp15", as5812_54x_sfp15 }, { "as5812_54x_sfp16", as5812_54x_sfp16 }, -+{ "as5812_54x_sfp17", as5812_54x_sfp17 }, { "as5812_54x_sfp18", as5812_54x_sfp18 }, -+{ "as5812_54x_sfp19", as5812_54x_sfp19 }, { "as5812_54x_sfp20", as5812_54x_sfp20 }, -+{ "as5812_54x_sfp21", as5812_54x_sfp21 }, { "as5812_54x_sfp22", as5812_54x_sfp22 }, -+{ "as5812_54x_sfp23", as5812_54x_sfp23 }, { "as5812_54x_sfp24", as5812_54x_sfp24 }, -+{ "as5812_54x_sfp25", as5812_54x_sfp25 }, { "as5812_54x_sfp26", as5812_54x_sfp26 }, -+{ "as5812_54x_sfp27", as5812_54x_sfp27 }, { "as5812_54x_sfp28", as5812_54x_sfp28 }, -+{ "as5812_54x_sfp29", as5812_54x_sfp29 }, { "as5812_54x_sfp30", as5812_54x_sfp30 }, -+{ "as5812_54x_sfp31", as5812_54x_sfp31 }, { "as5812_54x_sfp32", as5812_54x_sfp32 }, -+{ "as5812_54x_sfp33", as5812_54x_sfp33 }, { "as5812_54x_sfp34", as5812_54x_sfp34 }, -+{ "as5812_54x_sfp35", as5812_54x_sfp35 }, { "as5812_54x_sfp36", as5812_54x_sfp36 }, -+{ "as5812_54x_sfp37", as5812_54x_sfp37 }, { "as5812_54x_sfp38", as5812_54x_sfp38 }, -+{ "as5812_54x_sfp39", as5812_54x_sfp39 }, { "as5812_54x_sfp40", as5812_54x_sfp40 }, -+{ "as5812_54x_sfp41", as5812_54x_sfp41 }, { "as5812_54x_sfp42", as5812_54x_sfp42 }, -+{ "as5812_54x_sfp43", as5812_54x_sfp43 }, { "as5812_54x_sfp44", as5812_54x_sfp44 }, -+{ "as5812_54x_sfp45", as5812_54x_sfp45 }, { "as5812_54x_sfp46", as5812_54x_sfp46 }, -+{ "as5812_54x_sfp47", as5812_54x_sfp47 }, { "as5812_54x_sfp48", as5812_54x_sfp48 }, -+{ "as5812_54x_sfp49", as5812_54x_sfp49 }, { "as5812_54x_sfp50", as5812_54x_sfp50 }, -+{ "as5812_54x_sfp51", as5812_54x_sfp51 }, { "as5812_54x_sfp52", as5812_54x_sfp52 }, -+{ "as5812_54x_sfp53", as5812_54x_sfp53 }, { "as5812_54x_sfp54", as5812_54x_sfp54 }, -+ -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as5812_54x_sfp_id); -+ -+static struct i2c_driver as5812_54x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as5812_54x_sfp", -+ }, -+ .probe = as5812_54x_sfp_probe, -+ .remove = as5812_54x_sfp_remove, -+ .id_table = as5812_54x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+static int 
as5812_54x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) -+{ -+ int result = i2c_smbus_read_byte_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u8)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+#define ALWAYS_UPDATE_DEVICE 1 -+ -+static struct as5812_54x_sfp_data *as5812_54x_sfp_update_device(struct device *dev, int update_eeprom) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as5812_54x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (ALWAYS_UPDATE_DEVICE || time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0, j = 0; -+ -+ data->valid = 0; -+ //dev_dbg(&client->dev, "Starting as5812_54x sfp status update\n"); -+ memset(data->status, 0, sizeof(data->status)); -+ -+ /* Read status of port 1~48(SFP port) */ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 12; j++) { -+ status = as5812_54x_i2c_cpld_read(0x61+i, 0x6+j); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ goto exit; -+ } -+ -+ data->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); -+ } -+ } -+ -+ /* -+ * Bring QSFPs out of reset, -+ * This is a temporary fix until the QSFP+_MOD_RST register -+ * can be exposed through the driver. -+ */ -+ as5812_54x_i2c_cpld_write(0x62, 0x15, 0x3F); -+ -+ /* Read present status of port 49-54(QSFP port) */ -+ status = as5812_54x_i2c_cpld_read(0x62, 0x14); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x61+i, 0x6+j, status); -+ } -+ else { -+ data->status[SFP_IS_PRESENT] |= (u64)status << 48; -+ } -+ -+ if (update_eeprom) { -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if ((data->status[SFP_IS_PRESENT] & BIT_INDEX(data->port)) == 0) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom); i++) { -+ status = as5812_54x_sfp_read_byte(client, i, data->eeprom + i); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", -+ CPLD_PORT_TO_FRONT_PORT(data->port)); -+ goto exit; -+ } -+ } -+ } -+ } -+ -+ data->valid = 1; -+ data->last_updated = jiffies; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+module_i2c_driver(as5812_54x_sfp_driver); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as5812_54x_sfp driver"); -+MODULE_LICENSE("GPL"); diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch deleted file mode 100644 index 95ab532b..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch +++ /dev/null @@ -1,2334 +0,0 @@ -Device driver patches for accton as6712 (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 52a68eb..2787ddd 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1422,7 +1422,6 @@ config SENSORS_CPR_4011_4MXX - This driver can also be built as a module. If so, the module will - be called cpr_4011_4mxx. 
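[Editor's note, not part of the patch] The as5812_54x SFP driver above folds the twelve per-CPLD status registers into four 64-bit words (index 0 = present, 1 = tx_fault, 2 = tx_disable, 3 = rx_los), one bit per front-panel port, and tests a single port with BIT_INDEX(); presence is active-low, so a set bit means the module is absent. A standalone sketch of that packing, mirroring the driver's shift expression (user-space types are used here purely for illustration):

	#include <stdint.h>

	#define BIT_INDEX(i) (1ULL << (i))

	/*
	 * cpld    : 0 for the CPLD at 0x61 (ports 1-24), 1 for 0x62 (ports 25-48)
	 * reg_idx : 0..11, i.e. register offset 0x6 + reg_idx
	 * reg_val : the byte read from that register (8 ports per register)
	 */
	static void pack_status(uint64_t status[4], int cpld, int reg_idx, uint8_t reg_val)
	{
		status[reg_idx / 3] |= (uint64_t)reg_val << ((cpld * 24) + (reg_idx % 3) * 8);
	}

	/* port is the 0-based CPLD port index; a clear bit in word 0 means "present" */
	static int port_is_present(const uint64_t status[4], int port)
	{
		return (status[0] & BIT_INDEX(port)) == 0;
	}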
- -- - config SENSORS_ACCTON_AS5712_54x_FAN - tristate "Accton as5712 54x fan" - depends on I2C && I2C_MUX_ACCTON_AS5712_54x_CPLD -@@ -1440,7 +1439,24 @@ config SENSORS_ACCTON_AS5712_54x_PSU - - This driver can also be built as a module. If so, the module will - be called accton_as5712_54x_psu. -+ -+config SENSORS_ACCTON_AS6712_32x_FAN -+ tristate "Accton as6712 32x fan" -+ depends on I2C && I2C_MUX_ACCTON_AS6712_32x_CPLD -+ help -+ If you say yes here you get support for Accton as6712 32x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as6712_32x_fan. -+ -+config SENSORS_ACCTON_AS6712_32x_PSU -+ tristate "Accton as6712 32x psu" -+ depends on I2C && I2C_MUX_ACCTON_AS6712_32x_CPLD -+ help -+ If you say yes here you get support for Accton as6712 32x psu. - -+ This driver can also be built as a module. If so, the module will -+ be called accton_as6712_32x_psu. - - if ACPI - -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index e2f3bce..50d7007 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -23,6 +23,8 @@ obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o - obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o - obj-$(CONFIG_SENSORS_ACCTON_AS5712_54x_FAN) += accton_as5712_54x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS5712_54x_PSU) += accton_as5712_54x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS6712_32x_FAN) += accton_as6712_32x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS6712_32x_PSU) += accton_as6712_32x_psu.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as6712_32x_fan.c b/drivers/hwmon/accton_as6712_32x_fan.c -new file mode 100644 -index 0000000..9c7cac7 ---- /dev/null -+++ b/drivers/hwmon/accton_as6712_32x_fan.c -@@ -0,0 +1,434 @@ -+/* -+ * A hwmon driver for the Accton as6712 32x fan contrl -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FAN_MAX_NUMBER 5 -+#define FAN_SPEED_CPLD_TO_RPM_STEP 150 -+#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 -+#define FAN_DUTY_CYCLE_MIN 0 /* 10% ??*/ -+#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ -+ -+#define CPLD_REG_FAN_STATUS_OFFSET 0xC -+#define CPLD_REG_FANR_STATUS_OFFSET 0x17 -+#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E -+ -+#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 -+#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 -+#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 -+#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 -+#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 -+ -+#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 -+#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 -+#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A -+#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B -+#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C -+ -+#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD -+ -+#define CPLD_FAN1_INFO_BIT_MASK 0x1 -+#define CPLD_FAN2_INFO_BIT_MASK 0x2 -+#define CPLD_FAN3_INFO_BIT_MASK 0x4 -+#define CPLD_FAN4_INFO_BIT_MASK 0x8 -+#define CPLD_FAN5_INFO_BIT_MASK 0x10 -+ -+#define PROJECT_NAME -+ -+#define DEBUG_MODE 0 -+ -+#if (DEBUG_MODE == 1) -+ #define DEBUG_PRINT(format, ...) printk(format, __VA_ARGS__) -+#else -+ #define DEBUG_PRINT(format, ...) -+#endif -+ -+static struct accton_as6712_32x_fan *fan_data = NULL; -+ -+struct accton_as6712_32x_fan { -+ struct platform_device *pdev; -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ -+ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ -+ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ -+ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ -+ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ -+ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ -+}; -+ -+/*******************/ -+#define MAKE_FAN_MASK_OR_REG(name,type) \ -+ CPLD_FAN##type##1_##name, \ -+ CPLD_FAN##type##2_##name, \ -+ CPLD_FAN##type##3_##name, \ -+ CPLD_FAN##type##4_##name, \ -+ CPLD_FAN##type##5_##name, -+ -+/* fan related data -+ */ -+static const u8 fan_info_mask[] = { -+ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) -+}; -+ -+static const u8 fan_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) -+}; -+ -+static const u8 fanr_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) -+}; -+ -+/*******************/ -+#define DEF_FAN_SET(id) \ -+ FAN##id##_FAULT, \ -+ FAN##id##_SPEED, \ -+ FAN##id##_DUTY_CYCLE, \ -+ FAN##id##_DIRECTION, \ -+ FANR##id##_FAULT, \ -+ FANR##id##_SPEED, -+ -+enum sysfs_fan_attributes { -+ DEF_FAN_SET(1) -+ DEF_FAN_SET(2) -+ DEF_FAN_SET(3) -+ DEF_FAN_SET(4) -+ DEF_FAN_SET(5) -+}; -+/*******************/ -+static void accton_as6712_32x_fan_update_device(struct device *dev); -+static int accton_as6712_32x_fan_read_value(u8 reg); -+static int accton_as6712_32x_fan_write_value(u8 reg, u8 value); -+ -+static ssize_t fan_set_duty_cycle(struct device *dev, -+ struct device_attribute *da,const char *buf, size_t count); -+static ssize_t fan_show_value(struct device *dev, -+ struct device_attribute *da, char *buf); -+ -+extern int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+ -+/*******************/ -+#define 
_MAKE_SENSOR_DEVICE_ATTR(prj, id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ -+ fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); -+ -+#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) -+ -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) -+/*******************/ -+ -+#define _MAKE_FAN_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ -+ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, -+ -+#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) -+ -+static struct attribute *accton_as6712_32x_fan_attributes[] = { -+ /* fan related attributes */ -+ MAKE_FAN_ATTR(PROJECT_NAME,1) -+ MAKE_FAN_ATTR(PROJECT_NAME,2) -+ MAKE_FAN_ATTR(PROJECT_NAME,3) -+ MAKE_FAN_ATTR(PROJECT_NAME,4) -+ MAKE_FAN_ATTR(PROJECT_NAME,5) -+ NULL -+}; -+/*******************/ -+ -+/* fan related functions -+ */ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ ssize_t ret = 0; -+ int data_index, type_index; -+ -+ accton_as6712_32x_fan_update_device(dev); -+ -+ if (fan_data->valid == 0) { -+ return ret; -+ } -+ -+ type_index = attr->index%FAN2_FAULT; -+ data_index = attr->index/FAN2_FAULT; -+ -+ switch (type_index) { -+ case FAN1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DUTY_CYCLE: -+ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DIRECTION: -+ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", 
__FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ default: -+ DEBUG_PRINT("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); -+ break; -+ } -+ -+ return ret; -+} -+/*******************/ -+static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) { -+ -+ int error, value; -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) -+ return -EINVAL; -+ -+ accton_as6712_32x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); -+ -+ fan_data->valid = 0; -+ -+ return count; -+} -+ -+static const struct attribute_group accton_as6712_32x_fan_group = { -+ .attrs = accton_as6712_32x_fan_attributes, -+}; -+ -+static int accton_as6712_32x_fan_read_value(u8 reg) -+{ -+ return as6712_32x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as6712_32x_fan_write_value(u8 reg, u8 value) -+{ -+ return as6712_32x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as6712_32x_fan_update_device(struct device *dev) -+{ -+ int speed, r_speed, fault, r_fault, direction, ctrl_speed; -+ int i; -+ -+ mutex_lock(&fan_data->update_lock); -+ -+ DEBUG_PRINT("Starting accton_as6712_32x_fan update \n"); -+ -+ if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { -+ /* do nothing */ -+ goto _exit; -+ } -+ -+ fan_data->valid = 0; -+ -+ DEBUG_PRINT("Starting accton_as6712_32x_fan update 2 \n"); -+ -+ fault = accton_as6712_32x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); -+ r_fault = accton_as6712_32x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); -+ direction = accton_as6712_32x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); -+ ctrl_speed = accton_as6712_32x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); -+ -+ if ( (fault < 0) || (r_fault < 0) || (ctrl_speed < 0) ) -+ { -+ DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ DEBUG_PRINT("[fan:] fault:%d, r_fault=%d, ctrl_speed=%d \n",fault, r_fault, ctrl_speed); -+ -+ for (i = 0; i < FAN_MAX_NUMBER; i++) -+ { -+ /* Update fan data -+ */ -+ -+ /* fan fault -+ * 0: normal, 1:abnormal -+ * Each FAN-tray module has two fans. 
-+ */ -+ fan_data->status[i] = (fault & fan_info_mask[i]) >> i; -+ DEBUG_PRINT("[fan%d:] fail=%d \n",i, fan_data->status[i]); -+ -+ fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; -+ fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; -+ fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; -+ -+ /* fan speed -+ */ -+ speed = accton_as6712_32x_fan_read_value(fan_speed_reg[i]); -+ r_speed = accton_as6712_32x_fan_read_value(fanr_speed_reg[i]); -+ if ( (speed < 0) || (r_speed < 0) ) -+ { -+ DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ DEBUG_PRINT("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); -+ -+ fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ } -+ -+ /* finish to update */ -+ fan_data->last_updated = jiffies; -+ fan_data->valid = 1; -+ -+_exit: -+ mutex_unlock(&fan_data->update_lock); -+} -+ -+static int accton_as6712_32x_fan_probe(struct platform_device *pdev) -+{ -+ int status = -1; -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&pdev->dev.kobj, &accton_as6712_32x_fan_group); -+ if (status) { -+ goto exit; -+ -+ } -+ -+ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); -+ if (IS_ERR(fan_data->hwmon_dev)) { -+ status = PTR_ERR(fan_data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&pdev->dev, "accton_as6712_32x_fan\n"); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&pdev->dev.kobj, &accton_as6712_32x_fan_group); -+exit: -+ return status; -+} -+ -+static int accton_as6712_32x_fan_remove(struct platform_device *pdev) -+{ -+ hwmon_device_unregister(fan_data->hwmon_dev); -+ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as6712_32x_fan_group); -+ -+ return 0; -+} -+ -+#define DRVNAME "as6712_32x_fan" -+ -+static struct platform_driver accton_as6712_32x_fan_driver = { -+ .probe = accton_as6712_32x_fan_probe, -+ .remove = accton_as6712_32x_fan_remove, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as6712_32x_fan_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as6712_32x(void); -+ if(!platform_accton_as6712_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as6712_32x_fan_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ fan_data = kzalloc(sizeof(struct accton_as6712_32x_fan), GFP_KERNEL); -+ if (!fan_data) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as6712_32x_fan_driver); -+ goto exit; -+ } -+ -+ mutex_init(&fan_data->update_lock); -+ fan_data->valid = 0; -+ -+ fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(fan_data->pdev)) { -+ ret = PTR_ERR(fan_data->pdev); -+ platform_driver_unregister(&accton_as6712_32x_fan_driver); -+ kfree(fan_data); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as6712_32x_fan_exit(void) -+{ -+ platform_device_unregister(fan_data->pdev); -+ platform_driver_unregister(&accton_as6712_32x_fan_driver); -+ kfree(fan_data); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as6712_32x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_as6712_32x_fan_init); -+module_exit(accton_as6712_32x_fan_exit); -+ -diff --git a/drivers/hwmon/accton_as6712_32x_psu.c b/drivers/hwmon/accton_as6712_32x_psu.c -new file mode 100644 -index 0000000..ef9fadf ---- /dev/null -+++ b/drivers/hwmon/accton_as6712_32x_psu.c -@@ -0,0 +1,304 @@ -+/* -+ * An hwmon driver for 
accton as6712_32x Power Module -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as6712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as6712_32x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[14]; /* Model name, read from eeprom */ -+}; -+ -+static struct as6712_32x_psu_data *as6712_32x_psu_update_device(struct device *dev); -+ -+enum as6712_32x_psu_sysfs_attributes { -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as6712_32x_psu_attributes[] = { -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as6712_32x_psu_data *data = as6712_32x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((data->index-1)*4) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = data->status >> ((data->index-1)*4 + 1) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as6712_32x_psu_data *data = as6712_32x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s\n", data->model_name); -+} -+ -+static const struct attribute_group as6712_32x_psu_group = { -+ .attrs = as6712_32x_psu_attributes, -+}; -+ -+static int as6712_32x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id 
*dev_id) -+{ -+ struct as6712_32x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as6712_32x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as6712_32x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x50 || client->addr == 0x38) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x53 || client->addr == 0x3b) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as6712_32x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as6712_32x_psu_remove(struct i2c_client *client) -+{ -+ struct as6712_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as6712_32x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as6712_32x_psu_id[] = { -+ { "as6712_32x_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as6712_32x_psu_id); -+ -+static struct i2c_driver as6712_32x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as6712_32x_psu", -+ }, -+ .probe = as6712_32x_psu_probe, -+ .remove = as6712_32x_psu_remove, -+ .id_table = as6712_32x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as6712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = 0; -+ int retry_count = 5; -+ -+ while (retry_count) { -+ retry_count--; -+ -+ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) { -+ msleep(10); -+ continue; -+ } -+ -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ msleep(10); -+ continue; -+ } -+ -+ result = 0; -+ break; -+ } -+ -+ return result; -+} -+ -+static struct as6712_32x_psu_data *as6712_32x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as6712_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status; -+ int present = 0; -+ -+ dev_dbg(&client->dev, "Starting as6712_32x update\n"); -+ -+ /* Read psu status */ -+ status = as6712_32x_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ /* Read model name */ -+ memset(data->model_name, 0, sizeof(data->model_name)); -+ present = !(data->status >> ((data->index-1)*4) & 0x1); -+ -+ if (present) { -+ u8 command; -+ int model_name_len = 0; -+ -+ if (client->addr == 0x38 || client->addr == 0x3b) { -+ /* cpr_4011_4mxx AC power */ -+ command = 0x26; -+ model_name_len = 13; -+ } -+ else { /* 0x50 & 0x53 */ -+ /* um400d01x DC power */ -+ command = 0x50; -+ model_name_len = 13; -+ } -+ -+ status = 
as6712_32x_psu_read_block(client,command,data->model_name, -+ model_name_len); -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[model_name_len] = '\0'; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as6712_32x_psu_init(void) -+{ -+ extern int platform_accton_as6712_32x(void); -+ if(!platform_accton_as6712_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as6712_32x_psu_driver); -+} -+ -+static void __exit as6712_32x_psu_exit(void) -+{ -+ i2c_del_driver(&as6712_32x_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as6712_32x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as6712_32x_psu_init); -+module_exit(as6712_32x_psu_exit); -diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig -index 339e2b2..4bed9f6 100644 ---- a/drivers/i2c/muxes/Kconfig -+++ b/drivers/i2c/muxes/Kconfig -@@ -14,7 +14,16 @@ config I2C_MUX_ACCTON_AS5712_54x_CPLD - - This driver can also be built as a module. If so, the module - will be called i2c-mux-accton_as5712_54x_cpld. -+ -+config I2C_MUX_ACCTON_AS6712_32x_CPLD -+ tristate "Accton as6712_32x CPLD I2C multiplexer" -+ help -+ If you say yes here you get support for the Accton CPLD -+ I2C mux devices. - -+ This driver can also be built as a module. If so, the module -+ will be called i2c-mux-accton_as6712_32x_cpld. -+ - config I2C_MUX_GPIO - tristate "GPIO-based I2C multiplexer" - depends on GENERIC_GPIO -diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile -index 997522c..2bcfa0c 100644 ---- a/drivers/i2c/muxes/Makefile -+++ b/drivers/i2c/muxes/Makefile -@@ -7,5 +7,6 @@ obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o - obj-$(CONFIG_I2C_MUX_DNI_6448) += dni_6448_i2c_mux.o - obj-$(CONFIG_I2C_MUX_QUANTA) += quanta-i2cmux.o - obj-$(CONFIG_I2C_MUX_ACCTON_AS5712_54x_CPLD) += i2c-mux-accton_as5712_54x_cpld.o -+obj-$(CONFIG_I2C_MUX_ACCTON_AS6712_32x_CPLD) += i2c-mux-accton_as6712_32x_cpld.o - - ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG -diff --git a/drivers/i2c/muxes/i2c-mux-accton_as6712_32x_cpld.c b/drivers/i2c/muxes/i2c-mux-accton_as6712_32x_cpld.c -new file mode 100644 -index 0000000..2ec0a59 ---- /dev/null -+++ b/drivers/i2c/muxes/i2c-mux-accton_as6712_32x_cpld.c -@@ -0,0 +1,427 @@ -+/* -+ * I2C multiplexer -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * -+ * This module supports the accton cpld that hold the channel select -+ * mechanism for other i2c slave devices, such as SFP. -+ * This includes the: -+ * Accton as6712_32x CPLD1/CPLD2/CPLD3 -+ * -+ * Based on: -+ * pca954x.c from Kumar Gala -+ * Copyright (C) 2006 -+ * -+ * Based on: -+ * pca954x.c from Ken Harrenstien -+ * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) -+ * -+ * Based on: -+ * i2c-virtual_cb.c from Brian Kuschak -+ * and -+ * pca9540.c from Jean Delvare . -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static struct dmi_system_id as6712_dmi_table[] = { -+ { -+ .ident = "Accton AS6712", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), -+ }, -+ }, -+ { -+ .ident = "Accton AS6712", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), -+ }, -+ }, -+}; -+ -+int platform_accton_as6712_32x(void) -+{ -+ return dmi_check_system(as6712_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as6712_32x); -+ -+#define NUM_OF_CPLD1_CHANS 0x0 -+#define NUM_OF_CPLD2_CHANS 0x10 -+#define NUM_OF_CPLD3_CHANS 0x10 -+#define NUM_OF_ALL_CPLD_CHANS (NUM_OF_CPLD2_CHANS + NUM_OF_CPLD3_CHANS) -+#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS -+ -+static LIST_HEAD(cpld_client_list); -+static struct mutex list_lock; -+ -+struct cpld_client_node { -+ struct i2c_client *client; -+ struct list_head list; -+}; -+ -+enum cpld_mux_type { -+ as6712_32x_cpld2, -+ as6712_32x_cpld3, -+ as6712_32x_cpld1 -+}; -+ -+struct accton_i2c_cpld_mux { -+ enum cpld_mux_type type; -+ struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; -+ u8 last_chan; /* last register value */ -+}; -+ -+struct chip_desc { -+ u8 nchans; -+ u8 deselectChan; -+}; -+ -+/* Provide specs for the PCA954x types we know about */ -+static const struct chip_desc chips[] = { -+ [as6712_32x_cpld1] = { -+ .nchans = NUM_OF_CPLD1_CHANS, -+ .deselectChan = NUM_OF_CPLD1_CHANS, -+ }, -+ [as6712_32x_cpld2] = { -+ .nchans = NUM_OF_CPLD2_CHANS, -+ .deselectChan = NUM_OF_CPLD2_CHANS, -+ }, -+ [as6712_32x_cpld3] = { -+ .nchans = NUM_OF_CPLD3_CHANS, -+ .deselectChan = NUM_OF_CPLD3_CHANS, -+ } -+}; -+ -+static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { -+ { "as6712_32x_cpld1", as6712_32x_cpld1 }, -+ { "as6712_32x_cpld2", as6712_32x_cpld2 }, -+ { "as6712_32x_cpld3", as6712_32x_cpld3 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); -+ -+/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() -+ for this as they will try to lock adapter a second time */ -+static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, -+ struct i2c_client *client, u8 val) -+{ -+#if 0 -+ int ret = -ENODEV; -+ -+ //if (adap->algo->master_xfer) { -+ if (0) -+ struct i2c_msg msg; -+ char buf[2]; -+ -+ msg.addr = client->addr; -+ msg.flags = 0; -+ msg.len = 2; -+ buf[0] = 0x2; -+ buf[1] = val; -+ msg.buf = buf; -+ ret = adap->algo->master_xfer(adap, &msg, 1); -+ } -+ else { -+ union i2c_smbus_data data; -+ ret = adap->algo->smbus_xfer(adap, client->addr, -+ client->flags, -+ I2C_SMBUS_WRITE, -+ 0x2, I2C_SMBUS_BYTE, &data); -+ } -+ -+ return ret; -+#else -+ unsigned long orig_jiffies; -+ unsigned short flags; -+ union i2c_smbus_data data; -+ int try; -+ s32 res = -EIO; -+ -+ data.byte = val; -+ flags = client->flags; -+ flags &= I2C_M_TEN | I2C_CLIENT_PEC; -+ -+ if (adap->algo->smbus_xfer) { -+ /* Retry automatically on arbitration loss */ -+ orig_jiffies = jiffies; -+ for (res = 0, try = 0; try <= adap->retries; try++) { -+ res = adap->algo->smbus_xfer(adap, client->addr, flags, -+ I2C_SMBUS_WRITE, 0x2, -+ I2C_SMBUS_BYTE_DATA, &data); -+ if (res != -EAGAIN) -+ break; -+ if (time_after(jiffies, -+ orig_jiffies + adap->timeout)) -+ break; -+ } -+ } -+ -+ return res; -+#endif -+} -+ -+static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ u8 regval; -+ int ret = 0; -+ regval = chan; -+ -+ /* Only select the channel if its different from the last channel */ -+ if (data->last_chan != regval) { -+ ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); -+ data->last_chan = regval; -+ } -+ -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ -+ /* Deselect active channel */ -+ data->last_chan = chips[data->type].deselectChan; -+ -+ return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); -+} -+ -+static void accton_i2c_cpld_add_client(struct i2c_client *client) -+{ -+ struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); -+ -+ if (!node) { -+ dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); -+ return; -+ } -+ -+ node->client = client; -+ -+ mutex_lock(&list_lock); -+ list_add(&node->list, &cpld_client_list); -+ mutex_unlock(&list_lock); -+} -+ -+static void accton_i2c_cpld_remove_client(struct i2c_client *client) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int found = 0; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client == client) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) { -+ list_del(list_node); -+ kfree(cpld_node); -+ } -+ -+ mutex_unlock(&list_lock); -+} -+ -+static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ u8 reg = 0x1; -+ struct i2c_client *client; -+ int len; -+ -+ client = to_i2c_client(dev); -+ len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); -+ -+ return len; -+} -+ -+static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); -+ -+/* -+ * I2C init/probing/exit functions -+ */ -+static int accton_i2c_cpld_mux_probe(struct i2c_client *client, -+ 
const struct i2c_device_id *id) -+{ -+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); -+ int chan=0; -+ struct accton_i2c_cpld_mux *data; -+ int ret = -ENODEV; -+ -+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) -+ goto err; -+ -+ data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); -+ if (!data) { -+ ret = -ENOMEM; -+ goto err; -+ } -+ -+ i2c_set_clientdata(client, data); -+ -+#if 0 -+ /* Write the mux register at addr to verify -+ * that the mux is in fact present. -+ */ -+ if (i2c_smbus_write_byte(client, 0) < 0) { -+ dev_warn(&client->dev, "probe failed\n"); -+ goto exit_free; -+ } -+#endif -+ -+ data->type = id->driver_data; -+ -+ if (data->type == as6712_32x_cpld2 || data->type == as6712_32x_cpld3) { -+ data->last_chan = chips[data->type].deselectChan; /* force the first selection */ -+ -+ /* Now create an adapter for each channel */ -+ for (chan = 0; chan < chips[data->type].nchans; chan++) { -+ data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, -+ accton_i2c_cpld_mux_select_chan, -+ accton_i2c_cpld_mux_deselect_mux); -+ -+ if (data->virt_adaps[chan] == NULL) { -+ ret = -ENODEV; -+ dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); -+ goto virt_reg_failed; -+ } -+ } -+ -+ dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", -+ chan, client->name); -+ } -+ -+ accton_i2c_cpld_add_client(client); -+ -+ ret = sysfs_create_file(&client->dev.kobj, &ver.attr); -+ if (ret) -+ goto virt_reg_failed; -+ -+ return 0; -+ -+virt_reg_failed: -+ for (chan--; chan >= 0; chan--) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ } -+ kfree(data); -+err: -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_remove(struct i2c_client *client) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ const struct chip_desc *chip = &chips[data->type]; -+ int chan; -+ -+ sysfs_remove_file(&client->dev.kobj, &ver.attr); -+ -+ for (chan = 0; chan < chip->nchans; ++chan) { -+ if (data->virt_adaps[chan]) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ data->virt_adaps[chan] = NULL; -+ } -+ } -+ -+ kfree(data); -+ accton_i2c_cpld_remove_client(client); -+ -+ return 0; -+} -+ -+int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EPERM; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_read_byte_data(cpld_node->client, reg); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as6712_32x_i2c_cpld_read); -+ -+int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EIO; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as6712_32x_i2c_cpld_write); -+ -+static struct i2c_driver accton_i2c_cpld_mux_driver = { -+ .driver = { -+ .name = "as6712_32x_cpld", -+ .owner = THIS_MODULE, -+ }, -+ .probe = accton_i2c_cpld_mux_probe, -+ .remove = 
accton_i2c_cpld_mux_remove, -+ .id_table = accton_i2c_cpld_mux_id, -+}; -+ -+static int __init accton_i2c_cpld_mux_init(void) -+{ -+ mutex_init(&list_lock); -+ return i2c_add_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+static void __exit accton_i2c_cpld_mux_exit(void) -+{ -+ i2c_del_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_i2c_cpld_mux_init); -+module_exit(accton_i2c_cpld_mux_exit); -+ -+ -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 361ef45..a5ccde1 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -47,6 +47,13 @@ config LEDS_ACCTON_AS5712_54x - This option enables support for the LEDs on the Accton as5712 54x. - Say Y to enable LEDs on the Accton as5712 54x. - -+config LEDS_ACCTON_AS6712_32x -+ tristate "LED support for the Accton as6712 32x" -+ depends on LEDS_CLASS && I2C_MUX_ACCTON_AS6712_32x_CPLD -+ help -+ This option enables support for the LEDs on the Accton as6712 32x. -+ Say Y to enable LEDs on the Accton as6712 32x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index db2d096..d952f0f 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -44,6 +44,7 @@ obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o - obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o - obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o - obj-$(CONFIG_LEDS_ACCTON_AS5712_54x) += leds-accton_as5712_54x.o -+obj-$(CONFIG_LEDS_ACCTON_AS6712_32x) += leds-accton_as6712_32x.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as6712_32x.c b/drivers/leds/leds-accton_as6712_32x.c -new file mode 100644 -index 0000000..c1da3bc ---- /dev/null -+++ b/drivers/leds/leds-accton_as6712_32x.c -@@ -0,0 +1,617 @@ -+/* -+ * A LED driver for the accton_as6712_32x_led -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int as6712_32x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "as6712_32x_led" -+ -+struct accton_as6712_32x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED -+ 1 = PSU1/PSU2 LED -+ 2 = FAN1-4 LED -+ 3 = FAN5-6 LED */ -+}; -+ -+static struct accton_as6712_32x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+#define LED_TYPE_PSU1_REG_MASK 0x03 -+#define LED_MODE_PSU1_GREEN_MASK 0x02 -+#define LED_MODE_PSU1_AMBER_MASK 0x01 -+#define LED_MODE_PSU1_OFF_MASK 0x03 -+#define LED_MODE_PSU1_AUTO_MASK 0x00 -+ -+#define LED_TYPE_PSU2_REG_MASK 0x0C -+#define LED_MODE_PSU2_GREEN_MASK 0x08 -+#define LED_MODE_PSU2_AMBER_MASK 0x04 -+#define LED_MODE_PSU2_OFF_MASK 0x0C -+#define LED_MODE_PSU2_AUTO_MASK 0x00 -+ -+#define LED_TYPE_DIAG_REG_MASK 0x0C -+#define LED_MODE_DIAG_GREEN_MASK 0x08 -+#define LED_MODE_DIAG_AMBER_MASK 0x04 -+#define LED_MODE_DIAG_OFF_MASK 0x0C -+#define LED_MODE_DIAG_BLINK_MASK 0x48 -+ -+#define LED_TYPE_FAN_REG_MASK 0x03 -+#define LED_MODE_FAN_GREEN_MASK 0x02 -+#define LED_MODE_FAN_AMBER_MASK 0x01 -+#define LED_MODE_FAN_OFF_MASK 0x03 -+#define LED_MODE_FAN_AUTO_MASK 0x00 -+ -+#define LED_TYPE_FAN1_REG_MASK 0x03 -+#define LED_TYPE_FAN2_REG_MASK 0xC0 -+#define LED_TYPE_FAN3_REG_MASK 0x30 -+#define LED_TYPE_FAN4_REG_MASK 0x0C -+#define LED_TYPE_FAN5_REG_MASK 0x03 -+ -+#define LED_MODE_FANX_GREEN_MASK 0x01 -+#define LED_MODE_FANX_RED_MASK 0x02 -+#define LED_MODE_FANX_OFF_MASK 0x00 -+ -+#define LED_TYPE_LOC_REG_MASK 0x30 -+#define LED_MODE_LOC_ON_MASK 0x00 -+#define LED_MODE_LOC_OFF_MASK 0x10 -+#define LED_MODE_LOC_BLINK_MASK 0x20 -+ -+static const u8 led_reg[] = { -+ 0xA, /* LOC/DIAG/FAN LED*/ -+ 0xB, /* PSU1/PSU2 LED */ -+ 0xE, /* FAN2-5 LED */ -+ 0xF, /* FAN1 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2, -+ LED_TYPE_DIAG, -+ LED_TYPE_FAN, -+ LED_TYPE_FAN1, -+ LED_TYPE_FAN2, -+ LED_TYPE_FAN3, -+ LED_TYPE_FAN4, -+ LED_TYPE_FAN5, -+ LED_TYPE_LOC -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_AMBER, -+ LED_MODE_RED, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED_BLINK, -+ LED_MODE_AUTO, -+}; -+ -+struct led_type_mode { -+ enum led_type type; -+ int type_mask; -+ enum led_light_mode mode; -+ int mode_mask; -+}; -+ -+struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, 
LED_MODE_PSU2_AUTO_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN_BLINK, LED_MODE_DIAG_BLINK_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} -+}; -+ -+ -+struct fanx_info_s { -+ u8 cname; /* device name */ -+ enum led_type type; -+ u8 reg_id; /* map to led_reg & reg_val */ -+}; -+ -+static struct fanx_info_s fanx_info[] = { -+ {'1', LED_TYPE_FAN1, 3}, -+ {'2', LED_TYPE_FAN2, 2}, -+ {'3', LED_TYPE_FAN3, 2}, -+ {'4', LED_TYPE_FAN4, 2}, -+ {'5', LED_TYPE_FAN5, 2}, -+}; -+ -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (type == LED_TYPE_DIAG) -+ { /* special case : bit 6 - meaning blinking */ -+ if (0x40 & reg_val) -+ return LED_MODE_GREEN_BLINK; -+ } -+ if ((led_type_mode_data[i].type_mask & reg_val) == -+ led_type_mode_data[i].mode_mask) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return 0; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ if (type == LED_TYPE_DIAG) -+ { -+ if (mode == LED_MODE_GREEN_BLINK) -+ { /* special case 
: bit 6 - meaning blinking */ -+ reg_val = 0x48 | (reg_val & ~0x4C); -+ break; -+ } -+ else -+ { /* for diag led, other case must cancel bit 6 first */ -+ reg_val = reg_val & ~0x40; -+ } -+ } -+ reg_val = led_type_mode_data[i].mode_mask | -+ (reg_val & (~led_type_mode_data[i].type_mask)); -+ break; -+ } -+ -+ return reg_val; -+} -+ -+static int accton_as6712_32x_led_read_value(u8 reg) -+{ -+ return as6712_32x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as6712_32x_led_write_value(u8 reg, u8 value) -+{ -+ return as6712_32x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as6712_32x_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as6712_32x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as6712_32x_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as6712_32x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as6712_32x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as6712_32x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as6712_32x_led_psu_1_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); -+} -+ -+static enum led_brightness accton_as6712_32x_led_psu_1_get(struct led_classdev *cdev) -+{ -+ accton_as6712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); -+} -+ -+static void accton_as6712_32x_led_psu_2_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); -+} -+ -+static enum led_brightness accton_as6712_32x_led_psu_2_get(struct led_classdev *cdev) -+{ -+ accton_as6712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); -+} -+ -+static void accton_as6712_32x_led_fan_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); -+} -+ -+static enum led_brightness accton_as6712_32x_led_fan_get(struct led_classdev *cdev) -+{ -+ accton_as6712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); -+} -+ -+ -+static void accton_as6712_32x_led_fanx_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;iname); -+ -+ if (led_cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = 
fanx_info[i].reg_id; -+ accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); -+ return; -+ } -+ } -+} -+ -+ -+static enum led_brightness accton_as6712_32x_led_fanx_get(struct led_classdev *cdev) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;iname); -+ -+ if (cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = fanx_info[i].reg_id; -+ accton_as6712_32x_led_update(); -+ return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); -+ } -+ } -+ -+ -+ return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); -+} -+ -+ -+static void accton_as6712_32x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as6712_32x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as6712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as6712_32x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6712_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as6712_32x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as6712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static struct led_classdev accton_as6712_32x_leds[] = { -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as6712_32x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_psu_1_set, -+ .brightness_get = accton_as6712_32x_led_psu_1_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as6712_32x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_psu_2_set, -+ .brightness_get = accton_as6712_32x_led_psu_2_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as6712_32x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_fan_set, -+ .brightness_get = accton_as6712_32x_led_fan_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN1] = { -+ .name = "accton_as6712_32x_led::fan1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_fanx_set, -+ .brightness_get = accton_as6712_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN2] = { -+ .name = "accton_as6712_32x_led::fan2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_fanx_set, -+ .brightness_get = accton_as6712_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN3] = { -+ .name = "accton_as6712_32x_led::fan3", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_fanx_set, -+ .brightness_get = accton_as6712_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN4] = { -+ .name = "accton_as6712_32x_led::fan4", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_fanx_set, -+ .brightness_get = accton_as6712_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ 
[LED_TYPE_FAN5] = { -+ .name = "accton_as6712_32x_led::fan5", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_fanx_set, -+ .brightness_get = accton_as6712_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as6712_32x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_diag_set, -+ .brightness_get = accton_as6712_32x_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as6712_32x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6712_32x_led_loc_set, -+ .brightness_get = accton_as6712_32x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as6712_32x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { -+ led_classdev_suspend(&accton_as6712_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as6712_32x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { -+ led_classdev_resume(&accton_as6712_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as6712_32x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as6712_32x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as6712_32x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as6712_32x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as6712_32x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6712_32x_leds); i++) { -+ led_classdev_unregister(&accton_as6712_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as6712_32x_led_driver = { -+ .probe = accton_as6712_32x_led_probe, -+ .remove = accton_as6712_32x_led_remove, -+ .suspend = accton_as6712_32x_led_suspend, -+ .resume = accton_as6712_32x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as6712_32x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as6712_32x(void); -+ if(!platform_accton_as6712_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as6712_32x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as6712_32x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as6712_32x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as6712_32x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as6712_32x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as6712_32x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as6712_32x_led_init); -+module_exit(accton_as6712_32x_led_exit); -+ -+MODULE_AUTHOR("Brandon 
Chuang "); -+MODULE_DESCRIPTION("accton_as6712_32x_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 4e5d6dc..6e025e9 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -81,7 +81,15 @@ config EEPROM_ACCTON_AS5712_54x_SFP - - This driver can also be built as a module. If so, the module will - be called accton_as5712_54x_sfp. -- -+ -+config EEPROM_ACCTON_AS6712_32x_SFP -+ tristate "Accton as6712 32x sfp" -+ depends on I2C && I2C_MUX_ACCTON_AS6712_32x_CPLD -+ help -+ If you say yes here you get support for Accton as6712 32x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as6712_32x_sfp. - - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 807158a..9001de9 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -7,4 +7,5 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o - obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o - obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o - obj-$(CONFIG_EEPROM_ACCTON_AS5712_54x_SFP) += accton_as5712_54x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS6712_32x_SFP) += accton_as6712_32x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as6712_32x_sfp.c b/drivers/misc/eeprom/accton_as6712_32x_sfp.c -new file mode 100644 -index 0000000..8253134 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as6712_32x_sfp.c -@@ -0,0 +1,377 @@ -+/* -+ * An hwmon driver for accton as6712_32x sfp -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BIT_INDEX(i) (1ULL << (i)) -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as6712_32x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u64 is_present; /* present status */ -+}; -+ -+static struct as6712_32x_sfp_data *as6712_32x_sfp_update_device(struct device *dev, int update_eeprom); -+static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf); -+static int as6712_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data); -+extern int as6712_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as6712_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+//extern int accton_i2c_cpld_mux_get_index(int adap_index); -+ -+enum as6712_32x_sfp_sysfs_attributes { -+ SFP_IS_PRESENT, -+ SFP_EEPROM, -+ SFP_PORT_NUMBER, -+ SFP_IS_PRESENT_ALL -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+ -+static struct attribute *as6712_32x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as6712_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->port+1); -+} -+ -+/* Error-check the CPLD read results. */ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+} while(0) -+ -+static ssize_t show_present(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ int values[4]; -+ /* -+ * Report the SFP_PRESENCE status for all ports. 
-+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], as6712_32x_i2c_cpld_read(0x62, 0xA), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], as6712_32x_i2c_cpld_read(0x62, 0xB), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], as6712_32x_i2c_cpld_read(0x64, 0xA), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], as6712_32x_i2c_cpld_read(0x64, 0xB), 1); -+ -+ /* Return values 1 -> 32 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], values[3]); -+ } -+ else { /* SFP_IS_PRESENT */ -+ u8 val; -+ struct as6712_32x_sfp_data *data = as6712_32x_sfp_update_device(dev, 0); -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ val = (data->is_present & BIT_INDEX(data->port)) ? 0 : 1; -+ return sprintf(buf, "%d", val); -+ } -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as6712_32x_sfp_data *data = as6712_32x_sfp_update_device(dev, 1); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if ((data->is_present & BIT_INDEX(data->port)) != 0) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as6712_32x_sfp_group = { -+ .attrs = as6712_32x_sfp_attributes, -+}; -+ -+static int as6712_32x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as6712_32x_sfp_data *data; -+ int status; -+ -+ extern int platform_accton_as6712_32x(void); -+ if(!platform_accton_as6712_32x()) { -+ return -ENODEV; -+ } -+ -+ if (!i2c_check_functionality(client->adapter, /*I2C_FUNC_SMBUS_BYTE_DATA | */I2C_FUNC_SMBUS_WORD_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as6712_32x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as6712_32x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as6712_32x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as6712_32x_sfp_remove(struct i2c_client *client) -+{ -+ struct as6712_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as6712_32x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as6712_32x_sfp1, as6712_32x_sfp2, as6712_32x_sfp3, as6712_32x_sfp4, -+as6712_32x_sfp5, as6712_32x_sfp6, as6712_32x_sfp7, as6712_32x_sfp8, -+as6712_32x_sfp9, as6712_32x_sfp10, as6712_32x_sfp11,as6712_32x_sfp12, -+as6712_32x_sfp13, as6712_32x_sfp14, as6712_32x_sfp15,as6712_32x_sfp16, -+as6712_32x_sfp17, as6712_32x_sfp18, as6712_32x_sfp19,as6712_32x_sfp20, -+as6712_32x_sfp21, as6712_32x_sfp22, as6712_32x_sfp23,as6712_32x_sfp24, -+as6712_32x_sfp25, as6712_32x_sfp26, as6712_32x_sfp27,as6712_32x_sfp28, -+as6712_32x_sfp29, as6712_32x_sfp30, as6712_32x_sfp31,as6712_32x_sfp32 -+}; -+ -+static const struct i2c_device_id 
as6712_32x_sfp_id[] = { -+{ "as6712_32x_sfp1", as6712_32x_sfp1 }, { "as6712_32x_sfp2", as6712_32x_sfp2 }, -+{ "as6712_32x_sfp3", as6712_32x_sfp3 }, { "as6712_32x_sfp4", as6712_32x_sfp4 }, -+{ "as6712_32x_sfp5", as6712_32x_sfp5 }, { "as6712_32x_sfp6", as6712_32x_sfp6 }, -+{ "as6712_32x_sfp7", as6712_32x_sfp7 }, { "as6712_32x_sfp8", as6712_32x_sfp8 }, -+{ "as6712_32x_sfp9", as6712_32x_sfp9 }, { "as6712_32x_sfp10", as6712_32x_sfp10 }, -+{ "as6712_32x_sfp11", as6712_32x_sfp11 }, { "as6712_32x_sfp12", as6712_32x_sfp12 }, -+{ "as6712_32x_sfp13", as6712_32x_sfp13 }, { "as6712_32x_sfp14", as6712_32x_sfp14 }, -+{ "as6712_32x_sfp15", as6712_32x_sfp15 }, { "as6712_32x_sfp16", as6712_32x_sfp16 }, -+{ "as6712_32x_sfp17", as6712_32x_sfp17 }, { "as6712_32x_sfp18", as6712_32x_sfp18 }, -+{ "as6712_32x_sfp19", as6712_32x_sfp19 }, { "as6712_32x_sfp20", as6712_32x_sfp20 }, -+{ "as6712_32x_sfp21", as6712_32x_sfp21 }, { "as6712_32x_sfp22", as6712_32x_sfp22 }, -+{ "as6712_32x_sfp23", as6712_32x_sfp23 }, { "as6712_32x_sfp24", as6712_32x_sfp24 }, -+{ "as6712_32x_sfp25", as6712_32x_sfp25 }, { "as6712_32x_sfp26", as6712_32x_sfp26 }, -+{ "as6712_32x_sfp27", as6712_32x_sfp27 }, { "as6712_32x_sfp28", as6712_32x_sfp28 }, -+{ "as6712_32x_sfp29", as6712_32x_sfp29 }, { "as6712_32x_sfp30", as6712_32x_sfp30 }, -+{ "as6712_32x_sfp31", as6712_32x_sfp31 }, { "as6712_32x_sfp32", as6712_32x_sfp32 }, -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as6712_32x_sfp_id); -+ -+ -+static struct i2c_driver as6712_32x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as6712_32x_sfp", -+ }, -+ .probe = as6712_32x_sfp_probe, -+ .remove = as6712_32x_sfp_remove, -+ .id_table = as6712_32x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+#if 0 -+static int as6712_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) -+{ -+ int result = i2c_smbus_read_byte_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u8)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+#endif -+ -+static int as6712_32x_sfp_read_word(struct i2c_client *client, u8 command, u16 *data) -+{ -+ int result = i2c_smbus_read_word_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u16)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+#define ALWAYS_UPDATE 1 -+ -+static struct as6712_32x_sfp_data *as6712_32x_sfp_update_device(struct device *dev, int update_eeprom) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as6712_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (ALWAYS_UPDATE || time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0, j = 0; -+ -+ data->valid = 0; -+ -+ /* Read present status of port 1~32 */ -+ data->is_present = 0; -+ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 2; j++) { -+ status = as6712_32x_i2c_cpld_read(0x62+i*2, 0xA+j); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x62+i*2, 0xA+j, status); -+ goto exit; -+ } -+ -+ data->is_present |= (u64)status << ((i*16) + (j*8)); -+ } -+ } -+ -+ if (update_eeprom) { -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if ((data->is_present & 
BIT_INDEX(data->port)) == 0) { -+ /* read eeprom */ -+ u16 eeprom_data; -+ for (i = 0; i < (sizeof(data->eeprom) / 2); i++) { -+ status = as6712_32x_sfp_read_word(client, i*2, &eeprom_data); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); -+ goto exit; -+ } -+ -+ data->eeprom[i*2] = eeprom_data & 0xff; -+ data->eeprom[i*2 + 1] = eeprom_data >> 8; -+ } -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+module_i2c_driver(as6712_32x_sfp_driver); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as6712_32x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch deleted file mode 100644 index 5fe16e5e..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch +++ /dev/null @@ -1,2300 +0,0 @@ -Device driver patches for accton as6812-32x (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 4d9fb22..73ee085 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1538,6 +1538,24 @@ config SENSORS_ACCTON_AS5812_54x_PSU - This driver can also be built as a module. If so, the module will - be called accton_as5812_54x_psu. - -+config SENSORS_ACCTON_AS6812_32x_FAN -+ tristate "Accton as6812 32x fan" -+ depends on I2C && I2C_MUX_ACCTON_AS6812_32x_CPLD -+ help -+ If you say yes here you get support for Accton as6812 32x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as6812_32x_fan. -+ -+config SENSORS_ACCTON_AS6812_32x_PSU -+ tristate "Accton as6812 32x psu" -+ depends on I2C && I2C_MUX_ACCTON_AS6812_32x_CPLD -+ help -+ If you say yes here you get support for Accton as6812 32x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as6812_32x_psu. -+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index 818dd01..7700250 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -32,6 +32,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS7712_32x_PSU) += accton_as7712_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_I2C_CPLD) += accton_i2c_cpld.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54x_FAN) += accton_as5812_54x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54x_PSU) += accton_as5812_54x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_FAN) += accton_as6812_32x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_PSU) += accton_as6812_32x_psu.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as6812_32x_fan.c b/drivers/hwmon/accton_as6812_32x_fan.c -new file mode 100644 -index 0000000..f055567 ---- /dev/null -+++ b/drivers/hwmon/accton_as6812_32x_fan.c -@@ -0,0 +1,434 @@ -+/* -+ * A hwmon driver for the Accton as6812 32x fan contrl -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FAN_MAX_NUMBER 5 -+#define FAN_SPEED_CPLD_TO_RPM_STEP 150 -+#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 -+#define FAN_DUTY_CYCLE_MIN 0 -+#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ -+ -+#define CPLD_REG_FAN_STATUS_OFFSET 0xC -+#define CPLD_REG_FANR_STATUS_OFFSET 0x17 -+#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1E -+ -+#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 -+#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 -+#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 -+#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 -+#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 -+ -+#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 -+#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 -+#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A -+#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B -+#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C -+ -+#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0xD -+ -+#define CPLD_FAN1_INFO_BIT_MASK 0x1 -+#define CPLD_FAN2_INFO_BIT_MASK 0x2 -+#define CPLD_FAN3_INFO_BIT_MASK 0x4 -+#define CPLD_FAN4_INFO_BIT_MASK 0x8 -+#define CPLD_FAN5_INFO_BIT_MASK 0x10 -+ -+#define PROJECT_NAME -+ -+#define DEBUG_MODE 0 -+ -+#if (DEBUG_MODE == 1) -+ #define DEBUG_PRINT(format, ...) printk(format, __VA_ARGS__) -+#else -+ #define DEBUG_PRINT(format, ...) 
-+#endif -+ -+static struct accton_as6812_32x_fan *fan_data = NULL; -+ -+struct accton_as6812_32x_fan { -+ struct platform_device *pdev; -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ -+ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ -+ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ -+ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ -+ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ -+ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ -+}; -+ -+/*******************/ -+#define MAKE_FAN_MASK_OR_REG(name,type) \ -+ CPLD_FAN##type##1_##name, \ -+ CPLD_FAN##type##2_##name, \ -+ CPLD_FAN##type##3_##name, \ -+ CPLD_FAN##type##4_##name, \ -+ CPLD_FAN##type##5_##name, -+ -+/* fan related data -+ */ -+static const u8 fan_info_mask[] = { -+ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) -+}; -+ -+static const u8 fan_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) -+}; -+ -+static const u8 fanr_speed_reg[] = { -+ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) -+}; -+ -+/*******************/ -+#define DEF_FAN_SET(id) \ -+ FAN##id##_FAULT, \ -+ FAN##id##_SPEED, \ -+ FAN##id##_DUTY_CYCLE, \ -+ FAN##id##_DIRECTION, \ -+ FANR##id##_FAULT, \ -+ FANR##id##_SPEED, -+ -+enum sysfs_fan_attributes { -+ DEF_FAN_SET(1) -+ DEF_FAN_SET(2) -+ DEF_FAN_SET(3) -+ DEF_FAN_SET(4) -+ DEF_FAN_SET(5) -+}; -+/*******************/ -+static void accton_as6812_32x_fan_update_device(struct device *dev); -+static int accton_as6812_32x_fan_read_value(u8 reg); -+static int accton_as6812_32x_fan_write_value(u8 reg, u8 value); -+ -+static ssize_t fan_set_duty_cycle(struct device *dev, -+ struct device_attribute *da,const char *buf, size_t count); -+static ssize_t fan_show_value(struct device *dev, -+ struct device_attribute *da, char *buf); -+ -+extern int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+ -+/*******************/ -+#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ -+ fan_set_duty_cycle, FAN##id##_DUTY_CYCLE); \ -+ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ -+ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); -+ -+#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) -+ -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) -+MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) -+/*******************/ -+ -+#define _MAKE_FAN_ATTR(prj, id) \ -+ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr,\ -+ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ -+ 
&sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ -+ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, -+ -+#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) -+ -+static struct attribute *accton_as6812_32x_fan_attributes[] = { -+ /* fan related attributes */ -+ MAKE_FAN_ATTR(PROJECT_NAME,1) -+ MAKE_FAN_ATTR(PROJECT_NAME,2) -+ MAKE_FAN_ATTR(PROJECT_NAME,3) -+ MAKE_FAN_ATTR(PROJECT_NAME,4) -+ MAKE_FAN_ATTR(PROJECT_NAME,5) -+ NULL -+}; -+/*******************/ -+ -+/* fan related functions -+ */ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ ssize_t ret = 0; -+ int data_index, type_index; -+ -+ accton_as6812_32x_fan_update_device(dev); -+ -+ if (fan_data->valid == 0) { -+ return ret; -+ } -+ -+ type_index = attr->index%FAN2_FAULT; -+ data_index = attr->index/FAN2_FAULT; -+ -+ switch (type_index) { -+ case FAN1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DUTY_CYCLE: -+ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FAN1_DIRECTION: -+ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_FAULT: -+ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ case FANR1_SPEED: -+ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); -+ DEBUG_PRINT("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); -+ break; -+ default: -+ DEBUG_PRINT("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); -+ break; -+ } -+ -+ return ret; -+} -+/*******************/ -+static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) { -+ -+ int error, value; -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) -+ return -EINVAL; -+ -+ accton_as6812_32x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); -+ -+ fan_data->valid = 0; -+ -+ return count; -+} -+ -+static const struct attribute_group accton_as6812_32x_fan_group = { -+ .attrs = accton_as6812_32x_fan_attributes, -+}; -+ -+static int accton_as6812_32x_fan_read_value(u8 reg) -+{ -+ return as6812_32x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as6812_32x_fan_write_value(u8 reg, u8 value) -+{ -+ return as6812_32x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as6812_32x_fan_update_device(struct device *dev) -+{ -+ int speed, r_speed, fault, r_fault, direction, ctrl_speed; -+ int i; -+ -+ mutex_lock(&fan_data->update_lock); -+ -+ DEBUG_PRINT("Starting accton_as6812_32x_fan update \n"); -+ -+ if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ 
/ 2) || !fan_data->valid)) { -+ /* do nothing */ -+ goto _exit; -+ } -+ -+ fan_data->valid = 0; -+ -+ DEBUG_PRINT("Starting accton_as6812_32x_fan update 2 \n"); -+ -+ fault = accton_as6812_32x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); -+ r_fault = accton_as6812_32x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); -+ direction = accton_as6812_32x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); -+ ctrl_speed = accton_as6812_32x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); -+ -+ if ( (fault < 0) || (r_fault < 0) || (ctrl_speed < 0) ) -+ { -+ DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ DEBUG_PRINT("[fan:] fault:%d, r_fault=%d, ctrl_speed=%d \n",fault, r_fault, ctrl_speed); -+ -+ for (i = 0; i < FAN_MAX_NUMBER; i++) -+ { -+ /* Update fan data -+ */ -+ -+ /* fan fault -+ * 0: normal, 1:abnormal -+ * Each FAN-tray module has two fans. -+ */ -+ fan_data->status[i] = (fault & fan_info_mask[i]) >> i; -+ DEBUG_PRINT("[fan%d:] fail=%d \n",i, fan_data->status[i]); -+ -+ fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; -+ fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; -+ fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; -+ -+ /* fan speed -+ */ -+ speed = accton_as6812_32x_fan_read_value(fan_speed_reg[i]); -+ r_speed = accton_as6812_32x_fan_read_value(fanr_speed_reg[i]); -+ if ( (speed < 0) || (r_speed < 0) ) -+ { -+ DEBUG_PRINT("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); -+ goto _exit; /* error */ -+ } -+ -+ DEBUG_PRINT("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); -+ -+ fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; -+ } -+ -+ /* finish to update */ -+ fan_data->last_updated = jiffies; -+ fan_data->valid = 1; -+ -+_exit: -+ mutex_unlock(&fan_data->update_lock); -+} -+ -+static int accton_as6812_32x_fan_probe(struct platform_device *pdev) -+{ -+ int status = -1; -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&pdev->dev.kobj, &accton_as6812_32x_fan_group); -+ if (status) { -+ goto exit; -+ -+ } -+ -+ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); -+ if (IS_ERR(fan_data->hwmon_dev)) { -+ status = PTR_ERR(fan_data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&pdev->dev, "accton_as6812_32x_fan\n"); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&pdev->dev.kobj, &accton_as6812_32x_fan_group); -+exit: -+ return status; -+} -+ -+static int accton_as6812_32x_fan_remove(struct platform_device *pdev) -+{ -+ hwmon_device_unregister(fan_data->hwmon_dev); -+ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as6812_32x_fan_group); -+ -+ return 0; -+} -+ -+#define DRVNAME "as6812_32x_fan" -+ -+static struct platform_driver accton_as6812_32x_fan_driver = { -+ .probe = accton_as6812_32x_fan_probe, -+ .remove = accton_as6812_32x_fan_remove, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as6812_32x_fan_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as6812_32x(void); -+ if(!platform_accton_as6812_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as6812_32x_fan_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ fan_data = kzalloc(sizeof(struct accton_as6812_32x_fan), GFP_KERNEL); -+ if (!fan_data) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as6812_32x_fan_driver); -+ goto exit; -+ } -+ -+ mutex_init(&fan_data->update_lock); -+ fan_data->valid = 0; -+ -+ fan_data->pdev = 
platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(fan_data->pdev)) { -+ ret = PTR_ERR(fan_data->pdev); -+ platform_driver_unregister(&accton_as6812_32x_fan_driver); -+ kfree(fan_data); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as6812_32x_fan_exit(void) -+{ -+ platform_device_unregister(fan_data->pdev); -+ platform_driver_unregister(&accton_as6812_32x_fan_driver); -+ kfree(fan_data); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as6812_32x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_as6812_32x_fan_init); -+module_exit(accton_as6812_32x_fan_exit); -+ -diff --git a/drivers/hwmon/accton_as6812_32x_psu.c b/drivers/hwmon/accton_as6812_32x_psu.c -new file mode 100644 -index 0000000..dfee68b ---- /dev/null -+++ b/drivers/hwmon/accton_as6812_32x_psu.c -@@ -0,0 +1,305 @@ -+/* -+ * An hwmon driver for accton as6812_32x Power Module -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as6812_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as6812_32x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[14]; /* Model name, read from eeprom */ -+}; -+ -+static struct as6812_32x_psu_data *as6812_32x_psu_update_device(struct device *dev); -+ -+enum as6812_32x_psu_sysfs_attributes { -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as6812_32x_psu_attributes[] = { -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as6812_32x_psu_data *data = as6812_32x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((data->index-1)*4) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = data->status >> ((data->index-1)*4 + 1) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as6812_32x_psu_data *data = as6812_32x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s\n", data->model_name); -+} -+ -+static const struct attribute_group as6812_32x_psu_group = { -+ .attrs = as6812_32x_psu_attributes, -+}; -+ -+static int as6812_32x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as6812_32x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as6812_32x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as6812_32x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x50 || client->addr == 0x38) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x53 || 
client->addr == 0x3b) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as6812_32x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as6812_32x_psu_remove(struct i2c_client *client) -+{ -+ struct as6812_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as6812_32x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as6812_32x_psu_id[] = { -+ { "as6812_32x_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as6812_32x_psu_id); -+ -+static struct i2c_driver as6812_32x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as6812_32x_psu", -+ }, -+ .probe = as6812_32x_psu_probe, -+ .remove = as6812_32x_psu_remove, -+ .id_table = as6812_32x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as6812_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = 0; -+ int retry_count = 5; -+ -+ while (retry_count) { -+ retry_count--; -+ -+ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) { -+ msleep(10); -+ continue; -+ } -+ -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ msleep(10); -+ continue; -+ } -+ -+ result = 0; -+ break; -+ } -+ -+ return result; -+} -+ -+static struct as6812_32x_psu_data *as6812_32x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as6812_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status; -+ int present = 0; -+ -+ dev_dbg(&client->dev, "Starting as6812_32x update\n"); -+ -+ /* Read psu status */ -+ status = as6812_32x_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ /* Read model name */ -+ memset(data->model_name, 0, sizeof(data->model_name)); -+ present = !(data->status >> ((data->index-1)*4) & 0x1); -+ -+ if (present) { -+ u8 command; -+ int model_name_len = 0; -+ -+ if (client->addr == 0x38 || client->addr == 0x3b) { -+ /* cpr_4011_4mxx AC power */ -+ command = 0x26; -+ model_name_len = 13; -+ } -+ else { /* 0x50 & 0x53 */ -+ /* ym2651 AC power */ -+ command = 0x20; -+ model_name_len = 8; -+ } -+ -+ status = as6812_32x_psu_read_block(client,command,data->model_name, -+ model_name_len); -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[model_name_len] = '\0'; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as6812_32x_psu_init(void) -+{ -+ extern int platform_accton_as6812_32x(void); -+ if(!platform_accton_as6812_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as6812_32x_psu_driver); -+} -+ -+static void __exit as6812_32x_psu_exit(void) -+{ -+ i2c_del_driver(&as6812_32x_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as6812_32x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as6812_32x_psu_init); -+module_exit(as6812_32x_psu_exit); -diff --git 
a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig -index 8ac67ef..b378ade 100644 ---- a/drivers/i2c/muxes/Kconfig -+++ b/drivers/i2c/muxes/Kconfig -@@ -33,6 +33,15 @@ config I2C_MUX_ACCTON_AS5812_54x_CPLD - This driver can also be built as a module. If so, the module - will be called i2c-mux-accton_as5812_54x_cpld. - -+config I2C_MUX_ACCTON_AS6812_32x_CPLD -+ tristate "Accton as6812_32x CPLD I2C multiplexer" -+ help -+ If you say yes here you get support for the Accton CPLD -+ I2C mux devices. -+ -+ This driver can also be built as a module. If so, the module -+ will be called i2c-mux-accton_as6812_32x_cpld. -+ - config I2C_MUX_GPIO - tristate "GPIO-based I2C multiplexer" - depends on GENERIC_GPIO -diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile -index 7769d29..840a66c 100644 ---- a/drivers/i2c/muxes/Makefile -+++ b/drivers/i2c/muxes/Makefile -@@ -10,5 +10,6 @@ obj-$(CONFIG_I2C_MUX_QUANTA_LY2) += quanta-ly2-i2c-mux.o - obj-$(CONFIG_I2C_MUX_ACCTON_AS5712_54x_CPLD) += i2c-mux-accton_as5712_54x_cpld.o - obj-$(CONFIG_I2C_MUX_ACCTON_AS6712_32x_CPLD) += i2c-mux-accton_as6712_32x_cpld.o - obj-$(CONFIG_I2C_MUX_ACCTON_AS5812_54x_CPLD) += i2c-mux-accton_as5812_54x_cpld.o -+obj-$(CONFIG_I2C_MUX_ACCTON_AS6812_32x_CPLD) += i2c-mux-accton_as6812_32x_cpld.o - - ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG -diff --git a/drivers/i2c/muxes/i2c-mux-accton_as6812_32x_cpld.c b/drivers/i2c/muxes/i2c-mux-accton_as6812_32x_cpld.c -new file mode 100644 -index 0000000..d668ca4 ---- /dev/null -+++ b/drivers/i2c/muxes/i2c-mux-accton_as6812_32x_cpld.c -@@ -0,0 +1,389 @@ -+/* -+ * I2C multiplexer for accton as6812 CPLD -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This module supports the accton cpld that hold the channel select -+ * mechanism for other i2c slave devices, such as SFP. -+ * This includes the: -+ * Accton as6812_32x CPLD1/CPLD2/CPLD3 -+ * -+ * Based on: -+ * pca954x.c from Kumar Gala -+ * Copyright (C) 2006 -+ * -+ * Based on: -+ * pca954x.c from Ken Harrenstien -+ * Copyright (C) 2004 Google, Inc. (Ken Harrenstien) -+ * -+ * Based on: -+ * i2c-virtual_cb.c from Brian Kuschak -+ * and -+ * pca9540.c from Jean Delvare . -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static struct dmi_system_id as6812_dmi_table[] = { -+ { -+ .ident = "Accton AS6812", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), -+ }, -+ }, -+ { -+ .ident = "Accton AS6812", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), -+ }, -+ }, -+}; -+ -+int platform_accton_as6812_32x(void) -+{ -+ return dmi_check_system(as6812_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as6812_32x); -+ -+#define NUM_OF_CPLD1_CHANS 0x0 -+#define NUM_OF_CPLD2_CHANS 0x10 -+#define NUM_OF_CPLD3_CHANS 0x10 -+#define NUM_OF_ALL_CPLD_CHANS (NUM_OF_CPLD2_CHANS + NUM_OF_CPLD3_CHANS) -+#define ACCTON_I2C_CPLD_MUX_MAX_NCHANS NUM_OF_CPLD3_CHANS -+ -+static LIST_HEAD(cpld_client_list); -+static struct mutex list_lock; -+ -+struct cpld_client_node { -+ struct i2c_client *client; -+ struct list_head list; -+}; -+ -+enum cpld_mux_type { -+ as6812_32x_cpld2, -+ as6812_32x_cpld3, -+ as6812_32x_cpld1 -+}; -+ -+struct accton_i2c_cpld_mux { -+ enum cpld_mux_type type; -+ struct i2c_adapter *virt_adaps[ACCTON_I2C_CPLD_MUX_MAX_NCHANS]; -+ u8 last_chan; /* last register value */ -+}; -+ -+struct chip_desc { -+ u8 nchans; -+ u8 deselectChan; -+}; -+ -+/* Provide specs for the PCA954x types we know about */ -+static const struct chip_desc chips[] = { -+ [as6812_32x_cpld1] = { -+ .nchans = NUM_OF_CPLD1_CHANS, -+ .deselectChan = NUM_OF_CPLD1_CHANS, -+ }, -+ [as6812_32x_cpld2] = { -+ .nchans = NUM_OF_CPLD2_CHANS, -+ .deselectChan = NUM_OF_CPLD2_CHANS, -+ }, -+ [as6812_32x_cpld3] = { -+ .nchans = NUM_OF_CPLD3_CHANS, -+ .deselectChan = NUM_OF_CPLD3_CHANS, -+ } -+}; -+ -+static const struct i2c_device_id accton_i2c_cpld_mux_id[] = { -+ { "as6812_32x_cpld1", as6812_32x_cpld1 }, -+ { "as6812_32x_cpld2", as6812_32x_cpld2 }, -+ { "as6812_32x_cpld3", as6812_32x_cpld3 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_mux_id); -+ -+/* Write to mux register. 
Don't use i2c_transfer()/i2c_smbus_xfer() -+ for this as they will try to lock adapter a second time */ -+static int accton_i2c_cpld_mux_reg_write(struct i2c_adapter *adap, -+ struct i2c_client *client, u8 val) -+{ -+ unsigned long orig_jiffies; -+ unsigned short flags; -+ union i2c_smbus_data data; -+ int try; -+ s32 res = -EIO; -+ -+ data.byte = val; -+ flags = client->flags; -+ flags &= I2C_M_TEN | I2C_CLIENT_PEC; -+ -+ if (adap->algo->smbus_xfer) { -+ /* Retry automatically on arbitration loss */ -+ orig_jiffies = jiffies; -+ for (res = 0, try = 0; try <= adap->retries; try++) { -+ res = adap->algo->smbus_xfer(adap, client->addr, flags, -+ I2C_SMBUS_WRITE, 0x2, -+ I2C_SMBUS_BYTE_DATA, &data); -+ if (res != -EAGAIN) -+ break; -+ if (time_after(jiffies, -+ orig_jiffies + adap->timeout)) -+ break; -+ } -+ } -+ -+ return res; -+} -+ -+static int accton_i2c_cpld_mux_select_chan(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ u8 regval; -+ int ret = 0; -+ regval = chan; -+ -+ /* Only select the channel if its different from the last channel */ -+ if (data->last_chan != regval) { -+ ret = accton_i2c_cpld_mux_reg_write(adap, client, regval); -+ data->last_chan = regval; -+ } -+ -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_deselect_mux(struct i2c_adapter *adap, -+ void *client, u32 chan) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ -+ /* Deselect active channel */ -+ data->last_chan = chips[data->type].deselectChan; -+ -+ return accton_i2c_cpld_mux_reg_write(adap, client, data->last_chan); -+} -+ -+static void accton_i2c_cpld_add_client(struct i2c_client *client) -+{ -+ struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); -+ -+ if (!node) { -+ dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); -+ return; -+ } -+ -+ node->client = client; -+ -+ mutex_lock(&list_lock); -+ list_add(&node->list, &cpld_client_list); -+ mutex_unlock(&list_lock); -+} -+ -+static void accton_i2c_cpld_remove_client(struct i2c_client *client) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int found = 0; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client == client) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) { -+ list_del(list_node); -+ kfree(cpld_node); -+ } -+ -+ mutex_unlock(&list_lock); -+} -+ -+static ssize_t show_cpld_version(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ u8 reg = 0x1; -+ struct i2c_client *client; -+ int len; -+ -+ client = to_i2c_client(dev); -+ len = sprintf(buf, "%d", i2c_smbus_read_byte_data(client, reg)); -+ -+ return len; -+} -+ -+static struct device_attribute ver = __ATTR(version, 0600, show_cpld_version, NULL); -+ -+/* -+ * I2C init/probing/exit functions -+ */ -+static int accton_i2c_cpld_mux_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); -+ int chan=0; -+ struct accton_i2c_cpld_mux *data; -+ int ret = -ENODEV; -+ -+ if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) -+ goto err; -+ -+ data = kzalloc(sizeof(struct accton_i2c_cpld_mux), GFP_KERNEL); -+ if (!data) { -+ ret = -ENOMEM; -+ goto err; -+ } -+ -+ i2c_set_clientdata(client, data); -+ -+ data->type = id->driver_data; -+ -+ if (data->type == as6812_32x_cpld2 
|| data->type == as6812_32x_cpld3) { -+ data->last_chan = chips[data->type].deselectChan; /* force the first selection */ -+ -+ /* Now create an adapter for each channel */ -+ for (chan = 0; chan < chips[data->type].nchans; chan++) { -+ data->virt_adaps[chan] = i2c_add_mux_adapter(adap, &client->dev, client, 0, chan, -+ accton_i2c_cpld_mux_select_chan, -+ accton_i2c_cpld_mux_deselect_mux); -+ -+ if (data->virt_adaps[chan] == NULL) { -+ ret = -ENODEV; -+ dev_err(&client->dev, "failed to register multiplexed adapter %d\n", chan); -+ goto virt_reg_failed; -+ } -+ } -+ -+ dev_info(&client->dev, "registered %d multiplexed busses for I2C mux %s\n", -+ chan, client->name); -+ } -+ -+ accton_i2c_cpld_add_client(client); -+ -+ ret = sysfs_create_file(&client->dev.kobj, &ver.attr); -+ if (ret) -+ goto virt_reg_failed; -+ -+ return 0; -+ -+virt_reg_failed: -+ for (chan--; chan >= 0; chan--) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ } -+ kfree(data); -+err: -+ return ret; -+} -+ -+static int accton_i2c_cpld_mux_remove(struct i2c_client *client) -+{ -+ struct accton_i2c_cpld_mux *data = i2c_get_clientdata(client); -+ const struct chip_desc *chip = &chips[data->type]; -+ int chan; -+ -+ sysfs_remove_file(&client->dev.kobj, &ver.attr); -+ -+ for (chan = 0; chan < chip->nchans; ++chan) { -+ if (data->virt_adaps[chan]) { -+ i2c_del_mux_adapter(data->virt_adaps[chan]); -+ data->virt_adaps[chan] = NULL; -+ } -+ } -+ -+ kfree(data); -+ accton_i2c_cpld_remove_client(client); -+ -+ return 0; -+} -+ -+int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EPERM; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_read_byte_data(cpld_node->client, reg); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as6812_32x_i2c_cpld_read); -+ -+int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EIO; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(as6812_32x_i2c_cpld_write); -+ -+static struct i2c_driver accton_i2c_cpld_mux_driver = { -+ .driver = { -+ .name = "as6812_32x_cpld", -+ .owner = THIS_MODULE, -+ }, -+ .probe = accton_i2c_cpld_mux_probe, -+ .remove = accton_i2c_cpld_mux_remove, -+ .id_table = accton_i2c_cpld_mux_id, -+}; -+ -+static int __init accton_i2c_cpld_mux_init(void) -+{ -+ mutex_init(&list_lock); -+ return i2c_add_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+static void __exit accton_i2c_cpld_mux_exit(void) -+{ -+ i2c_del_driver(&accton_i2c_cpld_mux_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("Accton I2C CPLD mux driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_i2c_cpld_mux_init); -+module_exit(accton_i2c_cpld_mux_exit); -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 514f978..cb0c17f 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -75,6 +75,13 @@ config 
LEDS_ACCTON_AS5812_54x - This option enables support for the LEDs on the Accton as5812 54x. - Say Y to enable LEDs on the Accton as5812 54x. - -+config LEDS_ACCTON_AS6812_32x -+ tristate "LED support for the Accton as6812 32x" -+ depends on LEDS_CLASS && I2C_MUX_ACCTON_AS6812_32x_CPLD -+ help -+ This option enables support for the LEDs on the Accton as6812 32x. -+ Say Y to enable LEDs on the Accton as6812 32x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index 379c448..8db7a43 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -48,6 +48,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS6712_32x) += leds-accton_as6712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS7512_32x) += leds-accton_as7512_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o -+obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as6812_32x.c b/drivers/leds/leds-accton_as6812_32x.c -new file mode 100644 -index 0000000..59c59cb ---- /dev/null -+++ b/drivers/leds/leds-accton_as6812_32x.c -@@ -0,0 +1,617 @@ -+/* -+ * A LED driver for the accton_as6812_32x_led -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int as6812_32x_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "as6812_32x_led" -+ -+struct accton_as6812_32x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[4]; /* Register value, 0 = LOC/DIAG/FAN LED -+ 1 = PSU1/PSU2 LED -+ 2 = FAN1-4 LED -+ 3 = FAN5-6 LED */ -+}; -+ -+static struct accton_as6812_32x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+#define LED_TYPE_PSU1_REG_MASK 0x03 -+#define LED_MODE_PSU1_GREEN_MASK 0x02 -+#define LED_MODE_PSU1_AMBER_MASK 0x01 -+#define LED_MODE_PSU1_OFF_MASK 0x03 -+#define LED_MODE_PSU1_AUTO_MASK 0x00 -+ -+#define LED_TYPE_PSU2_REG_MASK 0x0C -+#define LED_MODE_PSU2_GREEN_MASK 0x08 -+#define LED_MODE_PSU2_AMBER_MASK 0x04 -+#define LED_MODE_PSU2_OFF_MASK 0x0C -+#define LED_MODE_PSU2_AUTO_MASK 0x00 -+ -+#define LED_TYPE_DIAG_REG_MASK 0x0C -+#define LED_MODE_DIAG_GREEN_MASK 0x08 -+#define LED_MODE_DIAG_AMBER_MASK 0x04 -+#define LED_MODE_DIAG_OFF_MASK 0x0C -+#define LED_MODE_DIAG_BLINK_MASK 0x48 -+ -+#define LED_TYPE_FAN_REG_MASK 0x03 -+#define LED_MODE_FAN_GREEN_MASK 0x02 -+#define LED_MODE_FAN_AMBER_MASK 0x01 -+#define LED_MODE_FAN_OFF_MASK 0x03 -+#define LED_MODE_FAN_AUTO_MASK 0x00 -+ -+#define LED_TYPE_FAN1_REG_MASK 0x03 -+#define LED_TYPE_FAN2_REG_MASK 0xC0 -+#define LED_TYPE_FAN3_REG_MASK 0x30 -+#define LED_TYPE_FAN4_REG_MASK 0x0C -+#define LED_TYPE_FAN5_REG_MASK 0x03 -+ -+#define LED_MODE_FANX_GREEN_MASK 0x01 -+#define LED_MODE_FANX_RED_MASK 0x02 -+#define LED_MODE_FANX_OFF_MASK 0x00 -+ -+#define LED_TYPE_LOC_REG_MASK 0x30 -+#define LED_MODE_LOC_ON_MASK 0x00 -+#define LED_MODE_LOC_OFF_MASK 0x10 -+#define LED_MODE_LOC_BLINK_MASK 0x20 -+ -+static const u8 led_reg[] = { -+ 0xA, /* LOC/DIAG/FAN LED*/ -+ 0xB, /* PSU1/PSU2 LED */ -+ 0xE, /* FAN2-5 LED */ -+ 0xF, /* FAN1 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2, -+ LED_TYPE_DIAG, -+ LED_TYPE_FAN, -+ LED_TYPE_FAN1, -+ LED_TYPE_FAN2, -+ LED_TYPE_FAN3, -+ LED_TYPE_FAN4, -+ LED_TYPE_FAN5, -+ LED_TYPE_LOC -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_AMBER, -+ LED_MODE_RED, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED_BLINK, -+ LED_MODE_AUTO, -+}; -+ -+struct led_type_mode { -+ enum led_type type; -+ int type_mask; -+ enum led_light_mode mode; -+ int mode_mask; -+}; -+ -+static struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, -+{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, 
LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, -+{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, -+{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN1, LED_TYPE_FAN1_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 6}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 6}, -+{LED_TYPE_FAN2, LED_TYPE_FAN2_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 6}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 4}, -+{LED_TYPE_FAN3, LED_TYPE_FAN3_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 4}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 2}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 2}, -+{LED_TYPE_FAN4, LED_TYPE_FAN4_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 2}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_GREEN, LED_MODE_FANX_GREEN_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_RED, LED_MODE_FANX_RED_MASK << 0}, -+{LED_TYPE_FAN5, LED_TYPE_FAN5_REG_MASK, LED_MODE_OFF, LED_MODE_FANX_OFF_MASK << 0}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, -+{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_GREEN_BLINK, LED_MODE_DIAG_BLINK_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, -+{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} -+}; -+ -+ -+struct fanx_info_s { -+ u8 cname; /* device name */ -+ enum led_type type; -+ u8 reg_id; /* map to led_reg & reg_val */ -+}; -+ -+static struct fanx_info_s fanx_info[] = { -+ {'1', LED_TYPE_FAN1, 3}, -+ {'2', LED_TYPE_FAN2, 2}, -+ {'3', LED_TYPE_FAN3, 2}, -+ {'4', LED_TYPE_FAN4, 2}, -+ {'5', LED_TYPE_FAN5, 2}, -+}; -+ -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (type == LED_TYPE_DIAG) -+ { /* special case : bit 6 - meaning blinking */ -+ if (0x40 & reg_val) -+ return LED_MODE_GREEN_BLINK; -+ } -+ if ((led_type_mode_data[i].type_mask & reg_val) == -+ led_type_mode_data[i].mode_mask) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return 0; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) { -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ if (type == LED_TYPE_DIAG) -+ { -+ if (mode == LED_MODE_GREEN_BLINK) -+ { 
/* special case : bit 6 - meaning blinking */ -+ reg_val = 0x48 | (reg_val & ~0x4C); -+ break; -+ } -+ else -+ { /* for diag led, other case must cancel bit 6 first */ -+ reg_val = reg_val & ~0x40; -+ } -+ } -+ reg_val = led_type_mode_data[i].mode_mask | -+ (reg_val & (~led_type_mode_data[i].type_mask)); -+ break; -+ } -+ -+ return reg_val; -+} -+ -+static int accton_as6812_32x_led_read_value(u8 reg) -+{ -+ return as6812_32x_i2c_cpld_read(0x60, reg); -+} -+ -+static int accton_as6812_32x_led_write_value(u8 reg, u8 value) -+{ -+ return as6812_32x_i2c_cpld_write(0x60, reg, value); -+} -+ -+static void accton_as6812_32x_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as6812_32x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as6812_32x_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as6812_32x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as6812_32x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as6812_32x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as6812_32x_led_psu_1_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); -+} -+ -+static enum led_brightness accton_as6812_32x_led_psu_1_get(struct led_classdev *cdev) -+{ -+ accton_as6812_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); -+} -+ -+static void accton_as6812_32x_led_psu_2_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); -+} -+ -+static enum led_brightness accton_as6812_32x_led_psu_2_get(struct led_classdev *cdev) -+{ -+ accton_as6812_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); -+} -+ -+static void accton_as6812_32x_led_fan_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); -+} -+ -+static enum led_brightness accton_as6812_32x_led_fan_get(struct led_classdev *cdev) -+{ -+ accton_as6812_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); -+} -+ -+ -+static void accton_as6812_32x_led_fanx_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;i<ncount;i++) -+ { -+ nsize = strlen(led_cdev->name); -+ -+ if (led_cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id =
fanx_info[i].reg_id; -+ accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[reg_id], led_type1); -+ return; -+ } -+ } -+} -+ -+ -+static enum led_brightness accton_as6812_32x_led_fanx_get(struct led_classdev *cdev) -+{ -+ enum led_type led_type1; -+ int reg_id; -+ int i, nsize; -+ int ncount = sizeof(fanx_info)/sizeof(struct fanx_info_s); -+ -+ for(i=0;i<ncount;i++) -+ { -+ nsize = strlen(cdev->name); -+ -+ if (cdev->name[nsize-1] == fanx_info[i].cname) -+ { -+ led_type1 = fanx_info[i].type; -+ reg_id = fanx_info[i].reg_id; -+ accton_as6812_32x_led_update(); -+ return led_reg_val_to_light_mode(led_type1, ledctl->reg_val[reg_id]); -+ } -+ } -+ -+ -+ return led_reg_val_to_light_mode(LED_TYPE_FAN1, ledctl->reg_val[2]); -+} -+ -+ -+static void accton_as6812_32x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as6812_32x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as6812_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as6812_32x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as6812_32x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as6812_32x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as6812_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static struct led_classdev accton_as6812_32x_leds[] = { -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as6812_32x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_psu_1_set, -+ .brightness_get = accton_as6812_32x_led_psu_1_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as6812_32x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_psu_2_set, -+ .brightness_get = accton_as6812_32x_led_psu_2_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as6812_32x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_fan_set, -+ .brightness_get = accton_as6812_32x_led_fan_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN1] = { -+ .name = "accton_as6812_32x_led::fan1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_fanx_set, -+ .brightness_get = accton_as6812_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN2] = { -+ .name = "accton_as6812_32x_led::fan2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_fanx_set, -+ .brightness_get = accton_as6812_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN3] = { -+ .name = "accton_as6812_32x_led::fan3", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_fanx_set, -+ .brightness_get = accton_as6812_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_FAN4] = { -+ .name = "accton_as6812_32x_led::fan4", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_fanx_set, -+ .brightness_get = accton_as6812_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+
[LED_TYPE_FAN5] = { -+ .name = "accton_as6812_32x_led::fan5", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_fanx_set, -+ .brightness_get = accton_as6812_32x_led_fanx_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as6812_32x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_diag_set, -+ .brightness_get = accton_as6812_32x_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as6812_32x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as6812_32x_led_loc_set, -+ .brightness_get = accton_as6812_32x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as6812_32x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { -+ led_classdev_suspend(&accton_as6812_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as6812_32x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { -+ led_classdev_resume(&accton_as6812_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as6812_32x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as6812_32x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as6812_32x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as6812_32x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as6812_32x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as6812_32x_leds); i++) { -+ led_classdev_unregister(&accton_as6812_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as6812_32x_led_driver = { -+ .probe = accton_as6812_32x_led_probe, -+ .remove = accton_as6812_32x_led_remove, -+ .suspend = accton_as6812_32x_led_suspend, -+ .resume = accton_as6812_32x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as6812_32x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as6812_32x(void); -+ if(!platform_accton_as6812_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as6812_32x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as6812_32x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as6812_32x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as6812_32x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as6812_32x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as6812_32x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as6812_32x_led_init); -+module_exit(accton_as6812_32x_led_exit); -+ -+MODULE_AUTHOR("Brandon 
Chuang "); -+MODULE_DESCRIPTION("accton_as6812_32x_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 7c8d3b8..ff68df7 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -118,6 +118,15 @@ config EEPROM_ACCTON_AS5812_54x_SFP - This driver can also be built as a module. If so, the module will - be called accton_as5812_54x_sfp. - -+config EEPROM_ACCTON_AS6812_32x_SFP -+ tristate "Accton as6812 32x sfp" -+ depends on I2C && I2C_MUX_ACCTON_AS6812_32x_CPLD -+ help -+ If you say yes here you get support for Accton as6812 32x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as6812_32x_sfp. -+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index e11d273..4b682a1 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -11,4 +11,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS6712_32x_SFP) += accton_as6712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS7512_32x_SFP) += accton_as7512_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as6812_32x_sfp.c b/drivers/misc/eeprom/accton_as6812_32x_sfp.c -new file mode 100644 -index 0000000..1669fb8 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as6812_32x_sfp.c -@@ -0,0 +1,390 @@ -+/* -+ * An hwmon driver for accton as6812_32x sfp -+ * -+ * Copyright (C) 2015 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BIT_INDEX(i) (1ULL << (i)) -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as6812_32x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u64 is_present; /* present status */ -+}; -+ -+static struct as6812_32x_sfp_data *as6812_32x_sfp_update_device(struct device *dev, int update_eeprom); -+static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf); -+static int as6812_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data); -+extern int as6812_32x_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int as6812_32x_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+//extern int accton_i2c_cpld_mux_get_index(int adap_index); -+ -+enum as6812_32x_sfp_sysfs_attributes { -+ SFP_IS_PRESENT, -+ SFP_EEPROM, -+ SFP_PORT_NUMBER, -+ SFP_IS_PRESENT_ALL -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+ -+static struct attribute *as6812_32x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as6812_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->port+1); -+} -+ -+/* Error-check the CPLD read results. */ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+} while(0) -+ -+static ssize_t show_present(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ int values[4]; -+ /* -+ * Report the SFP_PRESENCE status for all ports. 
-+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], as6812_32x_i2c_cpld_read(0x62, 0xA), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], as6812_32x_i2c_cpld_read(0x62, 0xB), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], as6812_32x_i2c_cpld_read(0x64, 0xA), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], as6812_32x_i2c_cpld_read(0x64, 0xB), 1); -+ -+ /* Return values 1 -> 32 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], values[3]); -+ } -+ else { /* SFP_IS_PRESENT */ -+ u8 val; -+ struct as6812_32x_sfp_data *data = as6812_32x_sfp_update_device(dev, 0); -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ val = (data->is_present & BIT_INDEX(data->port)) ? 0 : 1; -+ return sprintf(buf, "%d", val); -+ } -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as6812_32x_sfp_data *data = as6812_32x_sfp_update_device(dev, 1); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if ((data->is_present & BIT_INDEX(data->port)) != 0) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as6812_32x_sfp_group = { -+ .attrs = as6812_32x_sfp_attributes, -+}; -+ -+static int as6812_32x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as6812_32x_sfp_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, /*I2C_FUNC_SMBUS_BYTE_DATA | */I2C_FUNC_SMBUS_WORD_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as6812_32x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as6812_32x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as6812_32x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as6812_32x_sfp_remove(struct i2c_client *client) -+{ -+ struct as6812_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as6812_32x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as6812_32x_sfp1, as6812_32x_sfp2, as6812_32x_sfp3, as6812_32x_sfp4, -+as6812_32x_sfp5, as6812_32x_sfp6, as6812_32x_sfp7, as6812_32x_sfp8, -+as6812_32x_sfp9, as6812_32x_sfp10, as6812_32x_sfp11,as6812_32x_sfp12, -+as6812_32x_sfp13, as6812_32x_sfp14, as6812_32x_sfp15,as6812_32x_sfp16, -+as6812_32x_sfp17, as6812_32x_sfp18, as6812_32x_sfp19,as6812_32x_sfp20, -+as6812_32x_sfp21, as6812_32x_sfp22, as6812_32x_sfp23,as6812_32x_sfp24, -+as6812_32x_sfp25, as6812_32x_sfp26, as6812_32x_sfp27,as6812_32x_sfp28, -+as6812_32x_sfp29, as6812_32x_sfp30, as6812_32x_sfp31,as6812_32x_sfp32 -+}; -+ -+static const struct i2c_device_id as6812_32x_sfp_id[] = { -+{ "as6812_32x_sfp1", as6812_32x_sfp1 }, { "as6812_32x_sfp2", as6812_32x_sfp2 }, -+{ 
"as6812_32x_sfp3", as6812_32x_sfp3 }, { "as6812_32x_sfp4", as6812_32x_sfp4 }, -+{ "as6812_32x_sfp5", as6812_32x_sfp5 }, { "as6812_32x_sfp6", as6812_32x_sfp6 }, -+{ "as6812_32x_sfp7", as6812_32x_sfp7 }, { "as6812_32x_sfp8", as6812_32x_sfp8 }, -+{ "as6812_32x_sfp9", as6812_32x_sfp9 }, { "as6812_32x_sfp10", as6812_32x_sfp10 }, -+{ "as6812_32x_sfp11", as6812_32x_sfp11 }, { "as6812_32x_sfp12", as6812_32x_sfp12 }, -+{ "as6812_32x_sfp13", as6812_32x_sfp13 }, { "as6812_32x_sfp14", as6812_32x_sfp14 }, -+{ "as6812_32x_sfp15", as6812_32x_sfp15 }, { "as6812_32x_sfp16", as6812_32x_sfp16 }, -+{ "as6812_32x_sfp17", as6812_32x_sfp17 }, { "as6812_32x_sfp18", as6812_32x_sfp18 }, -+{ "as6812_32x_sfp19", as6812_32x_sfp19 }, { "as6812_32x_sfp20", as6812_32x_sfp20 }, -+{ "as6812_32x_sfp21", as6812_32x_sfp21 }, { "as6812_32x_sfp22", as6812_32x_sfp22 }, -+{ "as6812_32x_sfp23", as6812_32x_sfp23 }, { "as6812_32x_sfp24", as6812_32x_sfp24 }, -+{ "as6812_32x_sfp25", as6812_32x_sfp25 }, { "as6812_32x_sfp26", as6812_32x_sfp26 }, -+{ "as6812_32x_sfp27", as6812_32x_sfp27 }, { "as6812_32x_sfp28", as6812_32x_sfp28 }, -+{ "as6812_32x_sfp29", as6812_32x_sfp29 }, { "as6812_32x_sfp30", as6812_32x_sfp30 }, -+{ "as6812_32x_sfp31", as6812_32x_sfp31 }, { "as6812_32x_sfp32", as6812_32x_sfp32 }, -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as6812_32x_sfp_id); -+ -+ -+static struct i2c_driver as6812_32x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as6812_32x_sfp", -+ }, -+ .probe = as6812_32x_sfp_probe, -+ .remove = as6812_32x_sfp_remove, -+ .id_table = as6812_32x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+#if 0 -+static int as6812_32x_sfp_read_byte(struct i2c_client *client, u8 command, u8 *data) -+{ -+ int result = i2c_smbus_read_byte_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u8)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+#endif -+ -+static int as6812_32x_sfp_read_word(struct i2c_client *client, u8 command, u16 *data) -+{ -+ int result = i2c_smbus_read_word_data(client, command); -+ -+ if (unlikely(result < 0)) { -+ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), data(0x%2x)\r\n", command, result); -+ goto abort; -+ } -+ -+ *data = (u16)result; -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+#define ALWAYS_UPDATE 1 -+ -+static struct as6812_32x_sfp_data *as6812_32x_sfp_update_device(struct device *dev, int update_eeprom) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as6812_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (ALWAYS_UPDATE || time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0, j = 0; -+ -+ data->valid = 0; -+ -+ /* Read present status of port 1~32 */ -+ data->is_present = 0; -+ -+ for (i = 0; i < 2; i++) { -+ for (j = 0; j < 2; j++) { -+ status = as6812_32x_i2c_cpld_read(0x62+i*2, 0xA+j); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", 0x62+i*2, 0xA+j, status); -+ goto exit; -+ } -+ -+ data->is_present |= (u64)status << ((i*16) + (j*8)); -+ } -+ } -+ -+ if (update_eeprom) { -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if ((data->is_present & BIT_INDEX(data->port)) == 0) { -+ /* read eeprom */ -+ u16 eeprom_data; -+ for (i = 0; i < (sizeof(data->eeprom) / 
2); i++) { -+ status = as6812_32x_sfp_read_word(client, i*2, &eeprom_data); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); -+ goto exit; -+ } -+ -+ data->eeprom[i*2] = eeprom_data & 0xff; -+ data->eeprom[i*2 + 1] = eeprom_data >> 8; -+ } -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as6812_32x_sfp_init(void) -+{ -+ extern int platform_accton_as6812_32x(void); -+ if(!platform_accton_as6812_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as6812_32x_sfp_driver); -+} -+ -+static void __exit as6812_32x_sfp_exit(void) -+{ -+ i2c_del_driver(&as6812_32x_sfp_driver); -+} -+ -+module_i2c_driver(as6812_32x_sfp_driver); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as6812_32x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as6812_32x_sfp_init); -+module_exit(as6812_32x_sfp_exit); diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch deleted file mode 100644 index e5028ae3..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch +++ /dev/null @@ -1,2675 +0,0 @@ -Device driver patches for accton as7512 (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 1ff287b..c410426 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1465,6 +1465,43 @@ config SENSORS_ACCTON_AS6712_32x_PSU - This driver can also be built as a module. If so, the module will - be called accton_as6712_32x_psu. - -+config SENSORS_ACCTON_I2C_CPLD -+ tristate "Accton i2c cpld" -+ depends on I2C -+ help -+ If you say yes here you get support for Accton i2c cpld. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_i2c_cpld. -+ -+config SENSORS_ACCTON_AS7512_32x_FAN -+ tristate "Accton as7512 32x fan" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7512 32x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7512_32x_fan. -+ -+config SENSORS_ACCTON_AS7512_32x_PSU -+ tristate "Accton as7512 32x psu" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7512 32x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7512_32x_psu. -+ -+config SENSORS_YM2651Y -+ tristate "3Y Power YM-2651Y Power Module" -+ depends on I2C -+ help -+ If you say yes here you get support for 3Y Power YM-2651Y -+ Power Module. -+ -+ This driver can also be built as a module. If so, the module will -+ be called ym2651y. 
-+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index dbbb5ee..f8ee399 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -25,6 +25,9 @@ obj-$(CONFIG_SENSORS_ACCTON_AS5712_54x_FAN) += accton_as5712_54x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS5712_54x_PSU) += accton_as5712_54x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS6712_32x_FAN) += accton_as6712_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS6712_32x_PSU) += accton_as6712_32x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS7512_32x_FAN) += accton_as7512_32x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS7512_32x_PSU) += accton_as7512_32x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_I2C_CPLD) += accton_i2c_cpld.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -@@ -136,6 +139,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o - obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o - obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o - obj-$(CONFIG_SENSORS_QUANTA_LY_HWMON) += quanta-ly-hwmon.o -+obj-$(CONFIG_SENSORS_YM2651Y) += ym2651y.o - - obj-$(CONFIG_PMBUS) += pmbus/ - -diff --git a/drivers/hwmon/accton_as7512_32x_fan.c b/drivers/hwmon/accton_as7512_32x_fan.c -new file mode 100644 -index 0000000..be323b5 ---- /dev/null -+++ b/drivers/hwmon/accton_as7512_32x_fan.c -@@ -0,0 +1,510 @@ -+/* -+ * A hwmon driver for the Accton as7512 32x fan -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRVNAME "as7512_32x_fan" -+ -+static struct as7512_32x_fan_data *as7512_32x_fan_update_device(struct device *dev); -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+/* fan related data, the index should match sysfs_fan_attributes -+ */ -+static const u8 fan_reg[] = { -+ 0x0F, /* fan 1-6 present status */ -+ 0x10, /* fan 1-6 direction(0:B2F 1:F2B) */ -+ 0x11, /* fan PWM(for all fan) */ -+ 0x12, /* front fan 1 speed(rpm) */ -+ 0x13, /* front fan 2 speed(rpm) */ -+ 0x14, /* front fan 3 speed(rpm) */ -+ 0x15, /* front fan 4 speed(rpm) */ -+ 0x16, /* front fan 5 speed(rpm) */ -+ 0x17, /* front fan 6 speed(rpm) */ -+ 0x22, /* rear fan 1 speed(rpm) */ -+ 0x23, /* rear fan 2 speed(rpm) */ -+ 0x24, /* rear fan 3 speed(rpm) */ -+ 0x25, /* rear fan 4 speed(rpm) */ -+ 0x26, /* rear fan 5 speed(rpm) */ -+ 0x27, /* rear fan 6 speed(rpm) */ -+}; -+ -+/* Each client has this additional data */ -+struct as7512_32x_fan_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */ -+}; -+ -+enum fan_id { -+ FAN1_ID, -+ FAN2_ID, -+ FAN3_ID, -+ FAN4_ID, -+ FAN5_ID, -+ FAN6_ID -+}; -+ -+enum sysfs_fan_attributes { -+ FAN_PRESENT_REG, -+ FAN_DIRECTION_REG, -+ FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ -+ FAN1_FRONT_SPEED_RPM, -+ FAN2_FRONT_SPEED_RPM, -+ FAN3_FRONT_SPEED_RPM, -+ FAN4_FRONT_SPEED_RPM, -+ FAN5_FRONT_SPEED_RPM, -+ FAN6_FRONT_SPEED_RPM, -+ FAN1_REAR_SPEED_RPM, -+ FAN2_REAR_SPEED_RPM, -+ FAN3_REAR_SPEED_RPM, -+ FAN4_REAR_SPEED_RPM, -+ FAN5_REAR_SPEED_RPM, -+ FAN6_REAR_SPEED_RPM, -+ FAN1_DIRECTION, -+ FAN2_DIRECTION, -+ FAN3_DIRECTION, -+ FAN4_DIRECTION, -+ FAN5_DIRECTION, -+ FAN6_DIRECTION, -+ FAN1_PRESENT, -+ FAN2_PRESENT, -+ FAN3_PRESENT, -+ FAN4_PRESENT, -+ FAN5_PRESENT, -+ FAN6_PRESENT, -+ FAN1_FAULT, -+ FAN2_FAULT, -+ FAN3_FAULT, -+ FAN4_FAULT, -+ FAN5_FAULT, -+ FAN6_FAULT -+}; -+ -+/* Define attributes -+ */ -+#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT) -+#define DECLARE_FAN_FAULT_ATTR(index) &sensor_dev_attr_fan##index##_fault.dev_attr.attr -+ -+#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION) -+#define DECLARE_FAN_DIRECTION_ATTR(index) &sensor_dev_attr_fan##index##_direction.dev_attr.attr -+ -+#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE) -+#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr -+ -+#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT) -+#define DECLARE_FAN_PRESENT_ATTR(index) 
&sensor_dev_attr_fan##index##_present.dev_attr.attr -+ -+#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_FRONT_SPEED_RPM);\ -+ static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_REAR_SPEED_RPM) -+#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr -+ -+/* 6 fan fault attributes in this platform */ -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); -+ -+#if 0 -+/* 6 fan direction attribute in this platform */ -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(6); -+#endif -+ -+/* 6 fan speed(rpm) attributes in this platform */ -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); -+/* 6 fan present attributes in this platform */ -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); -+/* 1 fan duty cycle attribute in this platform */ -+DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); -+ -+static struct attribute *as7512_32x_fan_attributes[] = { -+ /* fan related attributes */ -+ DECLARE_FAN_FAULT_ATTR(1), -+ DECLARE_FAN_FAULT_ATTR(2), -+ DECLARE_FAN_FAULT_ATTR(3), -+ DECLARE_FAN_FAULT_ATTR(4), -+ DECLARE_FAN_FAULT_ATTR(5), -+ DECLARE_FAN_FAULT_ATTR(6), -+#if 0 -+ DECLARE_FAN_DIRECTION_ATTR(1), -+ DECLARE_FAN_DIRECTION_ATTR(2), -+ DECLARE_FAN_DIRECTION_ATTR(3), -+ DECLARE_FAN_DIRECTION_ATTR(4), -+ DECLARE_FAN_DIRECTION_ATTR(5), -+ DECLARE_FAN_DIRECTION_ATTR(6), -+#endif -+ DECLARE_FAN_SPEED_RPM_ATTR(1), -+ DECLARE_FAN_SPEED_RPM_ATTR(2), -+ DECLARE_FAN_SPEED_RPM_ATTR(3), -+ DECLARE_FAN_SPEED_RPM_ATTR(4), -+ DECLARE_FAN_SPEED_RPM_ATTR(5), -+ DECLARE_FAN_SPEED_RPM_ATTR(6), -+ DECLARE_FAN_PRESENT_ATTR(1), -+ DECLARE_FAN_PRESENT_ATTR(2), -+ DECLARE_FAN_PRESENT_ATTR(3), -+ DECLARE_FAN_PRESENT_ATTR(4), -+ DECLARE_FAN_PRESENT_ATTR(5), -+ DECLARE_FAN_PRESENT_ATTR(6), -+ DECLARE_FAN_DUTY_CYCLE_ATTR(), -+ NULL -+}; -+ -+#define FAN_DUTY_CYCLE_REG_MASK 0x0F -+#define FAN_MAX_DUTY_CYCLE 100 -+#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 -+ -+static int as7512_32x_fan_read_value(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_byte_data(client, reg); -+} -+ -+static int as7512_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) -+{ -+ return i2c_smbus_write_byte_data(client, reg, value); -+} -+ -+/* fan utility functions -+ */ -+static u32 reg_val_to_duty_cycle(u8 reg_val) -+{ -+ reg_val &= FAN_DUTY_CYCLE_REG_MASK; -+ return (u32)(reg_val+1) * 625 / 100; -+} -+ -+static u8 duty_cycle_to_reg_val(u8 duty_cycle) -+{ -+ return ((u32)duty_cycle * 100 / 625) - 1; -+} -+ -+static u32 reg_val_to_speed_rpm(u8 reg_val) -+{ -+ return 
(u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; -+} -+ -+static u8 reg_val_to_direction(u8 reg_val, enum fan_id id) -+{ -+ u8 mask = (1 << id); -+ -+ reg_val &= mask; -+ -+ return reg_val ? 1 : 0; -+} -+ -+static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) -+{ -+ u8 mask = (1 << id); -+ -+ reg_val &= mask; -+ -+ return reg_val ? 0 : 1; -+} -+ -+static u8 is_fan_fault(struct as7512_32x_fan_data *data, enum fan_id id) -+{ -+ u8 ret = 1; -+ int front_fan_index = FAN1_FRONT_SPEED_RPM + id; -+ int rear_fan_index = FAN1_REAR_SPEED_RPM + id; -+ -+ /* Check if the speed of front or rear fan is ZERO, -+ */ -+ if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && -+ reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { -+ ret = 0; -+ } -+ -+ return ret; -+} -+ -+static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ int error, value; -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7512_32x_fan_data *data = i2c_get_clientdata(client); -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) { -+ return error; -+ } -+ -+ if (value < 0 || value > FAN_MAX_DUTY_CYCLE) { -+ return -EINVAL; -+ } -+ -+ /* Disable the watchdog timer -+ */ -+ error = as7512_32x_fan_write_value(client, 0x33, 0); -+ -+ if (error != 0) { -+ dev_dbg(&client->dev, "Unable to disable the watchdog timer\n"); -+ return error; -+ } -+ -+ as7512_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); -+ data->valid = 0; -+ -+ return count; -+} -+ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as7512_32x_fan_data *data = as7512_32x_fan_update_device(dev); -+ ssize_t ret = 0; -+ -+ if (data->valid) { -+ switch (attr->index) { -+ case FAN_DUTY_CYCLE_PERCENTAGE: -+ { -+ u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); -+ ret = sprintf(buf, "%u\n", duty_cycle); -+ break; -+ } -+ case FAN1_FRONT_SPEED_RPM: -+ case FAN2_FRONT_SPEED_RPM: -+ case FAN3_FRONT_SPEED_RPM: -+ case FAN4_FRONT_SPEED_RPM: -+ case FAN5_FRONT_SPEED_RPM: -+ case FAN6_FRONT_SPEED_RPM: -+ case FAN1_REAR_SPEED_RPM: -+ case FAN2_REAR_SPEED_RPM: -+ case FAN3_REAR_SPEED_RPM: -+ case FAN4_REAR_SPEED_RPM: -+ case FAN5_REAR_SPEED_RPM: -+ case FAN6_REAR_SPEED_RPM: -+ ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); -+ break; -+ case FAN1_DIRECTION: -+ case FAN2_DIRECTION: -+ case FAN3_DIRECTION: -+ case FAN4_DIRECTION: -+ case FAN5_DIRECTION: -+ case FAN6_DIRECTION: -+ ret = sprintf(buf, "%d\n", -+ reg_val_to_direction(data->reg_val[FAN_DIRECTION_REG], -+ attr->index - FAN1_DIRECTION)); -+ break; -+ case FAN1_PRESENT: -+ case FAN2_PRESENT: -+ case FAN3_PRESENT: -+ case FAN4_PRESENT: -+ case FAN5_PRESENT: -+ case FAN6_PRESENT: -+ ret = sprintf(buf, "%d\n", -+ reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], -+ attr->index - FAN1_PRESENT)); -+ break; -+ case FAN1_FAULT: -+ case FAN2_FAULT: -+ case FAN3_FAULT: -+ case FAN4_FAULT: -+ case FAN5_FAULT: -+ case FAN6_FAULT: -+ ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ return ret; -+} -+ -+static const struct attribute_group as7512_32x_fan_group = { -+ .attrs = as7512_32x_fan_attributes, -+}; -+ -+static struct as7512_32x_fan_data *as7512_32x_fan_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ 
struct as7512_32x_fan_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || -+ !data->valid) { -+ int i; -+ -+ dev_dbg(&client->dev, "Starting as7512_32x_fan update\n"); -+ data->valid = 0; -+ -+ /* Update fan data -+ */ -+ for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { -+ int status = as7512_32x_fan_read_value(client, fan_reg[i]); -+ -+ if (status < 0) { -+ data->valid = 0; -+ mutex_unlock(&data->update_lock); -+ dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); -+ return data; -+ } -+ else { -+ data->reg_val[i] = status; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int as7512_32x_fan_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7512_32x_fan_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7512_32x_fan_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7512_32x_fan_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: fan '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7512_32x_fan_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7512_32x_fan_remove(struct i2c_client *client) -+{ -+ struct as7512_32x_fan_data *data = i2c_get_clientdata(client); -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7512_32x_fan_group); -+ -+ return 0; -+} -+ -+/* Addresses to scan */ -+static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; -+ -+static const struct i2c_device_id as7512_32x_fan_id[] = { -+ { "as7512_32x_fan", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as7512_32x_fan_id); -+ -+static struct i2c_driver as7512_32x_fan_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = DRVNAME, -+ }, -+ .probe = as7512_32x_fan_probe, -+ .remove = as7512_32x_fan_remove, -+ .id_table = as7512_32x_fan_id, -+ .address_list = normal_i2c, -+}; -+ -+static int __init as7512_32x_fan_init(void) -+{ -+ extern int platform_accton_as7512_32x(void); -+ if (!platform_accton_as7512_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7512_32x_fan_driver); -+} -+ -+static void __exit as7512_32x_fan_exit(void) -+{ -+ i2c_del_driver(&as7512_32x_fan_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as7512_32x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as7512_32x_fan_init); -+module_exit(as7512_32x_fan_exit); -diff --git a/drivers/hwmon/accton_as7512_32x_psu.c b/drivers/hwmon/accton_as7512_32x_psu.c -new file mode 100644 -index 0000000..5873833 ---- /dev/null -+++ b/drivers/hwmon/accton_as7512_32x_psu.c -@@ -0,0 +1,291 @@ -+/* -+ * An hwmon driver for accton as7512_32x Power Module -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. 
-+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as7512_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as7512_32x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[9]; /* Model name, read from eeprom */ -+}; -+ -+static struct as7512_32x_psu_data *as7512_32x_psu_update_device(struct device *dev); -+ -+enum as7512_32x_psu_sysfs_attributes { -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as7512_32x_psu_attributes[] = { -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as7512_32x_psu_data *data = as7512_32x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> ((2 - data->index) + 2) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = (data->status >> (2 - data->index)) & 0x1; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as7512_32x_psu_data *data = as7512_32x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s\n", data->model_name); -+} -+ -+static const struct attribute_group as7512_32x_psu_group = { -+ .attrs = as7512_32x_psu_attributes, -+}; -+ -+static int as7512_32x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7512_32x_psu_data *data; -+ int status; -+ -+ if 
(!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7512_32x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7512_32x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ /* Update PSU index */ -+ if (client->addr == 0x50) { -+ data->index = 1; -+ } -+ else if (client->addr == 0x53) { -+ data->index = 2; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7512_32x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7512_32x_psu_remove(struct i2c_client *client) -+{ -+ struct as7512_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7512_32x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id as7512_32x_psu_id[] = { -+ { "as7512_32x_psu", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as7512_32x_psu_id); -+ -+static struct i2c_driver as7512_32x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as7512_32x_psu", -+ }, -+ .probe = as7512_32x_psu_probe, -+ .remove = as7512_32x_psu_remove, -+ .id_table = as7512_32x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as7512_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = 0; -+ int retry_count = 5; -+ -+ while (retry_count) { -+ retry_count--; -+ -+ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) { -+ msleep(10); -+ continue; -+ } -+ -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ msleep(10); -+ continue; -+ } -+ -+ result = 0; -+ break; -+ } -+ -+ return result; -+} -+ -+static struct as7512_32x_psu_data *as7512_32x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7512_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status; -+ int power_good = 0; -+ -+ dev_dbg(&client->dev, "Starting as7512_32x update\n"); -+ -+ /* Read psu status */ -+ status = accton_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ /* Read model name */ -+ memset(data->model_name, 0, sizeof(data->model_name)); -+ power_good = (data->status >> (2 - data->index)) & 0x1; -+ -+ if (power_good) { -+ status = as7512_32x_psu_read_block(client, 0x20, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ 
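/*
 * [Editorial note, not part of the original patch.] The bit arithmetic used by
 * show_status() and the power_good test in as7512_32x_psu_update_device()
 * above reads as follows for the two PSU client addresses (index 1 <-> 0x50,
 * index 2 <-> 0x53, as assigned in probe()): power-good is CPLD status bit
 * (2 - index), and presence is the inverse of bit (2 - index) + 2. A small
 * worked re-statement of that decoding, with hypothetical helper names:
 */
#include <linux/types.h>

static inline int example_psu_power_good(u8 status, u8 index)
{
	return (status >> (2 - index)) & 0x1;            /* bit 1 for PSU1, bit 0 for PSU2 */
}

static inline int example_psu_present(u8 status, u8 index)
{
	return !((status >> ((2 - index) + 2)) & 0x1);   /* bit 3 for PSU1, bit 2 for PSU2, active-low */
}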
-+static int __init as7512_32x_psu_init(void) -+{ -+ extern int platform_accton_as7512_32x(void); -+ if (!platform_accton_as7512_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7512_32x_psu_driver); -+} -+ -+static void __exit as7512_32x_psu_exit(void) -+{ -+ i2c_del_driver(&as7512_32x_psu_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as7512_32x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as7512_32x_psu_init); -+module_exit(as7512_32x_psu_exit); -diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c -new file mode 100644 -index 0000000..96e3490 ---- /dev/null -+++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -0,0 +1,216 @@ -+/* -+ * A hwmon driver for the accton_i2c_cpld -+ * -+ * Copyright (C) 2013 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+static LIST_HEAD(cpld_client_list); -+static struct mutex list_lock; -+ -+struct cpld_client_node { -+ struct i2c_client *client; -+ struct list_head list; -+}; -+ -+/* Addresses scanned for accton_i2c_cpld -+ */ -+static const unsigned short normal_i2c[] = { 0x31, 0x35, 0x60, 0x61, 0x62, I2C_CLIENT_END }; -+ -+static void accton_i2c_cpld_add_client(struct i2c_client *client) -+{ -+ struct cpld_client_node *node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); -+ -+ if (!node) { -+ dev_dbg(&client->dev, "Can't allocate cpld_client_node (0x%x)\n", client->addr); -+ return; -+ } -+ -+ node->client = client; -+ -+ mutex_lock(&list_lock); -+ list_add(&node->list, &cpld_client_list); -+ mutex_unlock(&list_lock); -+} -+ -+static void accton_i2c_cpld_remove_client(struct i2c_client *client) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int found = 0; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client == client) { -+ found = 1; -+ break; -+ } -+ } -+ -+ if (found) { -+ list_del(list_node); -+ kfree(cpld_node); -+ } -+ -+ mutex_unlock(&list_lock); -+} -+ -+static int accton_i2c_cpld_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ dev_dbg(&client->dev, "i2c_check_functionality failed (0x%x)\n", client->addr); -+ status = -EIO; -+ goto exit; -+ } -+ -+ dev_info(&client->dev, "chip found\n"); -+ accton_i2c_cpld_add_client(client); -+ -+ return 0; -+ -+exit: -+ return status; -+} -+ -+static int accton_i2c_cpld_remove(struct i2c_client *client) -+{ -+ accton_i2c_cpld_remove_client(client); -+ -+ return 0; -+} -+ 
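/*
 * [Editorial sketch, not part of the original patch.] accton_i2c_cpld.c keeps
 * a mutex-protected list of every CPLD client it binds, so the sibling
 * fan/psu/led drivers can reach a CPLD purely by its I2C address through the
 * exported helpers defined just below. A minimal consumer-side usage example;
 * the 0x60/0x02 register pair mirrors the PSU status read shown earlier, and
 * example_read_cpld_status() is a hypothetical caller.
 */
#include <linux/types.h>

extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg);

static int example_read_cpld_status(void)
{
	int status = accton_i2c_cpld_read(0x60, 0x02);  /* returns -EPERM if no client is bound at 0x60 */

	if (status < 0)
		return status;     /* propagate SMBus or lookup error */

	return status & 0xff;  /* raw status byte as read from the CPLD register */
}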
-+static const struct i2c_device_id accton_i2c_cpld_id[] = { -+ { "accton_i2c_cpld", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, accton_i2c_cpld_id); -+ -+static struct i2c_driver accton_i2c_cpld_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "accton_i2c_cpld", -+ }, -+ .probe = accton_i2c_cpld_probe, -+ .remove = accton_i2c_cpld_remove, -+ .id_table = accton_i2c_cpld_id, -+ .address_list = normal_i2c, -+}; -+ -+int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EPERM; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_read_byte_data(cpld_node->client, reg); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(accton_i2c_cpld_read); -+ -+int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value) -+{ -+ struct list_head *list_node = NULL; -+ struct cpld_client_node *cpld_node = NULL; -+ int ret = -EIO; -+ -+ mutex_lock(&list_lock); -+ -+ list_for_each(list_node, &cpld_client_list) -+ { -+ cpld_node = list_entry(list_node, struct cpld_client_node, list); -+ -+ if (cpld_node->client->addr == cpld_addr) { -+ ret = i2c_smbus_write_byte_data(cpld_node->client, reg, value); -+ break; -+ } -+ } -+ -+ mutex_unlock(&list_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL(accton_i2c_cpld_write); -+ -+static int __init accton_i2c_cpld_init(void) -+{ -+ mutex_init(&list_lock); -+ return i2c_add_driver(&accton_i2c_cpld_driver); -+} -+ -+static void __exit accton_i2c_cpld_exit(void) -+{ -+ i2c_del_driver(&accton_i2c_cpld_driver); -+} -+ -+static struct dmi_system_id as7512_dmi_table[] = { -+ { -+ .ident = "Accton AS7512", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), -+ }, -+ }, -+ { -+ .ident = "Accton AS7512", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), -+ }, -+ }, -+}; -+ -+int platform_accton_as7512_32x(void) -+{ -+ return dmi_check_system(as7512_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as7512_32x); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_i2c_cpld driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(accton_i2c_cpld_init); -+module_exit(accton_i2c_cpld_exit); -diff --git a/drivers/hwmon/ym2651y.c b/drivers/hwmon/ym2651y.c -new file mode 100644 -index 0000000..2fe455b ---- /dev/null -+++ b/drivers/hwmon/ym2651y.c -@@ -0,0 +1,631 @@ -+/* -+ * An hwmon driver for the 3Y Power YM-2651Y Power Module -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MAX_FAN_DUTY_CYCLE 100 -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x58, 0x5b, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct ym2651y_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 capability; /* Register value */ -+ u16 status_word; /* Register value */ -+ u8 fan_fault; /* Register value */ -+ u8 over_temp; /* Register value */ -+ u16 v_out; /* Register value */ -+ u16 i_out; /* Register value */ -+ u16 p_out; /* Register value */ -+ u16 temp; /* Register value */ -+ u16 fan_speed; /* Register value */ -+ u16 fan_duty_cycle[2]; /* Register value */ -+ u8 fan_dir[5]; /* Register value */ -+ u8 pmbus_revision; /* Register value */ -+ u8 mfr_id[10]; /* Register value */ -+ u8 mfr_model[10]; /* Register value */ -+ u8 mfr_revsion[3]; /* Register value */ -+ u16 mfr_vin_min; /* Register value */ -+ u16 mfr_vin_max; /* Register value */ -+ u16 mfr_iin_max; /* Register value */ -+ u16 mfr_iout_max; /* Register value */ -+ u16 mfr_pin_max; /* Register value */ -+ u16 mfr_pout_max; /* Register value */ -+ u16 mfr_vout_min; /* Register value */ -+ u16 mfr_vout_max; /* Register value */ -+}; -+ -+static ssize_t show_byte(struct device *dev, struct device_attribute *da, -+ char *buf); -+static ssize_t show_word(struct device *dev, struct device_attribute *da, -+ char *buf); -+static ssize_t show_linear(struct device *dev, struct device_attribute *da, -+ char *buf); -+static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, -+ char *buf); -+static ssize_t show_over_temp(struct device *dev, struct device_attribute *da, -+ char *buf); -+static ssize_t show_ascii(struct device *dev, struct device_attribute *da, -+ char *buf); -+static struct ym2651y_data *ym2651y_update_device(struct device *dev); -+static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+static int ym2651y_write_word(struct i2c_client *client, u8 reg, u16 value); -+ -+enum ym2651y_sysfs_attributes { -+ PSU_POWER_ON = 0, -+ PSU_TEMP_FAULT, -+ PSU_POWER_GOOD, -+ PSU_FAN1_FAULT, -+ PSU_FAN_DIRECTION, -+ PSU_OVER_TEMP, -+ PSU_V_OUT, -+ PSU_I_OUT, -+ PSU_P_OUT, -+ PSU_TEMP1_INPUT, -+ PSU_FAN1_SPEED, -+ PSU_FAN1_DUTY_CYCLE, -+ PSU_PMBUS_REVISION, -+ PSU_MFR_ID, -+ PSU_MFR_MODEL, -+ PSU_MFR_REVISION, -+ PSU_MFR_VIN_MIN, -+ PSU_MFR_VIN_MAX, -+ PSU_MFR_VOUT_MIN, -+ PSU_MFR_VOUT_MAX, -+ PSU_MFR_IIN_MAX, -+ PSU_MFR_IOUT_MAX, -+ PSU_MFR_PIN_MAX, -+ PSU_MFR_POUT_MAX -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_power_on, S_IRUGO, show_word, NULL, PSU_POWER_ON); -+static SENSOR_DEVICE_ATTR(psu_temp_fault, S_IRUGO, show_word, NULL, PSU_TEMP_FAULT); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_word, NULL, PSU_POWER_GOOD); -+static SENSOR_DEVICE_ATTR(psu_fan1_fault, S_IRUGO, show_fan_fault, NULL, PSU_FAN1_FAULT); -+static SENSOR_DEVICE_ATTR(psu_over_temp, S_IRUGO, show_over_temp, NULL, PSU_OVER_TEMP); -+static SENSOR_DEVICE_ATTR(psu_v_out, S_IRUGO, show_linear, NULL, PSU_V_OUT); -+static SENSOR_DEVICE_ATTR(psu_i_out, 
S_IRUGO, show_linear, NULL, PSU_I_OUT); -+static SENSOR_DEVICE_ATTR(psu_p_out, S_IRUGO, show_linear, NULL, PSU_P_OUT); -+static SENSOR_DEVICE_ATTR(psu_temp1_input, S_IRUGO, show_linear, NULL, PSU_TEMP1_INPUT); -+static SENSOR_DEVICE_ATTR(psu_fan1_speed_rpm, S_IRUGO, show_linear, NULL, PSU_FAN1_SPEED); -+static SENSOR_DEVICE_ATTR(psu_fan1_duty_cycle_percentage, S_IWUSR | S_IRUGO, show_linear, set_fan_duty_cycle, PSU_FAN1_DUTY_CYCLE); -+static SENSOR_DEVICE_ATTR(psu_fan_dir, S_IRUGO, show_ascii, NULL, PSU_FAN_DIRECTION); -+static SENSOR_DEVICE_ATTR(psu_pmbus_revision,S_IRUGO, show_byte, NULL, PSU_PMBUS_REVISION); -+static SENSOR_DEVICE_ATTR(psu_mfr_id, S_IRUGO, show_ascii, NULL, PSU_MFR_ID); -+static SENSOR_DEVICE_ATTR(psu_mfr_model, S_IRUGO, show_ascii, NULL, PSU_MFR_MODEL); -+static SENSOR_DEVICE_ATTR(psu_mfr_revision, S_IRUGO, show_ascii, NULL, PSU_MFR_REVISION); -+static SENSOR_DEVICE_ATTR(psu_mfr_vin_min, S_IRUGO, show_linear, NULL, PSU_MFR_VIN_MIN); -+static SENSOR_DEVICE_ATTR(psu_mfr_vin_max, S_IRUGO, show_linear, NULL, PSU_MFR_VIN_MAX); -+static SENSOR_DEVICE_ATTR(psu_mfr_vout_min, S_IRUGO, show_linear, NULL, PSU_MFR_VOUT_MIN); -+static SENSOR_DEVICE_ATTR(psu_mfr_vout_max, S_IRUGO, show_linear, NULL, PSU_MFR_VOUT_MAX); -+static SENSOR_DEVICE_ATTR(psu_mfr_iin_max, S_IRUGO, show_linear, NULL, PSU_MFR_IIN_MAX); -+static SENSOR_DEVICE_ATTR(psu_mfr_iout_max, S_IRUGO, show_linear, NULL, PSU_MFR_IOUT_MAX); -+static SENSOR_DEVICE_ATTR(psu_mfr_pin_max, S_IRUGO, show_linear, NULL, PSU_MFR_PIN_MAX); -+static SENSOR_DEVICE_ATTR(psu_mfr_pout_max, S_IRUGO, show_linear, NULL, PSU_MFR_POUT_MAX); -+ -+static struct attribute *ym2651y_attributes[] = { -+ &sensor_dev_attr_psu_power_on.dev_attr.attr, -+ &sensor_dev_attr_psu_temp_fault.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ &sensor_dev_attr_psu_fan1_fault.dev_attr.attr, -+ &sensor_dev_attr_psu_over_temp.dev_attr.attr, -+ &sensor_dev_attr_psu_v_out.dev_attr.attr, -+ &sensor_dev_attr_psu_i_out.dev_attr.attr, -+ &sensor_dev_attr_psu_p_out.dev_attr.attr, -+ &sensor_dev_attr_psu_temp1_input.dev_attr.attr, -+ &sensor_dev_attr_psu_fan1_speed_rpm.dev_attr.attr, -+ &sensor_dev_attr_psu_fan1_duty_cycle_percentage.dev_attr.attr, -+ &sensor_dev_attr_psu_fan_dir.dev_attr.attr, -+ &sensor_dev_attr_psu_pmbus_revision.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_id.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_model.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_revision.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_vin_min.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_vin_max.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_pout_max.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_iin_max.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_pin_max.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_vout_min.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_vout_max.dev_attr.attr, -+ &sensor_dev_attr_psu_mfr_iout_max.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_byte(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct ym2651y_data *data = ym2651y_update_device(dev); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ return (attr->index == PSU_PMBUS_REVISION) ? 
sprintf(buf, "%d\n", data->pmbus_revision) : -+ sprintf(buf, "0\n"); -+} -+ -+static ssize_t show_word(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct ym2651y_data *data = ym2651y_update_device(dev); -+ u16 status = 0; -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ switch (attr->index) { -+ case PSU_POWER_ON: /* psu_power_on, low byte bit 6 of status_word, 0=>ON, 1=>OFF */ -+ status = (data->status_word & 0x40) ? 0 : 1; -+ break; -+ case PSU_TEMP_FAULT: /* psu_temp_fault, low byte bit 2 of status_word, 0=>Normal, 1=>temp fault */ -+ status = (data->status_word & 0x4) >> 2; -+ break; -+ case PSU_POWER_GOOD: /* psu_power_good, high byte bit 3 of status_word, 0=>OK, 1=>FAIL */ -+ status = (data->status_word & 0x800) ? 0 : 1; -+ break; -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static int two_complement_to_int(u16 data, u8 valid_bit, int mask) -+{ -+ u16 valid_data = data & mask; -+ bool is_negative = valid_data >> (valid_bit - 1); -+ -+ return is_negative ? (-(((~valid_data) & mask) + 1)) : valid_data; -+} -+ -+static ssize_t set_fan_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct i2c_client *client = to_i2c_client(dev); -+ struct ym2651y_data *data = i2c_get_clientdata(client); -+ int nr = (attr->index == PSU_FAN1_DUTY_CYCLE) ? 0 : 1; -+ long speed; -+ int error; -+ -+ error = kstrtol(buf, 10, &speed); -+ if (error) -+ return error; -+ -+ if (speed < 0 || speed > MAX_FAN_DUTY_CYCLE) -+ return -EINVAL; -+ -+ mutex_lock(&data->update_lock); -+ data->fan_duty_cycle[nr] = speed; -+ ym2651y_write_word(client, 0x3B + nr, data->fan_duty_cycle[nr]); -+ mutex_unlock(&data->update_lock); -+ -+ return count; -+} -+ -+static ssize_t show_linear(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct ym2651y_data *data = ym2651y_update_device(dev); -+ -+ u16 value = 0; -+ int exponent, mantissa; -+ int multiplier = 1000; -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ switch (attr->index) { -+ case PSU_V_OUT: -+ value = data->v_out; -+ break; -+ case PSU_I_OUT: -+ value = data->i_out; -+ break; -+ case PSU_P_OUT: -+ value = data->p_out; -+ break; -+ case PSU_TEMP1_INPUT: -+ value = data->temp; -+ break; -+ case PSU_FAN1_SPEED: -+ value = data->fan_speed; -+ multiplier = 1; -+ break; -+ case PSU_FAN1_DUTY_CYCLE: -+ value = data->fan_duty_cycle[0]; -+ multiplier = 1; -+ break; -+ case PSU_MFR_VIN_MIN: -+ value = data->mfr_vin_min; -+ break; -+ case PSU_MFR_VIN_MAX: -+ value = data->mfr_vin_max; -+ break; -+ case PSU_MFR_VOUT_MIN: -+ value = data->mfr_vout_min; -+ break; -+ case PSU_MFR_VOUT_MAX: -+ value = data->mfr_vout_max; -+ break; -+ case PSU_MFR_PIN_MAX: -+ value = data->mfr_pin_max; -+ break; -+ case PSU_MFR_POUT_MAX: -+ value = data->mfr_pout_max; -+ break; -+ case PSU_MFR_IOUT_MAX: -+ value = data->mfr_iout_max; -+ break; -+ case PSU_MFR_IIN_MAX: -+ value = data->mfr_iin_max; -+ break; -+ } -+ -+ exponent = two_complement_to_int(value >> 11, 5, 0x1f); -+ mantissa = two_complement_to_int(value & 0x7ff, 11, 0x7ff); -+ -+ return (exponent >= 0) ? 
sprintf(buf, "%d\n", (mantissa << exponent) * multiplier) : -+ sprintf(buf, "%d\n", (mantissa * multiplier) / (1 << -exponent)); -+} -+ -+static ssize_t show_fan_fault(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct ym2651y_data *data = ym2651y_update_device(dev); -+ u8 shift; -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ shift = (attr->index == PSU_FAN1_FAULT) ? 7 : 6; -+ -+ return sprintf(buf, "%d\n", data->fan_fault >> shift); -+} -+ -+static ssize_t show_over_temp(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct ym2651y_data *data = ym2651y_update_device(dev); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ return sprintf(buf, "%d\n", data->over_temp >> 7); -+} -+ -+static ssize_t show_ascii(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct ym2651y_data *data = ym2651y_update_device(dev); -+ u8 *ptr = NULL; -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ switch (attr->index) { -+ case PSU_FAN_DIRECTION: /* psu_fan_dir */ -+ ptr = data->fan_dir + 1; /* Skip the first byte since it is the length of string. */ -+ break; -+ case PSU_MFR_ID: /* psu_mfr_id */ -+ ptr = data->mfr_id + 1; /* The first byte is the count byte of string. */; -+ break; -+ case PSU_MFR_MODEL: /* psu_mfr_model */ -+ ptr = data->mfr_model + 1; /* The first byte is the count byte of string. */ -+ break; -+ case PSU_MFR_REVISION: /* psu_mfr_revision */ -+ ptr = data->mfr_revsion + 1; /* The first byte is the count byte of string. */ -+ break; -+ default: -+ return 0; -+ } -+ -+ return sprintf(buf, "%s\n", ptr); -+} -+ -+static const struct attribute_group ym2651y_group = { -+ .attrs = ym2651y_attributes, -+}; -+ -+static int ym2651y_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct ym2651y_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, -+ I2C_FUNC_SMBUS_BYTE_DATA | -+ I2C_FUNC_SMBUS_WORD_DATA | -+ I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct ym2651y_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &ym2651y_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &ym2651y_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int ym2651y_remove(struct i2c_client *client) -+{ -+ struct ym2651y_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &ym2651y_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+static const struct i2c_device_id ym2651y_id[] = { -+ { "ym2651", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, ym2651y_id); -+ -+static struct i2c_driver ym2651y_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "ym2651", -+ }, -+ .probe = ym2651y_probe, -+ .remove = ym2651y_remove, -+ .id_table = ym2651y_id, -+ .address_list = 
normal_i2c, -+}; -+ -+static int ym2651y_read_byte(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_byte_data(client, reg); -+} -+ -+static int ym2651y_read_word(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_word_data(client, reg); -+} -+ -+static int ym2651y_write_word(struct i2c_client *client, u8 reg, u16 value) -+{ -+ return i2c_smbus_write_word_data(client, reg, value); -+} -+ -+static int ym2651y_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+struct reg_data_byte { -+ u8 reg; -+ u8 *value; -+}; -+ -+struct reg_data_word { -+ u8 reg; -+ u16 *value; -+}; -+ -+static struct ym2651y_data *ym2651y_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct ym2651y_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int i, status; -+ u8 command; -+ struct reg_data_byte regs_byte[] = { {0x19, &data->capability}, -+ {0x7d, &data->over_temp}, -+ {0x81, &data->fan_fault}, -+ {0x98, &data->pmbus_revision}}; -+ struct reg_data_word regs_word[] = { {0x79, &data->status_word}, -+ {0x8b, &data->v_out}, -+ {0x8c, &data->i_out}, -+ {0x96, &data->p_out}, -+ {0x8d, &data->temp}, -+ {0x3b, &(data->fan_duty_cycle[0])}, -+ {0x3c, &(data->fan_duty_cycle[1])}, -+ {0x90, &data->fan_speed}, -+ {0xa0, &data->mfr_vin_min}, -+ {0xa1, &data->mfr_vin_max}, -+ {0xa2, &data->mfr_iin_max}, -+ {0xa3, &data->mfr_pin_max}, -+ {0xa4, &data->mfr_vout_min}, -+ {0xa5, &data->mfr_vout_max}, -+ {0xa6, &data->mfr_iout_max}, -+ {0xa7, &data->mfr_pout_max}}; -+ -+ dev_dbg(&client->dev, "Starting ym2651 update\n"); -+ data->valid = 0; -+ -+ /* Read byte data */ -+ for (i = 0; i < ARRAY_SIZE(regs_byte); i++) { -+ status = ym2651y_read_byte(client, regs_byte[i].reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", -+ regs_byte[i].reg, status); -+ goto exit; -+ } -+ else { -+ *(regs_byte[i].value) = status; -+ } -+ } -+ -+ /* Read word data */ -+ for (i = 0; i < ARRAY_SIZE(regs_word); i++) { -+ status = ym2651y_read_word(client, regs_word[i].reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", -+ regs_word[i].reg, status); -+ goto exit; -+ } -+ else { -+ *(regs_word[i].value) = status; -+ } -+ } -+ -+ /* Read fan_direction */ -+ command = 0xC3; -+ status = ym2651y_read_block(client, command, data->fan_dir, -+ ARRAY_SIZE(data->fan_dir)-1); -+ data->fan_dir[ARRAY_SIZE(data->fan_dir)-1] = '\0'; -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", command, status); -+ goto exit; -+ } -+ -+ /* Read mfr_id */ -+ command = 0x99; -+ status = ym2651y_read_block(client, command, data->mfr_id, -+ ARRAY_SIZE(data->mfr_id)-1); -+ data->mfr_id[ARRAY_SIZE(data->mfr_id)-1] = '\0'; -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", command, status); -+ goto exit; -+ } -+ -+ /* Read mfr_model */ -+ command = 0x9a; -+ status = ym2651y_read_block(client, command, data->mfr_model, -+ ARRAY_SIZE(data->mfr_model)-1); -+ data->mfr_model[ARRAY_SIZE(data->mfr_model)-1] = '\0'; -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", command, status); -+ goto exit; -+ } -+ -+ /* Read mfr_revsion */ 
-+ command = 0x9b; -+ status = ym2651y_read_block(client, command, data->mfr_revsion, -+ ARRAY_SIZE(data->mfr_revsion)-1); -+ data->mfr_revsion[ARRAY_SIZE(data->mfr_revsion)-1] = '\0'; -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "reg %d, err %d\n", command, status); -+ goto exit; -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init ym2651y_init(void) -+{ -+ return i2c_add_driver(&ym2651y_driver); -+} -+ -+static void __exit ym2651y_exit(void) -+{ -+ i2c_del_driver(&ym2651y_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("3Y Power YM-2651Y driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(ym2651y_init); -+module_exit(ym2651y_exit); -+ -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index a5ccde1..fb48975 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -54,6 +54,13 @@ config LEDS_ACCTON_AS6712_32x - This option enables support for the LEDs on the Accton as6712 32x. - Say Y to enable LEDs on the Accton as6712 32x. - -+config LEDS_ACCTON_AS7512_32x -+ tristate "LED support for the Accton as7512 32x" -+ depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD -+ help -+ This option enables support for the LEDs on the Accton as7512 32x. -+ Say Y to enable LEDs on the Accton as7512 32x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index d952f0f..dff0462 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -45,6 +45,7 @@ obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o - obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o - obj-$(CONFIG_LEDS_ACCTON_AS5712_54x) += leds-accton_as5712_54x.o - obj-$(CONFIG_LEDS_ACCTON_AS6712_32x) += leds-accton_as6712_32x.o -+obj-$(CONFIG_LEDS_ACCTON_AS7512_32x) += leds-accton_as7512_32x.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as7512_32x.c b/drivers/leds/leds-accton_as7512_32x.c -new file mode 100644 -index 0000000..3dc5def ---- /dev/null -+++ b/drivers/leds/leds-accton_as7512_32x.c -@@ -0,0 +1,503 @@ -+/* -+ * A LED driver for the accton_as7512_32x_led -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+#define DRVNAME "as7512_32x_led" -+#define NUM_OF_LED_REG 5 -+ -+struct accton_as7512_32x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[NUM_OF_LED_REG]; /* 5 LED registers */ -+}; -+ -+static struct accton_as7512_32x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+ -+#define LED_CNTRLER_I2C_ADDRESS (0x60) -+/* -+#define LED_TYPE_DIAG_REG_MASK (0x3) -+#define LED_MODE_DIAG_GREEN_VALUE (0x02) -+#define LED_MODE_DIAG_RED_VALUE (0x01) -+#define LED_MODE_DIAG_AMBER_VALUE (0x00) -+#define LED_MODE_DIAG_OFF_VALUE (0x03) -+#define LED_TYPE_DIAG_REG_MASK 0xFF -+#define LED_MODE_DIAG_GREEN_VALUE 0xFF -+#define LED_MODE_DIAG_RED_VALUE 0xFF -+#define LED_MODE_DIAG_OFF_VALUE 0 -+ -+#define LED_TYPE_LOC_REG_MASK 0xFF -+#define LED_MODE_LOC_ON_VALUE 0x0 -+#define LED_MODE_LOC_OFF_VALUE 0xFF -+*/ -+/* -+#define LED_TYPE_FAN_REG_MASK 0xFF -+#define LED_MODE_FAN_GREEN_VALUE 0xFF -+#define LED_MODE_FAN_RED_VALUE 0xFF -+#define LED_MODE_FAN_OFF_VALUE 0 -+*/ -+#define LED_BRIGHTNESS_ON_VALUE 0x0 -+#define LED_BRIGHTNESS_OFF_VALUE 0xFF -+ -+static const u8 led_reg[NUM_OF_LED_REG] = -+{ -+ 0x41, /* Diag LED-Green. */ -+ 0x42, /* Diag LED-Red. */ -+ 0x43, /* FAN LED-Green. */ -+ 0x44, /* FAN LED-Red. */ -+ 0x45, /* LOC LED. */ -+ //0x1C, /* FAN 1-4 LED */ -+ //0x1D /* FAN 5-6 LED */ -+}; -+ -+enum led_type { -+ LED_TYPE_DIAG_GREEN, -+ LED_TYPE_DIAG_RED, -+ LED_TYPE_LOC, -+ LED_TYPE_FAN_GREEN, -+ LED_TYPE_FAN_RED -+}; -+ -+struct led_reg { -+ u32 types; -+ u8 reg_addr; -+}; -+ -+enum led_light_mode { -+ LED_MODE_OFF = 0, -+ LED_MODE_GREEN, -+ LED_MODE_AMBER, -+ LED_MODE_RED, -+ LED_MODE_BLUE, -+ LED_MODE_GREEN_BLINK, -+ LED_MODE_AMBER_BLINK, -+ LED_MODE_RED_BLINK, -+ LED_MODE_BLUE_BLINK, -+ LED_MODE_AUTO, -+ LED_MODE_UNKNOWN -+}; -+ -+#if 0 -+struct led_type_mode { -+ enum led_type type; -+ enum led_light_mode mode; -+ int reg_bit_mask; -+ int mode_value; -+}; -+ -+struct led_type_mode led_type_mode_data[] = { -+{LED_TYPE_LOC, LED_MODE_OFF, LED_TYPE_LOC_REG_MASK, LED_MODE_LOC_OFF_VALUE}, -+{LED_TYPE_LOC, LED_MODE_BLUE, LED_TYPE_LOC_REG_MASK, LED_MODE_LOC_ON_VALUE}, -+{LED_TYPE_DIAG_GREEN, LED_MODE_OFF, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_OFF_VALUE}, -+{LED_TYPE_DIAG_GREEN, LED_MODE_GREEN, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_GREEN_VALUE}, -+{LED_TYPE_DIAG_RED, LED_MODE_OFF, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_OFF_VALUE}, -+{LED_TYPE_DIAG_RED, LED_MODE_RED, LED_TYPE_DIAG_REG_MASK, LED_MODE_DIAG_RED_VALUE}, -+{LED_TYPE_FAN_GREEN, LED_MODE_OFF, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_OFF_VALUE}, -+{LED_TYPE_FAN_GREEN, LED_MODE_GREEN, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_GREEN_VALUE}, -+{LED_TYPE_FAN_RED, LED_MODE_OFF, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_OFF_VALUE}, -+{LED_TYPE_FAN_RED, LED_MODE_RED, LED_TYPE_FAN_REG_MASK, LED_MODE_FAN_RED_VALUE}, -+}; -+#endif -+ -+/* -+static int accton_getLedReg(enum led_type type, u8 *reg) -+{ -+ int i; -+ for (i = 0; i < ARRAY_SIZE(led_reg_map); i++) { -+ if(led_reg_map[i].types & (type<<1)){ -+ *reg = led_reg_map[i].reg_addr; -+ return 0; -+ } -+ } -+ return 1; -+} -+*/ -+ -+#if 0 -+static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) -+{ -+ int 
i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if ((led_type_mode_data[i].reg_bit_mask & reg_val) == -+ led_type_mode_data[i].mode_value) -+ { -+ return led_type_mode_data[i].mode; -+ } -+ } -+ -+ return LED_MODE_UNKNOWN; -+} -+ -+static u8 led_light_mode_to_reg_val(enum led_type type, -+ enum led_light_mode mode, u8 reg_val) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { -+ if (type != led_type_mode_data[i].type) -+ continue; -+ -+ if (mode != led_type_mode_data[i].mode) -+ continue; -+ -+ reg_val = led_type_mode_data[i].mode_value | -+ (reg_val & (~led_type_mode_data[i].reg_bit_mask)); -+ break; -+ } -+ -+ return reg_val; -+} -+#endif -+ -+static int accton_as7512_32x_led_read_value(u8 reg) -+{ -+ return accton_i2c_cpld_read(LED_CNTRLER_I2C_ADDRESS, reg); -+} -+ -+static int accton_as7512_32x_led_write_value(u8 reg, u8 value) -+{ -+ return accton_i2c_cpld_write(LED_CNTRLER_I2C_ADDRESS, reg, value); -+} -+ -+static void accton_as7512_32x_led_update(void) -+{ -+ mutex_lock(&ledctl->update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as7512_32x_led update\n"); -+ ledctl->valid = 0; -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as7512_32x_led_read_value(led_reg[i]); -+ -+ if (status < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); -+ goto exit; -+ } -+ else { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+#if 0 -+static void accton_as7512_32x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ u8 reg, enum led_type type) -+{ -+ int reg_val; -+ -+ mutex_lock(&ledctl->update_lock); -+ -+ reg_val = accton_as7512_32x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as7512_32x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+#endif -+ -+static void accton_as7512_32x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ if (LED_MODE_OFF == (enum led_light_mode)led_light_mode) { -+ accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_OFF_VALUE); -+ accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_OFF_VALUE); -+ return; -+ } -+ -+ if (LED_MODE_GREEN == (enum led_light_mode)led_light_mode) { -+ accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_ON_VALUE); -+ accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_OFF_VALUE); -+ return; -+ } -+ -+ if (LED_MODE_RED == (enum led_light_mode)led_light_mode) { -+ accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_OFF_VALUE); -+ accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_ON_VALUE); -+ return; -+ } -+ -+ if (LED_MODE_AMBER == (enum led_light_mode)led_light_mode) { -+ accton_as7512_32x_led_write_value(led_reg[0], LED_BRIGHTNESS_ON_VALUE); -+ accton_as7512_32x_led_write_value(led_reg[1], LED_BRIGHTNESS_ON_VALUE); -+ return; -+ } -+} -+ -+static enum led_brightness accton_as7512_32x_led_diag_get(struct led_classdev *cdev) -+{ -+ u8 
is_green_reg_on, is_red_reg_on; -+ -+ accton_as7512_32x_led_update(); -+ -+ is_green_reg_on = (ledctl->reg_val[0] == LED_BRIGHTNESS_OFF_VALUE) ? 0 : 1; -+ is_red_reg_on = (ledctl->reg_val[1] == LED_BRIGHTNESS_OFF_VALUE) ? 0 : 1; -+ -+ if (is_green_reg_on && is_red_reg_on) { -+ return LED_MODE_AMBER; -+ } -+ -+ if (is_green_reg_on) { -+ return LED_MODE_GREEN; -+ } -+ -+ if (is_red_reg_on) { -+ return LED_MODE_RED; -+ } -+ -+ return LED_MODE_OFF; -+} -+ -+static void accton_as7512_32x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ if (LED_MODE_OFF == (enum led_light_mode)led_light_mode) { -+ accton_as7512_32x_led_write_value(led_reg[4], LED_BRIGHTNESS_OFF_VALUE); -+ return; -+ } -+ -+ if (LED_MODE_BLUE == (enum led_light_mode)led_light_mode) { -+ accton_as7512_32x_led_write_value(led_reg[4], LED_BRIGHTNESS_ON_VALUE); -+ return; -+ } -+} -+ -+static enum led_brightness accton_as7512_32x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as7512_32x_led_update(); -+ -+ if (ledctl->reg_val[0] == LED_BRIGHTNESS_OFF_VALUE) { -+ return LED_MODE_OFF; -+ } -+ -+ return LED_MODE_BLUE; -+} -+ -+static enum led_brightness accton_as7512_32x_led_auto_get(struct led_classdev *cdev) -+{ -+ return LED_MODE_AUTO; -+} -+ -+static struct led_classdev accton_as7512_32x_leds[] = { -+ [0] = { -+ .name = "accton_as7512_32x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7512_32x_led_diag_set, -+ .brightness_get = accton_as7512_32x_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_RED, -+ }, -+ [1] = { -+ .name = "accton_as7512_32x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7512_32x_led_loc_set, -+ .brightness_get = accton_as7512_32x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_BLUE, -+ }, -+ [2] = { -+ .name = "accton_as7512_32x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = NULL, -+ .brightness_get = accton_as7512_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [3] = { -+ .name = "accton_as7512_32x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = NULL, -+ .brightness_get = accton_as7512_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [4] = { -+ .name = "accton_as7512_32x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = NULL, -+ .brightness_get = accton_as7512_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as7512_32x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { -+ led_classdev_suspend(&accton_as7512_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as7512_32x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { -+ led_classdev_resume(&accton_as7512_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as7512_32x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as7512_32x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as7512_32x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < 
i; j++) { -+ led_classdev_unregister(&accton_as7512_32x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as7512_32x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7512_32x_leds); i++) { -+ led_classdev_unregister(&accton_as7512_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as7512_32x_led_driver = { -+ .probe = accton_as7512_32x_led_probe, -+ .remove = accton_as7512_32x_led_remove, -+ .suspend = accton_as7512_32x_led_suspend, -+ .resume = accton_as7512_32x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as7512_32x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as7512_32x(void); -+ if (!platform_accton_as7512_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as7512_32x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as7512_32x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as7512_32x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as7512_32x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as7512_32x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as7512_32x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as7512_32x_led_init); -+module_exit(accton_as7512_32x_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as7512_32x_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 6e025e9..8e959c7 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -91,6 +91,15 @@ config EEPROM_ACCTON_AS6712_32x_SFP - This driver can also be built as a module. If so, the module will - be called accton_as6712_32x_sfp. - -+config EEPROM_ACCTON_AS7512_32x_SFP -+ tristate "Accton as7512 32x sfp" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7512 32x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7512_32x_sfp. -+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 9001de9..0386999 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -8,4 +8,5 @@ obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o - obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o - obj-$(CONFIG_EEPROM_ACCTON_AS5712_54x_SFP) += accton_as5712_54x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS6712_32x_SFP) += accton_as6712_32x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS7512_32x_SFP) += accton_as7512_32x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as7512_32x_sfp.c b/drivers/misc/eeprom/accton_as7512_32x_sfp.c -new file mode 100644 -index 0000000..1468961 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as7512_32x_sfp.c -@@ -0,0 +1,356 @@ -+/* -+ * An hwmon driver for accton as7512_32x sfp -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. 
-+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BIT_INDEX(i) (1UL << (i)) -+ -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as7512_32x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u32 is_present; /* present status */ -+}; -+ -+static struct as7512_32x_sfp_data *as7512_32x_sfp_update_device(struct device *dev); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+enum as7512_32x_sfp_sysfs_attributes { -+ SFP_PORT_NUMBER, -+ SFP_IS_PRESENT, -+ SFP_IS_PRESENT_ALL, -+ SFP_EEPROM -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+ -+static struct attribute *as7512_32x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7512_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->port+1); -+} -+ -+/* Error-check the CPLD read results. 
*/ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+} while(0) -+ -+static ssize_t show_present(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ int values[4]; -+ /* -+ * Report the SFP_PRESENCE status for all ports. -+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); -+ -+ /* Return values 1 -> 32 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], values[3]); -+ } -+ else { /* SFP_IS_PRESENT */ -+ struct as7512_32x_sfp_data *data = as7512_32x_sfp_update_device(dev); -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ return sprintf(buf, "%d\n", data->is_present); -+ } -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as7512_32x_sfp_data *data = as7512_32x_sfp_update_device(dev); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if (!data->is_present) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as7512_32x_sfp_group = { -+ .attrs = as7512_32x_sfp_attributes, -+}; -+ -+static int as7512_32x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7512_32x_sfp_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7512_32x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7512_32x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7512_32x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7512_32x_sfp_remove(struct i2c_client *client) -+{ -+ struct as7512_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7512_32x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as7512_32x_sfp1, as7512_32x_sfp2, as7512_32x_sfp3, as7512_32x_sfp4, -+as7512_32x_sfp5, as7512_32x_sfp6, as7512_32x_sfp7, as7512_32x_sfp8, -+as7512_32x_sfp9, as7512_32x_sfp10,as7512_32x_sfp11,as7512_32x_sfp12, -+as7512_32x_sfp13,as7512_32x_sfp14,as7512_32x_sfp15,as7512_32x_sfp16, 
-+as7512_32x_sfp17,as7512_32x_sfp18,as7512_32x_sfp19,as7512_32x_sfp20, -+as7512_32x_sfp21,as7512_32x_sfp22,as7512_32x_sfp23,as7512_32x_sfp24, -+as7512_32x_sfp25,as7512_32x_sfp26,as7512_32x_sfp27,as7512_32x_sfp28, -+as7512_32x_sfp29,as7512_32x_sfp30,as7512_32x_sfp31,as7512_32x_sfp32 -+}; -+ -+static const struct i2c_device_id as7512_32x_sfp_id[] = { -+{ "as7512_32x_sfp1", as7512_32x_sfp1 }, { "as7512_32x_sfp2", as7512_32x_sfp2 }, -+{ "as7512_32x_sfp3", as7512_32x_sfp3 }, { "as7512_32x_sfp4", as7512_32x_sfp4 }, -+{ "as7512_32x_sfp5", as7512_32x_sfp5 }, { "as7512_32x_sfp6", as7512_32x_sfp6 }, -+{ "as7512_32x_sfp7", as7512_32x_sfp7 }, { "as7512_32x_sfp8", as7512_32x_sfp8 }, -+{ "as7512_32x_sfp9", as7512_32x_sfp9 }, { "as7512_32x_sfp10", as7512_32x_sfp10 }, -+{ "as7512_32x_sfp11", as7512_32x_sfp11 }, { "as7512_32x_sfp12", as7512_32x_sfp12 }, -+{ "as7512_32x_sfp13", as7512_32x_sfp13 }, { "as7512_32x_sfp14", as7512_32x_sfp14 }, -+{ "as7512_32x_sfp15", as7512_32x_sfp15 }, { "as7512_32x_sfp16", as7512_32x_sfp16 }, -+{ "as7512_32x_sfp17", as7512_32x_sfp17 }, { "as7512_32x_sfp18", as7512_32x_sfp18 }, -+{ "as7512_32x_sfp19", as7512_32x_sfp19 }, { "as7512_32x_sfp20", as7512_32x_sfp20 }, -+{ "as7512_32x_sfp21", as7512_32x_sfp21 }, { "as7512_32x_sfp22", as7512_32x_sfp22 }, -+{ "as7512_32x_sfp23", as7512_32x_sfp23 }, { "as7512_32x_sfp24", as7512_32x_sfp24 }, -+{ "as7512_32x_sfp25", as7512_32x_sfp25 }, { "as7512_32x_sfp26", as7512_32x_sfp26 }, -+{ "as7512_32x_sfp27", as7512_32x_sfp27 }, { "as7512_32x_sfp28", as7512_32x_sfp28 }, -+{ "as7512_32x_sfp29", as7512_32x_sfp29 }, { "as7512_32x_sfp30", as7512_32x_sfp30 }, -+{ "as7512_32x_sfp31", as7512_32x_sfp31 }, { "as7512_32x_sfp32", as7512_32x_sfp32 }, -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as7512_32x_sfp_id); -+ -+static struct i2c_driver as7512_32x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as7512_32x_sfp", -+ }, -+ .probe = as7512_32x_sfp_probe, -+ .remove = as7512_32x_sfp_remove, -+ .id_table = as7512_32x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as7512_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as7512_32x_sfp_data *as7512_32x_sfp_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7512_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0; -+ u8 cpld_reg = 0x30 + (data->port/8); -+ -+ data->valid = 0; -+ -+ /* Read present status of the specified port number */ -+ data->is_present = 0; -+ status = accton_i2c_cpld_read(0x60, cpld_reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); -+ goto exit; -+ } -+ -+ data->is_present = (status & (1 << (data->port % 8))) ? 
0 : 1; -+ -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if (data->is_present) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { -+ status = as7512_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, -+ data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), -+ I2C_SMBUS_BLOCK_MAX); -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); -+ goto exit; -+ } -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as7512_32x_sfp_init(void) -+{ -+ extern int platform_accton_as7512_32x(void); -+ if (!platform_accton_as7512_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7512_32x_sfp_driver); -+} -+ -+static void __exit as7512_32x_sfp_exit(void) -+{ -+ i2c_del_driver(&as7512_32x_sfp_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as7512_32x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as7512_32x_sfp_init); -+module_exit(as7512_32x_sfp_exit); diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch deleted file mode 100644 index 41ff38bc..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch +++ /dev/null @@ -1,1830 +0,0 @@ -Device driver patches for accton as7712 (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index c410426..2d4a7fb 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1502,6 +1502,24 @@ config SENSORS_YM2651Y - This driver can also be built as a module. If so, the module will - be called ym2651y. - -+config SENSORS_ACCTON_AS7712_32x_FAN -+ tristate "Accton as7712 32x fan" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7712 32x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7712_32x_fan. -+ -+config SENSORS_ACCTON_AS7712_32x_PSU -+ tristate "Accton as7712 32x psu" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7712 32x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7712_32x_psu. 
-+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index f8ee399..ea97f4a 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -27,6 +27,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS6712_32x_FAN) += accton_as6712_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS6712_32x_PSU) += accton_as6712_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS7512_32x_FAN) += accton_as7512_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS7512_32x_PSU) += accton_as7512_32x_psu.o -+obj-$(CONFIG_SENSORS_ACCTON_AS7712_32x_FAN) += accton_as7712_32x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS7712_32x_PSU) += accton_as7712_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_I2C_CPLD) += accton_i2c_cpld.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o -diff --git a/drivers/hwmon/accton_as7712_32x_fan.c b/drivers/hwmon/accton_as7712_32x_fan.c -new file mode 100644 -index 0000000..74c577d ---- /dev/null -+++ b/drivers/hwmon/accton_as7712_32x_fan.c -@@ -0,0 +1,491 @@ -+/* -+ * A hwmon driver for the Accton as7712 32x fan -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRVNAME "as7712_32x_fan" -+ -+static struct as7712_32x_fan_data *as7712_32x_fan_update_device(struct device *dev); -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+/* fan related data, the index should match sysfs_fan_attributes -+ */ -+static const u8 fan_reg[] = { -+ 0x0F, /* fan 1-6 present status */ -+ 0x10, /* fan 1-6 direction(0:B2F 1:F2B) */ -+ 0x11, /* fan PWM(for all fan) */ -+ 0x12, /* front fan 1 speed(rpm) */ -+ 0x13, /* front fan 2 speed(rpm) */ -+ 0x14, /* front fan 3 speed(rpm) */ -+ 0x15, /* front fan 4 speed(rpm) */ -+ 0x16, /* front fan 5 speed(rpm) */ -+ 0x17, /* front fan 6 speed(rpm) */ -+ 0x22, /* rear fan 1 speed(rpm) */ -+ 0x23, /* rear fan 2 speed(rpm) */ -+ 0x24, /* rear fan 3 speed(rpm) */ -+ 0x25, /* rear fan 4 speed(rpm) */ -+ 0x26, /* rear fan 5 speed(rpm) */ -+ 0x27, /* rear fan 6 speed(rpm) */ -+}; -+ -+/* Each client has this additional data */ -+struct as7712_32x_fan_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */ -+}; -+ -+enum fan_id { -+ FAN1_ID, -+ FAN2_ID, -+ FAN3_ID, -+ FAN4_ID, -+ FAN5_ID, -+ FAN6_ID -+}; -+ -+enum sysfs_fan_attributes { -+ FAN_PRESENT_REG, -+ FAN_DIRECTION_REG, -+ FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ -+ FAN1_FRONT_SPEED_RPM, -+ FAN2_FRONT_SPEED_RPM, -+ FAN3_FRONT_SPEED_RPM, -+ FAN4_FRONT_SPEED_RPM, -+ FAN5_FRONT_SPEED_RPM, -+ FAN6_FRONT_SPEED_RPM, -+ FAN1_REAR_SPEED_RPM, -+ FAN2_REAR_SPEED_RPM, -+ FAN3_REAR_SPEED_RPM, -+ FAN4_REAR_SPEED_RPM, -+ FAN5_REAR_SPEED_RPM, -+ FAN6_REAR_SPEED_RPM, -+ FAN1_DIRECTION, -+ FAN2_DIRECTION, -+ FAN3_DIRECTION, -+ FAN4_DIRECTION, -+ FAN5_DIRECTION, -+ FAN6_DIRECTION, -+ FAN1_PRESENT, -+ FAN2_PRESENT, -+ FAN3_PRESENT, -+ FAN4_PRESENT, -+ FAN5_PRESENT, -+ FAN6_PRESENT, -+ FAN1_FAULT, -+ FAN2_FAULT, -+ FAN3_FAULT, -+ FAN4_FAULT, -+ FAN5_FAULT, -+ FAN6_FAULT -+}; -+ -+/* Define attributes -+ */ -+#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT) -+#define DECLARE_FAN_FAULT_ATTR(index) &sensor_dev_attr_fan##index##_fault.dev_attr.attr -+ -+#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION) -+#define DECLARE_FAN_DIRECTION_ATTR(index) &sensor_dev_attr_fan##index##_direction.dev_attr.attr -+ -+#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE) -+#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr -+ -+#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT) -+#define DECLARE_FAN_PRESENT_ATTR(index) 
&sensor_dev_attr_fan##index##_present.dev_attr.attr -+ -+#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_FRONT_SPEED_RPM);\ -+ static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_REAR_SPEED_RPM) -+#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr -+ -+/* 6 fan fault attributes in this platform */ -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); -+/* 6 fan speed(rpm) attributes in this platform */ -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); -+/* 6 fan present attributes in this platform */ -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); -+/* 6 fan direction attribute in this platform */ -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(6); -+/* 1 fan duty cycle attribute in this platform */ -+DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); -+ -+static struct attribute *as7712_32x_fan_attributes[] = { -+ /* fan related attributes */ -+ DECLARE_FAN_FAULT_ATTR(1), -+ DECLARE_FAN_FAULT_ATTR(2), -+ DECLARE_FAN_FAULT_ATTR(3), -+ DECLARE_FAN_FAULT_ATTR(4), -+ DECLARE_FAN_FAULT_ATTR(5), -+ DECLARE_FAN_FAULT_ATTR(6), -+ DECLARE_FAN_SPEED_RPM_ATTR(1), -+ DECLARE_FAN_SPEED_RPM_ATTR(2), -+ DECLARE_FAN_SPEED_RPM_ATTR(3), -+ DECLARE_FAN_SPEED_RPM_ATTR(4), -+ DECLARE_FAN_SPEED_RPM_ATTR(5), -+ DECLARE_FAN_SPEED_RPM_ATTR(6), -+ DECLARE_FAN_PRESENT_ATTR(1), -+ DECLARE_FAN_PRESENT_ATTR(2), -+ DECLARE_FAN_PRESENT_ATTR(3), -+ DECLARE_FAN_PRESENT_ATTR(4), -+ DECLARE_FAN_PRESENT_ATTR(5), -+ DECLARE_FAN_PRESENT_ATTR(6), -+ DECLARE_FAN_DIRECTION_ATTR(1), -+ DECLARE_FAN_DIRECTION_ATTR(2), -+ DECLARE_FAN_DIRECTION_ATTR(3), -+ DECLARE_FAN_DIRECTION_ATTR(4), -+ DECLARE_FAN_DIRECTION_ATTR(5), -+ DECLARE_FAN_DIRECTION_ATTR(6), -+ DECLARE_FAN_DUTY_CYCLE_ATTR(), -+ NULL -+}; -+ -+#define FAN_DUTY_CYCLE_REG_MASK 0xF -+#define FAN_MAX_DUTY_CYCLE 100 -+#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 -+ -+static int as7712_32x_fan_read_value(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_byte_data(client, reg); -+} -+ -+static int as7712_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) -+{ -+ return i2c_smbus_write_byte_data(client, reg, value); -+} -+ -+/* fan utility functions -+ */ -+static u32 reg_val_to_duty_cycle(u8 reg_val) -+{ -+ reg_val &= FAN_DUTY_CYCLE_REG_MASK; -+ return ((u32)(reg_val+1) * 625 + 75)/ 100; -+} -+ -+static u8 duty_cycle_to_reg_val(u8 duty_cycle) -+{ -+ return ((u32)duty_cycle * 100 / 625) - 1; -+} -+ -+static u32 reg_val_to_speed_rpm(u8 reg_val) -+{ -+ return (u32)reg_val * 
FAN_REG_VAL_TO_SPEED_RPM_STEP; -+} -+ -+static u8 reg_val_to_direction(u8 reg_val, enum fan_id id) -+{ -+ u8 mask = (1 << id); -+ -+ reg_val &= mask; -+ -+ return reg_val ? 1 : 0; -+} -+static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) -+{ -+ u8 mask = (1 << id); -+ -+ reg_val &= mask; -+ -+ return reg_val ? 0 : 1; -+} -+ -+static u8 is_fan_fault(struct as7712_32x_fan_data *data, enum fan_id id) -+{ -+ u8 ret = 1; -+ int front_fan_index = FAN1_FRONT_SPEED_RPM + id; -+ int rear_fan_index = FAN1_REAR_SPEED_RPM + id; -+ -+ /* Check if the speed of front or rear fan is ZERO, -+ */ -+ if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && -+ reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { -+ ret = 0; -+ } -+ -+ return ret; -+} -+ -+static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ int error, value; -+ struct i2c_client *client = to_i2c_client(dev); -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < 0 || value > FAN_MAX_DUTY_CYCLE) -+ return -EINVAL; -+ -+ as7712_32x_fan_write_value(client, 0x33, 0); /* Disable fan speed watch dog */ -+ as7712_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); -+ return count; -+} -+ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as7712_32x_fan_data *data = as7712_32x_fan_update_device(dev); -+ ssize_t ret = 0; -+ -+ if (data->valid) { -+ switch (attr->index) { -+ case FAN_DUTY_CYCLE_PERCENTAGE: -+ { -+ u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); -+ ret = sprintf(buf, "%u\n", duty_cycle); -+ break; -+ } -+ case FAN1_FRONT_SPEED_RPM: -+ case FAN2_FRONT_SPEED_RPM: -+ case FAN3_FRONT_SPEED_RPM: -+ case FAN4_FRONT_SPEED_RPM: -+ case FAN5_FRONT_SPEED_RPM: -+ case FAN6_FRONT_SPEED_RPM: -+ case FAN1_REAR_SPEED_RPM: -+ case FAN2_REAR_SPEED_RPM: -+ case FAN3_REAR_SPEED_RPM: -+ case FAN4_REAR_SPEED_RPM: -+ case FAN5_REAR_SPEED_RPM: -+ case FAN6_REAR_SPEED_RPM: -+ ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); -+ break; -+ case FAN1_PRESENT: -+ case FAN2_PRESENT: -+ case FAN3_PRESENT: -+ case FAN4_PRESENT: -+ case FAN5_PRESENT: -+ case FAN6_PRESENT: -+ ret = sprintf(buf, "%d\n", -+ reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], -+ attr->index - FAN1_PRESENT)); -+ break; -+ case FAN1_FAULT: -+ case FAN2_FAULT: -+ case FAN3_FAULT: -+ case FAN4_FAULT: -+ case FAN5_FAULT: -+ case FAN6_FAULT: -+ ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); -+ break; -+ case FAN1_DIRECTION: -+ case FAN2_DIRECTION: -+ case FAN3_DIRECTION: -+ case FAN4_DIRECTION: -+ case FAN5_DIRECTION: -+ case FAN6_DIRECTION: -+ ret = sprintf(buf, "%d\n", -+ reg_val_to_direction(data->reg_val[FAN_DIRECTION_REG], -+ attr->index - FAN1_DIRECTION)); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ return ret; -+} -+ -+static const struct attribute_group as7712_32x_fan_group = { -+ .attrs = as7712_32x_fan_attributes, -+}; -+ -+static struct as7712_32x_fan_data *as7712_32x_fan_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7712_32x_fan_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || -+ !data->valid) { -+ int i; -+ -+ dev_dbg(&client->dev, "Starting as7712_32x_fan 
update\n"); -+ data->valid = 0; -+ -+ /* Update fan data -+ */ -+ for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { -+ int status = as7712_32x_fan_read_value(client, fan_reg[i]); -+ -+ if (status < 0) { -+ data->valid = 0; -+ mutex_unlock(&data->update_lock); -+ dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); -+ return data; -+ } -+ else { -+ data->reg_val[i] = status; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int as7712_32x_fan_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7712_32x_fan_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7712_32x_fan_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7712_32x_fan_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: fan '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7712_32x_fan_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7712_32x_fan_remove(struct i2c_client *client) -+{ -+ struct as7712_32x_fan_data *data = i2c_get_clientdata(client); -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7712_32x_fan_group); -+ -+ return 0; -+} -+ -+/* Addresses to scan */ -+static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; -+ -+static const struct i2c_device_id as7712_32x_fan_id[] = { -+ { "as7712_32x_fan", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as7712_32x_fan_id); -+ -+static struct i2c_driver as7712_32x_fan_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = DRVNAME, -+ }, -+ .probe = as7712_32x_fan_probe, -+ .remove = as7712_32x_fan_remove, -+ .id_table = as7712_32x_fan_id, -+ .address_list = normal_i2c, -+}; -+ -+static int __init as7712_32x_fan_init(void) -+{ -+ extern int platform_accton_as7712_32x(void); -+ if (!platform_accton_as7712_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7712_32x_fan_driver); -+} -+ -+static void __exit as7712_32x_fan_exit(void) -+{ -+ i2c_del_driver(&as7712_32x_fan_driver); -+} -+ -+module_init(as7712_32x_fan_init); -+module_exit(as7712_32x_fan_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as7712_32x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/hwmon/accton_as7712_32x_psu.c b/drivers/hwmon/accton_as7712_32x_psu.c -new file mode 100644 -index 0000000..f1f11f5 ---- /dev/null -+++ b/drivers/hwmon/accton_as7712_32x_psu.c -@@ -0,0 +1,384 @@ -+/* -+ * An hwmon driver for accton as7712_32x Power Module -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. 
-+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MAX_MODEL_NAME 16 -+ -+#define DC12V_FAN_DIR_OFFSET 0x34 -+#define DC12V_FAN_DIR_LEN 3 -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as7712_32x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[MAX_MODEL_NAME]; /* Model name, read from eeprom */ -+ char fan_dir[DC12V_FAN_DIR_LEN+1]; /* DC12V fan direction */ -+}; -+ -+static ssize_t show_string(struct device *dev, struct device_attribute *da, char *buf); -+static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev); -+ -+enum as7712_32x_psu_sysfs_attributes { -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD, -+ PSU_FAN_DIR /* For DC12V only */ -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_string, NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+static SENSOR_DEVICE_ATTR(psu_fan_dir, S_IRUGO, show_string, NULL, PSU_FAN_DIR); -+ -+static struct attribute *as7712_32x_psu_attributes[] = { -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ &sensor_dev_attr_psu_fan_dir.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> (1-data->index) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = (data->status >> (3-data->index) & 0x1); -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_string(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = 
to_sensor_dev_attr(da); -+ struct as7712_32x_psu_data *data = as7712_32x_psu_update_device(dev); -+ char *ptr = NULL; -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ if (attr->index == PSU_MODEL_NAME) { -+ ptr = data->model_name; -+ } -+ else { /* PSU_FAN_DIR */ -+ ptr = data->fan_dir; -+ } -+ -+ return sprintf(buf, "%s\n", ptr); -+} -+ -+static const struct attribute_group as7712_32x_psu_group = { -+ .attrs = as7712_32x_psu_attributes, -+}; -+ -+static int as7712_32x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7712_32x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7712_32x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ data->index = dev_id->driver_data; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7712_32x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7712_32x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7712_32x_psu_remove(struct i2c_client *client) -+{ -+ struct as7712_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7712_32x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum psu_index -+{ -+ as7712_32x_psu1, -+ as7712_32x_psu2 -+}; -+ -+static const struct i2c_device_id as7712_32x_psu_id[] = { -+ { "as7712_32x_psu1", as7712_32x_psu1 }, -+ { "as7712_32x_psu2", as7712_32x_psu2 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as7712_32x_psu_id); -+ -+static struct i2c_driver as7712_32x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as7712_32x_psu", -+ }, -+ .probe = as7712_32x_psu_probe, -+ .remove = as7712_32x_psu_remove, -+ .id_table = as7712_32x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as7712_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = 0; -+ int retry_count = 5; -+ -+ while (retry_count) { -+ retry_count--; -+ -+ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) { -+ msleep(10); -+ continue; -+ } -+ -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ msleep(10); -+ continue; -+ } -+ -+ result = 0; -+ break; -+ } -+ -+ return result; -+} -+ -+enum psu_type { -+ PSU_TYPE_AC_110V, -+ PSU_TYPE_DC_48V, -+ PSU_TYPE_DC_12V -+}; -+ -+struct model_name_info { -+ enum psu_type type; -+ u8 offset; -+ u8 length; -+ char* model_name; -+}; -+ -+struct model_name_info models[] = { -+{PSU_TYPE_AC_110V, 0x20, 8, "YM-2651Y"}, -+{PSU_TYPE_DC_48V, 0x20, 8, "YM-2651V"}, -+{PSU_TYPE_DC_12V, 0x00, 11, "PSU-12V-750"}, -+}; -+ -+static int as7712_32x_psu_model_name_get(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7712_32x_psu_data *data = i2c_get_clientdata(client); -+ int i, status; -+ -+ for (i = 0; i < ARRAY_SIZE(models); i++) { -+ 
memset(data->model_name, 0, sizeof(data->model_name)); -+ -+ status = as7712_32x_psu_read_block(client, models[i].offset, -+ data->model_name, models[i].length); -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x) offset(0x%x)\n", -+ client->addr, models[i].offset); -+ return status; -+ } -+ else { -+ data->model_name[models[i].length] = '\0'; -+ } -+ -+ /* Determine if the model name is known, if not, read next index -+ */ -+ if (strncmp(data->model_name, models[i].model_name, models[i].length) == 0) { -+ return 0; -+ } -+ else { -+ data->model_name[0] = '\0'; -+ } -+ } -+ -+ return -ENODATA; -+} -+ -+static struct as7712_32x_psu_data *as7712_32x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7712_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status; -+ int power_good = 0; -+ -+ data->valid = 0; -+ dev_dbg(&client->dev, "Starting as7712_32x update\n"); -+ -+ /* Read psu status */ -+ status = accton_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ goto exit; -+ } -+ else { -+ data->status = status; -+ } -+ -+ /* Read model name */ -+ memset(data->model_name, 0, sizeof(data->model_name)); -+ memset(data->fan_dir, 0, sizeof(data->fan_dir)); -+ power_good = (data->status >> (3-data->index) & 0x1); -+ -+ if (power_good) { -+ if (as7712_32x_psu_model_name_get(dev) < 0) { -+ goto exit; -+ } -+ -+ if (strncmp(data->model_name, -+ models[PSU_TYPE_DC_12V].model_name, -+ models[PSU_TYPE_DC_12V].length) == 0) { -+ /* Read fan direction */ -+ status = as7712_32x_psu_read_block(client, DC12V_FAN_DIR_OFFSET, -+ data->fan_dir, DC12V_FAN_DIR_LEN); -+ -+ if (status < 0) { -+ data->fan_dir[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read fan direction from (0x%x) offset(0x%x)\n", -+ client->addr, DC12V_FAN_DIR_OFFSET); -+ goto exit; -+ } -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as7712_32x_psu_init(void) -+{ -+ extern int platform_accton_as7712_32x(void); -+ if (!platform_accton_as7712_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7712_32x_psu_driver); -+} -+ -+static void __exit as7712_32x_psu_exit(void) -+{ -+ i2c_del_driver(&as7712_32x_psu_driver); -+} -+ -+module_init(as7712_32x_psu_init); -+module_exit(as7712_32x_psu_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as7712_32x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c -index 96e3490..3aeb08d 100644 ---- a/drivers/hwmon/accton_i2c_cpld.c -+++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -201,6 +201,22 @@ int platform_accton_as7512_32x(void) - } - EXPORT_SYMBOL(platform_accton_as7512_32x); - -+static struct dmi_system_id as7712_dmi_table[] = { -+ { -+ .ident = "Accton AS7712", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), -+ }, -+ } -+}; -+ -+int platform_accton_as7712_32x(void) -+{ -+ return dmi_check_system(as7712_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as7712_32x); -+ - MODULE_AUTHOR("Brandon Chuang "); - MODULE_DESCRIPTION("accton_i2c_cpld driver"); - MODULE_LICENSE("GPL"); -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 
fb48975..48106f2 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -61,6 +61,13 @@ config LEDS_ACCTON_AS7512_32x - This option enables support for the LEDs on the Accton as7512 32x. - Say Y to enable LEDs on the Accton as7512 32x. - -+config LEDS_ACCTON_AS7712_32x -+ tristate "LED support for the Accton as7712 32x" -+ depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD -+ help -+ This option enables support for the LEDs on the Accton as7712 32x. -+ Say Y to enable LEDs on the Accton as7712 32x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index dff0462..c4ea931 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -46,7 +46,7 @@ obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o - obj-$(CONFIG_LEDS_ACCTON_AS5712_54x) += leds-accton_as5712_54x.o - obj-$(CONFIG_LEDS_ACCTON_AS6712_32x) += leds-accton_as6712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS7512_32x) += leds-accton_as7512_32x.o -- -+obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o - -diff --git a/drivers/leds/leds-accton_as7712_32x.c b/drivers/leds/leds-accton_as7712_32x.c -new file mode 100644 -index 0000000..8b14437 ---- /dev/null -+++ b/drivers/leds/leds-accton_as7712_32x.c -@@ -0,0 +1,443 @@ -+/* -+ * A LED driver for the accton_as7712_32x_led -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "accton_as7712_32x_led" -+ -+struct accton_as7712_32x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[1]; /* only 1 register*/ -+}; -+ -+static struct accton_as7712_32x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+ -+#define LED_CNTRLER_I2C_ADDRESS (0x60) -+ -+#define LED_TYPE_DIAG_REG_MASK (0x3) -+#define LED_MODE_DIAG_GREEN_VALUE (0x02) -+#define LED_MODE_DIAG_RED_VALUE (0x01) -+#define LED_MODE_DIAG_AMBER_VALUE (0x00) /*It's yellow actually. 
Green+Red=Yellow*/ -+#define LED_MODE_DIAG_OFF_VALUE (0x03) -+ -+ -+#define LED_TYPE_LOC_REG_MASK (0x80) -+#define LED_MODE_LOC_ON_VALUE (0) -+#define LED_MODE_LOC_OFF_VALUE (0x80) -+ -+enum led_type { -+ LED_TYPE_DIAG, -+ LED_TYPE_LOC, -+ LED_TYPE_FAN, -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2 -+}; -+ -+struct led_reg { -+ u32 types; -+ u8 reg_addr; -+}; -+ -+static const struct led_reg led_reg_map[] = { -+ {(1<update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as7712_32x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as7712_32x_led_read_value(led_reg_map[i].reg_addr); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg_map[i].reg_addr, status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as7712_32x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ enum led_type type) -+{ -+ int reg_val; -+ u8 reg ; -+ mutex_lock(&ledctl->update_lock); -+ -+ if( !accton_getLedReg(type, ®)) -+ { -+ dev_dbg(&ledctl->pdev->dev, "Not match item for %d.\n", type); -+ } -+ -+ reg_val = accton_as7712_32x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as7712_32x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+ -+static void accton_as7712_32x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as7712_32x_led_set(led_cdev, led_light_mode, LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as7712_32x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as7712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as7712_32x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as7712_32x_led_set(led_cdev, led_light_mode, LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as7712_32x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as7712_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static void accton_as7712_32x_led_auto_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+} -+ -+static enum led_brightness accton_as7712_32x_led_auto_get(struct led_classdev *cdev) -+{ -+ return LED_MODE_AUTO; -+} -+ -+static struct led_classdev accton_as7712_32x_leds[] = { -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as7712_32x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7712_32x_led_diag_set, -+ .brightness_get = accton_as7712_32x_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_RED, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as7712_32x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7712_32x_led_loc_set, -+ .brightness_get = accton_as7712_32x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_BLUE, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as7712_32x_led::fan", -+ 
.default_trigger = "unused", -+ .brightness_set = accton_as7712_32x_led_auto_set, -+ .brightness_get = accton_as7712_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as7712_32x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7712_32x_led_auto_set, -+ .brightness_get = accton_as7712_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as7712_32x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7712_32x_led_auto_set, -+ .brightness_get = accton_as7712_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as7712_32x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { -+ led_classdev_suspend(&accton_as7712_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as7712_32x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { -+ led_classdev_resume(&accton_as7712_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as7712_32x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as7712_32x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as7712_32x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as7712_32x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as7712_32x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7712_32x_leds); i++) { -+ led_classdev_unregister(&accton_as7712_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as7712_32x_led_driver = { -+ .probe = accton_as7712_32x_led_probe, -+ .remove = accton_as7712_32x_led_remove, -+ .suspend = accton_as7712_32x_led_suspend, -+ .resume = accton_as7712_32x_led_resume, -+ .driver = { -+ .name = DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as7712_32x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as7712_32x(void); -+ if (!platform_accton_as7712_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as7712_32x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as7712_32x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as7712_32x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as7712_32x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as7712_32x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as7712_32x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as7712_32x_led_init); -+module_exit(accton_as7712_32x_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as7712_32x_led 
driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 8e959c7..bd435a0 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -100,6 +100,15 @@ config EEPROM_ACCTON_AS7512_32x_SFP - This driver can also be built as a module. If so, the module will - be called accton_as7512_32x_sfp. - -+config EEPROM_ACCTON_AS7712_32x_SFP -+ tristate "Accton as7712 32x sfp" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7712 32x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7712_32x_sfp. -+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 0386999..4ad6540 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -9,4 +9,5 @@ obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o - obj-$(CONFIG_EEPROM_ACCTON_AS5712_54x_SFP) += accton_as5712_54x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS6712_32x_SFP) += accton_as6712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS7512_32x_SFP) += accton_as7512_32x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as7712_32x_sfp.c b/drivers/misc/eeprom/accton_as7712_32x_sfp.c -new file mode 100644 -index 0000000..5953ae6 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as7712_32x_sfp.c -@@ -0,0 +1,356 @@ -+/* -+ * An hwmon driver for accton as7712_32x sfp -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BIT_INDEX(i) (1UL << (i)) -+ -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as7712_32x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u32 is_present; /* present status */ -+}; -+ -+static struct as7712_32x_sfp_data *as7712_32x_sfp_update_device(struct device *dev); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+enum as7712_32x_sfp_sysfs_attributes { -+ SFP_PORT_NUMBER, -+ SFP_IS_PRESENT, -+ SFP_IS_PRESENT_ALL, -+ SFP_EEPROM -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+ -+static struct attribute *as7712_32x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7712_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->port+1); -+} -+ -+/* Error-check the CPLD read results. */ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+} while(0) -+ -+static ssize_t show_present(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ int values[4]; -+ /* -+ * Report the SFP_PRESENCE status for all ports. 
-+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); -+ -+ /* Return values 1 -> 32 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], values[3]); -+ } -+ else { /* SFP_IS_PRESENT */ -+ struct as7712_32x_sfp_data *data = as7712_32x_sfp_update_device(dev); -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ return sprintf(buf, "%d\n", data->is_present); -+ } -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as7712_32x_sfp_data *data = as7712_32x_sfp_update_device(dev); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if (!data->is_present) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as7712_32x_sfp_group = { -+ .attrs = as7712_32x_sfp_attributes, -+}; -+ -+static int as7712_32x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7712_32x_sfp_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7712_32x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7712_32x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7712_32x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7712_32x_sfp_remove(struct i2c_client *client) -+{ -+ struct as7712_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7712_32x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as7712_32x_sfp1, as7712_32x_sfp2, as7712_32x_sfp3, as7712_32x_sfp4, -+as7712_32x_sfp5, as7712_32x_sfp6, as7712_32x_sfp7, as7712_32x_sfp8, -+as7712_32x_sfp9, as7712_32x_sfp10,as7712_32x_sfp11,as7712_32x_sfp12, -+as7712_32x_sfp13,as7712_32x_sfp14,as7712_32x_sfp15,as7712_32x_sfp16, -+as7712_32x_sfp17,as7712_32x_sfp18,as7712_32x_sfp19,as7712_32x_sfp20, -+as7712_32x_sfp21,as7712_32x_sfp22,as7712_32x_sfp23,as7712_32x_sfp24, -+as7712_32x_sfp25,as7712_32x_sfp26,as7712_32x_sfp27,as7712_32x_sfp28, -+as7712_32x_sfp29,as7712_32x_sfp30,as7712_32x_sfp31,as7712_32x_sfp32 -+}; -+ -+static const struct i2c_device_id as7712_32x_sfp_id[] = { -+{ "as7712_32x_sfp1", as7712_32x_sfp1 }, { "as7712_32x_sfp2", as7712_32x_sfp2 }, -+{ "as7712_32x_sfp3", as7712_32x_sfp3 }, { "as7712_32x_sfp4", as7712_32x_sfp4 }, -+{ "as7712_32x_sfp5", as7712_32x_sfp5 }, { "as7712_32x_sfp6", 
as7712_32x_sfp6 }, -+{ "as7712_32x_sfp7", as7712_32x_sfp7 }, { "as7712_32x_sfp8", as7712_32x_sfp8 }, -+{ "as7712_32x_sfp9", as7712_32x_sfp9 }, { "as7712_32x_sfp10", as7712_32x_sfp10 }, -+{ "as7712_32x_sfp11", as7712_32x_sfp11 }, { "as7712_32x_sfp12", as7712_32x_sfp12 }, -+{ "as7712_32x_sfp13", as7712_32x_sfp13 }, { "as7712_32x_sfp14", as7712_32x_sfp14 }, -+{ "as7712_32x_sfp15", as7712_32x_sfp15 }, { "as7712_32x_sfp16", as7712_32x_sfp16 }, -+{ "as7712_32x_sfp17", as7712_32x_sfp17 }, { "as7712_32x_sfp18", as7712_32x_sfp18 }, -+{ "as7712_32x_sfp19", as7712_32x_sfp19 }, { "as7712_32x_sfp20", as7712_32x_sfp20 }, -+{ "as7712_32x_sfp21", as7712_32x_sfp21 }, { "as7712_32x_sfp22", as7712_32x_sfp22 }, -+{ "as7712_32x_sfp23", as7712_32x_sfp23 }, { "as7712_32x_sfp24", as7712_32x_sfp24 }, -+{ "as7712_32x_sfp25", as7712_32x_sfp25 }, { "as7712_32x_sfp26", as7712_32x_sfp26 }, -+{ "as7712_32x_sfp27", as7712_32x_sfp27 }, { "as7712_32x_sfp28", as7712_32x_sfp28 }, -+{ "as7712_32x_sfp29", as7712_32x_sfp29 }, { "as7712_32x_sfp30", as7712_32x_sfp30 }, -+{ "as7712_32x_sfp31", as7712_32x_sfp31 }, { "as7712_32x_sfp32", as7712_32x_sfp32 }, -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as7712_32x_sfp_id); -+ -+static struct i2c_driver as7712_32x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as7712_32x_sfp", -+ }, -+ .probe = as7712_32x_sfp_probe, -+ .remove = as7712_32x_sfp_remove, -+ .id_table = as7712_32x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as7712_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as7712_32x_sfp_data *as7712_32x_sfp_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7712_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0; -+ u8 cpld_reg = 0x30 + (data->port/8); -+ -+ data->valid = 0; -+ -+ /* Read present status of the specified port number */ -+ data->is_present = 0; -+ status = accton_i2c_cpld_read(0x60, cpld_reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); -+ goto exit; -+ } -+ -+ data->is_present = (status & (1 << (data->port % 8))) ? 
0 : 1; -+ -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if (data->is_present) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { -+ status = as7712_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, -+ data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), -+ I2C_SMBUS_BLOCK_MAX); -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); -+ goto exit; -+ } -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as7712_32x_sfp_init(void) -+{ -+ extern int platform_accton_as7712_32x(void); -+ if (!platform_accton_as7712_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7712_32x_sfp_driver); -+} -+ -+static void __exit as7712_32x_sfp_exit(void) -+{ -+ i2c_del_driver(&as7712_32x_sfp_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as7712_32x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as7712_32x_sfp_init); -+module_exit(as7712_32x_sfp_exit); diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch deleted file mode 100644 index d4b5d41c..00000000 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch +++ /dev/null @@ -1,1707 +0,0 @@ -Device driver patches for accton as7716-32x (fan/psu/cpld/led/sfp) - -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 968bd5f..bc10314 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1592,6 +1592,24 @@ config SENSORS_ACCTON_AS5512_54X_FAN - This driver can also be built as a module. If so, the module will - be called accton_as5512_54x_fan. - -+config SENSORS_ACCTON_AS7716_32x_FAN -+ tristate "Accton as7716 32x fan" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7716 32x fan. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7716_32x_fan. -+ -+config SENSORS_ACCTON_AS7716_32x_PSU -+ tristate "Accton as7716 32x psu" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7716 32x psu. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7716_32x_psu. 
-+ - if ACPI - - comment "ACPI drivers" -diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index b8ee7b0..851d90a 100644 ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -38,6 +38,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_PSU) += accton_as5812_54t_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_PSU) += accton_as5512_54x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_FAN) += accton_as5512_54x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS7716_32x_FAN) += accton_as7716_32x_fan.o -+obj-$(CONFIG_SENSORS_ACCTON_AS7716_32x_PSU) += accton_as7716_32x_psu.o - obj-$(CONFIG_SENSORS_AD7314) += ad7314.o - obj-$(CONFIG_SENSORS_AD7414) += ad7414.o - obj-$(CONFIG_SENSORS_AD7418) += ad7418.o -diff --git a/drivers/hwmon/accton_as7716_32x_fan.c b/drivers/hwmon/accton_as7716_32x_fan.c -new file mode 100644 -index 0000000..924374c ---- /dev/null -+++ b/drivers/hwmon/accton_as7716_32x_fan.c -@@ -0,0 +1,452 @@ -+/* -+ * A hwmon driver for the Accton as7716 32x fan -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRVNAME "as7716_32x_fan" -+ -+static struct as7716_32x_fan_data *as7716_32x_fan_update_device(struct device *dev); -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+/* fan related data, the index should match sysfs_fan_attributes -+ */ -+static const u8 fan_reg[] = { -+ 0x0F, /* fan 1-6 present status */ -+ 0x11, /* fan PWM(for all fan) */ -+ 0x12, /* front fan 1 speed(rpm) */ -+ 0x13, /* front fan 2 speed(rpm) */ -+ 0x14, /* front fan 3 speed(rpm) */ -+ 0x15, /* front fan 4 speed(rpm) */ -+ 0x16, /* front fan 5 speed(rpm) */ -+ 0x17, /* front fan 6 speed(rpm) */ -+ 0x22, /* rear fan 1 speed(rpm) */ -+ 0x23, /* rear fan 2 speed(rpm) */ -+ 0x24, /* rear fan 3 speed(rpm) */ -+ 0x25, /* rear fan 4 speed(rpm) */ -+ 0x26, /* rear fan 5 speed(rpm) */ -+ 0x27, /* rear fan 6 speed(rpm) */ -+}; -+ -+/* Each client has this additional data */ -+struct as7716_32x_fan_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */ -+}; -+ -+enum fan_id { -+ FAN1_ID, -+ FAN2_ID, -+ FAN3_ID, -+ FAN4_ID, -+ FAN5_ID, -+ FAN6_ID -+}; -+ -+enum sysfs_fan_attributes { -+ FAN_PRESENT_REG, -+ FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */ -+ FAN1_FRONT_SPEED_RPM, -+ FAN2_FRONT_SPEED_RPM, -+ FAN3_FRONT_SPEED_RPM, -+ FAN4_FRONT_SPEED_RPM, -+ FAN5_FRONT_SPEED_RPM, -+ FAN6_FRONT_SPEED_RPM, -+ FAN1_REAR_SPEED_RPM, -+ FAN2_REAR_SPEED_RPM, -+ FAN3_REAR_SPEED_RPM, -+ FAN4_REAR_SPEED_RPM, -+ FAN5_REAR_SPEED_RPM, -+ FAN6_REAR_SPEED_RPM, -+ FAN1_PRESENT, -+ FAN2_PRESENT, -+ FAN3_PRESENT, -+ FAN4_PRESENT, -+ FAN5_PRESENT, -+ FAN6_PRESENT, -+ FAN1_FAULT, -+ FAN2_FAULT, -+ FAN3_FAULT, -+ FAN4_FAULT, -+ FAN5_FAULT, -+ FAN6_FAULT -+}; -+ -+/* Define attributes -+ */ -+#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT) -+#define DECLARE_FAN_FAULT_ATTR(index) &sensor_dev_attr_fan##index##_fault.dev_attr.attr -+ -+#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION) -+#define DECLARE_FAN_DIRECTION_ATTR(index) &sensor_dev_attr_fan##index##_direction.dev_attr.attr -+ -+#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE) -+#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr -+ -+#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT) -+#define DECLARE_FAN_PRESENT_ATTR(index) &sensor_dev_attr_fan##index##_present.dev_attr.attr -+ -+#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \ -+ static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL, 
FAN##index##_FRONT_SPEED_RPM);\ -+ static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_REAR_SPEED_RPM) -+#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ -+ &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr -+ -+/* 6 fan fault attributes in this platform */ -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); -+/* 6 fan speed(rpm) attributes in this platform */ -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); -+/* 6 fan present attributes in this platform */ -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); -+DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); -+/* 1 fan duty cycle attribute in this platform */ -+DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); -+ -+static struct attribute *as7716_32x_fan_attributes[] = { -+ /* fan related attributes */ -+ DECLARE_FAN_FAULT_ATTR(1), -+ DECLARE_FAN_FAULT_ATTR(2), -+ DECLARE_FAN_FAULT_ATTR(3), -+ DECLARE_FAN_FAULT_ATTR(4), -+ DECLARE_FAN_FAULT_ATTR(5), -+ DECLARE_FAN_FAULT_ATTR(6), -+ DECLARE_FAN_SPEED_RPM_ATTR(1), -+ DECLARE_FAN_SPEED_RPM_ATTR(2), -+ DECLARE_FAN_SPEED_RPM_ATTR(3), -+ DECLARE_FAN_SPEED_RPM_ATTR(4), -+ DECLARE_FAN_SPEED_RPM_ATTR(5), -+ DECLARE_FAN_SPEED_RPM_ATTR(6), -+ DECLARE_FAN_PRESENT_ATTR(1), -+ DECLARE_FAN_PRESENT_ATTR(2), -+ DECLARE_FAN_PRESENT_ATTR(3), -+ DECLARE_FAN_PRESENT_ATTR(4), -+ DECLARE_FAN_PRESENT_ATTR(5), -+ DECLARE_FAN_PRESENT_ATTR(6), -+ DECLARE_FAN_DUTY_CYCLE_ATTR(), -+ NULL -+}; -+ -+#define FAN_DUTY_CYCLE_REG_MASK 0xF -+#define FAN_MAX_DUTY_CYCLE 100 -+#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 -+ -+static int as7716_32x_fan_read_value(struct i2c_client *client, u8 reg) -+{ -+ return i2c_smbus_read_byte_data(client, reg); -+} -+ -+static int as7716_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) -+{ -+ return i2c_smbus_write_byte_data(client, reg, value); -+} -+ -+/* fan utility functions -+ */ -+static u32 reg_val_to_duty_cycle(u8 reg_val) -+{ -+ reg_val &= FAN_DUTY_CYCLE_REG_MASK; -+ return ((u32)(reg_val+1) * 625 + 75)/ 100; -+} -+ -+static u8 duty_cycle_to_reg_val(u8 duty_cycle) -+{ -+ return ((u32)duty_cycle * 100 / 625) - 1; -+} -+ -+static u32 reg_val_to_speed_rpm(u8 reg_val) -+{ -+ return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; -+} -+ -+static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) -+{ -+ u8 mask = (1 << id); -+ -+ reg_val &= mask; -+ -+ return reg_val ? 
0 : 1; -+} -+ -+static u8 is_fan_fault(struct as7716_32x_fan_data *data, enum fan_id id) -+{ -+ u8 ret = 1; -+ int front_fan_index = FAN1_FRONT_SPEED_RPM + id; -+ int rear_fan_index = FAN1_REAR_SPEED_RPM + id; -+ -+ /* Check if the speed of front or rear fan is ZERO, -+ */ -+ if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && -+ reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { -+ ret = 0; -+ } -+ -+ return ret; -+} -+ -+static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, -+ const char *buf, size_t count) -+{ -+ int error, value; -+ struct i2c_client *client = to_i2c_client(dev); -+ -+ error = kstrtoint(buf, 10, &value); -+ if (error) -+ return error; -+ -+ if (value < 0 || value > FAN_MAX_DUTY_CYCLE) -+ return -EINVAL; -+ -+ as7716_32x_fan_write_value(client, 0x33, 0); /* Disable fan speed watch dog */ -+ as7716_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); -+ return count; -+} -+ -+static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as7716_32x_fan_data *data = as7716_32x_fan_update_device(dev); -+ ssize_t ret = 0; -+ -+ if (data->valid) { -+ switch (attr->index) { -+ case FAN_DUTY_CYCLE_PERCENTAGE: -+ { -+ u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); -+ ret = sprintf(buf, "%u\n", duty_cycle); -+ break; -+ } -+ case FAN1_FRONT_SPEED_RPM: -+ case FAN2_FRONT_SPEED_RPM: -+ case FAN3_FRONT_SPEED_RPM: -+ case FAN4_FRONT_SPEED_RPM: -+ case FAN5_FRONT_SPEED_RPM: -+ case FAN6_FRONT_SPEED_RPM: -+ case FAN1_REAR_SPEED_RPM: -+ case FAN2_REAR_SPEED_RPM: -+ case FAN3_REAR_SPEED_RPM: -+ case FAN4_REAR_SPEED_RPM: -+ case FAN5_REAR_SPEED_RPM: -+ case FAN6_REAR_SPEED_RPM: -+ ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); -+ break; -+ case FAN1_PRESENT: -+ case FAN2_PRESENT: -+ case FAN3_PRESENT: -+ case FAN4_PRESENT: -+ case FAN5_PRESENT: -+ case FAN6_PRESENT: -+ ret = sprintf(buf, "%d\n", -+ reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], -+ attr->index - FAN1_PRESENT)); -+ break; -+ case FAN1_FAULT: -+ case FAN2_FAULT: -+ case FAN3_FAULT: -+ case FAN4_FAULT: -+ case FAN5_FAULT: -+ case FAN6_FAULT: -+ ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ return ret; -+} -+ -+static const struct attribute_group as7716_32x_fan_group = { -+ .attrs = as7716_32x_fan_attributes, -+}; -+ -+static struct as7716_32x_fan_data *as7716_32x_fan_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7716_32x_fan_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || -+ !data->valid) { -+ int i; -+ -+ dev_dbg(&client->dev, "Starting as7716_32x_fan update\n"); -+ data->valid = 0; -+ -+ /* Update fan data -+ */ -+ for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { -+ int status = as7716_32x_fan_read_value(client, fan_reg[i]); -+ -+ if (status < 0) { -+ data->valid = 0; -+ mutex_unlock(&data->update_lock); -+ dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); -+ return data; -+ } -+ else { -+ data->reg_val[i] = status; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int as7716_32x_fan_probe(struct i2c_client *client, -+ const struct 
i2c_device_id *dev_id) -+{ -+ struct as7716_32x_fan_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7716_32x_fan_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7716_32x_fan_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: fan '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7716_32x_fan_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7716_32x_fan_remove(struct i2c_client *client) -+{ -+ struct as7716_32x_fan_data *data = i2c_get_clientdata(client); -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7716_32x_fan_group); -+ -+ return 0; -+} -+ -+/* Addresses to scan */ -+static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; -+ -+static const struct i2c_device_id as7716_32x_fan_id[] = { -+ { "as7716_32x_fan", 0 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as7716_32x_fan_id); -+ -+static struct i2c_driver as7716_32x_fan_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = DRVNAME, -+ }, -+ .probe = as7716_32x_fan_probe, -+ .remove = as7716_32x_fan_remove, -+ .id_table = as7716_32x_fan_id, -+ .address_list = normal_i2c, -+}; -+ -+static int __init as7716_32x_fan_init(void) -+{ -+ extern int platform_accton_as7716_32x(void); -+ if (!platform_accton_as7716_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7716_32x_fan_driver); -+} -+ -+static void __exit as7716_32x_fan_exit(void) -+{ -+ i2c_del_driver(&as7716_32x_fan_driver); -+} -+ -+module_init(as7716_32x_fan_init); -+module_exit(as7716_32x_fan_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as7716_32x_fan driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/hwmon/accton_as7716_32x_psu.c b/drivers/hwmon/accton_as7716_32x_psu.c -new file mode 100644 -index 0000000..4fd15ae ---- /dev/null -+++ b/drivers/hwmon/accton_as7716_32x_psu.c -@@ -0,0 +1,293 @@ -+/* -+ * An hwmon driver for accton as7716_32x Power Module -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); -+static int as7716_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as7716_32x_psu_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 index; /* PSU index */ -+ u8 status; /* Status(present/power_good) register read from CPLD */ -+ char model_name[9]; /* Model name, read from eeprom */ -+}; -+ -+static struct as7716_32x_psu_data *as7716_32x_psu_update_device(struct device *dev); -+ -+enum as7716_32x_psu_sysfs_attributes { -+ PSU_PRESENT, -+ PSU_MODEL_NAME, -+ PSU_POWER_GOOD -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); -+static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); -+static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); -+ -+static struct attribute *as7716_32x_psu_attributes[] = { -+ &sensor_dev_attr_psu_present.dev_attr.attr, -+ &sensor_dev_attr_psu_model_name.dev_attr.attr, -+ &sensor_dev_attr_psu_power_good.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_status(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ struct as7716_32x_psu_data *data = as7716_32x_psu_update_device(dev); -+ u8 status = 0; -+ -+ if (attr->index == PSU_PRESENT) { -+ status = !(data->status >> (1-data->index) & 0x1); -+ } -+ else { /* PSU_POWER_GOOD */ -+ status = (data->status >> (3-data->index) & 0x1); -+ } -+ -+ return sprintf(buf, "%d\n", status); -+} -+ -+static ssize_t show_model_name(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as7716_32x_psu_data *data = as7716_32x_psu_update_device(dev); -+ -+ return sprintf(buf, "%s\n", data->model_name); -+} -+ -+static const struct attribute_group as7716_32x_psu_group = { -+ .attrs = as7716_32x_psu_attributes, -+}; -+ -+static int as7716_32x_psu_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7716_32x_psu_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7716_32x_psu_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ data->valid = 0; -+ data->index = dev_id->driver_data; -+ mutex_init(&data->update_lock); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7716_32x_psu_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: psu '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ 
-+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7716_32x_psu_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7716_32x_psu_remove(struct i2c_client *client) -+{ -+ struct as7716_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7716_32x_psu_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum psu_index -+{ -+ as7716_32x_psu1, -+ as7716_32x_psu2 -+}; -+ -+static const struct i2c_device_id as7716_32x_psu_id[] = { -+ { "as7716_32x_psu1", as7716_32x_psu1 }, -+ { "as7716_32x_psu2", as7716_32x_psu2 }, -+ {} -+}; -+MODULE_DEVICE_TABLE(i2c, as7716_32x_psu_id); -+ -+static struct i2c_driver as7716_32x_psu_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as7716_32x_psu", -+ }, -+ .probe = as7716_32x_psu_probe, -+ .remove = as7716_32x_psu_remove, -+ .id_table = as7716_32x_psu_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as7716_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = 0; -+ int retry_count = 5; -+ -+ while (retry_count) { -+ retry_count--; -+ -+ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) { -+ msleep(10); -+ continue; -+ } -+ -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ msleep(10); -+ continue; -+ } -+ -+ result = 0; -+ break; -+ } -+ -+ return result; -+} -+ -+static struct as7716_32x_psu_data *as7716_32x_psu_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7716_32x_psu_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status; -+ int power_good = 0; -+ -+ dev_dbg(&client->dev, "Starting as7716_32x update\n"); -+ -+ /* Read psu status */ -+ status = accton_i2c_cpld_read(0x60, 0x2); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); -+ } -+ else { -+ data->status = status; -+ } -+ -+ /* Read model name */ -+ memset(data->model_name, 0, sizeof(data->model_name)); -+ power_good = (data->status >> (3-data->index) & 0x1); -+ -+ if (power_good) { -+ status = as7716_32x_psu_read_block(client, 0x20, data->model_name, -+ ARRAY_SIZE(data->model_name)-1); -+ -+ if (status < 0) { -+ data->model_name[0] = '\0'; -+ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); -+ } -+ else { -+ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as7716_32x_psu_init(void) -+{ -+ extern int platform_accton_as7716_32x(void); -+ if (!platform_accton_as7716_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7716_32x_psu_driver); -+} -+ -+static void __exit as7716_32x_psu_exit(void) -+{ -+ i2c_del_driver(&as7716_32x_psu_driver); -+} -+ -+module_init(as7716_32x_psu_init); -+module_exit(as7716_32x_psu_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("as7716_32x_psu driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c -index e50c599..89e3a0e 100644 ---- a/drivers/hwmon/accton_i2c_cpld.c -+++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -271,6 +271,29 @@ int platform_accton_as5512_54x(void) - } - EXPORT_SYMBOL(platform_accton_as5512_54x); - -+static 
struct dmi_system_id as7716_dmi_table[] = { -+ { -+ .ident = "Accton AS7716", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), -+ }, -+ }, -+ { -+ .ident = "Accton AS7716", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), -+ }, -+ }, -+}; -+ -+int platform_accton_as7716_32x(void) -+{ -+ return dmi_check_system(as7716_dmi_table); -+} -+EXPORT_SYMBOL(platform_accton_as7716_32x); -+ - MODULE_AUTHOR("Brandon Chuang "); - MODULE_DESCRIPTION("accton_i2c_cpld driver"); - MODULE_LICENSE("GPL"); -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 9ba4a1b..e29de21 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -96,6 +96,13 @@ config LEDS_ACCTON_AS5512_54X - This option enables support for the LEDs on the Accton as5512 54x. - Say Y to enable LEDs on the Accton as5512 54x. - -+config LEDS_ACCTON_AS7716_32x -+ tristate "LED support for the Accton as7716 32x" -+ depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD -+ help -+ This option enables support for the LEDs on the Accton as7716 32x. -+ Say Y to enable LEDs on the Accton as7716 32x. -+ - config LEDS_LM3530 - tristate "LCD Backlight driver for LM3530" - depends on LEDS_CLASS -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index ff3be6c..42f274a 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -51,6 +51,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o - obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS5812_54t) += leds-accton_as5812_54t.o - obj-$(CONFIG_LEDS_ACCTON_AS5512_54X) += leds-accton_as5512_54x.o -+obj-$(CONFIG_LEDS_ACCTON_AS7716_32x) += leds-accton_as7716_32x.o - - # LED SPI Drivers - obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o -diff --git a/drivers/leds/leds-accton_as7716_32x.c b/drivers/leds/leds-accton_as7716_32x.c -new file mode 100644 -index 0000000..5a84897 ---- /dev/null -+++ b/drivers/leds/leds-accton_as7716_32x.c -@@ -0,0 +1,443 @@ -+/* -+ * A LED driver for the accton_as7716_32x_led -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ */ -+ -+/*#define DEBUG*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+extern void led_classdev_unregister(struct led_classdev *led_cdev); -+extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); -+extern void led_classdev_resume(struct led_classdev *led_cdev); -+extern void led_classdev_suspend(struct led_classdev *led_cdev); -+ -+#define DRVNAME "accton_as7716_32x_led" -+ -+struct accton_as7716_32x_led_data { -+ struct platform_device *pdev; -+ struct mutex update_lock; -+ char valid; /* != 0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ u8 reg_val[1]; /* only 1 register*/ -+}; -+ -+static struct accton_as7716_32x_led_data *ledctl = NULL; -+ -+/* LED related data -+ */ -+ -+#define LED_CNTRLER_I2C_ADDRESS (0x60) -+ -+#define LED_TYPE_DIAG_REG_MASK (0x3) -+#define LED_MODE_DIAG_GREEN_VALUE (0x02) -+#define LED_MODE_DIAG_RED_VALUE (0x01) -+#define LED_MODE_DIAG_AMBER_VALUE (0x00) /*It's yellow actually. Green+Red=Yellow*/ -+#define LED_MODE_DIAG_OFF_VALUE (0x03) -+ -+ -+#define LED_TYPE_LOC_REG_MASK (0x80) -+#define LED_MODE_LOC_ON_VALUE (0) -+#define LED_MODE_LOC_OFF_VALUE (0x80) -+ -+enum led_type { -+ LED_TYPE_DIAG, -+ LED_TYPE_LOC, -+ LED_TYPE_FAN, -+ LED_TYPE_PSU1, -+ LED_TYPE_PSU2 -+}; -+ -+struct led_reg { -+ u32 types; -+ u8 reg_addr; -+}; -+ -+static const struct led_reg led_reg_map[] = { -+ {(1<update_lock); -+ -+ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) -+ || !ledctl->valid) { -+ int i; -+ -+ dev_dbg(&ledctl->pdev->dev, "Starting accton_as7716_32x_led update\n"); -+ -+ /* Update LED data -+ */ -+ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { -+ int status = accton_as7716_32x_led_read_value(led_reg_map[i].reg_addr); -+ -+ if (status < 0) { -+ ledctl->valid = 0; -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg_map[i].reg_addr, status); -+ goto exit; -+ } -+ else -+ { -+ ledctl->reg_val[i] = status; -+ } -+ } -+ -+ ledctl->last_updated = jiffies; -+ ledctl->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+static void accton_as7716_32x_led_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode, -+ enum led_type type) -+{ -+ int reg_val; -+ u8 reg ; -+ mutex_lock(&ledctl->update_lock); -+ -+ if( !accton_getLedReg(type, ®)) -+ { -+ dev_dbg(&ledctl->pdev->dev, "Not match item for %d.\n", type); -+ } -+ -+ reg_val = accton_as7716_32x_led_read_value(reg); -+ -+ if (reg_val < 0) { -+ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); -+ goto exit; -+ } -+ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); -+ accton_as7716_32x_led_write_value(reg, reg_val); -+ -+ /* to prevent the slow-update issue */ -+ ledctl->valid = 0; -+ -+exit: -+ mutex_unlock(&ledctl->update_lock); -+} -+ -+ -+static void accton_as7716_32x_led_diag_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ accton_as7716_32x_led_set(led_cdev, led_light_mode, LED_TYPE_DIAG); -+} -+ -+static enum led_brightness accton_as7716_32x_led_diag_get(struct led_classdev *cdev) -+{ -+ accton_as7716_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); -+} -+ -+static void accton_as7716_32x_led_loc_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+ 
accton_as7716_32x_led_set(led_cdev, led_light_mode, LED_TYPE_LOC); -+} -+ -+static enum led_brightness accton_as7716_32x_led_loc_get(struct led_classdev *cdev) -+{ -+ accton_as7716_32x_led_update(); -+ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); -+} -+ -+static void accton_as7716_32x_led_auto_set(struct led_classdev *led_cdev, -+ enum led_brightness led_light_mode) -+{ -+} -+ -+static enum led_brightness accton_as7716_32x_led_auto_get(struct led_classdev *cdev) -+{ -+ return LED_MODE_AUTO; -+} -+ -+static struct led_classdev accton_as7716_32x_leds[] = { -+ [LED_TYPE_DIAG] = { -+ .name = "accton_as7716_32x_led::diag", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7716_32x_led_diag_set, -+ .brightness_get = accton_as7716_32x_led_diag_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_RED, -+ }, -+ [LED_TYPE_LOC] = { -+ .name = "accton_as7716_32x_led::loc", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7716_32x_led_loc_set, -+ .brightness_get = accton_as7716_32x_led_loc_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_BLUE, -+ }, -+ [LED_TYPE_FAN] = { -+ .name = "accton_as7716_32x_led::fan", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7716_32x_led_auto_set, -+ .brightness_get = accton_as7716_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU1] = { -+ .name = "accton_as7716_32x_led::psu1", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7716_32x_led_auto_set, -+ .brightness_get = accton_as7716_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+ [LED_TYPE_PSU2] = { -+ .name = "accton_as7716_32x_led::psu2", -+ .default_trigger = "unused", -+ .brightness_set = accton_as7716_32x_led_auto_set, -+ .brightness_get = accton_as7716_32x_led_auto_get, -+ .flags = LED_CORE_SUSPENDRESUME, -+ .max_brightness = LED_MODE_AUTO, -+ }, -+}; -+ -+static int accton_as7716_32x_led_suspend(struct platform_device *dev, -+ pm_message_t state) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { -+ led_classdev_suspend(&accton_as7716_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as7716_32x_led_resume(struct platform_device *dev) -+{ -+ int i = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { -+ led_classdev_resume(&accton_as7716_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static int accton_as7716_32x_led_probe(struct platform_device *pdev) -+{ -+ int ret, i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { -+ ret = led_classdev_register(&pdev->dev, &accton_as7716_32x_leds[i]); -+ -+ if (ret < 0) -+ break; -+ } -+ -+ /* Check if all LEDs were successfully registered */ -+ if (i != ARRAY_SIZE(accton_as7716_32x_leds)){ -+ int j; -+ -+ /* only unregister the LEDs that were successfully registered */ -+ for (j = 0; j < i; j++) { -+ led_classdev_unregister(&accton_as7716_32x_leds[i]); -+ } -+ } -+ -+ return ret; -+} -+ -+static int accton_as7716_32x_led_remove(struct platform_device *pdev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) { -+ led_classdev_unregister(&accton_as7716_32x_leds[i]); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver accton_as7716_32x_led_driver = { -+ .probe = accton_as7716_32x_led_probe, -+ .remove = accton_as7716_32x_led_remove, -+ .suspend = accton_as7716_32x_led_suspend, -+ .resume = accton_as7716_32x_led_resume, -+ .driver = { -+ .name = 
DRVNAME, -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init accton_as7716_32x_led_init(void) -+{ -+ int ret; -+ -+ extern int platform_accton_as7716_32x(void); -+ if (!platform_accton_as7716_32x()) { -+ return -ENODEV; -+ } -+ -+ ret = platform_driver_register(&accton_as7716_32x_led_driver); -+ if (ret < 0) { -+ goto exit; -+ } -+ -+ ledctl = kzalloc(sizeof(struct accton_as7716_32x_led_data), GFP_KERNEL); -+ if (!ledctl) { -+ ret = -ENOMEM; -+ platform_driver_unregister(&accton_as7716_32x_led_driver); -+ goto exit; -+ } -+ -+ mutex_init(&ledctl->update_lock); -+ -+ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); -+ if (IS_ERR(ledctl->pdev)) { -+ ret = PTR_ERR(ledctl->pdev); -+ platform_driver_unregister(&accton_as7716_32x_led_driver); -+ kfree(ledctl); -+ goto exit; -+ } -+ -+exit: -+ return ret; -+} -+ -+static void __exit accton_as7716_32x_led_exit(void) -+{ -+ platform_device_unregister(ledctl->pdev); -+ platform_driver_unregister(&accton_as7716_32x_led_driver); -+ kfree(ledctl); -+} -+ -+module_init(accton_as7716_32x_led_init); -+module_exit(accton_as7716_32x_led_exit); -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton_as7716_32x_led driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index 70a3f59..97f811f 100644 ---- a/drivers/misc/eeprom/Kconfig -+++ b/drivers/misc/eeprom/Kconfig -@@ -145,6 +145,15 @@ config EEPROM_ACCTON_AS5512_54X_SFP - This driver can also be built as a module. If so, the module will - be called accton_5512_54x_sfp. - -+config EEPROM_ACCTON_AS7716_32x_SFP -+ tristate "Accton as7716 32x sfp" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7716 32x sfp. -+ -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7716_32x_sfp. -+ - config EEPROM_93CX6 - tristate "EEPROM 93CX6 support" - help -diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 907f836..b59d70d 100644 ---- a/drivers/misc/eeprom/Makefile -+++ b/drivers/misc/eeprom/Makefile -@@ -14,4 +14,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5812_54t_SFP) += accton_as5812_54t_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5512_54X_SFP) += accton_as5512_54x_sfp.o -+obj-$(CONFIG_EEPROM_ACCTON_AS7716_32x_SFP) += accton_as7716_32x_sfp.o - obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o -diff --git a/drivers/misc/eeprom/accton_as7716_32x_sfp.c b/drivers/misc/eeprom/accton_as7716_32x_sfp.c -new file mode 100644 -index 0000000..432e9b7 ---- /dev/null -+++ b/drivers/misc/eeprom/accton_as7716_32x_sfp.c -@@ -0,0 +1,356 @@ -+/* -+ * An hwmon driver for accton as7716_32x sfp -+ * -+ * Copyright (C) 2014 Accton Technology Corporation. -+ * Brandon Chuang -+ * -+ * Based on ad7414.c -+ * Copyright 2006 Stefan Roese , DENX Software Engineering -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BIT_INDEX(i) (1UL << (i)) -+ -+ -+/* Addresses scanned -+ */ -+static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; -+ -+/* Each client has this additional data -+ */ -+struct as7716_32x_sfp_data { -+ struct device *hwmon_dev; -+ struct mutex update_lock; -+ char valid; /* !=0 if registers are valid */ -+ unsigned long last_updated; /* In jiffies */ -+ int port; /* Front port index */ -+ char eeprom[256]; /* eeprom data */ -+ u32 is_present; /* present status */ -+}; -+ -+static struct as7716_32x_sfp_data *as7716_32x_sfp_update_device(struct device *dev); -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); -+static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); -+extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); -+extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); -+ -+enum as7716_32x_sfp_sysfs_attributes { -+ SFP_PORT_NUMBER, -+ SFP_IS_PRESENT, -+ SFP_IS_PRESENT_ALL, -+ SFP_EEPROM -+}; -+ -+/* sysfs attributes for hwmon -+ */ -+static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); -+static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); -+static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); -+static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); -+ -+static struct attribute *as7716_32x_sfp_attributes[] = { -+ &sensor_dev_attr_sfp_port_number.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present.dev_attr.attr, -+ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, -+ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, -+ NULL -+}; -+ -+static ssize_t show_port_number(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ return sprintf(buf, "%d\n", data->port+1); -+} -+ -+/* Error-check the CPLD read results. */ -+#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ -+do { \ -+ _rv = (_read_expr); \ -+ if(_rv < 0) { \ -+ return sprintf(_buf, "READ ERROR\n"); \ -+ } \ -+ if(_invert) { \ -+ _rv = ~_rv; \ -+ } \ -+ _rv &= 0xFF; \ -+} while(0) -+ -+static ssize_t show_present(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); -+ -+ if(attr->index == SFP_IS_PRESENT_ALL) { -+ int values[4]; -+ /* -+ * Report the SFP_PRESENCE status for all ports. 
-+ */ -+ -+ /* SFP_PRESENT Ports 1-8 */ -+ VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); -+ /* SFP_PRESENT Ports 9-16 */ -+ VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); -+ /* SFP_PRESENT Ports 17-24 */ -+ VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); -+ /* SFP_PRESENT Ports 25-32 */ -+ VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); -+ -+ /* Return values 1 -> 32 in order */ -+ return sprintf(buf, "%.2x %.2x %.2x %.2x\n", -+ values[0], values[1], values[2], values[3]); -+ } -+ else { /* SFP_IS_PRESENT */ -+ struct as7716_32x_sfp_data *data = as7716_32x_sfp_update_device(dev); -+ -+ if (!data->valid) { -+ return -EIO; -+ } -+ -+ return sprintf(buf, "%d\n", data->is_present); -+ } -+} -+ -+static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, -+ char *buf) -+{ -+ struct as7716_32x_sfp_data *data = as7716_32x_sfp_update_device(dev); -+ -+ if (!data->valid) { -+ return 0; -+ } -+ -+ if (!data->is_present) { -+ return 0; -+ } -+ -+ memcpy(buf, data->eeprom, sizeof(data->eeprom)); -+ -+ return sizeof(data->eeprom); -+} -+ -+static const struct attribute_group as7716_32x_sfp_group = { -+ .attrs = as7716_32x_sfp_attributes, -+}; -+ -+static int as7716_32x_sfp_probe(struct i2c_client *client, -+ const struct i2c_device_id *dev_id) -+{ -+ struct as7716_32x_sfp_data *data; -+ int status; -+ -+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { -+ status = -EIO; -+ goto exit; -+ } -+ -+ data = kzalloc(sizeof(struct as7716_32x_sfp_data), GFP_KERNEL); -+ if (!data) { -+ status = -ENOMEM; -+ goto exit; -+ } -+ -+ mutex_init(&data->update_lock); -+ data->port = dev_id->driver_data; -+ i2c_set_clientdata(client, data); -+ -+ dev_info(&client->dev, "chip found\n"); -+ -+ /* Register sysfs hooks */ -+ status = sysfs_create_group(&client->dev.kobj, &as7716_32x_sfp_group); -+ if (status) { -+ goto exit_free; -+ } -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ status = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ dev_info(&client->dev, "%s: sfp '%s'\n", -+ dev_name(data->hwmon_dev), client->name); -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &as7716_32x_sfp_group); -+exit_free: -+ kfree(data); -+exit: -+ -+ return status; -+} -+ -+static int as7716_32x_sfp_remove(struct i2c_client *client) -+{ -+ struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &as7716_32x_sfp_group); -+ kfree(data); -+ -+ return 0; -+} -+ -+enum port_numbers { -+as7716_32x_sfp1, as7716_32x_sfp2, as7716_32x_sfp3, as7716_32x_sfp4, -+as7716_32x_sfp5, as7716_32x_sfp6, as7716_32x_sfp7, as7716_32x_sfp8, -+as7716_32x_sfp9, as7716_32x_sfp10,as7716_32x_sfp11,as7716_32x_sfp12, -+as7716_32x_sfp13,as7716_32x_sfp14,as7716_32x_sfp15,as7716_32x_sfp16, -+as7716_32x_sfp17,as7716_32x_sfp18,as7716_32x_sfp19,as7716_32x_sfp20, -+as7716_32x_sfp21,as7716_32x_sfp22,as7716_32x_sfp23,as7716_32x_sfp24, -+as7716_32x_sfp25,as7716_32x_sfp26,as7716_32x_sfp27,as7716_32x_sfp28, -+as7716_32x_sfp29,as7716_32x_sfp30,as7716_32x_sfp31,as7716_32x_sfp32 -+}; -+ -+static const struct i2c_device_id as7716_32x_sfp_id[] = { -+{ "as7716_32x_sfp1", as7716_32x_sfp1 }, { "as7716_32x_sfp2", as7716_32x_sfp2 }, -+{ "as7716_32x_sfp3", as7716_32x_sfp3 }, { "as7716_32x_sfp4", as7716_32x_sfp4 }, -+{ "as7716_32x_sfp5", as7716_32x_sfp5 }, { "as7716_32x_sfp6", 
as7716_32x_sfp6 }, -+{ "as7716_32x_sfp7", as7716_32x_sfp7 }, { "as7716_32x_sfp8", as7716_32x_sfp8 }, -+{ "as7716_32x_sfp9", as7716_32x_sfp9 }, { "as7716_32x_sfp10", as7716_32x_sfp10 }, -+{ "as7716_32x_sfp11", as7716_32x_sfp11 }, { "as7716_32x_sfp12", as7716_32x_sfp12 }, -+{ "as7716_32x_sfp13", as7716_32x_sfp13 }, { "as7716_32x_sfp14", as7716_32x_sfp14 }, -+{ "as7716_32x_sfp15", as7716_32x_sfp15 }, { "as7716_32x_sfp16", as7716_32x_sfp16 }, -+{ "as7716_32x_sfp17", as7716_32x_sfp17 }, { "as7716_32x_sfp18", as7716_32x_sfp18 }, -+{ "as7716_32x_sfp19", as7716_32x_sfp19 }, { "as7716_32x_sfp20", as7716_32x_sfp20 }, -+{ "as7716_32x_sfp21", as7716_32x_sfp21 }, { "as7716_32x_sfp22", as7716_32x_sfp22 }, -+{ "as7716_32x_sfp23", as7716_32x_sfp23 }, { "as7716_32x_sfp24", as7716_32x_sfp24 }, -+{ "as7716_32x_sfp25", as7716_32x_sfp25 }, { "as7716_32x_sfp26", as7716_32x_sfp26 }, -+{ "as7716_32x_sfp27", as7716_32x_sfp27 }, { "as7716_32x_sfp28", as7716_32x_sfp28 }, -+{ "as7716_32x_sfp29", as7716_32x_sfp29 }, { "as7716_32x_sfp30", as7716_32x_sfp30 }, -+{ "as7716_32x_sfp31", as7716_32x_sfp31 }, { "as7716_32x_sfp32", as7716_32x_sfp32 }, -+{} -+}; -+MODULE_DEVICE_TABLE(i2c, as7716_32x_sfp_id); -+ -+static struct i2c_driver as7716_32x_sfp_driver = { -+ .class = I2C_CLASS_HWMON, -+ .driver = { -+ .name = "as7716_32x_sfp", -+ }, -+ .probe = as7716_32x_sfp_probe, -+ .remove = as7716_32x_sfp_remove, -+ .id_table = as7716_32x_sfp_id, -+ .address_list = normal_i2c, -+}; -+ -+static int as7716_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, -+ int data_len) -+{ -+ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); -+ -+ if (unlikely(result < 0)) -+ goto abort; -+ if (unlikely(result != data_len)) { -+ result = -EIO; -+ goto abort; -+ } -+ -+ result = 0; -+ -+abort: -+ return result; -+} -+ -+static struct as7716_32x_sfp_data *as7716_32x_sfp_update_device(struct device *dev) -+{ -+ struct i2c_client *client = to_i2c_client(dev); -+ struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); -+ -+ mutex_lock(&data->update_lock); -+ -+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) -+ || !data->valid) { -+ int status = -1; -+ int i = 0; -+ u8 cpld_reg = 0x30 + (data->port/8); -+ -+ data->valid = 0; -+ -+ /* Read present status of the specified port number */ -+ data->is_present = 0; -+ status = accton_i2c_cpld_read(0x60, cpld_reg); -+ -+ if (status < 0) { -+ dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); -+ goto exit; -+ } -+ -+ data->is_present = (status & (1 << (data->port % 8))) ? 
0 : 1; -+ -+ /* Read eeprom data based on port number */ -+ memset(data->eeprom, 0, sizeof(data->eeprom)); -+ -+ /* Check if the port is present */ -+ if (data->is_present) { -+ /* read eeprom */ -+ for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { -+ status = as7716_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, -+ data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), -+ I2C_SMBUS_BLOCK_MAX); -+ if (status < 0) { -+ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); -+ goto exit; -+ } -+ } -+ } -+ -+ data->last_updated = jiffies; -+ data->valid = 1; -+ } -+ -+exit: -+ mutex_unlock(&data->update_lock); -+ -+ return data; -+} -+ -+static int __init as7716_32x_sfp_init(void) -+{ -+ extern int platform_accton_as7716_32x(void); -+ if (!platform_accton_as7716_32x()) { -+ return -ENODEV; -+ } -+ -+ return i2c_add_driver(&as7716_32x_sfp_driver); -+} -+ -+static void __exit as7716_32x_sfp_exit(void) -+{ -+ i2c_del_driver(&as7716_32x_sfp_driver); -+} -+ -+MODULE_AUTHOR("Brandon Chuang "); -+MODULE_DESCRIPTION("accton as7716_32x_sfp driver"); -+MODULE_LICENSE("GPL"); -+ -+module_init(as7716_32x_sfp_init); -+module_exit(as7716_32x_sfp_exit); - diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index 08b76145..be705fac 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -234,22 +234,11 @@ network-core-proto-down.patch network-bonding-clag-proto-down.patch network-bridge-disable-multiple-sub-intfs-on-same-port.patch network-virtio-proto-down.patch -#driver-hwmon-cpr-4011-4mxx.patch -#platform-accton-as5712_54x-device-drivers.patch -#platform-accton-as6712_32x-device-drivers.patch overlayfs_notify.patch -#platform-accton-as7512_32x-device-drivers.patch driver-pca954x-i2c-mux-deselect-on-exit-config-option.patch -#platform-accton-as7712_32x-device-drivers.patch -#platform-accton-as5812_54x-device-drivers.patch -#platform-accton-as6812_32x-device-drivers.patch -#platform-accton-as5812_54t-device-drivers.patch driver-mfd-lpc-ich.patch driver-watchdog-itco-wd.patch -#platform-accton-as5512_54x-device-drivers.patch -#platform-accton-as7716_32x-device-drivers.patch driver-broadcom-tigon3.patch -#mgmt-port-init-config.patch arch-intel-reboot-cf9-cold.patch drivers-hwmon-adm1021-detect.patch drivers-i2c-busses-i2c-isch-timeout.patch From 301b94a6b13cd5d76e49b9a0af500b29a6abbc45 Mon Sep 17 00:00:00 2001 From: Jeffrey Townsend Date: Tue, 3 Jan 2017 19:57:01 +0000 Subject: [PATCH 255/255] This functionality was previously provided by an AS7716-specific patch. The changes are now controlled by module parameters and those modules parameters can be specified by the platform when necessary. 
--- ...ethernet-broadcom-tg3-preamble-reset.patch | 44 +++++++++++++++++++ .../kernels/3.2.65-1+deb7u2/patches/series | 1 + 2 files changed, 45 insertions(+) create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-net-ethernet-broadcom-tg3-preamble-reset.patch diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-net-ethernet-broadcom-tg3-preamble-reset.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-net-ethernet-broadcom-tg3-preamble-reset.patch new file mode 100644 index 00000000..2610f9ed --- /dev/null +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/drivers-net-ethernet-broadcom-tg3-preamble-reset.patch @@ -0,0 +1,44 @@ +diff -urpN a/drivers/net/ethernet/broadcom/tg3/tg3.c b/drivers/net/ethernet/broadcom/tg3/tg3.c +--- a/drivers/net/ethernet/broadcom/tg3/tg3.c 2017-01-03 10:58:35.130883468 -0800 ++++ b/drivers/net/ethernet/broadcom/tg3/tg3.c 2017-01-03 11:22:46.910914971 -0800 +@@ -324,6 +324,14 @@ module_param(tg3_disable_eee, int, 0); + MODULE_PARM_DESC(tg3_disable_eee, "Disable Energy Efficient Ethernet (EEE) support"); + #endif + ++static int short_preamble = 0; ++module_param(short_preamble, int, 0); ++MODULE_PARM_DESC(short_preamble, "Enable short preamble."); ++ ++static int bcm5718s_reset = 0; ++module_param(bcm5718s_reset, int, 0); ++MODULE_PARM_DESC(bcm5718s_reset, "Enable BCM5718S reset support."); ++ + #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001 + #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002 + +@@ -1628,6 +1635,12 @@ static void tg3_mdio_config_5785(struct + static void tg3_mdio_start(struct tg3 *tp) + { + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; ++ ++ if(short_preamble) { ++ netdev_info(tp->dev, "Setting short preamble..."); ++ tp->mi_mode |= MAC_MI_MODE_SHORT_PREAMBLE; ++ } ++ + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + +@@ -2899,6 +2911,12 @@ static int tg3_phy_reset(struct tg3 *tp) + } + } + ++ if (bcm5718s_reset && tp->phy_id == TG3_PHY_ID_BCM5718S) { ++ netdev_info(tp->dev, "BCM5718S reset..."); ++ __tg3_writephy(tp, 0x8, 0x10, 0x1d0); /* set internal phy 0x8 to make linkup */ ++ __tg3_writephy(tp, 0x1f, 0x4, 0x5e1); /* enable 10/100 cability of external phy */ ++ } ++ + if (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) + return 0; diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index be705fac..40e11a5f 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -243,3 +243,4 @@ arch-intel-reboot-cf9-cold.patch drivers-hwmon-adm1021-detect.patch drivers-i2c-busses-i2c-isch-timeout.patch CVE-2016-5195.patch +drivers-net-ethernet-broadcom-tg3-preamble-reset.patch
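
Usage note (illustrative, not part of the patch above): short_preamble and bcm5718s_reset are plain integer module parameters, so a platform that needs this behavior can enable it when the tg3 module is loaded. A minimal sketch, assuming a standard modprobe.d configuration file; the exact path, and the mechanism a given platform package uses to install it, are assumptions not shown in this series:

    # /etc/modprobe.d/tg3.conf (hypothetical location)
    # Enable the short MDIO preamble and the BCM5718S PHY reset workaround
    options tg3 short_preamble=1 bcm5718s_reset=1

Loading the module by hand with the same settings would look like:

    modprobe tg3 short_preamble=1 bcm5718s_reset=1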