diff --git a/builds/any/installer/installer.sh.in b/builds/any/installer/installer.sh.in
index da7a24cb..76b33987 100644
--- a/builds/any/installer/installer.sh.in
+++ b/builds/any/installer/installer.sh.in
@@ -548,6 +548,9 @@ if test -f preinstall.sh; then
./preinstall.sh $rootdir
fi
+# make sure any GPT data is valid and clean
+installer_fixup_gpt || :
+
chroot "${rootdir}" $installer_shell
if test -f "$postinst"; then
diff --git a/docker/tools/PKG.yml b/docker/tools/PKG.yml
index e763aff6..e1d6d2be 100644
--- a/docker/tools/PKG.yml
+++ b/docker/tools/PKG.yml
@@ -1,6 +1,6 @@
common:
arch: all
- version: 1.3.0
+ version: 1.4.0
copyright: Copyright 2013, 2014, 2015 Big Switch Networks
maintainer: support@bigswitch.com
support: opennetworklinux@googlegroups.com
diff --git a/make/kbuild.mk b/make/kbuild.mk
index 3a49a162..ade86c31 100644
--- a/make/kbuild.mk
+++ b/make/kbuild.mk
@@ -107,6 +107,7 @@ K_ARCHIVE_URL := https://www.kernel.org/pub/linux/kernel/v$(K_MAJOR_VERSION).x/$
endif
K_SOURCE_DIR := $(K_TARGET_DIR)/$(K_NAME)
K_MBUILD_DIR := $(K_SOURCE_DIR)-mbuild
+K_INSTALL_MOD_PATH := $(K_TARGET_DIR)
K_DTBS_DIR := $(K_SOURCE_DIR)-dtbs
#
@@ -153,6 +154,12 @@ K_MAKE := $(MAKE) -C $(K_SOURCE_DIR)
#
build: setup
+$(K_MAKE) $(K_BUILD_TARGET)
+ +$(K_MAKE) modules
+ +$(K_MAKE) modules_install INSTALL_MOD_PATH=$(K_INSTALL_MOD_PATH)
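+# modules_install creates 'source' and 'build' symlinks under lib/modules/<ver>
+# that point back into the build tree; drop them from the packaged module tree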
+ find $(K_INSTALL_MOD_PATH) -type l -name source -delete
+ find $(K_INSTALL_MOD_PATH) -type l -name build -delete
+
+
ifdef K_COPY_SRC
ifdef K_COPY_DST
ifdef K_COPY_GZIP
@@ -180,6 +187,7 @@ mbuild: build
$(foreach f,$(MODSYNCLIST),$(ONL)/tools/scripts/tree-copy.sh $(K_SOURCE_DIR) $(f) $(K_MBUILD_DIR);)
find $(K_MBUILD_DIR) -name "*.o*" -delete
find $(K_MBUILD_DIR) -name "*.c" -delete
+ find $(K_MBUILD_DIR) -name "*.ko" -delete
$(foreach f,$(MODSYNCKEEP), cp $(K_SOURCE_DIR)/$(f) $(K_MBUILD_DIR)/$(f) || true;)
dtbs: mbuild
diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/boot b/packages/base/all/initrds/loader-initrd-files/src/bin/boot
index a8cfd7af..c83658d7 100755
--- a/packages/base/all/initrds/loader-initrd-files/src/bin/boot
+++ b/packages/base/all/initrds/loader-initrd-files/src/bin/boot
@@ -85,10 +85,42 @@ shift
[ ! "${testonly}" ] || set -x
+# set up some tmpfs for our download
+
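+# initial tmpfs size in KiB (1048576 KiB = 1 GiB); grown below if the
+# SWI's squashfs needs more room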
+swi_kmin=1048576
+
+workdir=$(mktemp -d -t boot-tmpfs-XXXXXX)
+
+export TMPDIR=$workdir
+# export this tmpfs as temporary space for swiprep below
+
+echo "creating ${swi_kmin}k of tmpfs in $workdir"
+mount -v -t tmpfs -o size=${swi_kmin}k tmpfs $workdir
+workmnt=$workdir
+
+do_cleanup() {
+ cd /tmp
+ if [ "$workmnt" ]; then
+ if grep -q "$workmnt" /proc/mounts; then
+ umount -v "$workmnt" || :
+ fi
+ fi
+ rm -fr "$workdir"
+}
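+# run cleanup on EXIT (0) and SIGHUP (1)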
+trap "do_cleanup" 0 1
+
unset swipath host bhost port dir file dev user password scope
case "${SWI}" in
nfs://*/|dir:*)
echo "Mounting ${SWI}"
+
+ # do not use the ephemeral temporary directory for
+ # locally-mounted directories
+ if test "$workmnt"; then
+ umount "$workmnt" || :
+ fi
+ unset TMPDIR
+
swipath=$(swimount $SWI)
if [ "$rootfs" ]; then
[ -d "${swipath}/${rootfs}" ] || { echo "${SWI}${rootfs} must be an unpacked rootfs"; exit 1; }
@@ -131,6 +163,10 @@ fi
if [ "$testonly" ]; then
echo "swipath=$swipath rootfs=$rootfs"
echo "Stop here"
+
+ trap "" 0 1
+    # leave the temporary directory and mounts in place
+
exit 0
fi
@@ -140,6 +176,45 @@ if [ -d "${swipath}" ]; then
umount -l /newroot 2>/dev/null || :
mount --bind "${swipath}/${rootfs}" /newroot
else
+
+ ##############################
+ #
+ # swiprep will (1) unpack the squashfs image to a file,
+ # and (2) extract the filesystem to /newroot.
+ #
+ # We need to make sure there is enough disk space for this...
+ #
+ ##############################
+
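+    # 'df -P' prints: Filesystem 1024-blocks Used Available Capacity Mounted-on;
+    # with the leading "dummy" word the Available column lands in $5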
+ set dummy $(df -k -P "$workmnt" | tail -1)
+ tmpavail=$5
+
+ # estimate the squashfs size based on the largest one here
+ # (there may be more than one arch in the SWI file)
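+    # ('unzip -ql' lists Length Date Time Name; after 'set dummy', $2 is the
+    # member's uncompressed size and $5 its name)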
+ squashsz=0
+ ifs=$IFS; IFS=$CR
+ for line in $(unzip -ql "$swipath"); do
+ IFS=$ifs
+ set dummy $line
+ case "$5" in
+ *.sqsh)
+ if [ "$2" -gt $squashsz ]; then
+ squashsz=$2
+ fi
+ ;;
+ esac
+ done
+ IFS=$ifs
+
+ # pad by a little to account for inodes and such
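+    # (e.g. 200000 becomes 200000 * 105 / 100 = 210000)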
+ squashsz=$(( $squashsz * 105 / 100 ))
+
+ if [ $squashsz -gt $tmpavail ]; then
+ tmpsz=$(( $swi_kmin + $squashsz - $tmpavail ))
+ echo "Resizing tmpfs to ${tmpsz}k"
+ mount -o remount,size=${tmpsz}k $workmnt
+ fi
+
swiprep --overlay "${swipath}${rootfs}" --unmount --swiref "$swistamp" /newroot
swiprep --record "${swipath}${rootfs}" --swiref "$swistamp" /newroot
fi
@@ -152,6 +227,10 @@ if [ -f /lib/boot-custom ]; then
. /lib/boot-custom
fi
+# done with the temporary dirs and mounts
+trap "" 0 1
+do_cleanup || :
+
echo "Switching rootfs" # limit 16 chars since serial buffer is not flushed
kill -QUIT 1 # exec /bin/switchroot as PID 1
sleep 30
@@ -162,4 +241,5 @@ exit 1
# Local variables:
# mode: sh
# sh-basic-offset: 4
+# sh-indentation: 4
# End:
diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/swiprep b/packages/base/all/initrds/loader-initrd-files/src/bin/swiprep
index 426b773e..88cb76c6 100755
--- a/packages/base/all/initrds/loader-initrd-files/src/bin/swiprep
+++ b/packages/base/all/initrds/loader-initrd-files/src/bin/swiprep
@@ -141,15 +141,20 @@ case $(uname -m) in
ARCH_LIST="armel"
;;
aarch64)
- ARCH_LIST="arm64"
- ;;
+ ARCH_LIST="arm64"
+ ;;
*)
- q;;
+ ;;
esac
if test "${mode_install}${mode_overlay}"; then
for arch in $ARCH_LIST; do
- unzip -q "$swipath" "rootfs-${arch}.sqsh" -d "$workdir"
+ if unzip -q "$swipath" "rootfs-${arch}.sqsh" -d "$workdir"; then
+ :
+ else
+ echo "*** unzip of root squashfs failed" 1>&2
+ rm -f "$workdir/rootfs-${arch}.sqsh"
+ fi
if test -s "$workdir/rootfs-${arch}.sqsh"; then
mv "$workdir/rootfs-${arch}.sqsh" "$workdir/rootfs.sqsh"
break;
diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/udhcpc b/packages/base/all/initrds/loader-initrd-files/src/bin/udhcpc
index 349f3946..1d5ff3f1 100644
--- a/packages/base/all/initrds/loader-initrd-files/src/bin/udhcpc
+++ b/packages/base/all/initrds/loader-initrd-files/src/bin/udhcpc
@@ -26,4 +26,4 @@
. /lib/customize.sh
kill $(cat /tmp/udhcpc.pid 2>/dev/null) 2>/dev/null
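+# -O url requests DHCP option 114 ("url"), which the busybox
+# additional-dhcp-options patch in this change makes available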
-exec busybox udhcpc -V $ONL_UDHCPC_VENDOR -p /tmp/udhcpc.pid -s /lib/udhcpc-script "$@"
+exec busybox udhcpc -O url -V $ONL_UDHCPC_VENDOR -p /tmp/udhcpc.pid -s /lib/udhcpc-script "$@"
diff --git a/packages/base/all/initrds/loader-initrd-files/src/lib/udhcpc-script b/packages/base/all/initrds/loader-initrd-files/src/lib/udhcpc-script
index cd0a62ea..e2094e17 100755
--- a/packages/base/all/initrds/loader-initrd-files/src/lib/udhcpc-script
+++ b/packages/base/all/initrds/loader-initrd-files/src/lib/udhcpc-script
@@ -1,22 +1,22 @@
#!/bin/sh
############################################################
#
-#
-# Copyright 2013, 2014 BigSwitch Networks, Inc.
-#
+#
+# Copyright 2013, 2014 BigSwitch Networks, Inc.
+#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.eclipse.org/legal/epl-v10.html
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
-#
+#
#
############################################################
#
@@ -26,6 +26,9 @@
[ "${interface}" ] || exit
+URUN=/var/run/udhcpc
+rm -rf ${URUN} && mkdir -p ${URUN}
+
case "$1" in
deconfig)
ifconfig ${interface} 0.0.0.0
@@ -36,6 +39,9 @@ case "$1" in
: >/etc/resolv.conf
[ ! "${domain}" ] || echo "search ${domain}" >>/etc/resolv.conf
[ ! "${dns}" ] || echo "nameserver ${dns}" >>/etc/resolv.conf
- [ ! "${boot_file}" ] || echo "${boot_file}" >/tmp/udhcpc.boot_file
+ [ ! "${boot_file}" ] || echo "${boot_file}" > ${URUN}/boot_file
+ [ ! "${siaddr}" ] || echo "${siaddr}" > ${URUN}/siaddr
+ [ ! "${url}" ] || echo "${url}" > ${URUN}/url
;;
+
esac
diff --git a/packages/base/all/vendor-config-onl/src/bin/onlfit b/packages/base/all/vendor-config-onl/src/bin/onlfit
index 2b5ac815..657cbef4 100755
--- a/packages/base/all/vendor-config-onl/src/bin/onlfit
+++ b/packages/base/all/vendor-config-onl/src/bin/onlfit
@@ -5,9 +5,28 @@ if [ -z "$1" ]; then
exit 1
fi
+ARCH=`uname -m`
+case $ARCH in
+ armv7l|ppc)
+ ;;
+ *)
+ echo "This script cannot be used on $ARCH platforms."
+ exit 1
+ ;;
+esac
+
PLATFORM=$(cat /etc/onl/platform)
dir=`mktemp -d`
-(cd $dir && wget $1)
-onlfs rw boot mv $dir/* /mnt/onl/boot/${PLATFORM}.itb
+rc=
+
+if (cd $dir && wget $1); then
+ onlfs rw boot mv $dir/* /mnt/onl/boot/${PLATFORM}.itb
+ rc=0
+else
+ echo "Download failed."
+ rc=1
+fi
+
rmdir $dir
+exit $rc
diff --git a/packages/base/all/vendor-config-onl/src/bin/onlinitrd b/packages/base/all/vendor-config-onl/src/bin/onlinitrd
new file mode 100755
index 00000000..db0fc36a
--- /dev/null
+++ b/packages/base/all/vendor-config-onl/src/bin/onlinitrd
@@ -0,0 +1,32 @@
+#!/bin/sh
+############################################################
+if [ -z "$1" ]; then
+ echo "usage: $0 "
+ exit 1
+fi
+
+ARCH=`uname -m`
+case $ARCH in
+ x86_64)
+ ;;
+ *)
+ echo "This script cannot be used on $ARCH platforms."
+ exit 1
+ ;;
+esac
+
+PLATFORM=$(cat /etc/onl/platform)
+
+dir=`mktemp -d`
+rc=
+
+if (cd $dir && wget $1); then
+ onlfs rw boot mv $dir/* /mnt/onl/boot/${PLATFORM}.cpio.gz
+ rc=0
+else
+ echo "Download failed."
+ rc=1
+fi
+
+rmdir $dir
+exit $rc
diff --git a/packages/base/all/vendor-config-onl/src/bin/onlkernel b/packages/base/all/vendor-config-onl/src/bin/onlkernel
index 051a1729..e77a6cc5 100755
--- a/packages/base/all/vendor-config-onl/src/bin/onlkernel
+++ b/packages/base/all/vendor-config-onl/src/bin/onlkernel
@@ -5,7 +5,26 @@ if [ -z "$1" ]; then
exit 1
fi
+ARCH=`uname -m`
+case $ARCH in
+ x86_64)
+ ;;
+ *)
+ echo "This script cannot be used on $ARCH platforms."
+ exit 1
+ ;;
+esac
+
dir=`mktemp -d`
-(cd $dir && wget $1)
-onlfs rw boot mv $dir/* /mnt/onl/boot
+rc=
+
+if (cd $dir && wget $1); then
+ onlfs rw boot mv $dir/* /mnt/onl/boot
+ rc=0
+else
+ echo "Download failed."
+ rc=1
+fi
+
rmdir $dir
+exit $rc
diff --git a/packages/base/all/vendor-config-onl/src/lib/install/lib.sh b/packages/base/all/vendor-config-onl/src/lib/install/lib.sh
index 74dd273f..62bfb6cf 100644
--- a/packages/base/all/vendor-config-onl/src/lib/install/lib.sh
+++ b/packages/base/all/vendor-config-onl/src/lib/install/lib.sh
@@ -186,6 +186,119 @@ visit_blkid()
return 0
}
+##############################
+#
+# Fix up a corrupted GPT partition table, within reason
+# See SWL-3971
+#
+##############################
+
+blkid_find_gpt_boot() {
+ local dev label
+ dev=$1; shift
+ label=$1; shift
+ rest="$@"
+
+ installer_say "Examining $dev --> $label"
+
+ # EFI partition shows up as a valid partition with blkid
+ if test "$label" = "EFI System"; then
+ installer_say "Found EFI System partition at $dev"
+ ESP_DEVICE=$(echo "$dev" | sed -e 's/[0-9]$//')
+
+ # this is definitely the boot disk
+ return 2
+ fi
+
+ # sometimes this is hidden from blkid (no filesystem)
+ if test "$label" = "GRUB-BOOT"; then
+ installer_say "Found GRUB boot partition at $dev"
+ GRUB_DEVICE=$(echo "$dev" | sed -e 's/[0-9]$//')
+
+ # probably the boot disk, look for a GPT header
+ return 0
+ fi
+
+ # shows up in blkid but may not be GPT
+ if test "$label" = "ONIE-BOOT"; then
+ installer_say "Found ONIE boot partition at $dev"
+ ONIE_DEVICE=$(echo "$dev" | sed -e 's/[0-9]$//')
+
+ # probably the boot disk, look for a GPT header
+ return 0
+ fi
+
+ # not found, skip
+ return 0
+}
+
+installer_fixup_gpt() {
+ local buf dat sts dev
+
+ buf=$(mktemp -u -t sgdisk-XXXXXX)
+
+ ESP_DEVICE=
+ GRUB_DEVICE=
+ ONIE_DEVICE=
+ visit_blkid blkid_find_gpt_boot
+
+ dev=
+ if test -b "$ESP_DEVICE"; then
+ dev=$ESP_DEVICE
+ elif test -b "$GRUB_DEVICE"; then
+ sgdisk -p "$GRUB_DEVICE" > "$buf" 2>&1 || :
+ if grep -q GUID "$buf"; then
+ dev=$GRUB_DEVICE
+ fi
+ elif test -b "$ONIE_DEVICE"; then
+ sgdisk -p "$ONIE_DEVICE" > "$buf" 2>&1 || :
+ if grep -q GUID "$buf"; then
+ # here we assume that the ONIE boot partition is on
+ # the boot disk
+ # (additionally we could also look for 'GRUB-BOOT')
+ dev=$ONIE_DEVICE
+ fi
+ fi
+ test -b "$dev" || return 0
+
+ # see if it's a clean GPT partition table
+ if sgdisk -p "$dev" > "$buf" 2>&1; then
+ sts=0
+ else
+ sts=$?
+ fi
+ if test $sts -ne 0; then
+ cat "$buf" 1>&2
+ rm -f "$buf"
+ installer_say "Cannot reliably get GPT partition table"
+ return 1
+ fi
+
+ case $(cat "$buf") in
+ *Caution*|*Warning*)
+ cat $buf 1>&2
+ installer_say "Found issues with the GPT partition table"
+ rm -f "$buf"
+ ;;
+ *)
+ installer_say "Found a clean GPT partition table"
+ rm -f "$buf"
+ return 0
+ ;;
+ esac
+
+ installer_say "Attempting to correct the GPT partition table"
+
+    # this is the simple method; gdisk/sgdisk will correct
+ # simple errors but not horrendous faults
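+    # (sgdisk -b saves the current partition data to a backup file and
+    # sgdisk -l reloads it, rewriting both the primary and backup GPT structures)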
+ dat=$(mktemp -u -t sgdisk-XXXXXX)
+ sgdisk -b "$dat" "$dev" || return 1
+ sgdisk -l "$dat" "$dev" || return 1
+ rm -f "$dat"
+
+ return 0
+}
+
# Local variables
# mode: sh
# sh-basic-offset: 2
diff --git a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py
index d7b78e22..e804b184 100755
--- a/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py
+++ b/packages/base/all/vendor-config-onl/src/python/onl/platform/base.py
@@ -354,36 +354,34 @@ class OnlPlatformBase(object):
return self.platform_info.CPLD_VERSIONS
def dmi_versions(self):
- # Note - the dmidecode module returns empty lists for powerpc systems.
- if platform.machine() != "x86_64":
- return {}
-
- try:
- import dmidecode
- except ImportError:
- return {}
-
- fields = [
- {
- 'name': 'DMI BIOS Version',
- 'subsystem': dmidecode.bios,
- 'dmi_type' : 0,
- 'key' : 'Version',
- },
-
- {
- 'name': 'DMI System Version',
- 'subsystem': dmidecode.system,
- 'dmi_type' : 1,
- 'key' : 'Version',
- },
- ]
rv = {}
- for field in fields:
- for v in field['subsystem']().values():
- if type(v) is dict and v['dmi_type'] == field['dmi_type']:
- rv[field['name']] = v['data'][field['key']]
+ arches = [ 'x86_64' ]
+ if platform.machine() in arches:
+ try:
+ import dmidecode
+ fields = [
+ {
+ 'name': 'DMI BIOS Version',
+ 'subsystem': dmidecode.bios,
+ 'dmi_type' : 0,
+ 'key' : 'Version',
+ },
+ {
+ 'name': 'DMI System Version',
+ 'subsystem': dmidecode.system,
+ 'dmi_type' : 1,
+ 'key' : 'Version',
+ },
+ ]
+ # Todo -- disable dmidecode library warnings to stderr
+ # or figure out how to clear the warning log in the decode module.
+ for field in fields:
+ for v in field['subsystem']().values():
+ if type(v) is dict and v['dmi_type'] == field['dmi_type']:
+ rv[field['name']] = v['data'][field['key']]
+ except:
+ pass
return rv
def upgrade_manifest(self, type_, override_dir=None):
diff --git a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile
index 832ae83c..54cb65cb 100644
--- a/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile
+++ b/packages/base/amd64/kernels/kernel-3.16-lts-x86-64-all/builds/Makefile
@@ -13,8 +13,8 @@ THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
include $(ONL)/make/config.mk
kernel:
- $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL)
rm -rf lib
+ $(MAKE) -C $(ONL)/packages/base/any/kernels/3.16-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL)
ARCH=x86_64 $(ONL)/tools/scripts/kmodbuild.sh linux-3.16.39-mbuild "$(wildcard $(ONL)/packages/base/any/kernels/modules/*)" onl/onl/common
clean:
diff --git a/packages/base/amd64/kernels/kernel-4.9-lts-x86-64-all/builds/Makefile b/packages/base/amd64/kernels/kernel-4.9-lts-x86-64-all/builds/Makefile
index 2bb565a0..422af6c5 100644
--- a/packages/base/amd64/kernels/kernel-4.9-lts-x86-64-all/builds/Makefile
+++ b/packages/base/amd64/kernels/kernel-4.9-lts-x86-64-all/builds/Makefile
@@ -13,8 +13,8 @@ THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
include $(ONL)/make/config.mk
kernel:
- $(MAKE) -C $(ONL)/packages/base/any/kernels/4.9-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL)
rm -rf lib
+ $(MAKE) -C $(ONL)/packages/base/any/kernels/4.9-lts/configs/x86_64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL)
ARCH=x86_64 $(ONL)/tools/scripts/kmodbuild.sh linux-4.9.30-mbuild "$(wildcard $(ONL)/packages/base/any/kernels/modules/*)" onl/onl/common
clean:
diff --git a/packages/base/any/initrds/buildroot/builds/Makefile b/packages/base/any/initrds/buildroot/builds/Makefile
index 00963f51..61dc72c6 100644
--- a/packages/base/any/initrds/buildroot/builds/Makefile
+++ b/packages/base/any/initrds/buildroot/builds/Makefile
@@ -26,7 +26,7 @@ all: setup $(BUILDROOT_ARCHDIRS)
clean:
rm -rf $(BUILDROOT_ARCHDIRS)
-
+ rm -rf .setup.done
setup: setup-pyroute2 setup-dnspython setup-libyaml setup-pyyaml setup-jq setup-pyparted
cp $(wildcard patches/busybox*.patch) $(BUILDROOT_SOURCE)/package/busybox/
diff --git a/packages/base/any/initrds/buildroot/builds/patches/busybox-003-additional-dhcp-options.patch b/packages/base/any/initrds/buildroot/builds/patches/busybox-003-additional-dhcp-options.patch
new file mode 100644
index 00000000..75f954aa
--- /dev/null
+++ b/packages/base/any/initrds/buildroot/builds/patches/busybox-003-additional-dhcp-options.patch
@@ -0,0 +1,126 @@
+dhcp additional options patch
+
+Copyright (C) 2013 Curt Brune
+Copyright (C) 2014 david_yang
+Copyright (C) 2017 Jeffrey Townsend
+
+SPDX-License-Identifier: GPL-2.0
+
+Enable the send/receive of additional DHCP options:
+
+ DHCP_LOG_SERVER
+ DHCP_WWW_SERVER
+ DHCP_DEFAULT_URL
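+ (option codes: logsrv = 7, wwwsrv = 72/0x48, url = 114/0x72)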
+
+diff -urpN a/networking/udhcp/common.c b/networking/udhcp/common.c
+--- a/networking/udhcp/common.c 2017-07-18 15:11:59.626055248 +0000
++++ b/networking/udhcp/common.c 2017-07-18 15:09:47.942052391 +0000
+@@ -26,7 +26,7 @@ const struct dhcp_optflag dhcp_optflags[
+ // { OPTION_IP | OPTION_LIST , 0x04 }, /* DHCP_TIME_SERVER */
+ // { OPTION_IP | OPTION_LIST , 0x05 }, /* DHCP_NAME_SERVER */
+ { OPTION_IP | OPTION_LIST | OPTION_REQ, 0x06 }, /* DHCP_DNS_SERVER */
+-// { OPTION_IP | OPTION_LIST , 0x07 }, /* DHCP_LOG_SERVER */
++ { OPTION_IP | OPTION_LIST , 0x07 }, /* DHCP_LOG_SERVER */
+ // { OPTION_IP | OPTION_LIST , 0x08 }, /* DHCP_COOKIE_SERVER */
+ { OPTION_IP | OPTION_LIST , 0x09 }, /* DHCP_LPR_SERVER */
+ { OPTION_STRING_HOST | OPTION_REQ, 0x0c }, /* DHCP_HOST_NAME */
+@@ -44,6 +44,7 @@ const struct dhcp_optflag dhcp_optflags[
+ { OPTION_STRING_HOST , 0x28 }, /* DHCP_NIS_DOMAIN */
+ { OPTION_IP | OPTION_LIST , 0x29 }, /* DHCP_NIS_SERVER */
+ { OPTION_IP | OPTION_LIST | OPTION_REQ, 0x2a }, /* DHCP_NTP_SERVER */
++ { OPTION_BIN , 0x2b }, /* DHCP_VENDOR_OPTS */
+ { OPTION_IP | OPTION_LIST , 0x2c }, /* DHCP_WINS_SERVER */
+ { OPTION_U32 , 0x33 }, /* DHCP_LEASE_TIME */
+ { OPTION_IP , 0x36 }, /* DHCP_SERVER_ID */
+@@ -51,18 +52,22 @@ const struct dhcp_optflag dhcp_optflags[
+ //TODO: must be combined with 'sname' and 'file' handling:
+ { OPTION_STRING_HOST , 0x42 }, /* DHCP_TFTP_SERVER_NAME */
+ { OPTION_STRING , 0x43 }, /* DHCP_BOOT_FILE */
++ { OPTION_IP | OPTION_LIST , 0x48 }, /* DHCP_WWW_SERVER */
+ //TODO: not a string, but a set of LASCII strings:
+ // { OPTION_STRING , 0x4D }, /* DHCP_USER_CLASS */
++ { OPTION_STRING , 0x72 }, /* DHCP_DEFAULT_URL */
+ #if ENABLE_FEATURE_UDHCP_RFC3397
+ { OPTION_DNS_STRING | OPTION_LIST , 0x77 }, /* DHCP_DOMAIN_SEARCH */
+ { OPTION_SIP_SERVERS , 0x78 }, /* DHCP_SIP_SERVERS */
+ #endif
+ { OPTION_STATIC_ROUTES | OPTION_LIST , 0x79 }, /* DHCP_STATIC_ROUTES */
++ { OPTION_BIN , 0x7d }, /* DHCP_VIVSO_OPTS */
+ #if ENABLE_FEATURE_UDHCP_8021Q
+ { OPTION_U16 , 0x84 }, /* DHCP_VLAN_ID */
+ { OPTION_U8 , 0x85 }, /* DHCP_VLAN_PRIORITY */
+ #endif
+ { OPTION_6RD , 0xd4 }, /* DHCP_6RD */
++ { OPTION_IP , 0x96 }, /* DHCP_TFTP_SERVER_IP */
+ { OPTION_STATIC_ROUTES | OPTION_LIST , 0xf9 }, /* DHCP_MS_STATIC_ROUTES */
+ { OPTION_STRING , 0xfc }, /* DHCP_WPAD */
+
+@@ -95,7 +100,7 @@ const char dhcp_option_strings[] ALIGN1
+ // "timesrv" "\0" /* DHCP_TIME_SERVER */
+ // "namesrv" "\0" /* DHCP_NAME_SERVER */
+ "dns" "\0" /* DHCP_DNS_SERVER */
+-// "logsrv" "\0" /* DHCP_LOG_SERVER */
++ "logsrv" "\0" /* DHCP_LOG_SERVER */
+ // "cookiesrv" "\0" /* DHCP_COOKIE_SERVER */
+ "lprsrv" "\0" /* DHCP_LPR_SERVER */
+ "hostname" "\0" /* DHCP_HOST_NAME */
+@@ -110,13 +115,16 @@ const char dhcp_option_strings[] ALIGN1
+ "nisdomain" "\0" /* DHCP_NIS_DOMAIN */
+ "nissrv" "\0" /* DHCP_NIS_SERVER */
+ "ntpsrv" "\0" /* DHCP_NTP_SERVER */
++ "vendoropts" "\0" /* DHCP_VENDOR_OPTS */
+ "wins" "\0" /* DHCP_WINS_SERVER */
+ "lease" "\0" /* DHCP_LEASE_TIME */
+ "serverid" "\0" /* DHCP_SERVER_ID */
+ "message" "\0" /* DHCP_ERR_MESSAGE */
+ "tftp" "\0" /* DHCP_TFTP_SERVER_NAME */
+ "bootfile" "\0" /* DHCP_BOOT_FILE */
++ "wwwsrv" "\0" /* DHCP_WWW_SERVER */
+ // "userclass" "\0" /* DHCP_USER_CLASS */
++ "url" "\0" /* DHCP_DEFAULT_URL */
+ #if ENABLE_FEATURE_UDHCP_RFC3397
+ "search" "\0" /* DHCP_DOMAIN_SEARCH */
+ // doesn't work in udhcpd.conf since OPTION_SIP_SERVERS
+@@ -124,11 +132,13 @@ const char dhcp_option_strings[] ALIGN1
+ "sipsrv" "\0" /* DHCP_SIP_SERVERS */
+ #endif
+ "staticroutes" "\0"/* DHCP_STATIC_ROUTES */
++ "vivso" "\0" /* DHCP_VIVSO_OPTS */
+ #if ENABLE_FEATURE_UDHCP_8021Q
+ "vlanid" "\0" /* DHCP_VLAN_ID */
+ "vlanpriority" "\0"/* DHCP_VLAN_PRIORITY */
+ #endif
+ "ip6rd" "\0" /* DHCP_6RD */
++ "tftpsiaddr" "\0" /* DHCP_TFTP_SERVER_IP */
+ "msstaticroutes""\0"/* DHCP_MS_STATIC_ROUTES */
+ "wpad" "\0" /* DHCP_WPAD */
+ ;
+@@ -145,6 +155,7 @@ const uint8_t dhcp_option_lengths[] ALIG
+ [OPTION_IP] = 4,
+ [OPTION_IP_PAIR] = 8,
+ // [OPTION_BOOLEAN] = 1,
++ [OPTION_BIN] = 1, /* ignored by udhcp_str2optset */
+ [OPTION_STRING] = 1, /* ignored by udhcp_str2optset */
+ [OPTION_STRING_HOST] = 1, /* ignored by udhcp_str2optset */
+ #if ENABLE_FEATURE_UDHCP_RFC3397
+diff -urpN a/networking/udhcp/dhcpc.c b/networking/udhcp/dhcpc.c
+--- a/networking/udhcp/dhcpc.c 2017-07-18 15:11:59.626055248 +0000
++++ b/networking/udhcp/dhcpc.c 2017-07-18 15:11:11.066054194 +0000
+@@ -100,6 +100,7 @@ static const uint8_t len_of_option_as_st
+ [OPTION_IP_PAIR ] = sizeof("255.255.255.255 ") * 2,
+ [OPTION_STATIC_ROUTES ] = sizeof("255.255.255.255/32 255.255.255.255 "),
+ [OPTION_6RD ] = sizeof("32 128 ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff 255.255.255.255 "),
++ [OPTION_BIN ] = 2,
+ [OPTION_STRING ] = 1,
+ [OPTION_STRING_HOST ] = 1,
+ #if ENABLE_FEATURE_UDHCP_RFC3397
+@@ -240,6 +241,9 @@ static NOINLINE char *xmalloc_optname_op
+ dest += sprintf(dest, type == OPTION_U32 ? "%lu" : "%ld", (unsigned long) ntohl(val_u32));
+ break;
+ }
++ case OPTION_BIN:
++ *bin2hex(dest, (void*) option, len) = '\0';
++ return ret;
+ /* Note: options which use 'return' instead of 'break'
+ * (for example, OPTION_STRING) skip the code which handles
+ * the case of list of options.
diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config
index 2038c572..90f68706 100644
--- a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config
+++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config
@@ -470,7 +470,9 @@ CONFIG_X86_PAT=y
CONFIG_ARCH_USES_PG_UNCACHED=y
CONFIG_ARCH_RANDOM=y
CONFIG_X86_SMAP=y
-# CONFIG_EFI is not set
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+# CONFIG_EFI_MIXED is not set
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
@@ -521,6 +523,7 @@ CONFIG_ACPI_CONTAINER=y
# CONFIG_ACPI_SBS is not set
# CONFIG_ACPI_HED is not set
CONFIG_ACPI_CUSTOM_METHOD=y
+# CONFIG_ACPI_BGRT is not set
# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
# CONFIG_ACPI_APEI is not set
# CONFIG_ACPI_EXTLOG is not set
@@ -2971,6 +2974,12 @@ CONFIG_ISCSI_IBFT_FIND=y
CONFIG_ISCSI_IBFT=y
# CONFIG_GOOGLE_FIRMWARE is not set
+#
+# EFI (Extensible Firmware Interface) Support
+#
+# CONFIG_EFI_VARS is not set
+CONFIG_EFI_RUNTIME_MAP=y
+
#
# File systems
#
@@ -3102,6 +3111,7 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_UFS_FS is not set
# CONFIG_EXOFS_FS is not set
# CONFIG_F2FS_FS is not set
+CONFIG_EFIVAR_FS=y
CONFIG_ORE=y
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
@@ -3365,6 +3375,7 @@ CONFIG_STRICT_DEVMEM=y
CONFIG_X86_VERBOSE_BOOTUP=y
CONFIG_EARLY_PRINTK=y
# CONFIG_EARLY_PRINTK_DBGP is not set
+# CONFIG_EARLY_PRINTK_EFI is not set
# CONFIG_X86_PTDUMP is not set
CONFIG_DEBUG_RODATA=y
# CONFIG_DEBUG_RODATA_TEST is not set
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-igb-netberg-aurora.patch b/packages/base/any/kernels/3.16-lts/patches/driver-igb-netberg-aurora.patch
new file mode 100644
index 00000000..7ed0f44d
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-igb-netberg-aurora.patch
@@ -0,0 +1,400 @@
+diff -Nu a/drivers/net/ethernet/intel/igb/bcm_phy.c b/drivers/net/ethernet/intel/igb/bcm_phy.c
+--- a/drivers/net/ethernet/intel/igb/bcm_phy.c 1970-01-01 08:00:00.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/bcm_phy.c 2017-06-20 17:00:46.000000000 +0800
+@@ -0,0 +1,206 @@
++#include "e1000_hw.h"
++#include "linux/brcmphy.h"
++
++/*
++ * Auxiliary Control Register shadow encoding
++ */
++#define MII_BCM54XX_AUX_CTL_ENCODE(val) (((val & 0x7) << 12)|(val & 0x7))
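++/* e.g. ENCODE(7) = 0x7007: the shadow selector is placed in bits [14:12] and [2:0] */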
++
++/*
++ * MII Link Advertisement
++ */
++#define MII_ANA_ASF (1 << 0) /* Advertise Selector Field */
++#define MII_ANA_HD_10 (1 << 5) /* Half duplex 10Mb/s supported */
++#define MII_ANA_FD_10 (1 << 6) /* Full duplex 10Mb/s supported */
++#define MII_ANA_HD_100 (1 << 7) /* Half duplex 100Mb/s supported */
++#define MII_ANA_FD_100 (1 << 8) /* Full duplex 100Mb/s supported */
++#define MII_ANA_T4 (1 << 9) /* T4 */
++#define MII_ANA_PAUSE (1 << 10)/* Pause supported */
++#define MII_ANA_ASYM_PAUSE (1 << 11)/* Asymmetric pause supported */
++#define MII_ANA_RF (1 << 13)/* Remote fault */
++#define MII_ANA_NP (1 << 15)/* Next Page */
++
++#define MII_ANA_ASF_802_3 (1) /* 802.3 PHY */
++
++/*
++ * BCM54XX: Shadow registers
++ * Shadow values go into bits [14:10] of register 0x1c to select a shadow
++ * register to access.
++ */
++#define BCM54XX_SHD_AUTODETECT 0x1e /* 11110: Auto detect Register */
++#define BCM54XX_SHD_MODE 0x1f /* 11111: Mode Control Register */
++#define BCM54XX_SHD_MODE_SER 1<<6
++
++/*
++ * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
++ * 0x1c shadow registers.
++ */
++
++int bcmphy_write(struct e1000_hw *hw,u32 reg, u16 regval)
++{
++ u32 ret;
++ struct e1000_phy_info *phy = &hw->phy;
++
++ ret = phy->ops.write_reg(hw,reg, regval);
++ return ret;
++}
++
++u16 bcmphy_read(struct e1000_hw *hw, u32 reg)
++{
++ u16 val;
++ struct e1000_phy_info *phy = &hw->phy;
++
++ phy->ops.read_reg(hw,reg, &val);
++ return val;
++}
++
++static int bcm54xx_shadow_read(struct e1000_hw *hw, u16 shadow)
++{
++ bcmphy_write(hw, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
++ return MII_BCM54XX_SHD_DATA(bcmphy_read(hw, MII_BCM54XX_SHD));
++}
++
++static int bcm54xx_shadow_write(struct e1000_hw *hw, u16 shadow, u16 val)
++{
++ return bcmphy_write(hw, MII_BCM54XX_SHD,
++ MII_BCM54XX_SHD_WRITE |
++ MII_BCM54XX_SHD_VAL(shadow) |
++ MII_BCM54XX_SHD_DATA(val));
++}
++
++static int bcm54xx_auxctl_write(struct e1000_hw *hw, u16 regnum, u16 val)
++{
++ return bcmphy_write(hw, MII_BCM54XX_AUX_CTL, (regnum | val));
++}
++
++static int bcm54xx_config_init(struct e1000_hw *hw)
++{
++ int reg, err;
++
++ reg = bcmphy_read(hw, MII_BCM54XX_ECR);
++ if (reg < 0)
++ return reg;
++
++ /* Mask interrupts globally. */
++ reg |= MII_BCM54XX_ECR_IM;
++ err = bcmphy_write(hw, MII_BCM54XX_ECR, reg);
++ if (err < 0)
++ return err;
++
++ /* Unmask events we are interested in. */
++ reg = ~(MII_BCM54XX_INT_DUPLEX |
++ MII_BCM54XX_INT_SPEED |
++ MII_BCM54XX_INT_LINK);
++ err = bcmphy_write(hw, MII_BCM54XX_IMR, reg);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++
++void bcm54616s_linkup(struct e1000_hw *hw, int speed, int duplex)
++{
++ u16 regval;
++
++ /* set speed and duplex */
++ regval = bcmphy_read(hw,PHY_CONTROL);
++ regval &= ~(MII_CR_SPEED_SELECT_MSB |
++ MII_CR_SPEED_SELECT_LSB |
++ MII_CR_FULL_DUPLEX);
++
++ switch(speed) {
++ case SPEED_10:
++ regval |= MII_CR_SPEED_10;
++ break;
++ case SPEED_100:
++ regval |= MII_CR_SPEED_100;
++ break;
++ case SPEED_1000:
++ default:
++ regval |= MII_CR_SPEED_1000;
++ break;
++ }
++
++ switch(duplex) {
++ case FULL_DUPLEX:
++ regval |= MII_CR_FULL_DUPLEX;
++ break;
++ }
++
++ bcmphy_write(hw,PHY_CONTROL, regval);
++
++ regval = bcmphy_read(hw, PHY_CONTROL);
++ regval &= ~(MII_CR_ISOLATE);
++ bcmphy_write(hw, PHY_CONTROL, regval);
++}
++
++int bcm54616s_config_init(struct e1000_hw *hw)
++{
++ int err, reg;
++ u16 regval;
++ int i;
++
++ /* reset PHY */
++ regval = (1<<15);
++ bcmphy_write(hw, PHY_CONTROL, regval);
++
++ mdelay(10);
++
++ /* disable Power down and iso */
++ regval = bcmphy_read(hw,PHY_CONTROL);
++ regval &= ~(MII_CR_POWER_DOWN | MII_CR_ISOLATE);
++ bcmphy_write(hw, PHY_CONTROL, regval);
++
++ /* disable suport I */
++ /*0000 0100 1100 0010 */
++ bcm54xx_auxctl_write(hw, 0, 0x04c2);
++
++ regval = bcmphy_read(hw, MII_BCM54XX_AUX_CTL);
++
++ /* set 1000base-T */
++ regval = bcmphy_read(hw, PHY_1000T_CTRL);
++ regval |= (CR_1000T_FD_CAPS | CR_1000T_REPEATER_DTE);
++ bcmphy_write(hw, PHY_1000T_CTRL, regval);
++
++ /* set ctrl */
++ regval = (MII_CR_SPEED_1000 |
++ MII_CR_FULL_DUPLEX |
++ MII_CR_SPEED_SELECT_MSB);
++ bcmphy_write(hw, PHY_CONTROL, regval);
++
++ /* Setup read from auxiliary control shadow register 7 */
++ bcmphy_write(hw, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUX_CTL_ENCODE(7));
++
++ /* Read Misc Control register */
++ reg = ((bcmphy_read(hw, MII_BCM54XX_AUX_CTL) & 0x8FFF) | 0x8010);
++ bcmphy_write(hw, MII_BCM54XX_AUX_CTL, reg);
++
++ /* Enable auto-detect and copper prefer */
++ bcm54xx_shadow_write(hw, BCM54XX_SHD_AUTODETECT, 0x31);
++
++ err = bcm54xx_config_init(hw);
++
++ /* set link partner advertisement */
++ regval = MII_ANA_ASF_802_3;
++ regval |= MII_ANA_HD_10;
++ regval |= MII_ANA_HD_100;
++ regval |= MII_ANA_FD_10;
++ regval |= MII_ANA_FD_100;
++ regval |= MII_ANA_ASYM_PAUSE;
++ regval |= (MII_ANA_PAUSE | MII_ANA_ASYM_PAUSE);
++ regval |= MII_ANA_PAUSE;
++ bcmphy_write(hw, PHY_AUTONEG_ADV, regval);
++
++ i=0;
++ while (1) {
++ regval = bcm54xx_shadow_read(hw,BCM54XX_SHD_MODE);
++ if (regval & BCM54XX_SHD_MODE_SER)
++ break;
++ if (i++ > 500) {
++ //printk("SERDES no link %x\n",regval);
++ break;
++ }
++ mdelay(1); /* 1 ms */
++ }
++ return err;
++}
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2017-06-20 16:44:29.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2017-06-20 17:00:52.000000000 +0800
+@@ -317,6 +317,10 @@
+ break;
+ case BCM54616_E_PHY_ID:
+ phy->type = e1000_phy_bcm54616;
++ phy->ops.check_polarity = NULL;
++ phy->ops.get_info = igb_get_phy_info_bcm;
++ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_bcm;
++ bcm54616s_config_init(hw);
+ break;
+ case BCM50210S_E_PHY_ID:
+ break;
+@@ -1636,6 +1640,7 @@
+ ret_val = igb_e1000_copper_link_setup_82577(hw);
+ break;
+ case e1000_phy_bcm54616:
++ ret_val = igb_copper_link_setup_bcm(hw);
+ break;
+ case e1000_phy_bcm5461s:
+ break;
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.h 2017-06-20 16:44:27.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.h 2017-06-20 17:00:57.000000000 +0800
+@@ -25,6 +25,8 @@
+ #ifndef _E1000_82575_H_
+ #define _E1000_82575_H_
+
++extern void bcm54616s_linkup(struct e1000_hw *hw,int speed , int duplex);
++extern int bcm54616s_config_init(struct e1000_hw *hw);
+ #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2017-06-20 16:44:27.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2017-06-20 17:01:05.000000000 +0800
+@@ -1187,6 +1187,19 @@
+ return E1000_SUCCESS;
+ }
+
++s32 igb_copper_link_setup_bcm(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data;
++
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
++ phy_data &= ~(MII_CR_ISOLATE);
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
++
++ return 0;
++}
++
+ /**
+ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+@@ -1720,6 +1733,62 @@
+ return ret_val;
+ }
+
++s32 igb_phy_force_speed_duplex_bcm(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_data;
++ bool link;
++
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
++ if (ret_val)
++ return ret_val;
++
++ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
++
++ phy_data &= ~(MII_CR_POWER_DOWN | MII_CR_ISOLATE);
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
++ if (ret_val)
++ return ret_val;
++
++ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
++ * forced whenever speed and duplex are forced.
++ */
++ #if 0
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
++ if (ret_val)
++ return ret_val;
++
++ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
++ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
++
++ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
++ if (ret_val)
++ return ret_val;
++
++ hw_dbg("IGP PSCR: %X\n", phy_data);
++ #endif
++ udelay(1);
++
++ if (phy->autoneg_wait_to_complete) {
++ DEBUGFUNC("Waiting for forced speed/duplex link on IGP phy.\n");
++
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
++ if (ret_val)
++ return ret_val;
++
++ if (!link)
++ DEBUGFUNC("Link taking longer than expected.\n");
++
++ /* Try once more */
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
++ }
++
++ return ret_val;
++}
++
+ /**
+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+@@ -2614,6 +2683,29 @@
+ }
+
+ return ret_val;
++}
++
++s32 igb_get_phy_info_bcm(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ bool link;
++
++ if (phy->media_type != e1000_media_type_copper) {
++ DEBUGFUNC("Phy info is only valid for copper media\n");
++ return -E1000_ERR_CONFIG;
++ }
++
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
++ if (ret_val)
++ return ret_val;
++
++ if (!link) {
++ DEBUGFUNC("Phy info is only valid if link is up\n");
++ return -E1000_ERR_CONFIG;
++ }
++
++ return ret_val;
+ }
+
+ /**
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2017-06-20 16:44:27.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2017-06-20 17:01:24.000000000 +0800
+@@ -99,6 +99,9 @@
+ s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override);
+ bool e1000_is_mphy_ready(struct e1000_hw *hw);
++s32 igb_copper_link_setup_bcm(struct e1000_hw *hw);
++s32 igb_phy_force_speed_duplex_bcm(struct e1000_hw *hw);
++s32 igb_get_phy_info_bcm(struct e1000_hw *hw);
+
+ #define E1000_MAX_PHY_ADDR 8
+
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+--- a/drivers/net/ethernet/intel/igb/igb_main.c 2017-06-20 16:44:27.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/igb_main.c 2017-06-20 17:01:29.000000000 +0800
+@@ -4814,6 +4814,14 @@
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
++ switch (hw->phy.type) {
++ case e1000_phy_bcm54616:
++ bcm54616s_linkup(hw, adapter->link_speed, adapter->link_duplex);
++ break;
++ default:
++ break;
++ }
++
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* Links status message must follow this format */
+ netdev_info(netdev,
+diff -Nu a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
+--- a/drivers/net/ethernet/intel/igb/Makefile 2017-06-20 16:44:27.000000000 +0800
++++ b/drivers/net/ethernet/intel/igb/Makefile 2017-06-20 17:01:34.000000000 +0800
+@@ -35,4 +35,4 @@
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+ e1000_i210.o igb_ptp.o igb_hwmon.o \
+ e1000_manage.o igb_param.o kcompat.o e1000_api.o \
+- igb_vmdq.o igb_procfs.o igb_debugfs.o
++ igb_vmdq.o igb_procfs.o igb_debugfs.o bcm_phy.o
diff --git a/packages/base/any/kernels/3.16-lts/patches/series b/packages/base/any/kernels/3.16-lts/patches/series
index 473d70a2..8c9db7bd 100644
--- a/packages/base/any/kernels/3.16-lts/patches/series
+++ b/packages/base/any/kernels/3.16-lts/patches/series
@@ -25,3 +25,4 @@ platform-powerpc-85xx-Makefile.patch
platform-powerpc-dni-7448-r0.patch
platform-powerpc-quanta-lb9-r0.patch
driver-support-intel-igb-bcm50210-phy.patch
+driver-igb-netberg-aurora.patch
diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config b/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config
index 4c80980e..a023c63c 100644
--- a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config
+++ b/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config
@@ -2302,8 +2302,13 @@ CONFIG_DMA_OF=y
# CONFIG_DMATEST is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-# CONFIG_VFIO is not set
# CONFIG_VIRT_DRIVERS is not set
+
+CONFIG_VFIO_IOMMU_TYPE1=y
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VFIO_FSL_MC=y
+
CONFIG_VIRTIO=y
#
diff --git a/packages/base/any/kernels/3.18.25/patches/0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch b/packages/base/any/kernels/3.18.25/patches/0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch
new file mode 100644
index 00000000..ad0e4a06
--- /dev/null
+++ b/packages/base/any/kernels/3.18.25/patches/0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch
@@ -0,0 +1,101360 @@
+From 7ab86f28bfb4d36d4d741a41941a0aa971124d88 Mon Sep 17 00:00:00 2001
+From: "Chenyin.Ha"
+Date: Fri, 19 May 2017 13:44:09 +0800
+Subject: [PATCH] Patch set for booting ls2088rdb with vfio
+
+---
+ Documentation/IRQ-domain.txt | 71 +
+ Documentation/devicetree/bindings/arm/fsl.txt | 15 +
+ Documentation/devicetree/bindings/arm/gic.txt | 8 +-
+ .../devicetree/bindings/clock/qoriq-clock.txt | 64 +-
+ Documentation/devicetree/bindings/i2c/i2c-imx.txt | 11 +
+ .../devicetree/bindings/i2c/i2c-mux-pca954x.txt | 3 +
+ .../bindings/memory-controllers/fsl/ifc.txt | 3 +
+ .../devicetree/bindings/pci/designware-pcie.txt | 3 +-
+ .../devicetree/bindings/powerpc/fsl/board.txt | 14 +-
+ Documentation/devicetree/bindings/usb/dwc3.txt | 3 +-
+ Documentation/devicetree/of_selftest.txt | 20 +-
+ Documentation/devicetree/todo.txt | 1 -
+ MAINTAINERS | 60 +
+ arch/arm/Kconfig | 3 +
+ arch/arm/Makefile | 8 +-
+ arch/arm/boot/dts/Makefile | 12 +-
+ arch/arm/include/asm/dma-mapping.h | 10 +-
+ arch/arm/include/asm/mach/pci.h | 12 +-
+ arch/arm/include/asm/pci.h | 7 -
+ arch/arm/kernel/bios32.c | 39 +-
+ arch/arm/mach-iop13xx/msi.c | 10 +-
+ arch/arm64/Kconfig | 8 +-
+ arch/arm64/Makefile | 11 +-
+ arch/arm64/boot/dts/Makefile | 2 +-
+ arch/arm64/boot/dts/Makefile.rej | 10 +
+ arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts | 249 ++
+ arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts | 256 ++
+ arch/arm64/boot/dts/fsl-ls2080a.dtsi | 729 +++++
+ arch/arm64/boot/dts/fsl-ls2088a.dtsi | 833 ++++++
+ arch/arm64/boot/dts/include/dt-bindings | 1 +
+ arch/arm64/boot/dts/thermal.h | 17 +
+ arch/arm64/configs/defconfig | 1 +
+ arch/arm64/configs/nxp_ls2088rdb_config | 3034 ++++++++++++++++++++
+ arch/arm64/include/asm/device.h | 1 +
+ arch/arm64/include/asm/dma-mapping.h | 16 +-
+ arch/arm64/include/asm/io.h | 1 +
+ arch/arm64/include/asm/mmu_context.h | 43 +
+ arch/arm64/include/asm/page.h | 6 +-
+ arch/arm64/include/asm/pgtable-hwdef.h | 7 +-
+ arch/arm64/include/asm/pgtable.h | 8 +
+ arch/arm64/kernel/head.S | 37 +
+ arch/arm64/kernel/smp.c | 1 +
+ arch/arm64/mm/mmu.c | 7 +-
+ arch/arm64/mm/proc-macros.S | 10 +
+ arch/arm64/mm/proc.S | 3 +
+ arch/ia64/kernel/msi_ia64.c | 8 +-
+ arch/ia64/sn/kernel/msi_sn.c | 8 +-
+ arch/mips/pci/msi-octeon.c | 2 +-
+ arch/mips/pci/msi-xlp.c | 12 +-
+ arch/mips/pci/pci-xlr.c | 2 +-
+ arch/powerpc/include/asm/mpc85xx.h | 94 -
+ arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 3 +-
+ arch/powerpc/platforms/85xx/mpc85xx_mds.c | 2 +-
+ arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 2 +-
+ arch/powerpc/platforms/85xx/p1022_ds.c | 2 +-
+ arch/powerpc/platforms/85xx/p1022_rdk.c | 2 +-
+ arch/powerpc/platforms/85xx/smp.c | 2 +-
+ arch/powerpc/platforms/85xx/twr_p102x.c | 2 +-
+ arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 2 +-
+ arch/powerpc/platforms/cell/axon_msi.c | 8 +-
+ arch/powerpc/platforms/cell/interrupt.c | 3 +-
+ arch/powerpc/platforms/embedded6xx/flipper-pic.c | 3 +-
+ arch/powerpc/platforms/powermac/pic.c | 3 +-
+ arch/powerpc/platforms/powernv/pci.c | 2 +-
+ arch/powerpc/platforms/ps3/interrupt.c | 3 +-
+ arch/powerpc/platforms/pseries/msi.c | 2 +-
+ arch/powerpc/sysdev/ehv_pic.c | 3 +-
+ arch/powerpc/sysdev/fsl_msi.c | 6 +-
+ arch/powerpc/sysdev/i8259.c | 3 +-
+ arch/powerpc/sysdev/ipic.c | 3 +-
+ arch/powerpc/sysdev/mpic.c | 3 +-
+ arch/powerpc/sysdev/mpic_pasemi_msi.c | 6 +-
+ arch/powerpc/sysdev/mpic_u3msi.c | 6 +-
+ arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 2 +-
+ arch/powerpc/sysdev/ppc4xx_msi.c | 2 +-
+ arch/powerpc/sysdev/qe_lib/qe_ic.c | 3 +-
+ arch/powerpc/sysdev/xics/ics-opal.c | 2 +-
+ arch/powerpc/sysdev/xics/ics-rtas.c | 2 +-
+ arch/powerpc/sysdev/xics/xics-common.c | 3 +-
+ arch/s390/pci/pci.c | 10 +-
+ arch/sparc/kernel/pci_msi.c | 10 +-
+ arch/tile/kernel/pci_gx.c | 8 +-
+ arch/x86/include/asm/x86_init.h | 3 -
+ arch/x86/kernel/apic/io_apic.c | 8 +-
+ arch/x86/kernel/x86_init.c | 10 -
+ arch/x86/pci/bus_numa.c | 4 +-
+ arch/x86/pci/xen.c | 23 +-
+ drivers/acpi/acpi_lpss.c | 8 +-
+ drivers/acpi/acpi_platform.c | 4 +-
+ drivers/acpi/resource.c | 17 +-
+ drivers/base/core.c | 3 +
+ drivers/base/platform.c | 1 +
+ drivers/block/loop.c | 18 +
+ drivers/clk/Kconfig | 10 +-
+ drivers/clk/Makefile | 2 +-
+ drivers/clk/clk-qoriq.c | 1256 ++++++++
+ drivers/cpufreq/Kconfig.powerpc | 2 +-
+ drivers/dma/acpi-dma.c | 10 +-
+ drivers/i2c/busses/Kconfig | 4 +-
+ drivers/i2c/busses/i2c-imx.c | 373 ++-
+ drivers/i2c/muxes/i2c-mux-pca9541.c | 4 +-
+ drivers/i2c/muxes/i2c-mux-pca954x.c | 57 +-
+ drivers/iommu/Kconfig | 34 +-
+ drivers/iommu/Makefile | 2 +
+ drivers/iommu/amd_iommu.c | 6 +-
+ drivers/iommu/arm-smmu.c | 1382 +++++----
+ drivers/iommu/exynos-iommu.c | 2 +-
+ drivers/iommu/fsl_pamu.c | 3 +-
+ drivers/iommu/intel-iommu.c | 1 +
+ drivers/iommu/io-pgtable-arm.c | 997 +++++++
+ drivers/iommu/io-pgtable.c | 82 +
+ drivers/iommu/io-pgtable.h | 143 +
+ drivers/iommu/iommu.c | 111 +-
+ drivers/iommu/ipmmu-vmsa.c | 2 +-
+ drivers/iommu/irq_remapping.c | 8 -
+ drivers/iommu/msm_iommu.c | 1 +
+ drivers/iommu/of_iommu.c | 95 +
+ drivers/iommu/omap-iommu.c | 1 +
+ drivers/iommu/shmobile-iommu.c | 1 +
+ drivers/iommu/shmobile-ipmmu.c | 1 -
+ drivers/iommu/tegra-gart.c | 1 -
+ drivers/iommu/tegra-smmu.c | 2 +-
+ drivers/irqchip/Kconfig | 12 +
+ drivers/irqchip/Makefile | 2 +
+ drivers/irqchip/irq-armada-370-xp.c | 16 +-
+ drivers/irqchip/irq-atmel-aic.c | 40 +-
+ drivers/irqchip/irq-atmel-aic5.c | 65 +-
+ drivers/irqchip/irq-gic-common.c | 18 +-
+ drivers/irqchip/irq-gic-common.h | 2 +-
+ drivers/irqchip/irq-gic-v2m.c | 333 +++
+ drivers/irqchip/irq-gic-v3-its.c | 1630 +++++++++++
+ drivers/irqchip/irq-gic-v3.c | 180 +-
+ drivers/irqchip/irq-gic.c | 90 +-
+ drivers/irqchip/irq-hip04.c | 9 +-
+ drivers/irqchip/irq-sunxi-nmi.c | 4 +-
+ drivers/irqchip/irq-tb10x.c | 4 +-
+ drivers/memory/Kconfig | 2 +-
+ drivers/memory/fsl_ifc.c | 77 +-
+ drivers/mfd/vexpress-sysreg.c | 2 +-
+ drivers/mmc/card/block.c | 4 +
+ drivers/mmc/host/Kconfig | 10 +-
+ drivers/mmc/host/sdhci-esdhc.h | 9 +-
+ drivers/mmc/host/sdhci-of-esdhc.c | 680 ++++-
+ drivers/mmc/host/sdhci.c | 250 +-
+ drivers/mmc/host/sdhci.h | 42 +
+ drivers/mtd/nand/Kconfig | 2 +-
+ drivers/mtd/nand/fsl_ifc_nand.c | 301 +-
+ drivers/net/ethernet/freescale/Kconfig | 8 +-
+ drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 +-
+ drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 2 +-
+ .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 4 +-
+ .../net/ethernet/freescale/fs_enet/mii-bitbang.c | 2 +-
+ drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4 +-
+ drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2 +-
+ drivers/net/ethernet/freescale/gianfar.c | 8 +-
+ drivers/net/ethernet/freescale/gianfar_ptp.c | 2 +-
+ drivers/net/ethernet/freescale/ucc_geth.c | 2 +-
+ drivers/net/ethernet/freescale/xgmac_mdio.c | 194 +-
+ drivers/net/ethernet/intel/igb/e1000_82575.c | 6 +
+ drivers/net/ethernet/intel/igb/e1000_defines.h | 1 +
+ drivers/net/ethernet/intel/igb/e1000_hw.h | 1 +
+ drivers/net/ethernet/intel/igb/igb_main.c | 1 +
+ drivers/net/phy/Kconfig | 19 +-
+ drivers/net/phy/Makefile | 5 +-
+ drivers/net/phy/aquantia.c | 201 ++
+ drivers/net/phy/at803x.c | 4 +
+ drivers/net/phy/fixed.c | 336 ---
+ drivers/net/phy/fixed_phy.c | 370 +++
+ drivers/net/phy/fsl_10gkr.c | 1467 ++++++++++
+ drivers/net/phy/marvell.c | 11 +
+ drivers/net/phy/mdio_bus.c | 34 +-
+ drivers/net/phy/phy.c | 19 +-
+ drivers/net/phy/phy_device.c | 90 +-
+ drivers/net/phy/realtek.c | 82 +-
+ drivers/net/phy/teranetics.c | 135 +
+ drivers/of/base.c | 53 +-
+ drivers/of/device.c | 84 +
+ drivers/of/dynamic.c | 13 -
+ drivers/of/fdt.c | 30 +-
+ drivers/of/irq.c | 21 +
+ drivers/of/of_pci.c | 34 +-
+ drivers/of/pdt.c | 27 +-
+ drivers/of/platform.c | 139 +-
+ drivers/of/selftest.c | 71 +-
+ drivers/pci/Kconfig | 6 +
+ drivers/pci/Makefile | 1 +
+ drivers/pci/access.c | 87 +
+ drivers/pci/bus.c | 18 +-
+ drivers/pci/host-bridge.c | 22 +-
+ drivers/pci/host/Kconfig | 19 +-
+ drivers/pci/host/Makefile | 3 +
+ drivers/pci/host/pci-dra7xx.c | 8 +-
+ drivers/pci/host/pci-exynos.c | 5 +-
+ drivers/pci/host/pci-host-generic.c | 229 +-
+ drivers/pci/host/pci-keystone-dw.c | 37 +-
+ drivers/pci/host/pci-keystone.h | 4 +-
+ drivers/pci/host/pci-layerscape.c | 729 +++++
+ drivers/pci/host/pci-layerscape.h | 13 +
+ drivers/pci/host/pci-mvebu.c | 17 +-
+ drivers/pci/host/pci-tegra.c | 22 +-
+ drivers/pci/host/pci-xgene-msi.c | 595 ++++
+ drivers/pci/host/pci-xgene.c | 25 +-
+ drivers/pci/host/pcie-designware.c | 665 ++---
+ drivers/pci/host/pcie-designware.h | 24 +-
+ drivers/pci/host/pcie-rcar.c | 22 +-
+ drivers/pci/host/pcie-xilinx.c | 64 +-
+ drivers/pci/msi.c | 533 ++--
+ drivers/pci/pci.c | 1 +
+ drivers/pci/pci.h | 21 +
+ drivers/pci/pcie/portdrv_core.c | 31 +-
+ drivers/pci/probe.c | 29 +-
+ drivers/pci/quirks.c | 10 +-
+ drivers/pci/remove.c | 2 +
+ drivers/pci/search.c | 5 +-
+ drivers/pci/setup-bus.c | 1 +
+ drivers/pci/setup-irq.c | 1 +
+ drivers/pci/xen-pcifront.c | 2 +-
+ drivers/power/reset/Kconfig | 6 +
+ drivers/power/reset/Makefile | 1 +
+ drivers/power/reset/ls-reboot.c | 93 +
+ drivers/soc/Kconfig | 13 +
+ drivers/soc/Makefile | 1 +
+ drivers/soc/fsl/Kconfig | 6 +
+ drivers/soc/fsl/Kconfig.arm | 25 +
+ drivers/soc/fsl/Makefile | 6 +
+ drivers/soc/fsl/guts.c | 123 +
+ drivers/soc/fsl/ls1/Kconfig | 11 +
+ drivers/soc/fsl/ls1/Makefile | 1 +
+ drivers/soc/fsl/ls1/ftm_alarm.c | 274 ++
+ drivers/staging/Kconfig | 4 +
+ drivers/staging/Makefile | 2 +
+ drivers/staging/fsl-dpaa2/Kconfig | 12 +
+ drivers/staging/fsl-dpaa2/Makefile | 6 +
+ drivers/staging/fsl-dpaa2/ethernet/Kconfig | 36 +
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 317 ++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2957 +++++++++++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 397 +++
+ drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 732 +++++
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 +++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 ++++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 +++++++++++++++++
+ drivers/staging/fsl-dpaa2/mac/Kconfig | 24 +
+ drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 +++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 694 +++++
+ drivers/staging/fsl-mc/Kconfig | 1 +
+ drivers/staging/fsl-mc/Makefile | 2 +
+ drivers/staging/fsl-mc/TODO | 13 +
+ drivers/staging/fsl-mc/bus/Kconfig | 45 +
+ drivers/staging/fsl-mc/bus/Makefile | 24 +
+ drivers/staging/fsl-mc/bus/dpbp.c | 459 +++
+ drivers/staging/fsl-mc/bus/dpcon.c | 407 +++
+ drivers/staging/fsl-mc/bus/dpio/Makefile | 9 +
+ drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 401 +++
+ drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 +
+ drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 +++
+ drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 ++++++
+ drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 +++
+ drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 ++
+ drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 +
+ drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 +++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +
+ drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 ++++++++
+ drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 ++
+ drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 ++
+ drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 ++
+ drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 +
+ drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++
+ drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 56 +
+ drivers/staging/fsl-mc/bus/dpmcp.c | 318 ++
+ drivers/staging/fsl-mc/bus/dpmcp.h | 323 +++
+ drivers/staging/fsl-mc/bus/dpmng-cmd.h | 47 +
+ drivers/staging/fsl-mc/bus/dpmng.c | 85 +
+ drivers/staging/fsl-mc/bus/dprc-cmd.h | 87 +
+ drivers/staging/fsl-mc/bus/dprc-driver.c | 1084 +++++++
+ drivers/staging/fsl-mc/bus/dprc.c | 1218 ++++++++
+ drivers/staging/fsl-mc/bus/mc-allocator.c | 716 +++++
+ drivers/staging/fsl-mc/bus/mc-bus.c | 1347 +++++++++
+ drivers/staging/fsl-mc/bus/mc-ioctl.h | 25 +
+ drivers/staging/fsl-mc/bus/mc-restool.c | 312 ++
+ drivers/staging/fsl-mc/bus/mc-sys.c | 677 +++++
+ drivers/staging/fsl-mc/include/dpbp-cmd.h | 62 +
+ drivers/staging/fsl-mc/include/dpbp.h | 438 +++
+ drivers/staging/fsl-mc/include/dpcon-cmd.h | 162 ++
+ drivers/staging/fsl-mc/include/dpcon.h | 407 +++
+ drivers/staging/fsl-mc/include/dpmac-cmd.h | 192 ++
+ drivers/staging/fsl-mc/include/dpmac.h | 528 ++++
+ drivers/staging/fsl-mc/include/dpmng.h | 80 +
+ drivers/staging/fsl-mc/include/dprc.h | 990 +++++++
+ drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 +++++
+ drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 ++++
+ drivers/staging/fsl-mc/include/mc-cmd.h | 133 +
+ drivers/staging/fsl-mc/include/mc-private.h | 168 ++
+ drivers/staging/fsl-mc/include/mc-sys.h | 128 +
+ drivers/staging/fsl-mc/include/mc.h | 244 ++
+ drivers/staging/fsl-mc/include/net.h | 481 ++++
+ drivers/usb/core/config.c | 3 +-
+ drivers/usb/core/driver.c | 6 +-
+ drivers/usb/core/hcd-pci.c | 9 +
+ drivers/usb/core/hub.c | 66 +-
+ drivers/usb/core/quirks.c | 6 +
+ drivers/usb/dwc3/core.c | 76 +-
+ drivers/usb/dwc3/core.h | 8 +
+ drivers/usb/dwc3/host.c | 6 +
+ drivers/usb/host/xhci-pci.c | 114 +-
+ drivers/usb/host/xhci-ring.c | 6 +-
+ drivers/usb/host/xhci.c | 34 +-
+ drivers/usb/host/xhci.h | 3 +
+ drivers/vfio/Kconfig | 5 +-
+ drivers/vfio/Makefile | 1 +
+ drivers/vfio/fsl-mc/Kconfig | 9 +
+ drivers/vfio/fsl-mc/Makefile | 2 +
+ drivers/vfio/fsl-mc/vfio_fsl_mc.c | 603 ++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 273 ++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 43 +
+ drivers/vfio/pci/vfio_pci_intrs.c | 2 +-
+ drivers/vfio/vfio_iommu_type1.c | 5 +-
+ fs/Kconfig | 1 +
+ fs/Makefile | 1 +
+ fs/aufs/Kconfig | 185 ++
+ fs/aufs/Makefile | 44 +
+ fs/aufs/aufs.h | 59 +
+ fs/aufs/branch.c | 1402 +++++++++
+ fs/aufs/branch.h | 279 ++
+ fs/aufs/conf.mk | 38 +
+ fs/aufs/cpup.c | 1368 +++++++++
+ fs/aufs/cpup.h | 94 +
+ fs/aufs/dbgaufs.c | 432 +++
+ fs/aufs/dbgaufs.h | 48 +
+ fs/aufs/dcsub.c | 224 ++
+ fs/aufs/dcsub.h | 123 +
+ fs/aufs/debug.c | 436 +++
+ fs/aufs/debug.h | 228 ++
+ fs/aufs/dentry.c | 1129 ++++++++
+ fs/aufs/dentry.h | 234 ++
+ fs/aufs/dinfo.c | 544 ++++
+ fs/aufs/dir.c | 756 +++++
+ fs/aufs/dir.h | 131 +
+ fs/aufs/dynop.c | 379 +++
+ fs/aufs/dynop.h | 76 +
+ fs/aufs/export.c | 831 ++++++
+ fs/aufs/f_op.c | 781 +++++
+ fs/aufs/fhsm.c | 426 +++
+ fs/aufs/file.c | 857 ++++++
+ fs/aufs/file.h | 291 ++
+ fs/aufs/finfo.c | 156 +
+ fs/aufs/fstype.h | 400 +++
+ fs/aufs/hfsnotify.c | 288 ++
+ fs/aufs/hfsplus.c | 56 +
+ fs/aufs/hnotify.c | 714 +++++
+ fs/aufs/i_op.c | 1460 ++++++++++
+ fs/aufs/i_op_add.c | 930 ++++++
+ fs/aufs/i_op_del.c | 506 ++++
+ fs/aufs/i_op_ren.c | 1013 +++++++
+ fs/aufs/iinfo.c | 277 ++
+ fs/aufs/inode.c | 522 ++++
+ fs/aufs/inode.h | 686 +++++
+ fs/aufs/ioctl.c | 219 ++
+ fs/aufs/loop.c | 146 +
+ fs/aufs/loop.h | 52 +
+ fs/aufs/magic.mk | 30 +
+ fs/aufs/module.c | 222 ++
+ fs/aufs/module.h | 105 +
+ fs/aufs/mvdown.c | 703 +++++
+ fs/aufs/opts.c | 1878 ++++++++++++
+ fs/aufs/opts.h | 212 ++
+ fs/aufs/plink.c | 506 ++++
+ fs/aufs/poll.c | 52 +
+ fs/aufs/posix_acl.c | 98 +
+ fs/aufs/procfs.c | 169 ++
+ fs/aufs/rdu.c | 388 +++
+ fs/aufs/rwsem.h | 191 ++
+ fs/aufs/sbinfo.c | 348 +++
+ fs/aufs/spl.h | 111 +
+ fs/aufs/super.c | 1041 +++++++
+ fs/aufs/super.h | 626 ++++
+ fs/aufs/sysaufs.c | 104 +
+ fs/aufs/sysaufs.h | 101 +
+ fs/aufs/sysfs.c | 376 +++
+ fs/aufs/sysrq.c | 157 +
+ fs/aufs/vdir.c | 888 ++++++
+ fs/aufs/vfsub.c | 864 ++++++
+ fs/aufs/vfsub.h | 315 ++
+ fs/aufs/wbr_policy.c | 765 +++++
+ fs/aufs/whout.c | 1061 +++++++
+ fs/aufs/whout.h | 85 +
+ fs/aufs/wkq.c | 213 ++
+ fs/aufs/wkq.h | 91 +
+ fs/aufs/xattr.c | 344 +++
+ fs/aufs/xino.c | 1343 +++++++++
+ fs/buffer.c | 2 +-
+ fs/dcache.c | 2 +-
+ fs/fcntl.c | 4 +-
+ fs/inode.c | 2 +-
+ fs/proc/base.c | 2 +-
+ fs/proc/nommu.c | 5 +-
+ fs/proc/task_mmu.c | 7 +-
+ fs/proc/task_nommu.c | 5 +-
+ fs/splice.c | 10 +-
+ include/asm-generic/msi.h | 32 +
+ include/asm-generic/vmlinux.lds.h | 2 +
+ include/linux/acpi.h | 6 +-
+ include/linux/device.h | 24 +
+ include/linux/dma-mapping.h | 13 +-
+ include/linux/file.h | 1 +
+ include/linux/fs.h | 3 +
+ include/linux/fsl/guts.h | 195 ++
+ include/linux/fsl/svr.h | 95 +
+ include/linux/fsl_ifc.h | 116 +-
+ include/linux/interrupt.h | 14 +
+ include/linux/iommu.h | 76 +-
+ include/linux/iopoll.h | 144 +
+ include/linux/irq.h | 75 +-
+ include/linux/irqchip/arm-gic-v3.h | 165 ++
+ include/linux/irqchip/arm-gic.h | 2 +
+ include/linux/irqdomain.h | 127 +-
+ include/linux/irqhandler.h | 14 +
+ include/linux/mm.h | 22 +
+ include/linux/mm_types.h | 2 +
+ include/linux/mmc/sdhci.h | 16 +-
+ include/linux/msi.h | 199 +-
+ include/linux/of.h | 11 +-
+ include/linux/of_device.h | 3 +
+ include/linux/of_iommu.h | 25 +
+ include/linux/of_irq.h | 1 +
+ include/linux/of_pci.h | 15 +-
+ include/linux/of_pdt.h | 3 +-
+ include/linux/of_platform.h | 6 +
+ include/linux/pci.h | 32 +-
+ include/linux/phy.h | 1 +
+ include/linux/phy_fixed.h | 11 +-
+ include/linux/resource_ext.h | 77 +
+ include/linux/splice.h | 6 +
+ include/linux/usb/quirks.h | 3 +
+ include/trace/events/iommu.h | 31 +-
+ include/uapi/linux/Kbuild | 1 +
+ include/uapi/linux/aufs_type.h | 419 +++
+ include/uapi/linux/vfio.h | 5 +
+ kernel/fork.c | 2 +-
+ kernel/irq/Kconfig | 15 +
+ kernel/irq/Makefile | 1 +
+ kernel/irq/chip.c | 163 +-
+ kernel/irq/generic-chip.c | 36 +-
+ kernel/irq/irqdomain.c | 585 +++-
+ kernel/irq/manage.c | 93 +
+ kernel/irq/msi.c | 356 +++
+ kernel/resource.c | 25 +
+ mm/Makefile | 2 +-
+ mm/filemap.c | 2 +-
+ mm/fremap.c | 16 +-
+ mm/memory.c | 2 +-
+ mm/mmap.c | 12 +-
+ mm/nommu.c | 10 +-
+ mm/prfile.c | 86 +
+ scripts/Kbuild.include | 6 +
+ scripts/Makefile.dtbinst | 51 +
+ scripts/Makefile.lib | 12 -
+ sound/soc/fsl/mpc8610_hpcd.c | 2 +-
+ sound/soc/fsl/p1022_ds.c | 2 +-
+ sound/soc/fsl/p1022_rdk.c | 2 +-
+ 467 files changed, 87181 insertions(+), 3457 deletions(-)
+ create mode 100644 arch/arm64/boot/dts/Makefile.rej
+ create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts
+ create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts
+ create mode 100644 arch/arm64/boot/dts/fsl-ls2080a.dtsi
+ create mode 100644 arch/arm64/boot/dts/fsl-ls2088a.dtsi
+ create mode 120000 arch/arm64/boot/dts/include/dt-bindings
+ create mode 100644 arch/arm64/boot/dts/thermal.h
+ create mode 100644 arch/arm64/configs/nxp_ls2088rdb_config
+ delete mode 100644 arch/powerpc/include/asm/mpc85xx.h
+ create mode 100644 drivers/clk/clk-qoriq.c
+ create mode 100644 drivers/iommu/io-pgtable-arm.c
+ create mode 100644 drivers/iommu/io-pgtable.c
+ create mode 100644 drivers/iommu/io-pgtable.h
+ create mode 100644 drivers/irqchip/irq-gic-v2m.c
+ create mode 100644 drivers/irqchip/irq-gic-v3-its.c
+ create mode 100644 drivers/net/phy/aquantia.c
+ delete mode 100644 drivers/net/phy/fixed.c
+ create mode 100644 drivers/net/phy/fixed_phy.c
+ create mode 100644 drivers/net/phy/fsl_10gkr.c
+ create mode 100644 drivers/net/phy/teranetics.c
+ create mode 100644 drivers/pci/host/pci-layerscape.c
+ create mode 100644 drivers/pci/host/pci-layerscape.h
+ create mode 100644 drivers/pci/host/pci-xgene-msi.c
+ create mode 100644 drivers/power/reset/ls-reboot.c
+ create mode 100644 drivers/soc/fsl/Kconfig
+ create mode 100644 drivers/soc/fsl/Kconfig.arm
+ create mode 100644 drivers/soc/fsl/Makefile
+ create mode 100644 drivers/soc/fsl/guts.c
+ create mode 100644 drivers/soc/fsl/ls1/Kconfig
+ create mode 100644 drivers/soc/fsl/ls1/Makefile
+ create mode 100644 drivers/soc/fsl/ls1/ftm_alarm.c
+ create mode 100644 drivers/staging/fsl-dpaa2/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
+ create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
+ create mode 100644 drivers/staging/fsl-mc/Kconfig
+ create mode 100644 drivers/staging/fsl-mc/Makefile
+ create mode 100644 drivers/staging/fsl-mc/TODO
+ create mode 100644 drivers/staging/fsl-mc/bus/Kconfig
+ create mode 100644 drivers/staging/fsl-mc/bus/Makefile
+ create mode 100644 drivers/staging/fsl-mc/bus/dpbp.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dpmng.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/bus/dprc-driver.c
+ create mode 100644 drivers/staging/fsl-mc/bus/dprc.c
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-allocator.c
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-bus.c
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c
+ create mode 100644 drivers/staging/fsl-mc/bus/mc-sys.c
+ create mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpbp.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpcon-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpcon.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpmac-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpmac.h
+ create mode 100644 drivers/staging/fsl-mc/include/dpmng.h
+ create mode 100644 drivers/staging/fsl-mc/include/dprc.h
+ create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h
+ create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h
+ create mode 100644 drivers/staging/fsl-mc/include/mc-cmd.h
+ create mode 100644 drivers/staging/fsl-mc/include/mc-private.h
+ create mode 100644 drivers/staging/fsl-mc/include/mc-sys.h
+ create mode 100644 drivers/staging/fsl-mc/include/mc.h
+ create mode 100644 drivers/staging/fsl-mc/include/net.h
+ create mode 100644 drivers/vfio/fsl-mc/Kconfig
+ create mode 100644 drivers/vfio/fsl-mc/Makefile
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+ create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+ create mode 100644 fs/aufs/Kconfig
+ create mode 100644 fs/aufs/Makefile
+ create mode 100644 fs/aufs/aufs.h
+ create mode 100644 fs/aufs/branch.c
+ create mode 100644 fs/aufs/branch.h
+ create mode 100644 fs/aufs/conf.mk
+ create mode 100644 fs/aufs/cpup.c
+ create mode 100644 fs/aufs/cpup.h
+ create mode 100644 fs/aufs/dbgaufs.c
+ create mode 100644 fs/aufs/dbgaufs.h
+ create mode 100644 fs/aufs/dcsub.c
+ create mode 100644 fs/aufs/dcsub.h
+ create mode 100644 fs/aufs/debug.c
+ create mode 100644 fs/aufs/debug.h
+ create mode 100644 fs/aufs/dentry.c
+ create mode 100644 fs/aufs/dentry.h
+ create mode 100644 fs/aufs/dinfo.c
+ create mode 100644 fs/aufs/dir.c
+ create mode 100644 fs/aufs/dir.h
+ create mode 100644 fs/aufs/dynop.c
+ create mode 100644 fs/aufs/dynop.h
+ create mode 100644 fs/aufs/export.c
+ create mode 100644 fs/aufs/f_op.c
+ create mode 100644 fs/aufs/fhsm.c
+ create mode 100644 fs/aufs/file.c
+ create mode 100644 fs/aufs/file.h
+ create mode 100644 fs/aufs/finfo.c
+ create mode 100644 fs/aufs/fstype.h
+ create mode 100644 fs/aufs/hfsnotify.c
+ create mode 100644 fs/aufs/hfsplus.c
+ create mode 100644 fs/aufs/hnotify.c
+ create mode 100644 fs/aufs/i_op.c
+ create mode 100644 fs/aufs/i_op_add.c
+ create mode 100644 fs/aufs/i_op_del.c
+ create mode 100644 fs/aufs/i_op_ren.c
+ create mode 100644 fs/aufs/iinfo.c
+ create mode 100644 fs/aufs/inode.c
+ create mode 100644 fs/aufs/inode.h
+ create mode 100644 fs/aufs/ioctl.c
+ create mode 100644 fs/aufs/loop.c
+ create mode 100644 fs/aufs/loop.h
+ create mode 100644 fs/aufs/magic.mk
+ create mode 100644 fs/aufs/module.c
+ create mode 100644 fs/aufs/module.h
+ create mode 100644 fs/aufs/mvdown.c
+ create mode 100644 fs/aufs/opts.c
+ create mode 100644 fs/aufs/opts.h
+ create mode 100644 fs/aufs/plink.c
+ create mode 100644 fs/aufs/poll.c
+ create mode 100644 fs/aufs/posix_acl.c
+ create mode 100644 fs/aufs/procfs.c
+ create mode 100644 fs/aufs/rdu.c
+ create mode 100644 fs/aufs/rwsem.h
+ create mode 100644 fs/aufs/sbinfo.c
+ create mode 100644 fs/aufs/spl.h
+ create mode 100644 fs/aufs/super.c
+ create mode 100644 fs/aufs/super.h
+ create mode 100644 fs/aufs/sysaufs.c
+ create mode 100644 fs/aufs/sysaufs.h
+ create mode 100644 fs/aufs/sysfs.c
+ create mode 100644 fs/aufs/sysrq.c
+ create mode 100644 fs/aufs/vdir.c
+ create mode 100644 fs/aufs/vfsub.c
+ create mode 100644 fs/aufs/vfsub.h
+ create mode 100644 fs/aufs/wbr_policy.c
+ create mode 100644 fs/aufs/whout.c
+ create mode 100644 fs/aufs/whout.h
+ create mode 100644 fs/aufs/wkq.c
+ create mode 100644 fs/aufs/wkq.h
+ create mode 100644 fs/aufs/xattr.c
+ create mode 100644 fs/aufs/xino.c
+ create mode 100644 include/asm-generic/msi.h
+ create mode 100644 include/linux/fsl/guts.h
+ create mode 100644 include/linux/fsl/svr.h
+ create mode 100644 include/linux/iopoll.h
+ create mode 100644 include/linux/irqhandler.h
+ create mode 100644 include/linux/resource_ext.h
+ create mode 100644 include/uapi/linux/aufs_type.h
+ create mode 100644 kernel/irq/msi.c
+ create mode 100644 mm/prfile.c
+ create mode 100644 scripts/Makefile.dtbinst
+
+diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
+index 8a8b82c..39cfa72 100644
+--- a/Documentation/IRQ-domain.txt
++++ b/Documentation/IRQ-domain.txt
+@@ -151,3 +151,74 @@ used and no descriptor gets allocated it is very important to make sure
+ that the driver using the simple domain call irq_create_mapping()
+ before any irq_find_mapping() since the latter will actually work
+ for the static IRQ assignment case.
++
++==== Hierarchy IRQ domain ====
++On some architectures, there may be multiple interrupt controllers
++involved in delivering an interrupt from the device to the target CPU.
++Let's look at a typical interrupt delivering path on x86 platforms:
++
++Device --> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU
++
++There are three interrupt controllers involved:
++1) IOAPIC controller
++2) Interrupt remapping controller
++3) Local APIC controller
++
++To support such a hardware topology and make software architecture match
++hardware architecture, an irq_domain data structure is built for each
++interrupt controller and those irq_domains are organized into hierarchy.
++When building irq_domain hierarchy, the irq_domain near to the device is
++child and the irq_domain near to CPU is parent. So a hierarchy structure
++as below will be built for the example above.
++ CPU Vector irq_domain (root irq_domain to manage CPU vectors)
++ ^
++ |
++ Interrupt Remapping irq_domain (manage irq_remapping entries)
++ ^
++ |
++ IOAPIC irq_domain (manage IOAPIC delivery entries/pins)
++
++There are four major interfaces to use hierarchy irq_domain:
++1) irq_domain_alloc_irqs(): allocate IRQ descriptors and interrupt
++ controller related resources to deliver these interrupts.
++2) irq_domain_free_irqs(): free IRQ descriptors and interrupt controller
++ related resources associated with these interrupts.
++3) irq_domain_activate_irq(): activate interrupt controller hardware to
++ deliver the interrupt.
++4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
++ to stop delivering the interrupt.
++
++Following changes are needed to support hierarchy irq_domain.
++1) a new field 'parent' is added to struct irq_domain; it's used to
++ maintain irq_domain hierarchy information.
++2) a new field 'parent_data' is added to struct irq_data; it's used to
++ build hierarchy irq_data to match hierarchy irq_domains. The irq_data
++ is used to store irq_domain pointer and hardware irq number.
++3) new callbacks are added to struct irq_domain_ops to support hierarchy
++ irq_domain operations.
++
++With support of hierarchy irq_domain and hierarchy irq_data ready, an
++irq_domain structure is built for each interrupt controller, and an
++irq_data structure is allocated for each irq_domain associated with an
++IRQ. Now we could go one step further to support stacked(hierarchy)
++irq_chip. That is, an irq_chip is associated with each irq_data along
++the hierarchy. A child irq_chip may implement a required action by
++itself or by cooperating with its parent irq_chip.
++
++With stacked irq_chip, interrupt controller driver only needs to deal
++with the hardware managed by itself and may ask for services from its
++parent irq_chip when needed. So we could achieve a much cleaner
++software architecture.
++
++For an interrupt controller driver to support hierarchy irq_domain, it
++needs to:
++1) Implement irq_domain_ops.alloc and irq_domain_ops.free
++2) Optionally implement irq_domain_ops.activate and
++ irq_domain_ops.deactivate.
++3) Optionally implement an irq_chip to manage the interrupt controller
++ hardware.
++4) No need to implement irq_domain_ops.map and irq_domain_ops.unmap,
++ they are unused with hierarchy irq_domain.
++
++Hierarchy irq_domain may also be used to support other architectures,
++such as ARM, ARM64 etc.
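
A minimal sketch (not part of this patch) of what the documentation above asks a
hierarchy-capable interrupt controller driver to provide; the foo_* names are
illustrative only, and the decoding of the hardware IRQ number from 'arg' is elided:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static struct irq_chip foo_irq_chip = {
            .name       = "foo",
            /* defer mask/unmask/eoi to the parent irq_chip in the hierarchy */
            .irq_mask   = irq_chip_mask_parent,
            .irq_unmask = irq_chip_unmask_parent,
            .irq_eoi    = irq_chip_eoi_parent,
    };

    /* .alloc: configure this level, then ask the parent domain for its part */
    static int foo_domain_alloc(struct irq_domain *d, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
    {
            irq_hw_number_t hwirq = 0;      /* normally decoded from 'arg' */
            int i, ret;

            ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
            if (ret)
                    return ret;

            for (i = 0; i < nr_irqs; i++)
                    irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
                                                  &foo_irq_chip, NULL);
            return 0;
    }

    /* .free: release this level's and the parent's resources */
    static void foo_domain_free(struct irq_domain *d, unsigned int virq,
                                unsigned int nr_irqs)
    {
            irq_domain_free_irqs_common(d, virq, nr_irqs);
    }

    static const struct irq_domain_ops foo_domain_ops = {
            .alloc = foo_domain_alloc,
            .free  = foo_domain_free,
            /* .activate/.deactivate are optional; .map/.unmap stay unset */
    };
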
+diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
+index e935d7d..5c9f338 100644
+--- a/Documentation/devicetree/bindings/arm/fsl.txt
++++ b/Documentation/devicetree/bindings/arm/fsl.txt
+@@ -74,3 +74,18 @@ Required root node properties:
+ i.MX6q generic board
+ Required root node properties:
+ - compatible = "fsl,imx6q";
++
++Freescale ARMv8 based Layerscape SoC family Device Tree Bindings
++----------------------------------------------------------------
++
++LS2080A ARMv8 based Simulator model
++Required root node properties:
++ - compatible = "fsl,ls2080a-simu", "fsl,ls2080a";
++
++LS2080A ARMv8 based QDS Board
++Required root node properties:
++ - compatible = "fsl,ls2080a-qds", "fsl,ls2080a";
++
++LS2080A ARMv8 based RDB Board
++Required root node properties:
++ - compatible = "fsl,ls2080a-rdb", "fsl,ls2080a";
+diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
+index c7d2fa1..e87d3d7 100644
+--- a/Documentation/devicetree/bindings/arm/gic.txt
++++ b/Documentation/devicetree/bindings/arm/gic.txt
+@@ -31,12 +31,16 @@ Main node required properties:
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = low-to-high edge triggered
+- 2 = high-to-low edge triggered
++ 2 = high-to-low edge triggered (invalid for SPIs)
+ 4 = active high level-sensitive
+- 8 = active low level-sensitive
++ 8 = active low level-sensitive (invalid for SPIs).
+ bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of
+ the 8 possible cpus attached to the GIC. A bit set to '1' indicated
+ the interrupt is wired to that CPU. Only valid for PPI interrupts.
++ Also note that the configurability of PPI interrupts is IMPLEMENTATION
++ DEFINED and as such not guaranteed to be present (most SoC available
++ in 2014 seem to ignore the setting of this flag and use the hardware
++ default value).
+
+ - reg : Specifies base physical address(s) and size of the GIC registers. The
+ first region is the GIC distributor register base and size. The 2nd region is
+diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+index 5666812..128fc72 100644
+--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
++++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+@@ -1,6 +1,6 @@
+-* Clock Block on Freescale CoreNet Platforms
++* Clock Block on Freescale QorIQ Platforms
+
+-Freescale CoreNet chips take primary clocking input from the external
++Freescale QorIQ chips take primary clocking input from the external
+ SYSCLK signal. The SYSCLK input (frequency) is multiplied using
+ multiple phase locked loops (PLL) to create a variety of frequencies
+ which can then be passed to a variety of internal logic, including
+@@ -13,14 +13,16 @@ which the chip complies.
+ Chassis Version Example Chips
+ --------------- -------------
+ 1.0 p4080, p5020, p5040
+-2.0 t4240, b4860, t1040
++2.0 t4240, b4860
+
+ 1. Clock Block Binding
+
+ Required properties:
+-- compatible: Should contain a specific clock block compatible string
+- and a single chassis clock compatible string.
+- Clock block strings include, but not limited to, one of the:
++- compatible: Should contain a chip-specific clock block compatible
++ string and (if applicable) may contain a chassis-version clock
++ compatible string.
++
++ Chip-specific strings are of the form "fsl,<chip>-clockgen", such as:
+ * "fsl,p2041-clockgen"
+ * "fsl,p3041-clockgen"
+ * "fsl,p4080-clockgen"
+@@ -29,15 +31,15 @@ Required properties:
+ * "fsl,t4240-clockgen"
+ * "fsl,b4420-clockgen"
+ * "fsl,b4860-clockgen"
+- Chassis clock strings include:
++ * "fsl,ls1021a-clockgen"
++ Chassis-version clock strings include:
+ * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
+ * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
+ - reg: Describes the address of the device's resources within the
+ address space defined by its parent bus, and resource zero
+ represents the clock register set
+-- clock-frequency: Input system clock frequency
+
+-Recommended properties:
++Optional properties:
+ - ranges: Allows valid translation between child's address space and
+ parent's. Must be present if the device has sub-nodes.
+ - #address-cells: Specifies the number of cells used to represent
+@@ -46,8 +48,46 @@ Recommended properties:
+ - #size-cells: Specifies the number of cells used to represent
+ the size of an address. Must be present if the device has
+ sub-nodes and set to 1 if present
++- clock-frequency: Input system clock frequency (SYSCLK)
++- clocks: If clock-frequency is not specified, sysclk may be provided
++ as an input clock. Either clock-frequency or clocks must be
++ provided.
++
++2. Clock Provider
++
++The clockgen node should act as a clock provider, though in older device
++trees the children of the clockgen node are the clock providers.
++
++When the clockgen node is a clock provider, #clock-cells = <2>.
++The first cell of the clock specifier is the clock type, and the
++second cell is the clock index for the specified type.
++
++ Type# Name Index Cell
++ 0 sysclk must be 0
++ 1 cmux index (n in CLKCnCSR)
++ 2 hwaccel index (n in CLKCGnHWACSR)
++ 3 fman 0 for fm1, 1 for fm2
++ 4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4
++
++3. Example
++
++ clockgen: global-utilities@e1000 {
++ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
++ clock-frequency = <133333333>;
++ reg = <0xe1000 0x1000>;
++ #clock-cells = <2>;
++ };
++
++ fman@400000 {
++ ...
++ clocks = <&clockgen 3 0>;
++ ...
++ };
++}
++4. Legacy Child Nodes
+
+-2. Clock Provider/Consumer Binding
++NOTE: These nodes are deprecated. Kernels should continue to support
++device trees with these nodes, but new device trees should not use them.
+
+ Most of the bindings are from the common clock binding[1].
+ [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+@@ -79,7 +119,7 @@ Recommended properties:
+ - reg: Should be the offset and length of clock block base address.
+ The length should be 4.
+
+-Example for clock block and clock provider:
++Legacy Example:
+ / {
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
+@@ -131,7 +171,7 @@ Example for clock block and clock provider:
+ };
+ }
+
+-Example for clock consumer:
++Example for legacy clock consumer:
+
+ / {
+ cpu0: PowerPC,e5500@0 {
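
On the consumer side, a clock handed out by the clockgen provider described above is
used through the ordinary clk API; a brief sketch under the assumption of a platform
driver bound to the fman node from the example (function and message are illustrative):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int fman_enable_clock(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            /* resolves the node's "clocks = <&clockgen 3 0>;" specifier */
            clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;

            dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
            return 0;
    }
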
+diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+index 4a8513e..52d37fd 100644
+--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt
++++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+@@ -11,6 +11,8 @@ Required properties:
+ Optional properties:
+ - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz.
+ The absence of the propoerty indicates the default frequency 100 kHz.
++- dmas: A list of two dma specifiers, one for each entry in dma-names.
++- dma-names: should contain "tx" and "rx".
+
+ Examples:
+
+@@ -26,3 +28,12 @@ i2c@70038000 { /* HS-I2C on i.MX51 */
+ interrupts = <64>;
+ clock-frequency = <400000>;
+ };
++
++i2c0: i2c@40066000 { /* i2c0 on vf610 */
++ compatible = "fsl,vf610-i2c";
++ reg = <0x40066000 0x1000>;
++ interrupts =<0 71 0x04>;
++ dmas = <&edma0 0 50>,
++ <&edma0 0 51>;
++ dma-names = "rx","tx";
++};
+diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+index 34a3fb6..cf53d5f 100644
+--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
++++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+@@ -16,6 +16,9 @@ Required Properties:
+ Optional Properties:
+
+ - reset-gpios: Reference to the GPIO connected to the reset input.
++ - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
++ children in idle state. This is necessary for example, if there are several
++ multiplexers on the bus and the devices behind them use same I2C addresses.
+
+
+ Example:
+diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
+index d5e3704..89427b0 100644
+--- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
++++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
+@@ -18,6 +18,8 @@ Properties:
+ interrupt (NAND_EVTER_STAT). If there is only one,
+ that interrupt reports both types of event.
+
++- little-endian : If this property is absent, the big-endian mode will
++ be in use as default for registers.
+
+ - ranges : Each range corresponds to a single chipselect, and covers
+ the entire access window as configured.
+@@ -34,6 +36,7 @@ Example:
+ #size-cells = <1>;
+ reg = <0x0 0xffe1e000 0 0x2000>;
+ interrupts = <16 2 19 2>;
++ little-endian;
+
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0x0 0xee000000 0x02000000
+diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
+index 9f4faa8..0036ab3 100644
+--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
++++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
+@@ -14,7 +14,6 @@ Required properties:
+ - interrupt-map-mask and interrupt-map: standard PCI properties
+ to define the mapping of the PCIe interface to interrupt
+ numbers.
+-- num-lanes: number of lanes to use
+ - clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: Must include the following entries:
+@@ -22,6 +21,8 @@ Required properties:
+ - "pcie_bus"
+
+ Optional properties:
++- num-lanes: number of lanes to use (this property should be specified unless
++ the link is brought already up in BIOS)
+ - reset-gpio: gpio pin number of power good signal
+ - bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
+ specify this property, to keep backwards compatibility a range of 0x00-0xff
+diff --git a/Documentation/devicetree/bindings/powerpc/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt
+index cff38bd..89c90f4 100644
+--- a/Documentation/devicetree/bindings/powerpc/fsl/board.txt
++++ b/Documentation/devicetree/bindings/powerpc/fsl/board.txt
+@@ -21,11 +21,14 @@ Example:
+
+ This is the memory-mapped registers for on board FPGA.
+
+-Required properities:
++Required properties:
+ - compatible: should be a board-specific string followed by a string
+ indicating the type of FPGA. Example:
+- "fsl,-fpga", "fsl,fpga-pixis"
++ "fsl,-fpga", "fsl,fpga-pixis" or
++ "fsl,-fpga", "fsl,fpga-qixis"
+ - reg: should contain the address and the length of the FPGA register set.
++
++Optional properties:
+ - interrupt-parent: should specify phandle for the interrupt controller.
+ - interrupts: should specify event (wakeup) IRQ.
+
+@@ -38,6 +41,13 @@ Example (P1022DS):
+ interrupts = <8 8 0 0>;
+ };
+
++Example (LS2080A-RDB):
++
++ cpld@3,0 {
++ compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis";
++ reg = <0x3 0 0x10000>;
++ };
++
+ * Freescale BCSR GPIO banks
+
+ Some BCSR registers act as simple GPIO controllers, each such
+diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
+index 471366d..1f9900c 100644
+--- a/Documentation/devicetree/bindings/usb/dwc3.txt
++++ b/Documentation/devicetree/bindings/usb/dwc3.txt
+@@ -1,6 +1,7 @@
+ synopsys DWC3 CORE
+
+-DWC3- USB3 CONTROLLER
++DWC3- USB3 CONTROLLER. Complies to the generic USB binding properties
++ as described in 'usb/generic.txt'
+
+ Required properties:
+ - compatible: must be "snps,dwc3"
+diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt
+index 1e3d5c9..57a808b 100644
+--- a/Documentation/devicetree/of_selftest.txt
++++ b/Documentation/devicetree/of_selftest.txt
+@@ -63,7 +63,6 @@ struct device_node {
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+- struct device_node *allnext; /* next in list of all nodes */
+ ...
+ };
+
+@@ -99,12 +98,6 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null
+ Figure 1: Generic structure of un-flattened device tree
+
+
+-*allnext: it is used to link all the nodes of DT into a list. So, for the
+- above tree the list would be as follows:
+-
+-root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2->
+-child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null
+-
+ Before executing OF selftest, it is required to attach the test data to
+ machine's device tree (if present). So, when selftest_data_add() is called,
+ at first it reads the flattened device tree data linked into the kernel image
+@@ -131,11 +124,6 @@ root ('/')
+ test-child01 null null null
+
+
+-allnext list:
+-
+-root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2
+-->test-sibling3->null
+-
+ Figure 2: Example test data tree to be attached to live tree.
+
+ According to the scenario above, the live tree is already present so it isn't
+@@ -204,8 +192,6 @@ detached and then moving up the parent nodes are removed, and eventually the
+ whole tree). selftest_data_remove() calls detach_node_and_children() that uses
+ of_detach_node() to detach the nodes from the live device tree.
+
+-To detach a node, of_detach_node() first updates all_next linked list, by
+-attaching the previous node's allnext to current node's allnext pointer. And
+-then, it either updates the child pointer of given node's parent to its
+-sibling or attaches the previous sibling to the given node's sibling, as
+-appropriate. That is it :)
++To detach a node, of_detach_node() either updates the child pointer of given
++node's parent to its sibling or attaches the previous sibling to the given
++node's sibling, as appropriate. That is it :)
+diff --git a/Documentation/devicetree/todo.txt b/Documentation/devicetree/todo.txt
+index c3cf065..b5139d1 100644
+--- a/Documentation/devicetree/todo.txt
++++ b/Documentation/devicetree/todo.txt
+@@ -2,7 +2,6 @@ Todo list for devicetree:
+
+ === General structure ===
+ - Switch from custom lists to (h)list_head for nodes and properties structure
+-- Remove of_allnodes list and iterate using list of child nodes alone
+
+ === CONFIG_OF_DYNAMIC ===
+ - Switch to RCU for tree updates and get rid of global spinlock
+diff --git a/MAINTAINERS b/MAINTAINERS
+index c721042..cb2296a 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1562,6 +1562,7 @@ M: Will Deacon
+ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+ S: Maintained
+ F: drivers/iommu/arm-smmu.c
++F: drivers/iommu/io-pgtable-arm.c
+
+ ARM64 PORT (AARCH64 ARCHITECTURE)
+ M: Catalin Marinas
+@@ -1795,6 +1796,20 @@ F: include/linux/audit.h
+ F: include/uapi/linux/audit.h
+ F: kernel/audit*
+
++AUFS (advanced multi layered unification filesystem) FILESYSTEM
++M: "J. R. Okajima"
++L: linux-unionfs@vger.kernel.org
++L: aufs-users@lists.sourceforge.net (members only)
++W: http://aufs.sourceforge.net
++T: git://git.code.sf.net/p/aufs/aufs3-linux
++T: git://github.com/sfjro/aufs3-linux.git
++S: Supported
++F: Documentation/filesystems/aufs/
++F: Documentation/ABI/testing/debugfs-aufs
++F: Documentation/ABI/testing/sysfs-aufs
++F: fs/aufs/
++F: include/uapi/linux/aufs_type.h
++
+ AUXILIARY DISPLAY DRIVERS
+ M: Miguel Ojeda Sandonis
+ W: http://miguelojeda.es/auxdisplay.htm
+@@ -3972,6 +3987,33 @@ F: sound/soc/fsl/fsl*
+ F: sound/soc/fsl/imx*
+ F: sound/soc/fsl/mpc8610_hpcd.c
+
++FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
++M: J. German Rivera
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-mc/
++
++FREESCALE DPAA2 ETH DRIVER
++M: Ioana Radulescu
++M: Bogdan Hamciuc
++M: Cristian Sovaiala
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-dpaa2/ethernet/
++
++FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER
++M: Lijun Pan
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-mc/bus/mc-ioctl.h
++F: drivers/staging/fsl-mc/bus/mc-restool.c
++
++FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER
++M: Alex Marginean
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-dpaa2/mac/
++
+ FREEVXFS FILESYSTEM
+ M: Christoph Hellwig
+ W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
+@@ -7047,6 +7089,16 @@ S: Maintained
+ F: Documentation/devicetree/bindings/pci/xgene-pci.txt
+ F: drivers/pci/host/pci-xgene.c
+
++PCI DRIVER FOR FREESCALE LAYERSCAPE
++M: Minghuan Lian
++M: Mingkai Hu
++M: Roy Zang
++L: linuxppc-dev@lists.ozlabs.org
++L: linux-pci@vger.kernel.org
++L: linux-arm-kernel@lists.infradead.org
++S: Maintained
++F: drivers/pci/host/*layerscape*
++
+ PCI DRIVER FOR IMX6
+ M: Richard Zhu
+ M: Lucas Stach
+@@ -7122,6 +7174,14 @@ L: linux-pci@vger.kernel.org
+ S: Maintained
+ F: drivers/pci/host/*spear*
+
++PCI MSI DRIVER FOR APPLIEDMICRO XGENE
++M: Duc Dang
++L: linux-pci@vger.kernel.org
++L: linux-arm-kernel@lists.infradead.org
++S: Maintained
++F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
++F: drivers/pci/host/pci-xgene-msi.c
++
+ PCMCIA SUBSYSTEM
+ P: Linux PCMCIA Team
+ L: linux-pcmcia@lists.infradead.org
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 89c4b5c..29544f0 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1292,6 +1292,9 @@ config PCI_DOMAINS
+ bool
+ depends on PCI
+
++config PCI_DOMAINS_GENERIC
++ def_bool PCI_DOMAINS
++
+ config PCI_NANOENGINE
+ bool "BSE nanoEngine PCI support"
+ depends on SA1100_NANOENGINE
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index b5d7988..93a30a2 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -320,8 +320,12 @@ $(INSTALL_TARGETS):
+ $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@
+
+ PHONY += dtbs dtbs_install
+-dtbs dtbs_install: prepare scripts
+- $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $@
++
++dtbs: prepare scripts
++ $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE)
++
++dtbs_install:
++ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts MACHINE=$(MACHINE)
+
+ # We use MRPROPER_FILES and CLEAN_FILES now
+ archclean:
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index 38c89ca..6e784fa 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -517,15 +517,7 @@ dtb-$(CONFIG_MACH_DOVE) += dove-cm-a510.dtb \
+ dove-dove-db.dtb
+ dtb-$(CONFIG_ARCH_MEDIATEK) += mt6589-aquaris5.dtb
+
+-targets += dtbs dtbs_install
+-targets += $(dtb-y)
+ endif
+
+-# *.dtb used to be generated in the directory above. Clean out the
+-# old build results so people don't accidentally use them.
+-dtbs: $(addprefix $(obj)/, $(dtb-y))
+- $(Q)rm -f $(obj)/../*.dtb
+-
+-clean-files := *.dtb
+-
+-dtbs_install: $(addsuffix _dtbinst_, $(dtb-y))
++always := $(dtb-y)
++clean-files := *.dtb
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 85738b2..f3c0d95 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -121,12 +121,14 @@ static inline unsigned long dma_max_pfn(struct device *dev)
+ }
+ #define dma_max_pfn(dev) dma_max_pfn(dev)
+
+-static inline int set_arch_dma_coherent_ops(struct device *dev)
++static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
++ u64 size, struct iommu_ops *iommu,
++ bool coherent)
+ {
+- set_dma_ops(dev, &arm_coherent_dma_ops);
+- return 0;
++ if (coherent)
++ set_dma_ops(dev, &arm_coherent_dma_ops);
+ }
+-#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
++#define arch_setup_dma_ops arch_setup_dma_ops
+
+ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+ {
+diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
+index 7fc4278..c074e7a 100644
+--- a/arch/arm/include/asm/mach/pci.h
++++ b/arch/arm/include/asm/mach/pci.h
+@@ -19,9 +19,7 @@ struct pci_bus;
+ struct device;
+
+ struct hw_pci {
+-#ifdef CONFIG_PCI_DOMAINS
+- int domain;
+-#endif
++ struct msi_controller *msi_ctrl;
+ struct pci_ops *ops;
+ int nr_controllers;
+ void **private_data;
+@@ -36,16 +34,14 @@ struct hw_pci {
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
+- void (*add_bus)(struct pci_bus *bus);
+- void (*remove_bus)(struct pci_bus *bus);
+ };
+
+ /*
+ * Per-controller structure
+ */
+ struct pci_sys_data {
+-#ifdef CONFIG_PCI_DOMAINS
+- int domain;
++#ifdef CONFIG_PCI_MSI
++ struct msi_controller *msi_ctrl;
+ #endif
+ struct list_head node;
+ int busnr; /* primary bus number */
+@@ -65,8 +61,6 @@ struct pci_sys_data {
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align);
+- void (*add_bus)(struct pci_bus *bus);
+- void (*remove_bus)(struct pci_bus *bus);
+ void *private_data; /* platform controller private data */
+ };
+
+diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
+index 7e95d85..585dc33 100644
+--- a/arch/arm/include/asm/pci.h
++++ b/arch/arm/include/asm/pci.h
+@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void)
+ }
+
+ #ifdef CONFIG_PCI_DOMAINS
+-static inline int pci_domain_nr(struct pci_bus *bus)
+-{
+- struct pci_sys_data *root = bus->sysdata;
+-
+- return root->domain;
+-}
+-
+ static inline int pci_proc_domain(struct pci_bus *bus)
+ {
+ return pci_domain_nr(bus);
+diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
+index 17a26c1..a5cd259 100644
+--- a/arch/arm/kernel/bios32.c
++++ b/arch/arm/kernel/bios32.c
+@@ -18,6 +18,15 @@
+
+ static int debug_pci;
+
++#ifdef CONFIG_PCI_MSI
++struct msi_controller *pcibios_msi_controller(struct pci_dev *dev)
++{
++ struct pci_sys_data *sysdata = dev->bus->sysdata;
++
++ return sysdata->msi_ctrl;
++}
++#endif
++
+ /*
+ * We can't use pci_get_device() here since we are
+ * called from interrupt context.
+@@ -360,20 +369,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
+ }
+ EXPORT_SYMBOL(pcibios_fixup_bus);
+
+-void pcibios_add_bus(struct pci_bus *bus)
+-{
+- struct pci_sys_data *sys = bus->sysdata;
+- if (sys->add_bus)
+- sys->add_bus(bus);
+-}
+-
+-void pcibios_remove_bus(struct pci_bus *bus)
+-{
+- struct pci_sys_data *sys = bus->sysdata;
+- if (sys->remove_bus)
+- sys->remove_bus(bus);
+-}
+-
+ /*
+ * Swizzle the device pin each time we cross a bridge. If a platform does
+ * not provide a swizzle function, we perform the standard PCI swizzling.
+@@ -427,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+ {
+ int ret;
+- struct pci_host_bridge_window *window;
++ struct resource_entry *window;
+
+ if (list_empty(&sys->resources)) {
+ pci_add_resource_offset(&sys->resources,
+ &iomem_resource, sys->mem_offset);
+ }
+
+- list_for_each_entry(window, &sys->resources, list) {
++ resource_list_for_each_entry(window, &sys->resources)
+ if (resource_type(window->res) == IORESOURCE_IO)
+ return 0;
+- }
+
+ sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
+ sys->io_res.end = (busnr + 1) * SZ_64K - 1;
+@@ -468,15 +462,13 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
+ if (!sys)
+ panic("PCI: unable to allocate sys data!");
+
+-#ifdef CONFIG_PCI_DOMAINS
+- sys->domain = hw->domain;
++#ifdef CONFIG_PCI_MSI
++ sys->msi_ctrl = hw->msi_ctrl;
+ #endif
+ sys->busnr = busnr;
+ sys->swizzle = hw->swizzle;
+ sys->map_irq = hw->map_irq;
+ sys->align_resource = hw->align_resource;
+- sys->add_bus = hw->add_bus;
+- sys->remove_bus = hw->remove_bus;
+ INIT_LIST_HEAD(&sys->resources);
+
+ if (hw->private_data)
+@@ -494,8 +486,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
+ if (hw->scan)
+ sys->bus = hw->scan(nr, sys);
+ else
+- sys->bus = pci_scan_root_bus(parent, sys->busnr,
+- hw->ops, sys, &sys->resources);
++ sys->bus = pci_scan_root_bus_msi(parent,
++ sys->busnr, hw->ops, sys,
++ &sys->resources, hw->msi_ctrl);
+
+ if (!sys->bus)
+ panic("PCI: unable to scan bus!");
+diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
+index e7730cf..9f89e76 100644
+--- a/arch/arm/mach-iop13xx/msi.c
++++ b/arch/arm/mach-iop13xx/msi.c
+@@ -126,10 +126,10 @@ static void iop13xx_msi_nop(struct irq_data *d)
+ static struct irq_chip iop13xx_msi_chip = {
+ .name = "PCI-MSI",
+ .irq_ack = iop13xx_msi_nop,
+- .irq_enable = unmask_msi_irq,
+- .irq_disable = mask_msi_irq,
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
++ .irq_enable = pci_msi_unmask_irq,
++ .irq_disable = pci_msi_mask_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
+ };
+
+ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+@@ -153,7 +153,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+ id = iop13xx_cpu_id();
+ msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f);
+
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq);
+
+ return 0;
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 00b9c48..329f5f4 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -13,7 +13,9 @@ config ARM64
+ select ARM_ARCH_TIMER
+ select ARM_GIC
+ select AUDIT_ARCH_COMPAT_GENERIC
++ select ARM_GIC_V2M if PCI_MSI
+ select ARM_GIC_V3
++ select ARM_GIC_V3_ITS if PCI_MSI
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS
+ select COMMON_CLK
+@@ -166,6 +168,11 @@ config ARCH_XGENE
+ help
+ This enables support for AppliedMicro X-Gene SOC Family
+
++config ARCH_LAYERSCAPE
++ bool "ARMv8 based Freescale Layerscape SoC family"
++ help
++ This enables support for the Freescale Layerscape SoC family.
++
+ endmenu
+
+ menu "Bus support"
+@@ -366,7 +373,6 @@ config ARM64_VA_BITS_42
+
+ config ARM64_VA_BITS_48
+ bool "48-bit"
+- depends on !ARM_SMMU
+
+ endchoice
+
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 2d54c55..7cf8a29 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -74,8 +74,13 @@ zinstall install: vmlinux
+ %.dtb: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+-dtbs: scripts
+- $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
++PHONY += dtbs dtbs_install
++
++dtbs: prepare scripts
++ $(Q)$(MAKE) $(build)=$(boot)/dts
++
++dtbs_install:
++ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts
+
+ PHONY += vdso_install
+ vdso_install:
+@@ -84,11 +89,13 @@ vdso_install:
+ # We use MRPROPER_FILES and CLEAN_FILES now
+ archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
++ $(Q)$(MAKE) $(clean)=$(boot)/dts
+
+ define archhelp
+ echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
+ echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
+ echo '* dtbs - Build device tree blobs for enabled boards'
++ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'
+ echo ' install - Install uncompressed kernel'
+ echo ' zinstall - Install compressed kernel'
+ echo ' Install using (your) ~/bin/installkernel or'
+diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
+index f8001a6..2644389 100644
+--- a/arch/arm64/boot/dts/Makefile
++++ b/arch/arm64/boot/dts/Makefile
+@@ -1,6 +1,6 @@
+ dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb
+ dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
+-dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
++dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb arm64-nxp-ls2088ardb-r1.dtb
+
+ targets += dtbs
+ targets += $(dtb-y)
+diff --git a/arch/arm64/boot/dts/Makefile.rej b/arch/arm64/boot/dts/Makefile.rej
+new file mode 100644
+index 0000000..3610e7d
+--- /dev/null
++++ b/arch/arm64/boot/dts/Makefile.rej
+@@ -0,0 +1,10 @@
++--- arch/arm64/boot/dts/Makefile
+++++ arch/arm64/boot/dts/Makefile
++@@ -1,6 +1,7 @@
++ dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb
++ dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
++ dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
+++dtb-$(CONFIG_ARCH_LAYERSCAPE) += arm64-nxp-ls2080ardb-r0.dtb
++
++ targets += dtbs
++ targets += $(dtb-y)
+diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts
+new file mode 100644
+index 0000000..5da2834
+--- /dev/null
++++ b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts
+@@ -0,0 +1,249 @@
++/*
++ * Device Tree file for NXP LS2080a RDB board
++ *
++ */
++
++/dts-v1/;
++
++#include "fsl-ls2080a.dtsi"
++
++/ {
++ model = "arm64-nxp-ls2080ardb-r0";
++ compatible = "fsl,ls2080a-rdb", "fsl,ls2080a";
++};
++
++&esdhc {
++ status = "okay";
++};
++
++&ifc {
++ status = "okay";
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000
++ 0x2 0x0 0x5 0x30000000 0x00010000
++ 0x3 0x0 0x5 0x20000000 0x00010000>;
++
++ nor@0,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "cfi-flash";
++ reg = <0x0 0x0 0x8000000>;
++ bank-width = <2>;
++ device-width = <1>;
++
++ partition@0 {
++ /* SoC RCW, this location must not be altered */
++ reg = <0x0 0x100000>;
++ label = "rcw (RO)";
++ read-only;
++ };
++
++ partition@1 {
++ /* U-Boot image */
++ reg = <0x100000 0x100000>;
++ label = "uboot";
++ };
++
++ partition@2 {
++ /* U-Boot environment variables, 1MB */
++ reg = <0x200000 0x100000>;
++ label = "uboot-env";
++ env_size = <0x20000>;
++ };
++
++ partition@3 {
++ /* MC firmware, 4MB*/
++ reg = <0x300000 0x400000>;
++ label = "mc_firmware";
++ };
++
++ partition@4 {
++ /* MC DPL Blob, 1MB */
++ reg = <0x700000 0x100000>;
++ label = "mc_dpl_blob";
++ };
++
++ partition@5 {
++ /* MC DPC Blob, 1MB */
++ reg = <0x800000 0x100000>;
++ label = "mc_dpc_blob";
++ };
++
++ partition@6 {
++ /* AIOP FW, 4MB */
++ reg = <0x900000 0x400000>;
++ label = "aiop_fw";
++ };
++
++ partition@7 {
++ /* DebugServerFW, 2MB */
++ reg = <0xd00000 0x200000>;
++ label = "DebugServer_fw";
++ };
++ };
++
++ nand@2,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "fsl,ifc-nand";
++ reg = <0x2 0x0 0x10000>;
++ };
++
++ cpld@3,0 {
++ reg = <0x3 0x0 0x10000>;
++ compatible = "fsl,ls2080a-rdb-qixis", "fsl,fpga-qixis";
++ };
++
++};
++
++&i2c0 {
++ status = "okay";
++ pca9547@75 {
++ compatible = "nxp,pca9547";
++ reg = <0x75>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ i2c-mux-never-disable;
++ i2c@1 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x01>;
++ rtc@68 {
++ compatible = "dallas,ds3232";
++ reg = <0x68>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ adt7481@4c {
++ compatible = "adi,adt7461";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&i2c1 {
++ status = "disabled";
++};
++
++&i2c2 {
++ status = "disabled";
++};
++
++&i2c3 {
++ status = "disabled";
++};
++
++&dspi {
++ status = "okay";
++ dflash0: n25q512a {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <0>;
++ };
++};
++
++&qspi {
++ status = "disabled";
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&sata1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
++
++&emdio1 {
++ status = "disabled";
++ /* CS4340 PHYs */
++ mdio1_phy1: emdio1_phy@1 {
++ reg = <0x10>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy2: emdio1_phy@2 {
++ reg = <0x11>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy3: emdio1_phy@3 {
++ reg = <0x12>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy4: emdio1_phy@4 {
++ reg = <0x13>;
++ phy-connection-type = "xfi";
++ };
++};
++
++&emdio2 {
++ /* AQR405 PHYs */
++ mdio2_phy1: emdio2_phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 1 0x4>; /* Level high type */
++ reg = <0x0>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy2: emdio2_phy@2 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 2 0x4>; /* Level high type */
++ reg = <0x1>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy3: emdio2_phy@3 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 4 0x4>; /* Level high type */
++ reg = <0x2>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy4: emdio2_phy@4 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 5 0x4>; /* Level high type */
++ reg = <0x3>;
++ phy-connection-type = "xfi";
++ };
++};
++
++/* Update DPMAC connections to external PHYs, under the assumption of
++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board.
++ */
++&dpmac1 {
++ phy-handle = <&mdio1_phy1>;
++};
++&dpmac2 {
++ phy-handle = <&mdio1_phy2>;
++};
++&dpmac3 {
++ phy-handle = <&mdio1_phy3>;
++};
++&dpmac4 {
++ phy-handle = <&mdio1_phy4>;
++};
++&dpmac5 {
++ phy-handle = <&mdio2_phy1>;
++};
++&dpmac6 {
++ phy-handle = <&mdio2_phy2>;
++};
++&dpmac7 {
++ phy-handle = <&mdio2_phy3>;
++};
++&dpmac8 {
++ phy-handle = <&mdio2_phy4>;
++};
+diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts
+new file mode 100644
+index 0000000..0433cf2
+--- /dev/null
++++ b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts
+@@ -0,0 +1,256 @@
++/*
++ * Device Tree file for NXP LS2088a RDB board
++ *
++ * Copyright (C) 2016, Freescale Semiconductor
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++/dts-v1/;
++
++#include "fsl-ls2088a.dtsi"
++
++/ {
++ model = "arm64-nxp-ls2088ardb-r1";
++ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a";
++};
++
++&esdhc {
++ status = "okay";
++};
++
++&ifc {
++ status = "okay";
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000
++ 0x2 0x0 0x5 0x30000000 0x00010000
++ 0x3 0x0 0x5 0x20000000 0x00010000>;
++
++ nor@0,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "cfi-flash";
++ reg = <0x0 0x0 0x8000000>;
++ bank-width = <2>;
++ device-width = <1>;
++
++ partition@0 {
++ /* SoC RCW, this location must not be altered */
++ reg = <0x0 0x100000>;
++ label = "rcw (RO)";
++ read-only;
++ };
++
++ partition@1 {
++ /* U-Boot image */
++ reg = <0x100000 0x100000>;
++ label = "uboot";
++ };
++
++ partition@2 {
++ /* U-Boot environment variables, 1MB */
++ reg = <0x200000 0x100000>;
++ label = "uboot-env";
++ env_size = <0x20000>;
++ };
++
++ partition@3 {
++ /* MC firmware, 4MB*/
++ reg = <0x300000 0x400000>;
++ label = "mc_firmware";
++ };
++
++ partition@4 {
++ /* MC DPL Blob, 1MB */
++ reg = <0x700000 0x100000>;
++ label = "mc_dpl_blob";
++ };
++
++ partition@5 {
++ /* MC DPC Blob, 1MB */
++ reg = <0x800000 0x100000>;
++ label = "mc_dpc_blob";
++ };
++
++ partition@6 {
++ /* AIOP FW, 4MB */
++ reg = <0x900000 0x400000>;
++ label = "aiop_fw";
++ };
++
++ partition@7 {
++ /* DebugServerFW, 2MB */
++ reg = <0xd00000 0x200000>;
++ label = "DebugServer_fw";
++ };
++ };
++
++ nand@2,0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "fsl,ifc-nand";
++ reg = <0x2 0x0 0x10000>;
++ };
++
++ cpld@3,0 {
++ reg = <0x3 0x0 0x10000>;
++ compatible = "fsl,ls2088a-rdb-qixis", "fsl,fpga-qixis";
++ };
++};
++
++&ftm0 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++ pca9547@75 {
++ compatible = "nxp,pca9547";
++ reg = <0x75>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ i2c-mux-never-disable;
++ i2c@1 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x01>;
++ rtc@68 {
++ compatible = "dallas,ds3232";
++ reg = <0x68>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ adt7481@4c {
++ compatible = "adi,adt7461";
++ reg = <0x4c>;
++ };
++ };
++ };
++};
++
++&i2c1 {
++ status = "disabled";
++};
++
++&i2c2 {
++ status = "disabled";
++};
++
++&i2c3 {
++ status = "disabled";
++};
++
++&dspi {
++ status = "okay";
++ dflash0: n25q512a {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "st,m25p80";
++ spi-max-frequency = <3000000>;
++ reg = <0>;
++ };
++};
++
++&qspi {
++ status = "disabled";
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&sata1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
++
++&emdio1 {
++ /* CS4340 PHYs */
++ mdio1_phy1: emdio1_phy@1 {
++ reg = <0x10>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy2: emdio1_phy@2 {
++ reg = <0x11>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy3: emdio1_phy@3 {
++ reg = <0x12>;
++ phy-connection-type = "xfi";
++ };
++ mdio1_phy4: emdio1_phy@4 {
++ reg = <0x13>;
++ phy-connection-type = "xfi";
++ };
++};
++
++&emdio2 {
++ /* AQR405 PHYs */
++ mdio2_phy1: emdio2_phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 1 0x4>; /* Level high type */
++ reg = <0x0>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy2: emdio2_phy@2 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 2 0x4>; /* Level high type */
++ reg = <0x1>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy3: emdio2_phy@3 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 4 0x4>; /* Level high type */
++ reg = <0x2>;
++ phy-connection-type = "xfi";
++ };
++ mdio2_phy4: emdio2_phy@4 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ interrupts = <0 5 0x4>; /* Level high type */
++ reg = <0x3>;
++ phy-connection-type = "xfi";
++ };
++};
++
++/* Update DPMAC connections to external PHYs, under the assumption of
++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board.
++ */
++&dpmac1 {
++ phy-handle = <&mdio1_phy1>;
++};
++&dpmac2 {
++ phy-handle = <&mdio1_phy2>;
++};
++&dpmac3 {
++ phy-handle = <&mdio1_phy3>;
++};
++&dpmac4 {
++ phy-handle = <&mdio1_phy4>;
++};
++&dpmac5 {
++ phy-handle = <&mdio2_phy1>;
++};
++&dpmac6 {
++ phy-handle = <&mdio2_phy2>;
++};
++&dpmac7 {
++ phy-handle = <&mdio2_phy3>;
++};
++&dpmac8 {
++ phy-handle = <&mdio2_phy4>;
++};
+diff --git a/arch/arm64/boot/dts/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/fsl-ls2080a.dtsi
+new file mode 100644
+index 0000000..5e53b04
+--- /dev/null
++++ b/arch/arm64/boot/dts/fsl-ls2080a.dtsi
+@@ -0,0 +1,729 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-2080A family SoC.
++ *
++ * Copyright (C) 2014-2015, Freescale Semiconductor
++ *
++ * Bhupesh Sharma
++ * Harninder Rai
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include
++
++/memreserve/ 0x80000000 0x00010000;
++
++/ {
++ compatible = "fsl,ls2080a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ /* We have 4 clusters having 2 Cortex-A57 cores each */
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x0>;
++ clocks = <&clockgen 1 0>;
++ #cooling-cells = <2>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x1>;
++ clocks = <&clockgen 1 0>;
++ };
++
++ cpu2: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x100>;
++ clocks = <&clockgen 1 1>;
++ #cooling-cells = <2>;
++ };
++
++ cpu3: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x101>;
++ clocks = <&clockgen 1 1>;
++ };
++
++ cpu4: cpu@200 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x200>;
++ clocks = <&clockgen 1 2>;
++ #cooling-cells = <2>;
++ };
++
++ cpu5: cpu@201 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x201>;
++ clocks = <&clockgen 1 2>;
++ };
++
++ cpu6: cpu@300 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x300>;
++ clocks = <&clockgen 1 3>;
++ #cooling-cells = <2>;
++ };
++
++ cpu7: cpu@301 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57";
++ reg = <0x0 0x301>;
++ clocks = <&clockgen 1 3>;
++ };
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
++ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ interrupts = <1 9 0x4>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ clockgen: clocking@1300000 {
++ compatible = "fsl,ls2080a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ tmu: tmu@1f80000 {
++ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu";
++ reg = <0x0 0x1f80000 0x0 0x10000>;
++ interrupts = <0 23 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration = <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ little-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 4>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map1 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map2 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu4 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map3 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu6 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ serial0: serial@21c0500 {
++ device_type = "serial";
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0500 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ serial1: serial@21c0600 {
++ device_type = "serial";
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0600 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ /* TODO: WRIOP (CCSR?) */
++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B96000 0x0 0x1000>;
++ device_type = "mdio"; /* TODO: is this necessary? */
++ little-endian; /* force the driver in LE mode */
++
++ /* Not necessary on the QDS, but needed on the RDB */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B97000 0x0 0x1000>;
++ device_type = "mdio"; /* TODO: is this necessary? */
++ little-endian; /* force the driver in LE mode */
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ ifc: ifc@2240000 {
++ compatible = "fsl,ifc", "simple-bus";
++ reg = <0x0 0x2240000 0x0 0x20000>;
++ interrupts = <0 21 0x4>; /* Level high type */
++ little-endian;
++ #address-cells = <2>;
++ #size-cells = <1>;
++
++ ranges = <0 0 0x5 0x80000000 0x08000000
++ 2 0 0x5 0x30000000 0x00010000
++ 3 0 0x5 0x20000000 0x00010000>;
++ };
++
++ esdhc: esdhc@2140000 {
++ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
++ reg = <0x0 0x2140000 0x0 0x10000>;
++ interrupts = <0 28 0x4>; /* Level high type */
++ clock-frequency = <0>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ little-endian;
++ bus-width = <4>;
++ };
++
++ ftm0: ftm0@2800000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x2800000 0x0 0x10000>;
++ interrupts = <0 44 4>;
++ };
++
++ reset: reset@1E60000 {
++ compatible = "fsl,ls-reset";
++ reg = <0x0 0x1E60000 0x0 0x10000>;
++ };
++
++ dspi: dspi@2100000 {
++ compatible = "fsl,ls2085a-dspi", "fsl,ls2080a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2100000 0x0 0x10000>;
++ interrupts = <0 26 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <0>;
++ };
++
++ i2c0: i2c@2000000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2000000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c1: i2c@2010000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2010000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c2: i2c@2020000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2020000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c3: i2c@2030000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2030000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ qspi: quadspi@20c0000 {
++ compatible = "fsl,ls2080a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x20c0000 0x0 0x10000>,
++ <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "qspi_en", "qspi";
++ };
++
++ pcie@3400000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x10 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 108 0x4>; /* Level high type */
++ interrupt-names = "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
++ <0000 0 0 2 &gic 0 0 0 110 4>,
++ <0000 0 0 3 &gic 0 0 0 111 4>,
++ <0000 0 0 4 &gic 0 0 0 112 4>;
++ };
++
++ pcie@3500000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x12 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 113 0x4>; /* Level high type */
++ interrupt-names = "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
++ <0000 0 0 2 &gic 0 0 0 115 4>,
++ <0000 0 0 3 &gic 0 0 0 116 4>,
++ <0000 0 0 4 &gic 0 0 0 117 4>;
++ };
++
++ pcie@3600000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x14 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 118 0x4>; /* Level high type */
++ interrupt-names = "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ num-lanes = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
++ <0000 0 0 2 &gic 0 0 0 120 4>,
++ <0000 0 0 3 &gic 0 0 0 121 4>,
++ <0000 0 0 4 &gic 0 0 0 122 4>;
++ };
++
++ pcie@3700000 {
++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
++ 0x16 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 123 0x4>; /* Level high type */
++ interrupt-names = "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
++ <0000 0 0 2 &gic 0 0 0 125 4>,
++ <0000 0 0 3 &gic 0 0 0 126 4>,
++ <0000 0 0 4 &gic 0 0 0 127 4>;
++ };
++
++ sata0: sata@3200000 {
++ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>;
++ interrupts = <0 133 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ };
++
++ sata1: sata@3210000 {
++ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci";
++ reg = <0x0 0x3210000 0x0 0x10000>;
++ interrupts = <0 136 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ };
++
++ usb0: usb3@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ };
++
++ usb1: usb3@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #global-interrupts = <12>;
++ interrupts = <0 13 4>, /* global secure fault */
++ <0 14 4>, /* combined secure interrupt */
++ <0 15 4>, /* global non-secure fault */
++ <0 16 4>, /* combined non-secure interrupt */
++ /* performance counter interrupts 0-7 */
++ <0 211 4>,
++ <0 212 4>,
++ <0 213 4>,
++ <0 214 4>,
++ <0 215 4>,
++ <0 216 4>,
++ <0 217 4>,
++ <0 218 4>,
++ /* per context interrupt, 64 interrupts */
++ <0 146 4>,
++ <0 147 4>,
++ <0 148 4>,
++ <0 149 4>,
++ <0 150 4>,
++ <0 151 4>,
++ <0 152 4>,
++ <0 153 4>,
++ <0 154 4>,
++ <0 155 4>,
++ <0 156 4>,
++ <0 157 4>,
++ <0 158 4>,
++ <0 159 4>,
++ <0 160 4>,
++ <0 161 4>,
++ <0 162 4>,
++ <0 163 4>,
++ <0 164 4>,
++ <0 165 4>,
++ <0 166 4>,
++ <0 167 4>,
++ <0 168 4>,
++ <0 169 4>,
++ <0 170 4>,
++ <0 171 4>,
++ <0 172 4>,
++ <0 173 4>,
++ <0 174 4>,
++ <0 175 4>,
++ <0 176 4>,
++ <0 177 4>,
++ <0 178 4>,
++ <0 179 4>,
++ <0 180 4>,
++ <0 181 4>,
++ <0 182 4>,
++ <0 183 4>,
++ <0 184 4>,
++ <0 185 4>,
++ <0 186 4>,
++ <0 187 4>,
++ <0 188 4>,
++ <0 189 4>,
++ <0 190 4>,
++ <0 191 4>,
++ <0 192 4>,
++ <0 193 4>,
++ <0 194 4>,
++ <0 195 4>,
++ <0 196 4>,
++ <0 197 4>,
++ <0 198 4>,
++ <0 199 4>,
++ <0 200 4>,
++ <0 201 4>,
++ <0 202 4>,
++ <0 203 4>,
++ <0 204 4>,
++ <0 205 4>,
++ <0 206 4>,
++ <0 207 4>,
++ <0 208 4>,
++ <0 209 4>;
++ mmu-masters = <&fsl_mc 0x300 0>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */
++ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */
++ <1 11 0x1>, /* Virtual PPI, edge triggered */
++ <1 10 0x1>; /* Hypervisor PPI, edge triggered */
++ arm,reread-timer;
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ #stream-id-cells = <2>;
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ /*
++ * Define the maximum number of MACs present on the SoC.
++ * They won't necessarily be all probed, since the
++ * Data Path Layout file and the MC firmware can put fewer
++ * actual DPMAC objects on the MC bus.
++ */
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <1>;
++ };
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <2>;
++ };
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <3>;
++ };
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <4>;
++ };
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <5>;
++ };
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <6>;
++ };
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <7>;
++ };
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <8>;
++ };
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <9>;
++ };
++ dpmac10: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++ dpmac11: dpmac@11 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xb>;
++ };
++ dpmac12: dpmac@12 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xc>;
++ };
++ dpmac13: dpmac@13 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xd>;
++ };
++ dpmac14: dpmac@14 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xe>;
++ };
++ dpmac15: dpmac@15 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xf>;
++ };
++ dpmac16: dpmac@16 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x10>;
++ };
++ };
++ };
++
++ ccn@4000000 {
++ compatible = "arm,ccn-504";
++ reg = <0x0 0x04000000 0x0 0x01000000>;
++ interrupts = <0 12 4>;
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>;
++ /* DRAM space 1 - 2 GB DRAM */
++ };
++};
+diff --git a/arch/arm64/boot/dts/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/fsl-ls2088a.dtsi
+new file mode 100644
+index 0000000..2e3529a
+--- /dev/null
++++ b/arch/arm64/boot/dts/fsl-ls2088a.dtsi
+@@ -0,0 +1,833 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-2088A family SoC.
++ *
++ * Copyright (C) 2016, Freescale Semiconductor
++ *
++ * Abhimanyu Saini
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPLv2 or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ * a) This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ * b) Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following
++ * conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "thermal.h"
++
++/memreserve/ 0x80000000 0x00010000;
++
++/ {
++ compatible = "fsl,ls2088a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x0>;
++ clocks = <&clockgen 1 0>;
++ #cooling-cells = <2>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x1>;
++ clocks = <&clockgen 1 0>;
++ };
++
++ cpu2: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x100>;
++ clocks = <&clockgen 1 1>;
++ #cooling-cells = <2>;
++ };
++
++ cpu3: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x101>;
++ clocks = <&clockgen 1 1>;
++ };
++
++ cpu4: cpu@200 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x200>;
++ clocks = <&clockgen 1 2>;
++ #cooling-cells = <2>;
++ };
++
++ cpu5: cpu@201 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x201>;
++ clocks = <&clockgen 1 2>;
++ };
++
++ cpu6: cpu@300 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x300>;
++ clocks = <&clockgen 1 3>;
++ #cooling-cells = <2>;
++ };
++
++ cpu7: cpu@301 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ reg = <0x0 0x301>;
++ clocks = <&clockgen 1 3>;
++ };
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
++ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ interrupts = <1 9 0x4>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ clockgen: clocking@1300000 {
++ compatible = "fsl,ls2088a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ tmu: tmu@1f80000 {
++ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu", "fsl,ls2088a-tmu";
++ reg = <0x0 0x1f80000 0x0 0x10000>;
++ interrupts = <0 23 0x4>;
++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
++ fsl,tmu-calibration = <0x00000000 0x00000026
++ 0x00000001 0x0000002d
++ 0x00000002 0x00000032
++ 0x00000003 0x00000039
++ 0x00000004 0x0000003f
++ 0x00000005 0x00000046
++ 0x00000006 0x0000004d
++ 0x00000007 0x00000054
++ 0x00000008 0x0000005a
++ 0x00000009 0x00000061
++ 0x0000000a 0x0000006a
++ 0x0000000b 0x00000071
++
++ 0x00010000 0x00000025
++ 0x00010001 0x0000002c
++ 0x00010002 0x00000035
++ 0x00010003 0x0000003d
++ 0x00010004 0x00000045
++ 0x00010005 0x0000004e
++ 0x00010006 0x00000057
++ 0x00010007 0x00000061
++ 0x00010008 0x0000006b
++ 0x00010009 0x00000076
++
++ 0x00020000 0x00000029
++ 0x00020001 0x00000033
++ 0x00020002 0x0000003d
++ 0x00020003 0x00000049
++ 0x00020004 0x00000056
++ 0x00020005 0x00000061
++ 0x00020006 0x0000006d
++
++ 0x00030000 0x00000021
++ 0x00030001 0x0000002a
++ 0x00030002 0x0000003c
++ 0x00030003 0x0000004e>;
++ little-endian;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 4>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map1 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map2 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu4 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ map3 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu6 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++ };
++
++ serial0: serial@21c0500 {
++ device_type = "serial";
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0500 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++
++ serial1: serial@21c0600 {
++ device_type = "serial";
++ compatible = "fsl,ns16550", "ns16550a";
++ reg = <0x0 0x21c0600 0x0 0x100>;
++ clocks = <&clockgen 4 3>;
++ interrupts = <0 32 0x4>; /* Level high type */
++ };
++ cluster1_core0_watchdog: wdt@c000000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc000000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster1_core1_watchdog: wdt@c010000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc010000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core0_watchdog: wdt@c100000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc100000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster2_core1_watchdog: wdt@c110000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc110000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster3_core0_watchdog: wdt@c200000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc200000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster3_core1_watchdog: wdt@c210000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc210000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster4_core0_watchdog: wdt@c300000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc300000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ cluster4_core1_watchdog: wdt@c310000 {
++ compatible = "arm,sp805-wdt", "arm,primecell";
++ reg = <0x0 0xc310000 0x0 0x1000>;
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "apb_pclk", "wdog_clk";
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <0 36 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <0 37 0x4>; /* Level high type */
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ /* TODO: WRIOP (CCSR?) */
++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B96000 0x0 0x1000>;
++ device_type = "mdio"; /* TODO: is this necessary? */
++ little-endian; /* force the driver in LE mode */
++
++ /* Not necessary on the QDS, but needed on the RDB */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8B97000 0x0 0x1000>;
++ device_type = "mdio"; /* TODO: is this necessary? */
++ little-endian; /* force the driver in LE mode */
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ ifc: ifc@2240000 {
++ compatible = "fsl,ifc", "simple-bus";
++ reg = <0x0 0x2240000 0x0 0x20000>;
++ interrupts = <0 21 0x4>; /* Level high type */
++ little-endian;
++ #address-cells = <2>;
++ #size-cells = <1>;
++
++ ranges = <0 0 0x5 0x80000000 0x08000000
++ 2 0 0x5 0x30000000 0x00010000
++ 3 0 0x5 0x20000000 0x00010000>;
++ };
++
++ esdhc: esdhc@2140000 {
++ compatible = "fsl,ls2088a-esdhc", "fsl,ls2080a-esdhc",
++ "fsl,esdhc";
++ reg = <0x0 0x2140000 0x0 0x10000>;
++ interrupts = <0 28 0x4>; /* Level high type */
++ clock-frequency = <0>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ little-endian;
++ bus-width = <4>;
++ };
++
++ ftm0: ftm0@2800000 {
++ compatible = "fsl,ftm-alarm";
++ reg = <0x0 0x2800000 0x0 0x10000>;
++ interrupts = <0 44 4>;
++ };
++
++ reset: reset@1E60000 {
++ compatible = "fsl,ls-reset";
++ reg = <0x0 0x1E60000 0x0 0x10000>;
++ };
++
++ dspi: dspi@2100000 {
++ compatible = "fsl,ls2088a-dspi", "fsl,ls2085a-dspi",
++ "fsl,ls2080a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2100000 0x0 0x10000>;
++ interrupts = <0 26 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <0>;
++ };
++
++ i2c0: i2c@2000000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2000000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c1: i2c@2010000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2010000 0x0 0x10000>;
++ interrupts = <0 34 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c2: i2c@2020000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2020000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ i2c3: i2c@2030000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2030000 0x0 0x10000>;
++ interrupts = <0 35 0x4>; /* Level high type */
++ clock-names = "i2c";
++ clocks = <&clockgen 4 3>;
++ };
++
++ qspi: quadspi@20c0000 {
++ compatible = "fsl,ls2088a-qspi", "fsl,ls2080a-qspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x20c0000 0x0 0x10000>,
++ <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "QuadSPI", "QuadSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "qspi_en", "qspi";
++ };
++
++ pcie1: pcie@3400000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie",
++ "fsl,ls2085a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x20 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 108 0x4>; /* Level high type */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ fsl,lut_diff;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
++ <0000 0 0 2 &gic 0 0 0 110 4>,
++ <0000 0 0 3 &gic 0 0 0 111 4>,
++ <0000 0 0 4 &gic 0 0 0 112 4>;
++ };
++
++ pcie2: pcie@3500000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie",
++ "fsl,ls2085a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x28 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 113 0x4>; /* Level high type */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ fsl,lut_diff;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
++ <0000 0 0 2 &gic 0 0 0 115 4>,
++ <0000 0 0 3 &gic 0 0 0 116 4>,
++ <0000 0 0 4 &gic 0 0 0 117 4>;
++ };
++
++ pcie3: pcie@3600000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie",
++ "fsl,ls2085a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x30 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 118 0x4>; /* Level high type */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ fsl,lut_diff;
++ num-lanes = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
++ <0000 0 0 2 &gic 0 0 0 120 4>,
++ <0000 0 0 3 &gic 0 0 0 121 4>,
++ <0000 0 0 4 &gic 0 0 0 122 4>;
++ };
++
++ pcie4: pcie@3700000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie",
++ "fsl,ls2085a-pcie", "snps,dw-pcie";
++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
++ 0x38 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 123 0x4>; /* Level high type */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ fsl,lut_diff;
++ num-lanes = <4>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
++ <0000 0 0 2 &gic 0 0 0 125 4>,
++ <0000 0 0 3 &gic 0 0 0 126 4>,
++ <0000 0 0 4 &gic 0 0 0 127 4>;
++ };
++
++ sata0: sata@3200000 {
++ status = "disabled";
++ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>;
++ interrupts = <0 133 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ };
++
++ sata1: sata@3210000 {
++ status = "disabled";
++ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci";
++ reg = <0x0 0x3210000 0x0 0x10000>;
++ interrupts = <0 136 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>;
++ };
++
++ usb0: usb3@3100000 {
++ status = "disabled";
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ usb1: usb3@3110000 {
++ status = "disabled";
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ configure-gfladj;
++ snps,dis_rxdet_inp3_quirk;
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #global-interrupts = <12>;
++ interrupts = <0 13 4>, /* global secure fault */
++ <0 14 4>, /* combined secure interrupt */
++ <0 15 4>, /* global non-secure fault */
++ <0 16 4>, /* combined non-secure interrupt */
++ /* performance counter interrupts 0-7 */
++ <0 211 4>,
++ <0 212 4>,
++ <0 213 4>,
++ <0 214 4>,
++ <0 215 4>,
++ <0 216 4>,
++ <0 217 4>,
++ <0 218 4>,
++ /* per context interrupt, 64 interrupts */
++ <0 146 4>,
++ <0 147 4>,
++ <0 148 4>,
++ <0 149 4>,
++ <0 150 4>,
++ <0 151 4>,
++ <0 152 4>,
++ <0 153 4>,
++ <0 154 4>,
++ <0 155 4>,
++ <0 156 4>,
++ <0 157 4>,
++ <0 158 4>,
++ <0 159 4>,
++ <0 160 4>,
++ <0 161 4>,
++ <0 162 4>,
++ <0 163 4>,
++ <0 164 4>,
++ <0 165 4>,
++ <0 166 4>,
++ <0 167 4>,
++ <0 168 4>,
++ <0 169 4>,
++ <0 170 4>,
++ <0 171 4>,
++ <0 172 4>,
++ <0 173 4>,
++ <0 174 4>,
++ <0 175 4>,
++ <0 176 4>,
++ <0 177 4>,
++ <0 178 4>,
++ <0 179 4>,
++ <0 180 4>,
++ <0 181 4>,
++ <0 182 4>,
++ <0 183 4>,
++ <0 184 4>,
++ <0 185 4>,
++ <0 186 4>,
++ <0 187 4>,
++ <0 188 4>,
++ <0 189 4>,
++ <0 190 4>,
++ <0 191 4>,
++ <0 192 4>,
++ <0 193 4>,
++ <0 194 4>,
++ <0 195 4>,
++ <0 196 4>,
++ <0 197 4>,
++ <0 198 4>,
++ <0 199 4>,
++ <0 200 4>,
++ <0 201 4>,
++ <0 202 4>,
++ <0 203 4>,
++ <0 204 4>,
++ <0 205 4>,
++ <0 206 4>,
++ <0 207 4>,
++ <0 208 4>,
++ <0 209 4>;
++ mmu-masters = <&fsl_mc 0x300 0>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */
++ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */
++ <1 11 0x1>, /* Virtual PPI, edge triggered */
++ <1 10 0x1>; /* Hypervisor PPI, edge triggered */
++ arm,reread-timer;
++ fsl,erratum-a008585;
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ #stream-id-cells = <2>;
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ /*
++ * Define the maximum number of MACs present on the SoC.
++ * They won't necessarily be all probed, since the
++ * Data Path Layout file and the MC firmware can put fewer
++ * actual DPMAC objects on the MC bus.
++ */
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <1>;
++ };
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <2>;
++ };
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <3>;
++ };
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <4>;
++ };
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <5>;
++ };
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <6>;
++ };
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <7>;
++ };
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <8>;
++ };
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <9>;
++ };
++ dpmac10: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++ dpmac11: dpmac@11 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xb>;
++ };
++ dpmac12: dpmac@12 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xc>;
++ };
++ dpmac13: dpmac@13 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xd>;
++ };
++ dpmac14: dpmac@14 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xe>;
++ };
++ dpmac15: dpmac@15 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xf>;
++ };
++ dpmac16: dpmac@16 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x10>;
++ };
++ };
++ };
++
++ ccn@4000000 {
++ compatible = "arm,ccn-504";
++ reg = <0x0 0x04000000 0x0 0x01000000>;
++ interrupts = <0 12 4>;
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>;
++ /* DRAM space 1 - 2 GB DRAM */
++ };
++};
+diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings
+new file mode 120000
+index 0000000..08c00e4
+--- /dev/null
++++ b/arch/arm64/boot/dts/include/dt-bindings
+@@ -0,0 +1 @@
++../../../../../include/dt-bindings
+\ No newline at end of file
+diff --git a/arch/arm64/boot/dts/thermal.h b/arch/arm64/boot/dts/thermal.h
+new file mode 100644
+index 0000000..59822a9
+--- /dev/null
++++ b/arch/arm64/boot/dts/thermal.h
+@@ -0,0 +1,17 @@
++/*
++ * This header provides constants for most thermal bindings.
++ *
++ * Copyright (C) 2013 Texas Instruments
++ * Eduardo Valentin
++ *
++ * GPLv2 only
++ */
++
++#ifndef _DT_BINDINGS_THERMAL_THERMAL_H
++#define _DT_BINDINGS_THERMAL_THERMAL_H
++
++/* On cooling devices upper and lower limits */
++#define THERMAL_NO_LIMIT (-1UL)
++
++#endif
++
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index dd301be..3852a77 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -32,6 +32,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
++CONFIG_ARCH_LAYERSCAPE=y
+ CONFIG_ARCH_THUNDER=y
+ CONFIG_ARCH_VEXPRESS=y
+ CONFIG_ARCH_XGENE=y
+diff --git a/arch/arm64/configs/nxp_ls2088rdb_config b/arch/arm64/configs/nxp_ls2088rdb_config
+new file mode 100644
+index 0000000..f1127f9
+--- /dev/null
++++ b/arch/arm64/configs/nxp_ls2088rdb_config
+@@ -0,0 +1,3034 @@
++#
++# Automatically generated file; DO NOT EDIT.
++# Linux/arm64 3.18.25 Kernel Configuration
++#
++CONFIG_ARM64=y
++CONFIG_64BIT=y
++CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
++CONFIG_MMU=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CSUM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_ZONE_DMA=y
++CONFIG_HAVE_GENERIC_RCU_GUP=y
++CONFIG_ARCH_DMA_ADDR_T_64BIT=y
++CONFIG_NEED_DMA_MAP_STATE=y
++CONFIG_NEED_SG_DMA_LENGTH=y
++CONFIG_SWIOTLB=y
++CONFIG_IOMMU_HELPER=y
++CONFIG_KERNEL_MODE_NEON=y
++CONFIG_FIX_EARLYCON_MEM=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++CONFIG_IRQ_WORK=y
++CONFIG_BUILDTIME_EXTABLE_SORT=y
++
++#
++# General setup
++#
++CONFIG_INIT_ENV_ARG_LIMIT=32
++CONFIG_CROSS_COMPILE="aarch64-linux-gnu-"
++# CONFIG_COMPILE_TEST is not set
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_DEFAULT_HOSTNAME="(none)"
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_SYSVIPC_SYSCTL=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_POSIX_MQUEUE_SYSCTL=y
++CONFIG_CROSS_MEMORY_ATTACH=y
++# CONFIG_FHANDLE is not set
++CONFIG_USELIB=y
++CONFIG_AUDIT=y
++CONFIG_HAVE_ARCH_AUDITSYSCALL=y
++# CONFIG_AUDITSYSCALL is not set
++
++#
++# IRQ subsystem
++#
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_IRQ_SHOW=y
++CONFIG_HARDIRQS_SW_RESEND=y
++CONFIG_IRQ_DOMAIN=y
++CONFIG_IRQ_DOMAIN_HIERARCHY=y
++CONFIG_GENERIC_MSI_IRQ=y
++CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
++CONFIG_HANDLE_DOMAIN_IRQ=y
++# CONFIG_IRQ_DOMAIN_DEBUG is not set
++CONFIG_SPARSE_IRQ=y
++CONFIG_GENERIC_TIME_VSYSCALL=y
++CONFIG_GENERIC_CLOCKEVENTS=y
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
++CONFIG_ARCH_HAS_TICK_BROADCAST=y
++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
++
++#
++# Timers subsystem
++#
++CONFIG_TICK_ONESHOT=y
++CONFIG_NO_HZ_COMMON=y
++# CONFIG_HZ_PERIODIC is not set
++CONFIG_NO_HZ_IDLE=y
++# CONFIG_NO_HZ_FULL is not set
++# CONFIG_NO_HZ is not set
++CONFIG_HIGH_RES_TIMERS=y
++
++#
++# CPU/Task time and stats accounting
++#
++CONFIG_TICK_CPU_ACCOUNTING=y
++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++
++#
++# RCU Subsystem
++#
++CONFIG_TREE_PREEMPT_RCU=y
++CONFIG_PREEMPT_RCU=y
++# CONFIG_TASKS_RCU is not set
++CONFIG_RCU_STALL_COMMON=y
++# CONFIG_RCU_USER_QS is not set
++CONFIG_RCU_FANOUT=64
++CONFIG_RCU_FANOUT_LEAF=16
++# CONFIG_RCU_FANOUT_EXACT is not set
++# CONFIG_RCU_FAST_NO_HZ is not set
++# CONFIG_TREE_RCU_TRACE is not set
++# CONFIG_RCU_BOOST is not set
++# CONFIG_RCU_NOCB_CPU is not set
++CONFIG_BUILD_BIN2C=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
++CONFIG_GENERIC_SCHED_CLOCK=y
++CONFIG_CGROUPS=y
++# CONFIG_CGROUP_DEBUG is not set
++# CONFIG_CGROUP_FREEZER is not set
++# CONFIG_CGROUP_DEVICE is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_CGROUP_CPUACCT is not set
++CONFIG_RESOURCE_COUNTERS=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++CONFIG_MEMCG_SWAP_ENABLED=y
++CONFIG_MEMCG_KMEM=y
++CONFIG_CGROUP_HUGETLB=y
++# CONFIG_CGROUP_PERF is not set
++CONFIG_CGROUP_SCHED=y
++CONFIG_FAIR_GROUP_SCHED=y
++# CONFIG_CFS_BANDWIDTH is not set
++# CONFIG_RT_GROUP_SCHED is not set
++# CONFIG_BLK_CGROUP is not set
++# CONFIG_CHECKPOINT_RESTORE is not set
++CONFIG_NAMESPACES=y
++# CONFIG_UTS_NS is not set
++# CONFIG_IPC_NS is not set
++# CONFIG_USER_NS is not set
++# CONFIG_PID_NS is not set
++CONFIG_NET_NS=y
++CONFIG_SCHED_AUTOGROUP=y
++# CONFIG_SYSFS_DEPRECATED is not set
++# CONFIG_RELAY is not set
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_RD_GZIP=y
++CONFIG_RD_BZIP2=y
++CONFIG_RD_LZMA=y
++CONFIG_RD_XZ=y
++CONFIG_RD_LZO=y
++CONFIG_RD_LZ4=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SYSCTL=y
++CONFIG_ANON_INODES=y
++CONFIG_HAVE_UID16=y
++CONFIG_SYSCTL_EXCEPTION_TRACE=y
++CONFIG_BPF=y
++# CONFIG_EXPERT is not set
++CONFIG_UID16=y
++# CONFIG_SGETMASK_SYSCALL is not set
++CONFIG_SYSFS_SYSCALL=y
++# CONFIG_SYSCTL_SYSCALL is not set
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SIGNALFD=y
++CONFIG_TIMERFD=y
++CONFIG_EVENTFD=y
++# CONFIG_BPF_SYSCALL is not set
++CONFIG_SHMEM=y
++CONFIG_AIO=y
++CONFIG_ADVISE_SYSCALLS=y
++CONFIG_PCI_QUIRKS=y
++# CONFIG_EMBEDDED is not set
++CONFIG_HAVE_PERF_EVENTS=y
++CONFIG_PERF_USE_VMALLOC=y
++
++#
++# Kernel Performance Events And Counters
++#
++CONFIG_PERF_EVENTS=y
++# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
++CONFIG_VM_EVENT_COUNTERS=y
++# CONFIG_COMPAT_BRK is not set
++CONFIG_SLAB=y
++# CONFIG_SLUB is not set
++# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
++CONFIG_PROFILING=y
++CONFIG_JUMP_LABEL=y
++# CONFIG_UPROBES is not set
++# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
++CONFIG_HAVE_ARCH_TRACEHOOK=y
++CONFIG_HAVE_DMA_ATTRS=y
++CONFIG_HAVE_DMA_CONTIGUOUS=y
++CONFIG_GENERIC_SMP_IDLE_THREAD=y
++CONFIG_HAVE_CLK=y
++CONFIG_HAVE_DMA_API_DEBUG=y
++CONFIG_HAVE_HW_BREAKPOINT=y
++CONFIG_HAVE_PERF_REGS=y
++CONFIG_HAVE_PERF_USER_STACK_DUMP=y
++CONFIG_HAVE_ARCH_JUMP_LABEL=y
++CONFIG_HAVE_RCU_TABLE_FREE=y
++CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
++CONFIG_HAVE_CC_STACKPROTECTOR=y
++# CONFIG_CC_STACKPROTECTOR is not set
++CONFIG_CC_STACKPROTECTOR_NONE=y
++# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
++# CONFIG_CC_STACKPROTECTOR_STRONG is not set
++CONFIG_HAVE_CONTEXT_TRACKING=y
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
++CONFIG_MODULES_USE_ELF_RELA=y
++CONFIG_CLONE_BACKWARDS=y
++CONFIG_OLD_SIGSUSPEND3=y
++CONFIG_COMPAT_OLD_SIGACTION=y
++
++#
++# GCOV-based kernel profiling
++#
++# CONFIG_GCOV_KERNEL is not set
++CONFIG_HAVE_GENERIC_DMA_COHERENT=y
++CONFIG_SLABINFO=y
++CONFIG_RT_MUTEXES=y
++CONFIG_BASE_SMALL=0
++CONFIG_MODULES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_MODULE_SIG is not set
++# CONFIG_MODULE_COMPRESS is not set
++CONFIG_STOP_MACHINE=y
++CONFIG_BLOCK=y
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_BLK_DEV_BSGLIB is not set
++# CONFIG_BLK_DEV_INTEGRITY is not set
++# CONFIG_BLK_CMDLINE_PARSER is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_AIX_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++# CONFIG_SYSV68_PARTITION is not set
++# CONFIG_CMDLINE_PARTITION is not set
++CONFIG_BLOCK_COMPAT=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_DEADLINE is not set
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++CONFIG_PREEMPT_NOTIFIERS=y
++CONFIG_UNINLINE_SPIN_UNLOCK=y
++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
++CONFIG_MUTEX_SPIN_ON_OWNER=y
++CONFIG_RWSEM_SPIN_ON_OWNER=y
++CONFIG_FREEZER=y
++
++#
++# Platform selection
++#
++CONFIG_ARCH_THUNDER=y
++CONFIG_ARCH_VEXPRESS=y
++CONFIG_ARCH_XGENE=y
++CONFIG_ARCH_LAYERSCAPE=y
++
++#
++# Bus support
++#
++CONFIG_ARM_AMBA=y
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_PCI_DOMAINS_GENERIC=y
++CONFIG_PCI_SYSCALL=y
++CONFIG_PCI_MSI=y
++CONFIG_PCI_MSI_IRQ_DOMAIN=y
++# CONFIG_PCI_DEBUG is not set
++# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
++# CONFIG_PCI_STUB is not set
++# CONFIG_PCI_IOV is not set
++# CONFIG_PCI_PRI is not set
++# CONFIG_PCI_PASID is not set
++
++#
++# PCI host controller drivers
++#
++CONFIG_PCIE_DW=y
++# CONFIG_PCI_HOST_GENERIC is not set
++CONFIG_PCI_XGENE=y
++CONFIG_PCI_XGENE_MSI=y
++CONFIG_PCI_LAYERSCAPE=y
++CONFIG_PCIEPORTBUS=y
++CONFIG_PCIEAER=y
++# CONFIG_PCIE_ECRC is not set
++# CONFIG_PCIEAER_INJECT is not set
++CONFIG_PCIEASPM=y
++# CONFIG_PCIEASPM_DEBUG is not set
++CONFIG_PCIEASPM_DEFAULT=y
++# CONFIG_PCIEASPM_POWERSAVE is not set
++# CONFIG_PCIEASPM_PERFORMANCE is not set
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Kernel Features
++#
++
++#
++# ARM errata workarounds via the alternatives framework
++#
++CONFIG_ARM64_ERRATUM_826319=y
++CONFIG_ARM64_ERRATUM_827319=y
++CONFIG_ARM64_ERRATUM_824069=y
++CONFIG_ARM64_ERRATUM_819472=y
++CONFIG_ARM64_ERRATUM_832075=y
++CONFIG_ARM64_ERRATUM_845719=y
++CONFIG_ARM64_4K_PAGES=y
++# CONFIG_ARM64_64K_PAGES is not set
++# CONFIG_ARM64_VA_BITS_39 is not set
++CONFIG_ARM64_VA_BITS_48=y
++CONFIG_ARM64_VA_BITS=48
++CONFIG_ARM64_PGTABLE_LEVELS=4
++# CONFIG_CPU_BIG_ENDIAN is not set
++CONFIG_SMP=y
++# CONFIG_SCHED_MC is not set
++# CONFIG_SCHED_SMT is not set
++CONFIG_NR_CPUS=64
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_PREEMPT_NONE is not set
++# CONFIG_PREEMPT_VOLUNTARY is not set
++CONFIG_PREEMPT=y
++CONFIG_PREEMPT_COUNT=y
++CONFIG_HZ=100
++CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_HAVE_ARCH_PFN_VALID=y
++CONFIG_HW_PERF_EVENTS=y
++CONFIG_SYS_SUPPORTS_HUGETLBFS=y
++CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
++CONFIG_SPARSEMEM_VMEMMAP=y
++CONFIG_HAVE_MEMBLOCK=y
++CONFIG_NO_BOOTMEM=y
++CONFIG_MEMORY_ISOLATION=y
++# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
++CONFIG_PAGEFLAGS_EXTENDED=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_MEMORY_BALLOON=y
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_PHYS_ADDR_T_64BIT=y
++CONFIG_ZONE_DMA_FLAG=1
++CONFIG_BOUNCE=y
++CONFIG_MMU_NOTIFIER=y
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
++# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
++# CONFIG_CLEANCACHE is not set
++# CONFIG_FRONTSWAP is not set
++CONFIG_CMA=y
++# CONFIG_CMA_DEBUG is not set
++CONFIG_CMA_AREAS=7
++# CONFIG_ZPOOL is not set
++# CONFIG_ZBUD is not set
++# CONFIG_ZSMALLOC is not set
++CONFIG_GENERIC_EARLY_IOREMAP=y
++# CONFIG_XEN is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++
++#
++# Boot options
++#
++CONFIG_CMDLINE="console=ttyAMA0"
++# CONFIG_CMDLINE_FORCE is not set
++CONFIG_EFI_STUB=y
++CONFIG_EFI=y
++
++#
++# Userspace binary formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_COMPAT_BINFMT_ELF=y
++CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
++CONFIG_BINFMT_SCRIPT=y
++# CONFIG_HAVE_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++CONFIG_COREDUMP=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Power management options
++#
++CONFIG_SUSPEND=y
++CONFIG_SUSPEND_FREEZER=y
++CONFIG_PM_SLEEP=y
++CONFIG_PM_SLEEP_SMP=y
++# CONFIG_PM_AUTOSLEEP is not set
++# CONFIG_PM_WAKELOCKS is not set
++# CONFIG_PM_RUNTIME is not set
++CONFIG_PM=y
++# CONFIG_PM_DEBUG is not set
++CONFIG_PM_CLK=y
++# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
++CONFIG_CPU_PM=y
++CONFIG_ARCH_SUSPEND_POSSIBLE=y
++CONFIG_ARM64_CPU_SUSPEND=y
++
++#
++# CPU Power Management
++#
++
++#
++# CPU Idle
++#
++# CONFIG_CPU_IDLE is not set
++# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
++
++#
++# CPU Frequency scaling
++#
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_COMMON=y
++CONFIG_CPU_FREQ_STAT=y
++# CONFIG_CPU_FREQ_STAT_DETAILS is not set
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++# CONFIG_CPUFREQ_DT is not set
++
++#
++# ARM CPU frequency scaling drivers
++#
++# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set
++CONFIG_ARM64_ERRATUM_843419=y
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_DIAG is not set
++CONFIG_UNIX=y
++# CONFIG_UNIX_DIAG is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_ALGO=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_XFRM_SUB_POLICY is not set
++# CONFIG_XFRM_MIGRATE is not set
++# CONFIG_XFRM_STATISTICS is not set
++CONFIG_XFRM_IPCOMP=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_FIB_TRIE_STATS=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_VERBOSE is not set
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE_DEMUX is not set
++CONFIG_NET_IP_TUNNEL=y
++CONFIG_IP_MROUTE=y
++# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
++# CONFIG_IP_PIMSM_V1 is not set
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_NET_IPVTI is not set
++# CONFIG_NET_UDP_TUNNEL is not set
++# CONFIG_NET_FOU is not set
++# CONFIG_GENEVE is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_XFRM_MODE_BEET=y
++# CONFIG_INET_LRO is not set
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_INET_UDP_DIAG is not set
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_BIC=y
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_TCP_CONG_WESTWOOD=y
++CONFIG_TCP_CONG_HTCP=y
++# CONFIG_TCP_CONG_HSTCP is not set
++# CONFIG_TCP_CONG_HYBLA is not set
++# CONFIG_TCP_CONG_VEGAS is not set
++# CONFIG_TCP_CONG_SCALABLE is not set
++# CONFIG_TCP_CONG_LP is not set
++# CONFIG_TCP_CONG_VENO is not set
++# CONFIG_TCP_CONG_YEAH is not set
++# CONFIG_TCP_CONG_ILLINOIS is not set
++# CONFIG_TCP_CONG_DCTCP is not set
++# CONFIG_DEFAULT_BIC is not set
++CONFIG_DEFAULT_CUBIC=y
++# CONFIG_DEFAULT_HTCP is not set
++# CONFIG_DEFAULT_WESTWOOD is not set
++# CONFIG_DEFAULT_RENO is not set
++CONFIG_DEFAULT_TCP_CONG="cubic"
++# CONFIG_TCP_MD5SIG is not set
++CONFIG_IPV6=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_IPV6_OPTIMISTIC_DAD=y
++CONFIG_INET6_AH=y
++CONFIG_INET6_ESP=y
++CONFIG_INET6_IPCOMP=y
++CONFIG_IPV6_MIP6=y
++CONFIG_INET6_XFRM_TUNNEL=y
++CONFIG_INET6_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_BEET=y
++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
++# CONFIG_IPV6_VTI is not set
++CONFIG_IPV6_SIT=y
++# CONFIG_IPV6_SIT_6RD is not set
++CONFIG_IPV6_NDISC_NODETYPE=y
++CONFIG_IPV6_TUNNEL=y
++# CONFIG_IPV6_GRE is not set
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_SUBTREES=y
++# CONFIG_IPV6_MROUTE is not set
++# CONFIG_NETLABEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NET_PTP_CLASSIFY=y
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
++# CONFIG_NETFILTER is not set
++# CONFIG_IP_DCCP is not set
++# CONFIG_IP_SCTP is not set
++# CONFIG_RDS is not set
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_L2TP is not set
++CONFIG_STP=m
++CONFIG_BRIDGE=m
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++# CONFIG_BRIDGE_VLAN_FILTERING is not set
++CONFIG_HAVE_NET_DSA=y
++CONFIG_VLAN_8021Q=y
++# CONFIG_VLAN_8021Q_GVRP is not set
++# CONFIG_VLAN_8021Q_MVRP is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=m
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_PHONET is not set
++# CONFIG_6LOWPAN is not set
++# CONFIG_IEEE802154 is not set
++CONFIG_NET_SCHED=y
++
++#
++# Queueing/Scheduling
++#
++# CONFIG_NET_SCH_CBQ is not set
++# CONFIG_NET_SCH_HTB is not set
++# CONFIG_NET_SCH_HFSC is not set
++# CONFIG_NET_SCH_PRIO is not set
++# CONFIG_NET_SCH_MULTIQ is not set
++# CONFIG_NET_SCH_RED is not set
++# CONFIG_NET_SCH_SFB is not set
++# CONFIG_NET_SCH_SFQ is not set
++# CONFIG_NET_SCH_TEQL is not set
++# CONFIG_NET_SCH_TBF is not set
++# CONFIG_NET_SCH_GRED is not set
++# CONFIG_NET_SCH_DSMARK is not set
++# CONFIG_NET_SCH_NETEM is not set
++# CONFIG_NET_SCH_DRR is not set
++# CONFIG_NET_SCH_MQPRIO is not set
++# CONFIG_NET_SCH_CHOKE is not set
++# CONFIG_NET_SCH_QFQ is not set
++# CONFIG_NET_SCH_CODEL is not set
++# CONFIG_NET_SCH_FQ_CODEL is not set
++# CONFIG_NET_SCH_FQ is not set
++# CONFIG_NET_SCH_HHF is not set
++# CONFIG_NET_SCH_PIE is not set
++# CONFIG_NET_SCH_PLUG is not set
++
++#
++# Classification
++#
++# CONFIG_NET_CLS_BASIC is not set
++# CONFIG_NET_CLS_TCINDEX is not set
++# CONFIG_NET_CLS_ROUTE4 is not set
++# CONFIG_NET_CLS_FW is not set
++# CONFIG_NET_CLS_U32 is not set
++# CONFIG_NET_CLS_RSVP is not set
++# CONFIG_NET_CLS_RSVP6 is not set
++# CONFIG_NET_CLS_FLOW is not set
++# CONFIG_NET_CLS_CGROUP is not set
++# CONFIG_NET_CLS_BPF is not set
++# CONFIG_NET_EMATCH is not set
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_SCH_FIFO=y
++CONFIG_DCB=y
++CONFIG_DNS_RESOLVER=y
++# CONFIG_BATMAN_ADV is not set
++# CONFIG_OPENVSWITCH is not set
++# CONFIG_VSOCKETS is not set
++# CONFIG_NETLINK_MMAP is not set
++# CONFIG_NETLINK_DIAG is not set
++# CONFIG_NET_MPLS_GSO is not set
++# CONFIG_HSR is not set
++CONFIG_RPS=y
++CONFIG_RFS_ACCEL=y
++CONFIG_XPS=y
++# CONFIG_CGROUP_NET_PRIO is not set
++# CONFIG_CGROUP_NET_CLASSID is not set
++CONFIG_NET_RX_BUSY_POLL=y
++CONFIG_BQL=y
++CONFIG_BPF_JIT=y
++CONFIG_NET_FLOW_LIMIT=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_CAN is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_AF_RXRPC is not set
++CONFIG_FIB_RULES=y
++# CONFIG_WIRELESS is not set
++# CONFIG_WIMAX is not set
++# CONFIG_RFKILL is not set
++# CONFIG_RFKILL_REGULATOR is not set
++CONFIG_NET_9P=y
++CONFIG_NET_9P_VIRTIO=y
++# CONFIG_NET_9P_DEBUG is not set
++# CONFIG_CAIF is not set
++# CONFIG_CEPH_LIB is not set
++# CONFIG_NFC is not set
++CONFIG_HAVE_BPF_JIT=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_UEVENT_HELPER=y
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++CONFIG_FIRMWARE_IN_KERNEL=y
++CONFIG_EXTRA_FIRMWARE=""
++# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
++CONFIG_ALLOW_DEV_COREDUMP=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_DEBUG_DEVRES is not set
++# CONFIG_SYS_HYPERVISOR is not set
++# CONFIG_GENERIC_CPU_DEVICES is not set
++CONFIG_GENERIC_CPU_AUTOPROBE=y
++CONFIG_REGMAP=y
++CONFIG_REGMAP_MMIO=y
++# CONFIG_DMA_SHARED_BUFFER is not set
++CONFIG_DMA_CMA=y
++
++#
++# Default contiguous memory area size:
++#
++CONFIG_CMA_SIZE_MBYTES=16
++CONFIG_CMA_SIZE_SEL_MBYTES=y
++# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
++# CONFIG_CMA_SIZE_SEL_MIN is not set
++# CONFIG_CMA_SIZE_SEL_MAX is not set
++CONFIG_CMA_ALIGNMENT=8
++
++#
++# Bus devices
++#
++# CONFIG_ARM_CCN is not set
++CONFIG_VEXPRESS_CONFIG=y
++# CONFIG_CONNECTOR is not set
++CONFIG_MTD=y
++# CONFIG_MTD_TESTS is not set
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_OF_PARTS=y
++# CONFIG_MTD_AR7_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_FTL=y
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_SM_FTL is not set
++# CONFIG_MTD_OOPS is not set
++# CONFIG_MTD_SWAP is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=y
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_GEN_PROBE=y
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_NOSWAP=y
++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_GEOMETRY is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_OTP is not set
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_CFI_STAA=y
++CONFIG_MTD_CFI_UTIL=y
++CONFIG_MTD_RAM=y
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++CONFIG_MTD_PHYSMAP=y
++# CONFIG_MTD_PHYSMAP_COMPAT is not set
++CONFIG_MTD_PHYSMAP_OF=y
++# CONFIG_MTD_INTEL_VR_NOR is not set
++CONFIG_MTD_PLATRAM=y
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_DATAFLASH is not set
++CONFIG_MTD_M25P80=y
++# CONFIG_MTD_SST25L is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOCG3 is not set
++CONFIG_MTD_NAND_ECC=y
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND=y
++# CONFIG_MTD_NAND_ECC_BCH is not set
++# CONFIG_MTD_SM_COMMON is not set
++# CONFIG_MTD_NAND_DENALI is not set
++CONFIG_MTD_NAND_GPIO=y
++# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set
++CONFIG_MTD_NAND_IDS=y
++# CONFIG_MTD_NAND_RICOH is not set
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_NAND_DOCG4 is not set
++# CONFIG_MTD_NAND_CAFE is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++# CONFIG_MTD_NAND_PLATFORM is not set
++CONFIG_MTD_NAND_FSL_IFC=y
++# CONFIG_MTD_ONENAND is not set
++
++#
++# LPDDR & LPDDR2 PCM memory drivers
++#
++# CONFIG_MTD_LPDDR is not set
++CONFIG_MTD_SPI_NOR=y
++CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
++# CONFIG_MTD_UBI is not set
++CONFIG_DTC=y
++CONFIG_OF=y
++
++#
++# Device Tree and Open Firmware support
++#
++# CONFIG_OF_SELFTEST is not set
++CONFIG_OF_FLATTREE=y
++CONFIG_OF_EARLY_FLATTREE=y
++CONFIG_OF_ADDRESS=y
++CONFIG_OF_ADDRESS_PCI=y
++CONFIG_OF_IRQ=y
++CONFIG_OF_NET=y
++CONFIG_OF_MDIO=y
++CONFIG_OF_PCI=y
++CONFIG_OF_PCI_IRQ=y
++CONFIG_OF_MTD=y
++CONFIG_OF_RESERVED_MEM=y
++# CONFIG_PARPORT is not set
++CONFIG_BLK_DEV=y
++# CONFIG_BLK_DEV_NULL_BLK is not set
++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_DRBD is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_NVME is not set
++# CONFIG_BLK_DEV_SKD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=262144
++# CONFIG_BLK_DEV_XIP is not set
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++CONFIG_VIRTIO_BLK=y
++# CONFIG_BLK_DEV_RBD is not set
++# CONFIG_BLK_DEV_RSXX is not set
++
++#
++# Misc devices
++#
++# CONFIG_SENSORS_LIS3LV02D is not set
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_DUMMY_IRQ is not set
++# CONFIG_PHANTOM is not set
++# CONFIG_SGI_IOC4 is not set
++# CONFIG_TIFM_CORE is not set
++# CONFIG_ICS932S401 is not set
++# CONFIG_ENCLOSURE_SERVICES is not set
++# CONFIG_HP_ILO is not set
++# CONFIG_APDS9802ALS is not set
++# CONFIG_ISL29003 is not set
++# CONFIG_ISL29020 is not set
++# CONFIG_SENSORS_TSL2550 is not set
++# CONFIG_SENSORS_BH1780 is not set
++# CONFIG_SENSORS_BH1770 is not set
++# CONFIG_SENSORS_APDS990X is not set
++# CONFIG_HMC6352 is not set
++# CONFIG_DS1682 is not set
++# CONFIG_TI_DAC7512 is not set
++# CONFIG_BMP085_I2C is not set
++# CONFIG_BMP085_SPI is not set
++# CONFIG_USB_SWITCH_FSA9480 is not set
++# CONFIG_LATTICE_ECP3_CONFIG is not set
++# CONFIG_SRAM is not set
++CONFIG_VEXPRESS_SYSCFG=y
++# CONFIG_C2PORT is not set
++
++#
++# EEPROM support
++#
++CONFIG_EEPROM_AT24=y
++CONFIG_EEPROM_AT25=y
++# CONFIG_EEPROM_LEGACY is not set
++# CONFIG_EEPROM_MAX6875 is not set
++# CONFIG_EEPROM_93CX6 is not set
++# CONFIG_EEPROM_93XX46 is not set
++# CONFIG_CB710_CORE is not set
++
++#
++# Texas Instruments shared transport line discipline
++#
++# CONFIG_TI_ST is not set
++# CONFIG_SENSORS_LIS3_SPI is not set
++# CONFIG_SENSORS_LIS3_I2C is not set
++
++#
++# Altera FPGA firmware download module
++#
++# CONFIG_ALTERA_STAPL is not set
++
++#
++# Intel MIC Bus Driver
++#
++
++#
++# Intel MIC Host Driver
++#
++
++#
++# Intel MIC Card Driver
++#
++# CONFIG_GENWQE is not set
++# CONFIG_ECHO is not set
++# CONFIG_CXL_BASE is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI_MOD=y
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_DMA=y
++# CONFIG_SCSI_NETLINK is not set
++# CONFIG_SCSI_MQ_DEFAULT is not set
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++# CONFIG_SCSI_SCAN_ASYNC is not set
++
++#
++# SCSI Transports
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++# CONFIG_SCSI_SAS_LIBSAS is not set
++# CONFIG_SCSI_SRP_ATTRS is not set
++# CONFIG_SCSI_LOWLEVEL is not set
++# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
++# CONFIG_SCSI_DH is not set
++# CONFIG_SCSI_OSD_INITIATOR is not set
++CONFIG_HAVE_PATA_PLATFORM=y
++CONFIG_ATA=y
++# CONFIG_ATA_NONSTANDARD is not set
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_SATA_PMP=y
++
++#
++# Controllers with non-SFF native interface
++#
++CONFIG_SATA_AHCI=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_XGENE=y
++# CONFIG_SATA_INIC162X is not set
++# CONFIG_SATA_ACARD_AHCI is not set
++# CONFIG_SATA_SIL24 is not set
++CONFIG_ATA_SFF=y
++
++#
++# SFF controllers with custom DMA interface
++#
++# CONFIG_PDC_ADMA is not set
++# CONFIG_SATA_QSTOR is not set
++# CONFIG_SATA_SX4 is not set
++CONFIG_ATA_BMDMA=y
++
++#
++# SATA SFF controllers with BMDMA
++#
++# CONFIG_ATA_PIIX is not set
++# CONFIG_SATA_MV is not set
++# CONFIG_SATA_NV is not set
++# CONFIG_SATA_PROMISE is not set
++# CONFIG_SATA_SIL is not set
++# CONFIG_SATA_SIS is not set
++# CONFIG_SATA_SVW is not set
++# CONFIG_SATA_ULI is not set
++# CONFIG_SATA_VIA is not set
++# CONFIG_SATA_VITESSE is not set
++
++#
++# PATA SFF controllers with BMDMA
++#
++# CONFIG_PATA_ALI is not set
++# CONFIG_PATA_AMD is not set
++# CONFIG_PATA_ARTOP is not set
++# CONFIG_PATA_ATIIXP is not set
++# CONFIG_PATA_ATP867X is not set
++# CONFIG_PATA_CMD64X is not set
++# CONFIG_PATA_CYPRESS is not set
++# CONFIG_PATA_EFAR is not set
++# CONFIG_PATA_HPT366 is not set
++# CONFIG_PATA_HPT37X is not set
++# CONFIG_PATA_HPT3X2N is not set
++# CONFIG_PATA_HPT3X3 is not set
++# CONFIG_PATA_IT8213 is not set
++# CONFIG_PATA_IT821X is not set
++# CONFIG_PATA_JMICRON is not set
++# CONFIG_PATA_MARVELL is not set
++# CONFIG_PATA_NETCELL is not set
++# CONFIG_PATA_NINJA32 is not set
++# CONFIG_PATA_NS87415 is not set
++# CONFIG_PATA_OLDPIIX is not set
++# CONFIG_PATA_OPTIDMA is not set
++# CONFIG_PATA_PDC2027X is not set
++# CONFIG_PATA_PDC_OLD is not set
++# CONFIG_PATA_RADISYS is not set
++# CONFIG_PATA_RDC is not set
++# CONFIG_PATA_SCH is not set
++# CONFIG_PATA_SERVERWORKS is not set
++# CONFIG_PATA_SIL680 is not set
++# CONFIG_PATA_SIS is not set
++# CONFIG_PATA_TOSHIBA is not set
++# CONFIG_PATA_TRIFLEX is not set
++# CONFIG_PATA_VIA is not set
++# CONFIG_PATA_WINBOND is not set
++
++#
++# PIO-only SFF controllers
++#
++# CONFIG_PATA_CMD640_PCI is not set
++# CONFIG_PATA_MPIIX is not set
++# CONFIG_PATA_NS87410 is not set
++# CONFIG_PATA_OPTI is not set
++# CONFIG_PATA_PLATFORM is not set
++# CONFIG_PATA_RZ1000 is not set
++
++#
++# Generic fallback / legacy drivers
++#
++# CONFIG_ATA_GENERIC is not set
++# CONFIG_PATA_LEGACY is not set
++# CONFIG_MD is not set
++# CONFIG_TARGET_CORE is not set
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_FIREWIRE is not set
++# CONFIG_FIREWIRE_NOSY is not set
++# CONFIG_I2O is not set
++CONFIG_NETDEVICES=y
++CONFIG_MII=y
++CONFIG_NET_CORE=y
++# CONFIG_BONDING is not set
++# CONFIG_DUMMY is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_NET_FC is not set
++# CONFIG_NET_TEAM is not set
++CONFIG_MACVLAN=y
++# CONFIG_MACVTAP is not set
++# CONFIG_VXLAN is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++CONFIG_TUN=y
++# CONFIG_VETH is not set
++CONFIG_VIRTIO_NET=y
++# CONFIG_NLMON is not set
++# CONFIG_ARCNET is not set
++
++#
++# CAIF transport drivers
++#
++
++#
++# Distributed Switch Architecture drivers
++#
++# CONFIG_NET_DSA_MV88E6XXX is not set
++# CONFIG_NET_DSA_MV88E6060 is not set
++# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
++# CONFIG_NET_DSA_MV88E6131 is not set
++# CONFIG_NET_DSA_MV88E6123_61_65 is not set
++# CONFIG_NET_DSA_MV88E6171 is not set
++# CONFIG_NET_DSA_BCM_SF2 is not set
++CONFIG_ETHERNET=y
++CONFIG_NET_VENDOR_3COM=y
++# CONFIG_VORTEX is not set
++# CONFIG_TYPHOON is not set
++CONFIG_NET_VENDOR_ADAPTEC=y
++# CONFIG_ADAPTEC_STARFIRE is not set
++CONFIG_NET_VENDOR_AGERE=y
++# CONFIG_ET131X is not set
++CONFIG_NET_VENDOR_ALTEON=y
++# CONFIG_ACENIC is not set
++# CONFIG_ALTERA_TSE is not set
++CONFIG_NET_VENDOR_AMD=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD_XGBE is not set
++CONFIG_NET_XGENE=y
++CONFIG_NET_VENDOR_ARC=y
++# CONFIG_ARC_EMAC is not set
++# CONFIG_EMAC_ROCKCHIP is not set
++CONFIG_NET_VENDOR_ATHEROS=y
++# CONFIG_ATL2 is not set
++# CONFIG_ATL1 is not set
++# CONFIG_ATL1E is not set
++# CONFIG_ATL1C is not set
++# CONFIG_ALX is not set
++CONFIG_NET_VENDOR_BROADCOM=y
++# CONFIG_B44 is not set
++# CONFIG_BCMGENET is not set
++# CONFIG_BNX2 is not set
++# CONFIG_CNIC is not set
++# CONFIG_TIGON3 is not set
++# CONFIG_BNX2X is not set
++# CONFIG_SYSTEMPORT is not set
++CONFIG_NET_VENDOR_BROCADE=y
++# CONFIG_BNA is not set
++CONFIG_NET_VENDOR_CHELSIO=y
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_CHELSIO_T3 is not set
++# CONFIG_CHELSIO_T4 is not set
++# CONFIG_CHELSIO_T4VF is not set
++CONFIG_NET_VENDOR_CISCO=y
++# CONFIG_ENIC is not set
++# CONFIG_DNET is not set
++CONFIG_NET_VENDOR_DEC=y
++# CONFIG_NET_TULIP is not set
++CONFIG_NET_VENDOR_DLINK=y
++# CONFIG_DL2K is not set
++# CONFIG_SUNDANCE is not set
++CONFIG_NET_VENDOR_EMULEX=y
++# CONFIG_BE2NET is not set
++CONFIG_NET_VENDOR_EXAR=y
++# CONFIG_S2IO is not set
++# CONFIG_VXGE is not set
++CONFIG_NET_VENDOR_FREESCALE=y
++# CONFIG_FSL_PQ_MDIO is not set
++CONFIG_FSL_XGMAC_MDIO=y
++CONFIG_NET_VENDOR_HP=y
++# CONFIG_HP100 is not set
++CONFIG_NET_VENDOR_INTEL=y
++# CONFIG_E100 is not set
++CONFIG_E1000=y
++CONFIG_E1000E=y
++# CONFIG_IGB is not set
++# CONFIG_IGBVF is not set
++# CONFIG_IXGB is not set
++# CONFIG_IXGBE is not set
++# CONFIG_IXGBEVF is not set
++# CONFIG_I40E is not set
++# CONFIG_I40EVF is not set
++# CONFIG_FM10K is not set
++CONFIG_NET_VENDOR_I825XX=y
++# CONFIG_IP1000 is not set
++# CONFIG_JME is not set
++CONFIG_NET_VENDOR_MARVELL=y
++# CONFIG_MVMDIO is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++CONFIG_NET_VENDOR_MELLANOX=y
++# CONFIG_MLX4_EN is not set
++# CONFIG_MLX4_CORE is not set
++# CONFIG_MLX5_CORE is not set
++CONFIG_NET_VENDOR_MICREL=y
++# CONFIG_KS8842 is not set
++# CONFIG_KS8851 is not set
++# CONFIG_KS8851_MLL is not set
++# CONFIG_KSZ884X_PCI is not set
++CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++CONFIG_NET_VENDOR_MYRI=y
++# CONFIG_MYRI10GE is not set
++# CONFIG_FEALNX is not set
++CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NATSEMI is not set
++# CONFIG_NS83820 is not set
++CONFIG_NET_VENDOR_8390=y
++# CONFIG_NE2K_PCI is not set
++CONFIG_NET_VENDOR_NVIDIA=y
++# CONFIG_FORCEDETH is not set
++CONFIG_NET_VENDOR_OKI=y
++# CONFIG_ETHOC is not set
++CONFIG_NET_PACKET_ENGINE=y
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++CONFIG_NET_VENDOR_QLOGIC=y
++# CONFIG_QLA3XXX is not set
++# CONFIG_QLCNIC is not set
++# CONFIG_QLGE is not set
++# CONFIG_NETXEN_NIC is not set
++CONFIG_NET_VENDOR_QUALCOMM=y
++# CONFIG_QCA7000 is not set
++CONFIG_NET_VENDOR_REALTEK=y
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_R8169 is not set
++CONFIG_NET_VENDOR_RDC=y
++# CONFIG_R6040 is not set
++CONFIG_NET_VENDOR_SAMSUNG=y
++# CONFIG_SXGBE_ETH is not set
++CONFIG_NET_VENDOR_SEEQ=y
++CONFIG_NET_VENDOR_SILAN=y
++# CONFIG_SC92031 is not set
++CONFIG_NET_VENDOR_SIS=y
++# CONFIG_SIS900 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SFC is not set
++CONFIG_NET_VENDOR_SMSC=y
++CONFIG_SMC91X=y
++# CONFIG_EPIC100 is not set
++CONFIG_SMSC911X=y
++# CONFIG_SMSC911X_ARCH_HOOKS is not set
++# CONFIG_SMSC9420 is not set
++CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++CONFIG_NET_VENDOR_SUN=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NIU is not set
++CONFIG_NET_VENDOR_TEHUTI=y
++# CONFIG_TEHUTI is not set
++CONFIG_NET_VENDOR_TI=y
++# CONFIG_TLAN is not set
++CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_RHINE is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_NET_VENDOR_WIZNET=y
++# CONFIG_WIZNET_W5100 is not set
++# CONFIG_WIZNET_W5300 is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++CONFIG_PHYLIB=y
++
++#
++# MII PHY device drivers
++#
++CONFIG_AQUANTIA_PHY=y
++# CONFIG_AT803X_PHY is not set
++# CONFIG_AMD_PHY is not set
++# CONFIG_AMD_XGBE_PHY is not set
++# CONFIG_MARVELL_PHY is not set
++# CONFIG_DAVICOM_PHY is not set
++# CONFIG_QSEMI_PHY is not set
++# CONFIG_LXT_PHY is not set
++# CONFIG_CICADA_PHY is not set
++CONFIG_VITESSE_PHY=y
++# CONFIG_TERANETICS_PHY is not set
++CONFIG_SMSC_PHY=y
++CONFIG_BROADCOM_PHY=y
++# CONFIG_BCM7XXX_PHY is not set
++# CONFIG_BCM87XX_PHY is not set
++# CONFIG_ICPLUS_PHY is not set
++CONFIG_REALTEK_PHY=y
++# CONFIG_NATIONAL_PHY is not set
++# CONFIG_STE10XP is not set
++# CONFIG_LSI_ET1011C_PHY is not set
++# CONFIG_MICREL_PHY is not set
++CONFIG_FIXED_PHY=y
++# CONFIG_MDIO_BITBANG is not set
++CONFIG_MDIO_BUS_MUX=y
++# CONFIG_MDIO_BUS_MUX_GPIO is not set
++CONFIG_MDIO_BUS_MUX_MMIOREG=y
++# CONFIG_FSL_10GBASE_KR is not set
++# CONFIG_MDIO_BCM_UNIMAC is not set
++# CONFIG_MICREL_KS8995MA is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++CONFIG_USB_NET_DRIVERS=y
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_RTL8152 is not set
++# CONFIG_USB_USBNET is not set
++# CONFIG_USB_IPHETH is not set
++# CONFIG_WLAN is not set
++
++#
++# Enable WiMAX (Networking options) to see the WiMAX drivers
++#
++# CONFIG_WAN is not set
++# CONFIG_VMXNET3 is not set
++# CONFIG_ISDN is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++# CONFIG_INPUT_FF_MEMLESS is not set
++# CONFIG_INPUT_POLLDEV is not set
++# CONFIG_INPUT_SPARSEKMAP is not set
++# CONFIG_INPUT_MATRIXKMAP is not set
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ADP5588 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_GPIO is not set
++# CONFIG_KEYBOARD_GPIO_POLLED is not set
++# CONFIG_KEYBOARD_TCA6416 is not set
++# CONFIG_KEYBOARD_TCA8418 is not set
++# CONFIG_KEYBOARD_MATRIX is not set
++# CONFIG_KEYBOARD_LM8333 is not set
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_SAMSUNG is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_OMAP4 is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_CAP1106 is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_PS2_ALPS=y
++CONFIG_MOUSE_PS2_LOGIPS2PP=y
++CONFIG_MOUSE_PS2_SYNAPTICS=y
++CONFIG_MOUSE_PS2_CYPRESS=y
++CONFIG_MOUSE_PS2_TRACKPOINT=y
++# CONFIG_MOUSE_PS2_ELANTECH is not set
++# CONFIG_MOUSE_PS2_SENTELIC is not set
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_APPLETOUCH is not set
++# CONFIG_MOUSE_BCM5974 is not set
++# CONFIG_MOUSE_CYAPA is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_MOUSE_GPIO is not set
++# CONFIG_MOUSE_SYNAPTICS_I2C is not set
++# CONFIG_MOUSE_SYNAPTICS_USB is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TABLET is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_SERPORT is not set
++CONFIG_SERIO_AMBAKMI=y
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_SERIO_ALTERA_PS2 is not set
++# CONFIG_SERIO_PS2MULT is not set
++# CONFIG_SERIO_ARC_PS2 is not set
++# CONFIG_SERIO_APBPS2 is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_TTY=y
++CONFIG_VT=y
++CONFIG_CONSOLE_TRANSLATIONS=y
++CONFIG_VT_CONSOLE=y
++CONFIG_VT_CONSOLE_SLEEP=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++CONFIG_UNIX98_PTYS=y
++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=16
++# CONFIG_SERIAL_NONSTANDARD is not set
++# CONFIG_NOZOMI is not set
++# CONFIG_N_GSM is not set
++# CONFIG_TRACE_SINK is not set
++CONFIG_DEVKMEM=y
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_EARLYCON=y
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_DMA=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++# CONFIG_SERIAL_8250_DW is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_AMBA_PL010 is not set
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set
++# CONFIG_SERIAL_MAX3100 is not set
++# CONFIG_SERIAL_MAX310X is not set
++# CONFIG_SERIAL_MFD_HSU is not set
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_SERIAL_OF_PLATFORM=y
++# CONFIG_SERIAL_SCCNXP is not set
++# CONFIG_SERIAL_SC16IS7XX is not set
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++# CONFIG_SERIAL_IFX6X60 is not set
++# CONFIG_SERIAL_XILINX_PS_UART is not set
++# CONFIG_SERIAL_ARC is not set
++# CONFIG_SERIAL_RP2 is not set
++# CONFIG_SERIAL_FSL_LPUART is not set
++CONFIG_HVC_DRIVER=y
++CONFIG_VIRTIO_CONSOLE=y
++# CONFIG_IPMI_HANDLER is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_HW_RANDOM_TIMERIOMEM is not set
++# CONFIG_HW_RANDOM_VIRTIO is not set
++CONFIG_HW_RANDOM_XGENE=y
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# PCMCIA character devices
++#
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_TCG_TPM is not set
++CONFIG_DEVPORT=y
++# CONFIG_XILLYBUS is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_BOARDINFO=y
++CONFIG_I2C_COMPAT=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_MUX=y
++
++#
++# Multiplexer I2C Chip support
++#
++# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
++# CONFIG_I2C_MUX_GPIO is not set
++# CONFIG_I2C_MUX_PCA9541 is not set
++CONFIG_I2C_MUX_PCA954x=y
++CONFIG_I2C_HELPER_AUTO=y
++
++#
++# I2C Hardware Bus support
++#
++
++#
++# PC SMBus host controller drivers
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_ISCH is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++
++#
++# I2C system bus drivers (mostly embedded / system-on-chip)
++#
++# CONFIG_I2C_CBUS_GPIO is not set
++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
++# CONFIG_I2C_DESIGNWARE_PCI is not set
++# CONFIG_I2C_GPIO is not set
++CONFIG_I2C_IMX=y
++# CONFIG_I2C_NOMADIK is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PCA_PLATFORM is not set
++# CONFIG_I2C_PXA_PCI is not set
++# CONFIG_I2C_RK3X is not set
++# CONFIG_I2C_SIMTEC is not set
++# CONFIG_I2C_VERSATILE is not set
++# CONFIG_I2C_XILINX is not set
++
++#
++# External I2C/SMBus adapter drivers
++#
++# CONFIG_I2C_DIOLAN_U2C is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_TINY_USB is not set
++
++#
++# Other I2C/SMBus bus drivers
++#
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++# CONFIG_SPI_ALTERA is not set
++# CONFIG_SPI_BITBANG is not set
++# CONFIG_SPI_GPIO is not set
++# CONFIG_SPI_FSL_SPI is not set
++# CONFIG_SPI_OC_TINY is not set
++CONFIG_SPI_PL022=y
++# CONFIG_SPI_PXA2XX is not set
++# CONFIG_SPI_PXA2XX_PCI is not set
++# CONFIG_SPI_ROCKCHIP is not set
++# CONFIG_SPI_SC18IS602 is not set
++# CONFIG_SPI_XCOMM is not set
++# CONFIG_SPI_XILINX is not set
++# CONFIG_SPI_DESIGNWARE is not set
++
++#
++# SPI Protocol Masters
++#
++# CONFIG_SPI_SPIDEV is not set
++# CONFIG_SPI_TLE62X0 is not set
++# CONFIG_SPMI is not set
++# CONFIG_HSI is not set
++
++#
++# PPS support
++#
++CONFIG_PPS=y
++# CONFIG_PPS_DEBUG is not set
++# CONFIG_NTP_PPS is not set
++
++#
++# PPS clients support
++#
++# CONFIG_PPS_CLIENT_KTIMER is not set
++# CONFIG_PPS_CLIENT_LDISC is not set
++# CONFIG_PPS_CLIENT_GPIO is not set
++
++#
++# PPS generators support
++#
++
++#
++# PTP clock support
++#
++CONFIG_PTP_1588_CLOCK=y
++
++#
++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
++#
++CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
++CONFIG_ARCH_REQUIRE_GPIOLIB=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_DEVRES=y
++CONFIG_OF_GPIO=y
++CONFIG_GPIOLIB_IRQCHIP=y
++# CONFIG_DEBUG_GPIO is not set
++# CONFIG_GPIO_SYSFS is not set
++CONFIG_GPIO_GENERIC=y
++
++#
++# Memory mapped GPIO drivers:
++#
++CONFIG_GPIO_GENERIC_PLATFORM=y
++# CONFIG_GPIO_DWAPB is not set
++CONFIG_GPIO_PL061=y
++# CONFIG_GPIO_SCH311X is not set
++# CONFIG_GPIO_SYSCON is not set
++CONFIG_GPIO_XGENE=y
++# CONFIG_GPIO_VX855 is not set
++# CONFIG_GPIO_GRGPIO is not set
++
++#
++# I2C GPIO expanders:
++#
++# CONFIG_GPIO_MAX7300 is not set
++# CONFIG_GPIO_MAX732X is not set
++# CONFIG_GPIO_PCA953X is not set
++# CONFIG_GPIO_PCF857X is not set
++# CONFIG_GPIO_SX150X is not set
++# CONFIG_GPIO_ADP5588 is not set
++# CONFIG_GPIO_ADNP is not set
++
++#
++# PCI GPIO expanders:
++#
++# CONFIG_GPIO_BT8XX is not set
++# CONFIG_GPIO_AMD8111 is not set
++# CONFIG_GPIO_ML_IOH is not set
++# CONFIG_GPIO_RDC321X is not set
++
++#
++# SPI GPIO expanders:
++#
++# CONFIG_GPIO_MAX7301 is not set
++# CONFIG_GPIO_MCP23S08 is not set
++# CONFIG_GPIO_MC33880 is not set
++# CONFIG_GPIO_74X164 is not set
++
++#
++# AC97 GPIO expanders:
++#
++
++#
++# LPC GPIO expanders:
++#
++
++#
++# MODULbus GPIO expanders:
++#
++
++#
++# USB GPIO expanders:
++#
++# CONFIG_W1 is not set
++CONFIG_POWER_SUPPLY=y
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++# CONFIG_PDA_POWER is not set
++# CONFIG_TEST_POWER is not set
++# CONFIG_BATTERY_DS2780 is not set
++# CONFIG_BATTERY_DS2781 is not set
++# CONFIG_BATTERY_DS2782 is not set
++# CONFIG_BATTERY_SBS is not set
++# CONFIG_BATTERY_BQ27x00 is not set
++# CONFIG_BATTERY_MAX17040 is not set
++# CONFIG_BATTERY_MAX17042 is not set
++# CONFIG_CHARGER_MAX8903 is not set
++# CONFIG_CHARGER_LP8727 is not set
++# CONFIG_CHARGER_GPIO is not set
++# CONFIG_CHARGER_MANAGER is not set
++# CONFIG_CHARGER_BQ2415X is not set
++# CONFIG_CHARGER_BQ24190 is not set
++# CONFIG_CHARGER_BQ24735 is not set
++# CONFIG_CHARGER_SMB347 is not set
++CONFIG_POWER_RESET=y
++# CONFIG_POWER_RESET_GPIO is not set
++# CONFIG_POWER_RESET_GPIO_RESTART is not set
++# CONFIG_POWER_RESET_LTC2952 is not set
++CONFIG_POWER_RESET_VEXPRESS=y
++# CONFIG_POWER_RESET_XGENE is not set
++# CONFIG_POWER_RESET_SYSCON is not set
++CONFIG_POWER_RESET_LAYERSCAPE=y
++# CONFIG_POWER_AVS is not set
++# CONFIG_HWMON is not set
++# CONFIG_THERMAL is not set
++# CONFIG_WATCHDOG is not set
++CONFIG_SSB_POSSIBLE=y
++
++#
++# Sonics Silicon Backplane
++#
++# CONFIG_SSB is not set
++CONFIG_BCMA_POSSIBLE=y
++
++#
++# Broadcom specific AMBA
++#
++# CONFIG_BCMA is not set
++
++#
++# Multifunction device drivers
++#
++CONFIG_MFD_CORE=y
++# CONFIG_MFD_AS3711 is not set
++# CONFIG_MFD_AS3722 is not set
++# CONFIG_PMIC_ADP5520 is not set
++# CONFIG_MFD_AAT2870_CORE is not set
++# CONFIG_MFD_BCM590XX is not set
++# CONFIG_MFD_AXP20X is not set
++# CONFIG_MFD_CROS_EC is not set
++# CONFIG_PMIC_DA903X is not set
++# CONFIG_MFD_DA9052_SPI is not set
++# CONFIG_MFD_DA9052_I2C is not set
++# CONFIG_MFD_DA9055 is not set
++# CONFIG_MFD_DA9063 is not set
++# CONFIG_MFD_MC13XXX_SPI is not set
++# CONFIG_MFD_MC13XXX_I2C is not set
++# CONFIG_MFD_HI6421_PMIC is not set
++# CONFIG_HTC_PASIC3 is not set
++# CONFIG_HTC_I2CPLD is not set
++# CONFIG_LPC_ICH is not set
++# CONFIG_LPC_SCH is not set
++# CONFIG_INTEL_SOC_PMIC is not set
++# CONFIG_MFD_JANZ_CMODIO is not set
++# CONFIG_MFD_KEMPLD is not set
++# CONFIG_MFD_88PM800 is not set
++# CONFIG_MFD_88PM805 is not set
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_MFD_MAX14577 is not set
++# CONFIG_MFD_MAX77686 is not set
++# CONFIG_MFD_MAX77693 is not set
++# CONFIG_MFD_MAX8907 is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_MAX8997 is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_MENF21BMC is not set
++# CONFIG_EZX_PCAP is not set
++# CONFIG_MFD_VIPERBOARD is not set
++# CONFIG_MFD_RETU is not set
++# CONFIG_MFD_PCF50633 is not set
++# CONFIG_MFD_RDC321X is not set
++# CONFIG_MFD_RTSX_PCI is not set
++# CONFIG_MFD_RTSX_USB is not set
++# CONFIG_MFD_RC5T583 is not set
++# CONFIG_MFD_RK808 is not set
++# CONFIG_MFD_RN5T618 is not set
++# CONFIG_MFD_SEC_CORE is not set
++# CONFIG_MFD_SI476X_CORE is not set
++# CONFIG_MFD_SM501 is not set
++# CONFIG_MFD_SMSC is not set
++# CONFIG_ABX500_CORE is not set
++# CONFIG_MFD_STMPE is not set
++CONFIG_MFD_SYSCON=y
++# CONFIG_MFD_TI_AM335X_TSCADC is not set
++# CONFIG_MFD_LP3943 is not set
++# CONFIG_MFD_LP8788 is not set
++# CONFIG_MFD_PALMAS is not set
++# CONFIG_TPS6105X is not set
++# CONFIG_TPS65010 is not set
++# CONFIG_TPS6507X is not set
++# CONFIG_MFD_TPS65090 is not set
++# CONFIG_MFD_TPS65217 is not set
++# CONFIG_MFD_TPS65218 is not set
++# CONFIG_MFD_TPS6586X is not set
++# CONFIG_MFD_TPS65910 is not set
++# CONFIG_MFD_TPS65912 is not set
++# CONFIG_MFD_TPS65912_I2C is not set
++# CONFIG_MFD_TPS65912_SPI is not set
++# CONFIG_MFD_TPS80031 is not set
++# CONFIG_TWL4030_CORE is not set
++# CONFIG_TWL6040_CORE is not set
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_LM3533 is not set
++# CONFIG_MFD_TC3589X is not set
++# CONFIG_MFD_TMIO is not set
++# CONFIG_MFD_VX855 is not set
++# CONFIG_MFD_ARIZONA_I2C is not set
++# CONFIG_MFD_ARIZONA_SPI is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM831X_I2C is not set
++# CONFIG_MFD_WM831X_SPI is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8994 is not set
++CONFIG_MFD_VEXPRESS_SYSREG=y
++CONFIG_REGULATOR=y
++# CONFIG_REGULATOR_DEBUG is not set
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
++# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
++# CONFIG_REGULATOR_ACT8865 is not set
++# CONFIG_REGULATOR_AD5398 is not set
++# CONFIG_REGULATOR_ANATOP is not set
++# CONFIG_REGULATOR_DA9210 is not set
++# CONFIG_REGULATOR_DA9211 is not set
++# CONFIG_REGULATOR_FAN53555 is not set
++# CONFIG_REGULATOR_GPIO is not set
++# CONFIG_REGULATOR_ISL9305 is not set
++# CONFIG_REGULATOR_ISL6271A is not set
++# CONFIG_REGULATOR_LP3971 is not set
++# CONFIG_REGULATOR_LP3972 is not set
++# CONFIG_REGULATOR_LP872X is not set
++# CONFIG_REGULATOR_LP8755 is not set
++# CONFIG_REGULATOR_LTC3589 is not set
++# CONFIG_REGULATOR_MAX1586 is not set
++# CONFIG_REGULATOR_MAX8649 is not set
++# CONFIG_REGULATOR_MAX8660 is not set
++# CONFIG_REGULATOR_MAX8952 is not set
++# CONFIG_REGULATOR_MAX8973 is not set
++# CONFIG_REGULATOR_PFUZE100 is not set
++# CONFIG_REGULATOR_TPS51632 is not set
++# CONFIG_REGULATOR_TPS62360 is not set
++# CONFIG_REGULATOR_TPS65023 is not set
++# CONFIG_REGULATOR_TPS6507X is not set
++# CONFIG_REGULATOR_TPS6524X is not set
++# CONFIG_REGULATOR_VEXPRESS is not set
++# CONFIG_MEDIA_SUPPORT is not set
++
++#
++# Graphics support
++#
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++
++#
++# Direct Rendering Manager
++#
++# CONFIG_DRM is not set
++
++#
++# Frame buffer Devices
++#
++CONFIG_FB=y
++# CONFIG_FIRMWARE_EDID is not set
++CONFIG_FB_CMDLINE=y
++# CONFIG_FB_DDC is not set
++# CONFIG_FB_BOOT_VESA_SUPPORT is not set
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
++# CONFIG_FB_SYS_FILLRECT is not set
++# CONFIG_FB_SYS_COPYAREA is not set
++# CONFIG_FB_SYS_IMAGEBLIT is not set
++# CONFIG_FB_FOREIGN_ENDIAN is not set
++# CONFIG_FB_SYS_FOPS is not set
++# CONFIG_FB_SVGALIB is not set
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++
++#
++# Frame buffer hardware drivers
++#
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++CONFIG_FB_ARMCLCD=y
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_OPENCORES is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_I740 is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_S3 is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_VT8623 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_ARK is not set
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_CARMINE is not set
++# CONFIG_FB_SMSCUFX is not set
++# CONFIG_FB_UDL is not set
++# CONFIG_FB_VIRTUAL is not set
++# CONFIG_FB_METRONOME is not set
++# CONFIG_FB_MB862XX is not set
++# CONFIG_FB_BROADSHEET is not set
++# CONFIG_FB_AUO_K190X is not set
++# CONFIG_FB_SIMPLE is not set
++# CONFIG_FB_SSD1307 is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++# CONFIG_VGASTATE is not set
++CONFIG_VIDEOMODE_HELPERS=y
++
++#
++# Console display driver support
++#
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_SOUND is not set
++
++#
++# HID support
++#
++CONFIG_HID=y
++# CONFIG_HID_BATTERY_STRENGTH is not set
++# CONFIG_HIDRAW is not set
++# CONFIG_UHID is not set
++CONFIG_HID_GENERIC=y
++
++#
++# Special HID drivers
++#
++CONFIG_HID_A4TECH=y
++# CONFIG_HID_ACRUX is not set
++CONFIG_HID_APPLE=y
++# CONFIG_HID_APPLEIR is not set
++# CONFIG_HID_AUREAL is not set
++CONFIG_HID_BELKIN=y
++CONFIG_HID_CHERRY=y
++CONFIG_HID_CHICONY=y
++# CONFIG_HID_CP2112 is not set
++CONFIG_HID_CYPRESS=y
++# CONFIG_HID_DRAGONRISE is not set
++# CONFIG_HID_EMS_FF is not set
++# CONFIG_HID_ELECOM is not set
++# CONFIG_HID_ELO is not set
++CONFIG_HID_EZKEY=y
++# CONFIG_HID_HOLTEK is not set
++# CONFIG_HID_HUION is not set
++# CONFIG_HID_KEYTOUCH is not set
++# CONFIG_HID_KYE is not set
++# CONFIG_HID_UCLOGIC is not set
++# CONFIG_HID_WALTOP is not set
++# CONFIG_HID_GYRATION is not set
++# CONFIG_HID_ICADE is not set
++# CONFIG_HID_TWINHAN is not set
++CONFIG_HID_KENSINGTON=y
++# CONFIG_HID_LCPOWER is not set
++# CONFIG_HID_LENOVO is not set
++CONFIG_HID_LOGITECH=y
++# CONFIG_HID_LOGITECH_HIDPP is not set
++# CONFIG_LOGITECH_FF is not set
++# CONFIG_LOGIRUMBLEPAD2_FF is not set
++# CONFIG_LOGIG940_FF is not set
++# CONFIG_LOGIWHEELS_FF is not set
++# CONFIG_HID_MAGICMOUSE is not set
++CONFIG_HID_MICROSOFT=y
++CONFIG_HID_MONTEREY=y
++# CONFIG_HID_MULTITOUCH is not set
++# CONFIG_HID_NTRIG is not set
++# CONFIG_HID_ORTEK is not set
++# CONFIG_HID_PANTHERLORD is not set
++# CONFIG_HID_PENMOUNT is not set
++# CONFIG_HID_PETALYNX is not set
++# CONFIG_HID_PICOLCD is not set
++# CONFIG_HID_PRIMAX is not set
++# CONFIG_HID_ROCCAT is not set
++# CONFIG_HID_SAITEK is not set
++# CONFIG_HID_SAMSUNG is not set
++# CONFIG_HID_SPEEDLINK is not set
++# CONFIG_HID_STEELSERIES is not set
++# CONFIG_HID_SUNPLUS is not set
++# CONFIG_HID_RMI is not set
++# CONFIG_HID_GREENASIA is not set
++# CONFIG_HID_SMARTJOYPLUS is not set
++# CONFIG_HID_TIVO is not set
++# CONFIG_HID_TOPSEED is not set
++# CONFIG_HID_THRUSTMASTER is not set
++# CONFIG_HID_WACOM is not set
++# CONFIG_HID_XINMO is not set
++# CONFIG_HID_ZEROPLUS is not set
++# CONFIG_HID_ZYDACRON is not set
++# CONFIG_HID_SENSOR_HUB is not set
++
++#
++# USB HID support
++#
++CONFIG_USB_HID=y
++# CONFIG_HID_PID is not set
++# CONFIG_USB_HIDDEV is not set
++
++#
++# I2C HID support
++#
++# CONFIG_I2C_HID is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_SUPPORT=y
++CONFIG_USB_COMMON=y
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB=y
++# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEFAULT_PERSIST=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG_WHITELIST is not set
++# CONFIG_USB_OTG_FSM is not set
++# CONFIG_USB_MON is not set
++# CONFIG_USB_WUSB_CBAF is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_C67X00_HCD is not set
++CONFIG_USB_XHCI_HCD=y
++CONFIG_USB_XHCI_PCI=y
++CONFIG_USB_XHCI_PLATFORM=y
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_EHCI_PCI=y
++CONFIG_USB_EHCI_HCD_PLATFORM=y
++# CONFIG_USB_OXU210HP_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_ISP1760_HCD=y
++# CONFIG_USB_ISP1362_HCD is not set
++# CONFIG_USB_FUSBH200_HCD is not set
++# CONFIG_USB_FOTG210_HCD is not set
++# CONFIG_USB_MAX3421_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PCI=y
++CONFIG_USB_OHCI_HCD_PLATFORM=y
++# CONFIG_USB_UHCI_HCD is not set
++# CONFIG_USB_SL811_HCD is not set
++# CONFIG_USB_R8A66597_HCD is not set
++# CONFIG_USB_HCD_TEST_MODE is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++# CONFIG_USB_WDM is not set
++# CONFIG_USB_TMC is not set
++
++#
++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
++#
++
++#
++# also be needed; see USB_STORAGE Help for more info
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_REALTEK is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_STORAGE_ONETOUCH is not set
++# CONFIG_USB_STORAGE_KARMA is not set
++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
++# CONFIG_USB_STORAGE_ENE_UB6250 is not set
++# CONFIG_USB_UAS is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++# CONFIG_USBIP_CORE is not set
++# CONFIG_USB_MUSB_HDRC is not set
++CONFIG_USB_DWC3=y
++CONFIG_USB_DWC3_HOST=y
++
++#
++# Platform Glue Driver Support
++#
++CONFIG_USB_DWC3_PCI=y
++
++#
++# Debugging features
++#
++# CONFIG_USB_DWC3_DEBUG is not set
++# CONFIG_DWC3_HOST_USB3_LPM_ENABLE is not set
++# CONFIG_USB_DWC2 is not set
++# CONFIG_USB_CHIPIDEA is not set
++
++#
++# USB port drivers
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_ADUTUX is not set
++# CONFIG_USB_SEVSEG is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_FTDI_ELAN is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TRANCEVIBRATOR is not set
++# CONFIG_USB_IOWARRIOR is not set
++# CONFIG_USB_TEST is not set
++# CONFIG_USB_EHSET_TEST_FIXTURE is not set
++# CONFIG_USB_ISIGHTFW is not set
++# CONFIG_USB_YUREX is not set
++# CONFIG_USB_EZUSB_FX2 is not set
++# CONFIG_USB_HSIC_USB3503 is not set
++# CONFIG_USB_LINK_LAYER_TEST is not set
++
++#
++# USB Physical Layer drivers
++#
++# CONFIG_USB_PHY is not set
++# CONFIG_NOP_USB_XCEIV is not set
++# CONFIG_USB_GPIO_VBUS is not set
++# CONFIG_USB_ISP1301 is not set
++CONFIG_USB_ULPI=y
++# CONFIG_USB_GADGET is not set
++# CONFIG_UWB is not set
++CONFIG_MMC=y
++# CONFIG_MMC_DEBUG is not set
++# CONFIG_MMC_CLKGATE is not set
++
++#
++# MMC/SD/SDIO Card Drivers
++#
++CONFIG_MMC_BLOCK=y
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++# CONFIG_SDIO_UART is not set
++# CONFIG_MMC_TEST is not set
++
++#
++# MMC/SD/SDIO Host Controller Drivers
++#
++CONFIG_MMC_ARMMMCI=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_IO_ACCESSORS=y
++# CONFIG_MMC_SDHCI_PCI is not set
++CONFIG_MMC_SDHCI_PLTFM=y
++# CONFIG_MMC_SDHCI_OF_ARASAN is not set
++CONFIG_MMC_SDHCI_OF_ESDHC=y
++# CONFIG_MMC_SDHCI_PXAV3 is not set
++# CONFIG_MMC_SDHCI_PXAV2 is not set
++# CONFIG_MMC_TIFM_SD is not set
++CONFIG_MMC_SPI=y
++# CONFIG_MMC_CB710 is not set
++# CONFIG_MMC_VIA_SDMMC is not set
++# CONFIG_MMC_VUB300 is not set
++# CONFIG_MMC_USHC is not set
++# CONFIG_MMC_USDHI6ROL0 is not set
++# CONFIG_MEMSTICK is not set
++# CONFIG_NEW_LEDS is not set
++# CONFIG_ACCESSIBILITY is not set
++# CONFIG_INFINIBAND is not set
++CONFIG_RTC_LIB=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_HCTOSYS=y
++CONFIG_RTC_SYSTOHC=y
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++# CONFIG_RTC_DEBUG is not set
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++# CONFIG_RTC_DRV_TEST is not set
++
++#
++# I2C RTC drivers
++#
++# CONFIG_RTC_DRV_DS1307 is not set
++# CONFIG_RTC_DRV_DS1374 is not set
++# CONFIG_RTC_DRV_DS1672 is not set
++CONFIG_RTC_DRV_DS3232=y
++# CONFIG_RTC_DRV_HYM8563 is not set
++# CONFIG_RTC_DRV_MAX6900 is not set
++# CONFIG_RTC_DRV_RS5C372 is not set
++# CONFIG_RTC_DRV_ISL1208 is not set
++# CONFIG_RTC_DRV_ISL12022 is not set
++# CONFIG_RTC_DRV_ISL12057 is not set
++# CONFIG_RTC_DRV_X1205 is not set
++# CONFIG_RTC_DRV_PCF2127 is not set
++# CONFIG_RTC_DRV_PCF8523 is not set
++# CONFIG_RTC_DRV_PCF8563 is not set
++# CONFIG_RTC_DRV_PCF85063 is not set
++# CONFIG_RTC_DRV_PCF8583 is not set
++# CONFIG_RTC_DRV_M41T80 is not set
++# CONFIG_RTC_DRV_BQ32K is not set
++# CONFIG_RTC_DRV_S35390A is not set
++# CONFIG_RTC_DRV_FM3130 is not set
++# CONFIG_RTC_DRV_RX8581 is not set
++# CONFIG_RTC_DRV_RX8025 is not set
++# CONFIG_RTC_DRV_EM3027 is not set
++# CONFIG_RTC_DRV_RV3029C2 is not set
++
++#
++# SPI RTC drivers
++#
++# CONFIG_RTC_DRV_M41T93 is not set
++# CONFIG_RTC_DRV_M41T94 is not set
++# CONFIG_RTC_DRV_DS1305 is not set
++# CONFIG_RTC_DRV_DS1343 is not set
++# CONFIG_RTC_DRV_DS1347 is not set
++# CONFIG_RTC_DRV_DS1390 is not set
++# CONFIG_RTC_DRV_MAX6902 is not set
++# CONFIG_RTC_DRV_R9701 is not set
++# CONFIG_RTC_DRV_RS5C348 is not set
++# CONFIG_RTC_DRV_DS3234 is not set
++# CONFIG_RTC_DRV_PCF2123 is not set
++# CONFIG_RTC_DRV_RX4581 is not set
++# CONFIG_RTC_DRV_MCP795 is not set
++
++#
++# Platform RTC drivers
++#
++# CONFIG_RTC_DRV_DS1286 is not set
++# CONFIG_RTC_DRV_DS1511 is not set
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++# CONFIG_RTC_DRV_DS2404 is not set
++CONFIG_RTC_DRV_EFI=y
++# CONFIG_RTC_DRV_STK17TA8 is not set
++# CONFIG_RTC_DRV_M48T86 is not set
++# CONFIG_RTC_DRV_M48T35 is not set
++# CONFIG_RTC_DRV_M48T59 is not set
++# CONFIG_RTC_DRV_MSM6242 is not set
++# CONFIG_RTC_DRV_BQ4802 is not set
++# CONFIG_RTC_DRV_RP5C01 is not set
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# on-CPU RTC drivers
++#
++# CONFIG_RTC_DRV_PL030 is not set
++# CONFIG_RTC_DRV_PL031 is not set
++# CONFIG_RTC_DRV_SNVS is not set
++CONFIG_RTC_DRV_XGENE=y
++
++#
++# HID Sensor RTC drivers
++#
++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
++CONFIG_DMADEVICES=y
++# CONFIG_DMADEVICES_DEBUG is not set
++
++#
++# DMA Devices
++#
++# CONFIG_AMBA_PL08X is not set
++# CONFIG_DW_DMAC_CORE is not set
++# CONFIG_DW_DMAC is not set
++# CONFIG_DW_DMAC_PCI is not set
++# CONFIG_PL330_DMA is not set
++# CONFIG_FSL_EDMA is not set
++CONFIG_DMA_ENGINE=y
++CONFIG_DMA_OF=y
++
++#
++# DMA Clients
++#
++# CONFIG_ASYNC_TX_DMA is not set
++# CONFIG_DMATEST is not set
++# CONFIG_AUXDISPLAY is not set
++# CONFIG_UIO is not set
++# CONFIG_VFIO_IOMMU_TYPE1 is not set
++CONFIG_VFIO=y
++CONFIG_VFIO_PCI=y
++CONFIG_VFIO_FSL_MC=y
++# CONFIG_VIRT_DRIVERS is not set
++CONFIG_VIRTIO=y
++
++#
++# Virtio drivers
++#
++CONFIG_VIRTIO_PCI=y
++CONFIG_VIRTIO_BALLOON=y
++CONFIG_VIRTIO_MMIO=y
++# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
++
++#
++# Microsoft Hyper-V guest support
++#
++CONFIG_STAGING=y
++# CONFIG_COMEDI is not set
++# CONFIG_RTS5208 is not set
++# CONFIG_FB_XGI is not set
++# CONFIG_BCM_WIMAX is not set
++# CONFIG_FT1000 is not set
++
++#
++# Speakup console speech
++#
++# CONFIG_SPEAKUP is not set
++# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
++# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
++# CONFIG_STAGING_MEDIA is not set
++
++#
++# Android
++#
++# CONFIG_ANDROID is not set
++# CONFIG_USB_WPAN_HCD is not set
++# CONFIG_WIMAX_GDM72XX is not set
++# CONFIG_LTE_GDM724X is not set
++# CONFIG_MTD_SPINAND_MT29F is not set
++# CONFIG_LUSTRE_FS is not set
++# CONFIG_DGNC is not set
++# CONFIG_DGAP is not set
++# CONFIG_GS_FPGABOOT is not set
++CONFIG_FSL_MC_BUS=y
++CONFIG_FSL_MC_RESTOOL=y
++CONFIG_FSL_MC_DPIO=y
++# CONFIG_FSL_QBMAN_DEBUG is not set
++CONFIG_FSL_DPAA2=y
++CONFIG_FSL_DPAA2_ETH=y
++# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set
++CONFIG_FSL_DPAA2_MAC=y
++# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set
++
++#
++# SOC (System On Chip) specific Drivers
++#
++# CONFIG_SOC_TI is not set
++CONFIG_FSL_SOC_DRIVERS=y
++CONFIG_FSL_GUTS=y
++CONFIG_LS_SOC_DRIVERS=y
++CONFIG_CLKDEV_LOOKUP=y
++CONFIG_HAVE_CLK_PREPARE=y
++CONFIG_COMMON_CLK=y
++
++#
++# Common Clock Framework
++#
++CONFIG_COMMON_CLK_VERSATILE=y
++CONFIG_CLK_SP810=y
++CONFIG_CLK_VEXPRESS_OSC=y
++# CONFIG_COMMON_CLK_SI5351 is not set
++# CONFIG_COMMON_CLK_SI570 is not set
++CONFIG_CLK_QORIQ=y
++CONFIG_COMMON_CLK_XGENE=y
++# CONFIG_COMMON_CLK_PXA is not set
++# CONFIG_COMMON_CLK_QCOM is not set
++
++#
++# Hardware Spinlock drivers
++#
++
++#
++# Clock Source drivers
++#
++CONFIG_CLKSRC_OF=y
++CONFIG_CLKSRC_MMIO=y
++CONFIG_ARM_ARCH_TIMER=y
++CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
++# CONFIG_ATMEL_PIT is not set
++# CONFIG_SH_TIMER_CMT is not set
++# CONFIG_SH_TIMER_MTU2 is not set
++# CONFIG_SH_TIMER_TMU is not set
++# CONFIG_EM_TIMER_STI is not set
++CONFIG_CLKSRC_VERSATILE=y
++# CONFIG_MAILBOX is not set
++CONFIG_IOMMU_API=y
++CONFIG_IOMMU_SUPPORT=y
++
++#
++# Generic IOMMU Pagetable Support
++#
++CONFIG_IOMMU_IO_PGTABLE=y
++CONFIG_IOMMU_IO_PGTABLE_LPAE=y
++# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set
++CONFIG_OF_IOMMU=y
++CONFIG_ARM_SMMU=y
++
++#
++# Remoteproc drivers
++#
++# CONFIG_STE_MODEM_RPROC is not set
++
++#
++# Rpmsg drivers
++#
++
++#
++# SOC (System On Chip) specific Drivers
++#
++# CONFIG_PM_DEVFREQ is not set
++# CONFIG_EXTCON is not set
++CONFIG_MEMORY=y
++CONFIG_FSL_IFC=y
++# CONFIG_IIO is not set
++# CONFIG_VME_BUS is not set
++# CONFIG_PWM is not set
++CONFIG_IRQCHIP=y
++CONFIG_ARM_GIC=y
++CONFIG_ARM_GIC_V2M=y
++CONFIG_ARM_GIC_V3=y
++CONFIG_ARM_GIC_V3_ITS=y
++# CONFIG_IPACK_BUS is not set
++CONFIG_RESET_CONTROLLER=y
++# CONFIG_FMC is not set
++
++#
++# PHY Subsystem
++#
++CONFIG_GENERIC_PHY=y
++# CONFIG_BCM_KONA_USB2_PHY is not set
++CONFIG_PHY_XGENE=y
++# CONFIG_POWERCAP is not set
++# CONFIG_MCB is not set
++CONFIG_RAS=y
++# CONFIG_THUNDERBOLT is not set
++
++#
++# Firmware Drivers
++#
++# CONFIG_FIRMWARE_MEMMAP is not set
++
++#
++# EFI (Extensible Firmware Interface) Support
++#
++# CONFIG_EFI_VARS is not set
++CONFIG_EFI_PARAMS_FROM_FDT=y
++CONFIG_EFI_RUNTIME_WRAPPERS=y
++CONFIG_EFI_ARMSTUB=y
++
++#
++# File systems
++#
++CONFIG_DCACHE_WORD_ACCESS=y
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
++# CONFIG_EXT3_FS_XATTR is not set
++CONFIG_EXT4_FS=y
++# CONFIG_EXT4_FS_POSIX_ACL is not set
++# CONFIG_EXT4_FS_SECURITY is not set
++# CONFIG_EXT4_DEBUG is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_JBD2=y
++# CONFIG_JBD2_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_GFS2_FS is not set
++# CONFIG_BTRFS_FS is not set
++# CONFIG_NILFS2_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++CONFIG_FILE_LOCKING=y
++CONFIG_FSNOTIFY=y
++CONFIG_DNOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_FANOTIFY=y
++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
++# CONFIG_QUOTA is not set
++# CONFIG_QUOTACTL is not set
++# CONFIG_AUTOFS4_FS is not set
++CONFIG_FUSE_FS=y
++CONFIG_CUSE=y
++CONFIG_OVERLAY_FS=y
++
++#
++# Caches
++#
++# CONFIG_FSCACHE is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++# CONFIG_ISO9660_FS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++# CONFIG_PROC_KCORE is not set
++CONFIG_PROC_SYSCTL=y
++CONFIG_PROC_PAGE_MONITOR=y
++CONFIG_KERNFS=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_POSIX_ACL is not set
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++# CONFIG_CONFIGFS_FS is not set
++CONFIG_MISC_FILESYSTEMS=y
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_ECRYPT_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
++CONFIG_JFFS2_SUMMARY=y
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++# CONFIG_JFFS2_LZO is not set
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_LOGFS is not set
++# CONFIG_CRAMFS is not set
++CONFIG_SQUASHFS=y
++CONFIG_SQUASHFS_FILE_CACHE=y
++# CONFIG_SQUASHFS_FILE_DIRECT is not set
++CONFIG_SQUASHFS_DECOMP_SINGLE=y
++# CONFIG_SQUASHFS_DECOMP_MULTI is not set
++# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
++CONFIG_SQUASHFS_XATTR=y
++CONFIG_SQUASHFS_ZLIB=y
++CONFIG_SQUASHFS_LZO=y
++CONFIG_SQUASHFS_XZ=y
++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
++# CONFIG_SQUASHFS_EMBEDDED is not set
++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
++# CONFIG_VXFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_OMFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_QNX6FS_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_PSTORE is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++# CONFIG_F2FS_FS is not set
++# CONFIG_EFIVAR_FS is not set
++# CONFIG_AUFS_FS is not set
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V2=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_SWAP is not set
++# CONFIG_NFS_V4_1 is not set
++CONFIG_ROOT_NFS=y
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_NFS_USE_KERNEL_DNS=y
++# CONFIG_NFSD is not set
++CONFIG_GRACE_PERIOD=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++# CONFIG_SUNRPC_DEBUG is not set
++# CONFIG_CEPH_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++CONFIG_9P_FS=y
++# CONFIG_9P_FS_POSIX_ACL is not set
++# CONFIG_9P_FS_SECURITY is not set
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_MAC_ROMAN is not set
++# CONFIG_NLS_MAC_CELTIC is not set
++# CONFIG_NLS_MAC_CENTEURO is not set
++# CONFIG_NLS_MAC_CROATIAN is not set
++# CONFIG_NLS_MAC_CYRILLIC is not set
++# CONFIG_NLS_MAC_GAELIC is not set
++# CONFIG_NLS_MAC_GREEK is not set
++# CONFIG_NLS_MAC_ICELAND is not set
++# CONFIG_NLS_MAC_INUIT is not set
++# CONFIG_NLS_MAC_ROMANIAN is not set
++# CONFIG_NLS_MAC_TURKISH is not set
++CONFIG_NLS_UTF8=y
++CONFIG_HAVE_KVM_IRQCHIP=y
++CONFIG_KVM_MMIO=y
++CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM=y
++CONFIG_KVM_ARM_HOST=y
++CONFIG_KVM_ARM_MAX_VCPUS=8
++CONFIG_KVM_ARM_VGIC=y
++CONFIG_KVM_ARM_TIMER=y
++
++#
++# Kernel hacking
++#
++
++#
++# printk and dmesg options
++#
++CONFIG_PRINTK_TIME=y
++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
++# CONFIG_BOOT_PRINTK_DELAY is not set
++# CONFIG_DYNAMIC_DEBUG is not set
++
++#
++# Compile-time checks and compiler options
++#
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_INFO_REDUCED is not set
++# CONFIG_DEBUG_INFO_SPLIT is not set
++# CONFIG_DEBUG_INFO_DWARF4 is not set
++CONFIG_ENABLE_WARN_DEPRECATED=y
++CONFIG_ENABLE_MUST_CHECK=y
++CONFIG_FRAME_WARN=2048
++# CONFIG_STRIP_ASM_SYMS is not set
++# CONFIG_READABLE_ASM is not set
++# CONFIG_UNUSED_SYMBOLS is not set
++CONFIG_DEBUG_FS=y
++# CONFIG_HEADERS_CHECK is not set
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++CONFIG_ARCH_WANT_FRAME_POINTERS=y
++CONFIG_FRAME_POINTER=y
++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
++CONFIG_DEBUG_KERNEL=y
++
++#
++# Memory Debugging
++#
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_OBJECTS is not set
++# CONFIG_DEBUG_SLAB is not set
++CONFIG_HAVE_DEBUG_KMEMLEAK=y
++# CONFIG_DEBUG_KMEMLEAK is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_DEBUG_MEMORY_INIT=y
++# CONFIG_DEBUG_PER_CPU_MAPS is not set
++# CONFIG_DEBUG_SHIRQ is not set
++
++#
++# Debug Lockups and Hangs
++#
++CONFIG_LOCKUP_DETECTOR=y
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
++CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_ON_OOPS_VALUE=0
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_SCHED_DEBUG=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_SCHED_STACK_END_CHECK is not set
++# CONFIG_TIMER_STATS is not set
++CONFIG_DEBUG_PREEMPT=y
++
++#
++# Lock Debugging (spinlocks, mutexes, etc...)
++#
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_LOCK_STAT is not set
++# CONFIG_DEBUG_ATOMIC_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_LOCK_TORTURE_TEST is not set
++# CONFIG_STACKTRACE is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_HAVE_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_LIST is not set
++# CONFIG_DEBUG_PI_LIST is not set
++# CONFIG_DEBUG_SG is not set
++# CONFIG_DEBUG_NOTIFIERS is not set
++# CONFIG_DEBUG_CREDENTIALS is not set
++
++#
++# RCU Debugging
++#
++# CONFIG_SPARSE_RCU_POINTER is not set
++# CONFIG_TORTURE_TEST is not set
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=21
++CONFIG_RCU_CPU_STALL_VERBOSE=y
++# CONFIG_RCU_CPU_STALL_INFO is not set
++# CONFIG_RCU_TRACE is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++# CONFIG_NOTIFIER_ERROR_INJECTION is not set
++# CONFIG_FAULT_INJECTION is not set
++CONFIG_HAVE_FUNCTION_TRACER=y
++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
++CONFIG_HAVE_DYNAMIC_FTRACE=y
++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
++CONFIG_HAVE_C_RECORDMCOUNT=y
++CONFIG_TRACING_SUPPORT=y
++# CONFIG_FTRACE is not set
++
++#
++# Runtime Testing
++#
++# CONFIG_LKDTM is not set
++# CONFIG_TEST_LIST_SORT is not set
++# CONFIG_BACKTRACE_SELF_TEST is not set
++# CONFIG_RBTREE_TEST is not set
++# CONFIG_INTERVAL_TREE_TEST is not set
++# CONFIG_PERCPU_TEST is not set
++# CONFIG_ATOMIC64_SELFTEST is not set
++# CONFIG_TEST_STRING_HELPERS is not set
++# CONFIG_TEST_KSTRTOX is not set
++# CONFIG_TEST_RHASHTABLE is not set
++# CONFIG_DMA_API_DEBUG is not set
++# CONFIG_TEST_LKM is not set
++# CONFIG_TEST_USER_COPY is not set
++# CONFIG_TEST_BPF is not set
++# CONFIG_TEST_FIRMWARE is not set
++# CONFIG_TEST_UDELAY is not set
++# CONFIG_SAMPLES is not set
++CONFIG_HAVE_ARCH_KGDB=y
++# CONFIG_KGDB is not set
++# CONFIG_STRICT_DEVMEM is not set
++CONFIG_PID_IN_CONTEXTIDR=y
++# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set
++# CONFIG_DEBUG_SET_MODULE_RONX is not set
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_PERSISTENT_KEYRINGS is not set
++# CONFIG_BIG_KEYS is not set
++# CONFIG_ENCRYPTED_KEYS is not set
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY=y
++# CONFIG_SECURITYFS is not set
++# CONFIG_SECURITY_NETWORK is not set
++# CONFIG_SECURITY_PATH is not set
++# CONFIG_SECURITY_SMACK is not set
++# CONFIG_SECURITY_TOMOYO is not set
++# CONFIG_SECURITY_APPARMOR is not set
++# CONFIG_SECURITY_YAMA is not set
++CONFIG_INTEGRITY=y
++# CONFIG_INTEGRITY_SIGNATURE is not set
++CONFIG_INTEGRITY_AUDIT=y
++# CONFIG_IMA is not set
++# CONFIG_EVM is not set
++CONFIG_DEFAULT_SECURITY_DAC=y
++CONFIG_DEFAULT_SECURITY=""
++CONFIG_CRYPTO=y
++
++#
++# Crypto core or helper
++#
++CONFIG_CRYPTO_ALGAPI=y
++CONFIG_CRYPTO_ALGAPI2=y
++CONFIG_CRYPTO_AEAD=y
++CONFIG_CRYPTO_AEAD2=y
++CONFIG_CRYPTO_BLKCIPHER=y
++CONFIG_CRYPTO_BLKCIPHER2=y
++CONFIG_CRYPTO_HASH=y
++CONFIG_CRYPTO_HASH2=y
++CONFIG_CRYPTO_RNG=y
++CONFIG_CRYPTO_RNG2=y
++CONFIG_CRYPTO_PCOMP2=y
++CONFIG_CRYPTO_MANAGER=y
++CONFIG_CRYPTO_MANAGER2=y
++# CONFIG_CRYPTO_USER is not set
++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
++# CONFIG_CRYPTO_GF128MUL is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_PCRYPT is not set
++CONFIG_CRYPTO_WORKQUEUE=y
++CONFIG_CRYPTO_CRYPTD=y
++# CONFIG_CRYPTO_MCRYPTD is not set
++CONFIG_CRYPTO_AUTHENC=y
++# CONFIG_CRYPTO_TEST is not set
++CONFIG_CRYPTO_ABLK_HELPER=y
++
++#
++# Authenticated Encryption with Associated Data
++#
++# CONFIG_CRYPTO_CCM is not set
++# CONFIG_CRYPTO_GCM is not set
++# CONFIG_CRYPTO_SEQIV is not set
++
++#
++# Block modes
++#
++CONFIG_CRYPTO_CBC=y
++# CONFIG_CRYPTO_CTR is not set
++# CONFIG_CRYPTO_CTS is not set
++# CONFIG_CRYPTO_ECB is not set
++# CONFIG_CRYPTO_LRW is not set
++# CONFIG_CRYPTO_PCBC is not set
++# CONFIG_CRYPTO_XTS is not set
++
++#
++# Hash modes
++#
++# CONFIG_CRYPTO_CMAC is not set
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_XCBC is not set
++# CONFIG_CRYPTO_VMAC is not set
++
++#
++# Digest
++#
++CONFIG_CRYPTO_CRC32C=y
++# CONFIG_CRYPTO_CRC32 is not set
++# CONFIG_CRYPTO_CRCT10DIF is not set
++# CONFIG_CRYPTO_GHASH is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_RMD128 is not set
++# CONFIG_CRYPTO_RMD160 is not set
++# CONFIG_CRYPTO_RMD256 is not set
++# CONFIG_CRYPTO_RMD320 is not set
++CONFIG_CRYPTO_SHA1=y
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_WP512 is not set
++
++#
++# Ciphers
++#
++CONFIG_CRYPTO_AES=y
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_CAMELLIA is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_FCRYPT is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_SALSA20 is not set
++# CONFIG_CRYPTO_SEED is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++
++#
++# Compression
++#
++CONFIG_CRYPTO_DEFLATE=y
++# CONFIG_CRYPTO_ZLIB is not set
++# CONFIG_CRYPTO_LZO is not set
++# CONFIG_CRYPTO_LZ4 is not set
++# CONFIG_CRYPTO_LZ4HC is not set
++
++#
++# Random Number Generation
++#
++CONFIG_CRYPTO_ANSI_CPRNG=y
++# CONFIG_CRYPTO_DRBG_MENU is not set
++# CONFIG_CRYPTO_USER_API_HASH is not set
++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
++CONFIG_CRYPTO_HW=y
++# CONFIG_CRYPTO_DEV_CCP is not set
++# CONFIG_ASYMMETRIC_KEY_TYPE is not set
++CONFIG_ARM64_CRYPTO=y
++CONFIG_CRYPTO_SHA1_ARM64_CE=y
++CONFIG_CRYPTO_SHA2_ARM64_CE=y
++CONFIG_CRYPTO_GHASH_ARM64_CE=y
++CONFIG_CRYPTO_AES_ARM64_CE=y
++CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
++CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
++CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
++# CONFIG_BINARY_PRINTF is not set
++
++#
++# Library routines
++#
++CONFIG_BITREVERSE=y
++CONFIG_GENERIC_STRNCPY_FROM_USER=y
++CONFIG_GENERIC_STRNLEN_USER=y
++CONFIG_GENERIC_NET_UTILS=y
++CONFIG_GENERIC_PCI_IOMAP=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_IO=y
++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
++# CONFIG_CRC_CCITT is not set
++CONFIG_CRC16=y
++# CONFIG_CRC_T10DIF is not set
++CONFIG_CRC_ITU_T=y
++CONFIG_CRC32=y
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC32_SLICEBY8=y
++# CONFIG_CRC32_SLICEBY4 is not set
++# CONFIG_CRC32_SARWATE is not set
++# CONFIG_CRC32_BIT is not set
++CONFIG_CRC7=y
++# CONFIG_LIBCRC32C is not set
++# CONFIG_CRC8 is not set
++CONFIG_AUDIT_GENERIC=y
++CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
++CONFIG_AUDIT_COMPAT_GENERIC=y
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_LZO_COMPRESS=y
++CONFIG_LZO_DECOMPRESS=y
++CONFIG_LZ4_DECOMPRESS=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++CONFIG_XZ_DEC_IA64=y
++CONFIG_XZ_DEC_ARM=y
++CONFIG_XZ_DEC_ARMTHUMB=y
++CONFIG_XZ_DEC_SPARC=y
++CONFIG_XZ_DEC_BCJ=y
++# CONFIG_XZ_DEC_TEST is not set
++CONFIG_DECOMPRESS_GZIP=y
++CONFIG_DECOMPRESS_BZIP2=y
++CONFIG_DECOMPRESS_LZMA=y
++CONFIG_DECOMPRESS_XZ=y
++CONFIG_DECOMPRESS_LZO=y
++CONFIG_DECOMPRESS_LZ4=y
++CONFIG_GENERIC_ALLOCATOR=y
++CONFIG_ASSOCIATIVE_ARRAY=y
++CONFIG_HAS_IOMEM=y
++CONFIG_HAS_IOPORT_MAP=y
++CONFIG_HAS_DMA=y
++CONFIG_CPU_RMAP=y
++CONFIG_DQL=y
++CONFIG_GLOB=y
++# CONFIG_GLOB_SELFTEST is not set
++CONFIG_NLATTR=y
++CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
++CONFIG_AVERAGE=y
++# CONFIG_CORDIC is not set
++# CONFIG_DDR is not set
++CONFIG_LIBFDT=y
++CONFIG_OID_REGISTRY=y
++CONFIG_UCS2_STRING=y
++CONFIG_FONT_SUPPORT=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_ARCH_HAS_SG_CHAIN=y
+diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
+index cf98b36..243ef25 100644
+--- a/arch/arm64/include/asm/device.h
++++ b/arch/arm64/include/asm/device.h
+@@ -21,6 +21,7 @@ struct dev_archdata {
+ #ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+ #endif
++ bool dma_coherent;
+ };
+
+ struct pdev_archdata {
+diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
+index adeae3f..9ce3e68 100644
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -52,12 +52,20 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+ dev->archdata.dma_ops = ops;
+ }
+
+-static inline int set_arch_dma_coherent_ops(struct device *dev)
++static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
++ struct iommu_ops *iommu, bool coherent)
+ {
+- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+- return 0;
++ dev->archdata.dma_coherent = coherent;
++ if (coherent)
++ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
++}
++#define arch_setup_dma_ops arch_setup_dma_ops
++
++/* do not use this function in a driver */
++static inline bool is_device_dma_coherent(struct device *dev)
++{
++ return dev->archdata.dma_coherent;
+ }
+-#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+
+ #include
+
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 75825b6..f58e31a 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -249,6 +249,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
+ #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+ #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+ #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
++#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS))
+ #define iounmap __iounmap
+
+ #define ARCH_HAS_IOREMAP_WC
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index 101a42b..8ec41e5 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
+ : "r" (ttbr));
+ }
+
++/*
++ * TCR.T0SZ value to use when the ID map is active. Usually equals
++ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
++ * physical memory, in which case it will be smaller.
++ */
++extern u64 idmap_t0sz;
++
++static inline bool __cpu_uses_extended_idmap(void)
++{
++ return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
++ unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
++}
++
++static inline void __cpu_set_tcr_t0sz(u64 t0sz)
++{
++ unsigned long tcr;
++
++ if (__cpu_uses_extended_idmap())
++ asm volatile (
++ " mrs %0, tcr_el1 ;"
++ " bfi %0, %1, %2, %3 ;"
++ " msr tcr_el1, %0 ;"
++ " isb"
++ : "=&r" (tcr)
++ : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
++}
++
++/*
++ * Set TCR.T0SZ to the value appropriate for activating the identity map.
++ */
++static inline void cpu_set_idmap_tcr_t0sz(void)
++{
++ __cpu_set_tcr_t0sz(idmap_t0sz);
++}
++
++/*
++ * Set TCR.T0SZ to its default value (based on VA_BITS)
++ */
++static inline void cpu_set_default_tcr_t0sz(void)
++{
++ __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
++}
++
+ static inline void switch_new_context(struct mm_struct *mm)
+ {
+ unsigned long flags;
+diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
+index 22b1623..3d02b18 100644
+--- a/arch/arm64/include/asm/page.h
++++ b/arch/arm64/include/asm/page.h
+@@ -33,7 +33,9 @@
+ * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
+ * map the kernel. With the 64K page configuration, swapper and idmap need to
+ * map to pte level. The swapper also maps the FDT (see __create_page_tables
+- * for more information).
++ * for more information). Note that the number of ID map translation levels
++ * could be increased on the fly if system RAM is out of reach for the default
++ * VA range, so 3 pages are reserved in all cases.
+ */
+ #ifdef CONFIG_ARM64_64K_PAGES
+ #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS)
+@@ -42,7 +44,7 @@
+ #endif
+
+ #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
+-#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE)
++#define IDMAP_DIR_SIZE (3 * PAGE_SIZE)
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index 88174e0..500b74e 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -142,7 +142,12 @@
+ /*
+ * TCR flags.
+ */
+-#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
++#define TCR_T0SZ_OFFSET 0
++#define TCR_T1SZ_OFFSET 16
++#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)
++#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET)
++#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
++#define TCR_TxSZ_WIDTH 6
+ #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24))
+ #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24))
+ #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24))
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 41a43bf..9b417b8 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+ #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+ #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+ #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
++#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+
+ #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+@@ -321,6 +322,13 @@ static inline int has_transparent_hugepage(void)
+ #define pgprot_device(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+ #define __HAVE_PHYS_MEM_ACCESS_PROT
++#define pgprot_cached_ns(prot) \
++ __pgprot(pgprot_val(pgprot_cached(prot)) & ~PTE_SHARED)
++#define pgprot_cached(prot) \
++ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
++ PTE_PXN | PTE_UXN)
++
++
+ struct file;
+ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 2877dd8..ca02239 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -592,6 +592,43 @@ __create_page_tables:
+ mov x0, x25 // idmap_pg_dir
+ ldr x3, =KERNEL_START
+ add x3, x3, x28 // __pa(KERNEL_START)
++
++#ifndef CONFIG_ARM64_VA_BITS_48
++#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
++#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT))
++
++ /*
++ * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
++ * created that covers system RAM if that is located sufficiently high
++ * in the physical address space. So for the ID map, use an extended
++ * virtual range in that case, by configuring an additional translation
++ * level.
++ * First, we have to verify our assumption that the current value of
++ * VA_BITS was chosen such that all translation levels are fully
++ * utilised, and that lowering T0SZ will always result in an additional
++ * translation level to be configured.
++ */
++#if VA_BITS != EXTRA_SHIFT
++#error "Mismatch between VA_BITS and page size/number of translation levels"
++#endif
++
++ /*
++ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
++ * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
++ * this number conveniently equals the number of leading zeroes in
++ * the physical address of KERNEL_END.
++ */
++ adrp x5, KERNEL_END
++ clz x5, x5
++ cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
++ b.ge 1f // .. then skip additional level
++
++ str_l x5, idmap_t0sz, x6
++
++ create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
++1:
++#endif
++
+ create_pgd_entry x0, x3, x5, x6
+ ldr x6, =KERNEL_END
+ mov x5, x3 // __pa(KERNEL_START)
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 0ef8789..5657692 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -152,6 +152,7 @@ asmlinkage void secondary_start_kernel(void)
+ */
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
++ cpu_set_default_tcr_t0sz();
+
+ preempt_disable();
+ trace_hardirqs_off();
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index f4f8b50..53bbff9 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -37,6 +37,8 @@
+
+ #include "mm.h"
+
++u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
++
+ /*
+ * Empty_zero_page is a special page that is used for zero-initialized data
+ * and COW.
+@@ -369,6 +371,7 @@ void __init paging_init(void)
+ */
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
++ cpu_set_default_tcr_t0sz();
+ }
+
+ /*
+@@ -376,8 +379,10 @@ void __init paging_init(void)
+ */
+ void setup_mm_for_reboot(void)
+ {
+- cpu_switch_mm(idmap_pg_dir, &init_mm);
++ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
++ cpu_set_idmap_tcr_t0sz();
++ cpu_switch_mm(idmap_pg_dir, &init_mm);
+ }
+
+ /*
+diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
+index 005d29e..4c4d93c 100644
+--- a/arch/arm64/mm/proc-macros.S
++++ b/arch/arm64/mm/proc-macros.S
+@@ -52,3 +52,13 @@
+ mov \reg, #4 // bytes per word
+ lsl \reg, \reg, \tmp // actual cache line size
+ .endm
++
++/*
++ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
++ */
++ .macro tcr_set_idmap_t0sz, valreg, tmpreg
++#ifndef CONFIG_ARM64_VA_BITS_48
++ ldr_l \tmpreg, idmap_t0sz
++ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
++#endif
++ .endm
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 4e778b1..cbea872 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume)
+ msr cpacr_el1, x6
+ msr ttbr0_el1, x1
+ msr ttbr1_el1, x7
++ tcr_set_idmap_t0sz x8, x7
+ msr tcr_el1, x8
+ msr vbar_el1, x9
+ msr mdscr_el1, x10
+@@ -233,6 +234,8 @@ ENTRY(__cpu_setup)
+ */
+ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
++ tcr_set_idmap_t0sz x10, x9
++
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
+ * TCR_EL1.
+diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
+index 8c3730c..8ae36ea 100644
+--- a/arch/ia64/kernel/msi_ia64.c
++++ b/arch/ia64/kernel/msi_ia64.c
+@@ -35,7 +35,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
+ data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+ msg.data = data;
+
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ cpumask_copy(idata->affinity, cpumask_of(cpu));
+
+ return 0;
+@@ -71,7 +71,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+ MSI_DATA_DELIVERY_FIXED |
+ MSI_DATA_VECTOR(vector);
+
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
+
+ return 0;
+@@ -102,8 +102,8 @@ static int ia64_msi_retrigger_irq(struct irq_data *data)
+ */
+ static struct irq_chip ia64_msi_chip = {
+ .name = "PCI-MSI",
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
+ .irq_ack = ia64_ack_msi_irq,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = ia64_set_msi_irq_affinity,
+diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
+index 446e779..a0eb27b 100644
+--- a/arch/ia64/sn/kernel/msi_sn.c
++++ b/arch/ia64/sn/kernel/msi_sn.c
+@@ -145,7 +145,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
+ msg.data = 0x100 + irq;
+
+ irq_set_msi_desc(irq, entry);
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
+
+ return 0;
+@@ -205,7 +205,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
+ msg.address_hi = (u32)(bus_addr >> 32);
+ msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
+
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ cpumask_copy(data->affinity, cpu_mask);
+
+ return 0;
+@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(struct irq_data *data)
+
+ static struct irq_chip sn_msi_chip = {
+ .name = "PCI-MSI",
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
+ .irq_ack = sn_ack_msi_irq,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = sn_set_msi_irq_affinity,
+diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
+index 63bbe07..cffaaf4 100644
+--- a/arch/mips/pci/msi-octeon.c
++++ b/arch/mips/pci/msi-octeon.c
+@@ -178,7 +178,7 @@ msi_irq_allocated:
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+
+ irq_set_msi_desc(irq, desc);
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ return 0;
+ }
+
+diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
+index f7ac3ed..6a40f24 100644
+--- a/arch/mips/pci/msi-xlp.c
++++ b/arch/mips/pci/msi-xlp.c
+@@ -217,7 +217,7 @@ static void xlp_msix_mask_ack(struct irq_data *d)
+
+ msixvec = nlm_irq_msixvec(d->irq);
+ link = nlm_irq_msixlink(msixvec);
+- mask_msi_irq(d);
++ pci_msi_mask_irq(d);
+ md = irq_data_get_irq_handler_data(d);
+
+ /* Ack MSI on bridge */
+@@ -239,10 +239,10 @@ static void xlp_msix_mask_ack(struct irq_data *d)
+
+ static struct irq_chip xlp_msix_chip = {
+ .name = "XLP-MSIX",
+- .irq_enable = unmask_msi_irq,
+- .irq_disable = mask_msi_irq,
++ .irq_enable = pci_msi_unmask_irq,
++ .irq_disable = pci_msi_mask_irq,
+ .irq_mask_ack = xlp_msix_mask_ack,
+- .irq_unmask = unmask_msi_irq,
++ .irq_unmask = pci_msi_unmask_irq,
+ };
+
+ void arch_teardown_msi_irq(unsigned int irq)
+@@ -345,7 +345,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
+ if (ret < 0)
+ return ret;
+
+- write_msi_msg(xirq, &msg);
++ pci_write_msi_msg(xirq, &msg);
+ return 0;
+ }
+
+@@ -446,7 +446,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
+ if (ret < 0)
+ return ret;
+
+- write_msi_msg(xirq, &msg);
++ pci_write_msi_msg(xirq, &msg);
+ return 0;
+ }
+
+diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
+index 0dde803..26d2dab 100644
+--- a/arch/mips/pci/pci-xlr.c
++++ b/arch/mips/pci/pci-xlr.c
+@@ -260,7 +260,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+ if (ret < 0)
+ return ret;
+
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ return 0;
+ }
+ #endif
+diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
+deleted file mode 100644
+index 3bef74a..0000000
+--- a/arch/powerpc/include/asm/mpc85xx.h
++++ /dev/null
+@@ -1,94 +0,0 @@
+-/*
+- * MPC85xx cpu type detection
+- *
+- * Copyright 2011-2012 Freescale Semiconductor, Inc.
+- *
+- * This is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
+-
+-#ifndef __ASM_PPC_MPC85XX_H
+-#define __ASM_PPC_MPC85XX_H
+-
+-#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */
+-#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/
+-#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/
+-
+-/* Some parts define SVR[0:23] as the SOC version */
+-#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+-
+-#define SVR_8533 0x803400
+-#define SVR_8535 0x803701
+-#define SVR_8536 0x803700
+-#define SVR_8540 0x803000
+-#define SVR_8541 0x807200
+-#define SVR_8543 0x803200
+-#define SVR_8544 0x803401
+-#define SVR_8545 0x803102
+-#define SVR_8547 0x803101
+-#define SVR_8548 0x803100
+-#define SVR_8555 0x807100
+-#define SVR_8560 0x807000
+-#define SVR_8567 0x807501
+-#define SVR_8568 0x807500
+-#define SVR_8569 0x808000
+-#define SVR_8572 0x80E000
+-#define SVR_P1010 0x80F100
+-#define SVR_P1011 0x80E500
+-#define SVR_P1012 0x80E501
+-#define SVR_P1013 0x80E700
+-#define SVR_P1014 0x80F101
+-#define SVR_P1017 0x80F700
+-#define SVR_P1020 0x80E400
+-#define SVR_P1021 0x80E401
+-#define SVR_P1022 0x80E600
+-#define SVR_P1023 0x80F600
+-#define SVR_P1024 0x80E402
+-#define SVR_P1025 0x80E403
+-#define SVR_P2010 0x80E300
+-#define SVR_P2020 0x80E200
+-#define SVR_P2040 0x821000
+-#define SVR_P2041 0x821001
+-#define SVR_P3041 0x821103
+-#define SVR_P4040 0x820100
+-#define SVR_P4080 0x820000
+-#define SVR_P5010 0x822100
+-#define SVR_P5020 0x822000
+-#define SVR_P5021 0X820500
+-#define SVR_P5040 0x820400
+-#define SVR_T4240 0x824000
+-#define SVR_T4120 0x824001
+-#define SVR_T4160 0x824100
+-#define SVR_C291 0x850000
+-#define SVR_C292 0x850020
+-#define SVR_C293 0x850030
+-#define SVR_B4860 0X868000
+-#define SVR_G4860 0x868001
+-#define SVR_G4060 0x868003
+-#define SVR_B4440 0x868100
+-#define SVR_G4440 0x868101
+-#define SVR_B4420 0x868102
+-#define SVR_B4220 0x868103
+-#define SVR_T1040 0x852000
+-#define SVR_T1041 0x852001
+-#define SVR_T1042 0x852002
+-#define SVR_T1020 0x852100
+-#define SVR_T1021 0x852101
+-#define SVR_T1022 0x852102
+-#define SVR_T2080 0x853000
+-#define SVR_T2081 0x853100
+-
+-#define SVR_8610 0x80A000
+-#define SVR_8641 0x809000
+-#define SVR_8641D 0x809001
+-
+-#define SVR_9130 0x860001
+-#define SVR_9131 0x860000
+-#define SVR_9132 0x861000
+-#define SVR_9232 0x861400
+-
+-#define SVR_Unknown 0xFFFFFF
+-
+-#endif
+diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+index ca3a062..11090ab 100644
+--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
++++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+@@ -123,7 +123,8 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
+ }
+
+ static int
+-cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
++cpld_pic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ return cpld_pic_node == node;
+ }
+diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+index a392e94..f0be439 100644
+--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
++++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+@@ -34,6 +34,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #include
+@@ -51,7 +52,6 @@
+ #include
+ #include
+ #include
+-#include
+ #include "smp.h"
+
+ #include "mpc85xx.h"
+diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+index e358bed..50dcc00 100644
+--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
++++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+@@ -17,6 +17,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #include
+@@ -27,7 +28,6 @@
+ #include
+ #include
+ #include
+-#include
+
+ #include
+ #include
+diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
+index 6ac986d..371df82 100644
+--- a/arch/powerpc/platforms/85xx/p1022_ds.c
++++ b/arch/powerpc/platforms/85xx/p1022_ds.c
+@@ -16,6 +16,7 @@
+ * kind, whether express or implied.
+ */
+
++#include
+ #include
+ #include
+ #include
+@@ -25,7 +26,6 @@
+ #include
+ #include
+ #include
+-#include
+ #include
+ #include "smp.h"
+
+diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
+index 7a180f0..4f8fc5f 100644
+--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
++++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
+@@ -12,6 +12,7 @@
+ * kind, whether express or implied.
+ */
+
++#include
+ #include
+ #include
+ #include
+@@ -21,7 +22,6 @@
+ #include
+ #include
+ #include
+-#include
+ #include "smp.h"
+
+ #include "mpc85xx.h"
+diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
+index d7c1e69..3956455 100644
+--- a/arch/powerpc/platforms/85xx/smp.c
++++ b/arch/powerpc/platforms/85xx/smp.c
+@@ -19,6 +19,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #include
+@@ -26,7 +27,6 @@
+ #include
+ #include
+ #include
+-#include
+ #include
+ #include
+
+diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
+index 1eadb6d..2799120 100644
+--- a/arch/powerpc/platforms/85xx/twr_p102x.c
++++ b/arch/powerpc/platforms/85xx/twr_p102x.c
+@@ -15,6 +15,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+
+@@ -23,7 +24,6 @@
+ #include
+ #include
+ #include
+-#include
+
+ #include
+ #include
+diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+index 55413a5..437a9c3 100644
+--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
++++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+@@ -24,6 +24,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #include
+@@ -38,7 +39,6 @@
+ #include
+ #include
+ #include
+-#include
+
+ #include "mpc86xx.h"
+
+diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
+index 862b327..0883994 100644
+--- a/arch/powerpc/platforms/cell/axon_msi.c
++++ b/arch/powerpc/platforms/cell/axon_msi.c
+@@ -279,7 +279,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+
+ irq_set_msi_desc(virq, entry);
+ msg.data = virq;
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+ }
+
+ return 0;
+@@ -301,9 +301,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
+ }
+
+ static struct irq_chip msic_irq_chip = {
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
+- .irq_shutdown = mask_msi_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++ .irq_shutdown = pci_msi_mask_irq,
+ .name = "AXON-MSI",
+ };
+
+diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
+index 28e558d..109d236 100644
+--- a/arch/powerpc/platforms/cell/interrupt.c
++++ b/arch/powerpc/platforms/cell/interrupt.c
+@@ -222,7 +222,8 @@ void iic_request_IPIs(void)
+ #endif /* CONFIG_SMP */
+
+
+-static int iic_host_match(struct irq_domain *h, struct device_node *node)
++static int iic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ return of_device_is_compatible(node,
+ "IBM,CBEA-Internal-Interrupt-Controller");
+diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+index 4cde8e7..b7866e0 100644
+--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
++++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+@@ -108,7 +108,8 @@ static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
+ return 0;
+ }
+
+-static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
++static int flipper_pic_match(struct irq_domain *h, struct device_node *np,
++ enum irq_domain_bus_token bus_token)
+ {
+ return 1;
+ }
+diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
+index 4c24bf6..246cab4 100644
+--- a/arch/powerpc/platforms/powermac/pic.c
++++ b/arch/powerpc/platforms/powermac/pic.c
+@@ -268,7 +268,8 @@ static struct irqaction gatwick_cascade_action = {
+ .name = "cascade",
+ };
+
+-static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
++static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ /* We match all, we don't always have a node anyway */
+ return 1;
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index 9ff55d5..019991d 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -90,7 +90,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ return rc;
+ }
+ irq_set_msi_desc(virq, entry);
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+ }
+ return 0;
+ }
+diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
+index 5f3b232..df0c086 100644
+--- a/arch/powerpc/platforms/ps3/interrupt.c
++++ b/arch/powerpc/platforms/ps3/interrupt.c
+@@ -678,7 +678,8 @@ static int ps3_host_map(struct irq_domain *h, unsigned int virq,
+ return 0;
+ }
+
+-static int ps3_host_match(struct irq_domain *h, struct device_node *np)
++static int ps3_host_match(struct irq_domain *h, struct device_node *np,
++ enum irq_domain_bus_token bus_token)
+ {
+ /* Match all */
+ return 1;
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index 8b909e9..691a154 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -476,7 +476,7 @@ again:
+ irq_set_msi_desc(virq, entry);
+
+ /* Read config space back so we can restore after reset */
+- __read_msi_msg(entry, &msg);
++ __pci_read_msi_msg(entry, &msg);
+ entry->msg = msg;
+ }
+
+diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
+index 2d20f10..eca0b00 100644
+--- a/arch/powerpc/sysdev/ehv_pic.c
++++ b/arch/powerpc/sysdev/ehv_pic.c
+@@ -177,7 +177,8 @@ unsigned int ehv_pic_get_irq(void)
+ return irq_linear_revmap(global_ehv_pic->irqhost, irq);
+ }
+
+-static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
++static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ /* Exact match, unless ehv_pic node is NULL */
+ return h->of_node == NULL || h->of_node == node;
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index ea6b3a1..f13282c 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -82,8 +82,8 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
+
+
+ static struct irq_chip fsl_msi_chip = {
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
+ .irq_ack = fsl_msi_end_irq,
+ .irq_print_chip = fsl_msi_print_chip,
+ };
+@@ -243,7 +243,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ irq_set_msi_desc(virq, entry);
+
+ fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+ }
+ return 0;
+
+diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
+index 45598da..8c3756c 100644
+--- a/arch/powerpc/sysdev/i8259.c
++++ b/arch/powerpc/sysdev/i8259.c
+@@ -162,7 +162,8 @@ static struct resource pic_edgectrl_iores = {
+ .flags = IORESOURCE_BUSY,
+ };
+
+-static int i8259_host_match(struct irq_domain *h, struct device_node *node)
++static int i8259_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ return h->of_node == NULL || h->of_node == node;
+ }
+diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
+index b50f978..1b9b00f 100644
+--- a/arch/powerpc/sysdev/ipic.c
++++ b/arch/powerpc/sysdev/ipic.c
+@@ -672,7 +672,8 @@ static struct irq_chip ipic_edge_irq_chip = {
+ .irq_set_type = ipic_set_irq_type,
+ };
+
+-static int ipic_host_match(struct irq_domain *h, struct device_node *node)
++static int ipic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ /* Exact match, unless ipic node is NULL */
+ return h->of_node == NULL || h->of_node == node;
+diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
+index 89cec0e..bf6f77e 100644
+--- a/arch/powerpc/sysdev/mpic.c
++++ b/arch/powerpc/sysdev/mpic.c
+@@ -1009,7 +1009,8 @@ static struct irq_chip mpic_irq_ht_chip = {
+ #endif /* CONFIG_MPIC_U3_HT_IRQS */
+
+
+-static int mpic_host_match(struct irq_domain *h, struct device_node *node)
++static int mpic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ /* Exact match, unless mpic node is NULL */
+ return h->of_node == NULL || h->of_node == node;
+diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+index a6add4a..5a4c474 100644
+--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+@@ -42,7 +42,7 @@ static struct mpic *msi_mpic;
+ static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
+ {
+ pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
+- mask_msi_irq(data);
++ pci_msi_mask_irq(data);
+ mpic_mask_irq(data);
+ }
+
+@@ -50,7 +50,7 @@ static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
+ {
+ pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
+ mpic_unmask_irq(data);
+- unmask_msi_irq(data);
++ pci_msi_unmask_irq(data);
+ }
+
+ static struct irq_chip mpic_pasemi_msi_chip = {
+@@ -138,7 +138,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ * register to generate MSI [512...1023]
+ */
+ msg.data = hwirq-0x200;
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+ }
+
+ return 0;
+diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
+index db35a40..65880cc 100644
+--- a/arch/powerpc/sysdev/mpic_u3msi.c
++++ b/arch/powerpc/sysdev/mpic_u3msi.c
+@@ -25,14 +25,14 @@ static struct mpic *msi_mpic;
+
+ static void mpic_u3msi_mask_irq(struct irq_data *data)
+ {
+- mask_msi_irq(data);
++ pci_msi_mask_irq(data);
+ mpic_mask_irq(data);
+ }
+
+ static void mpic_u3msi_unmask_irq(struct irq_data *data)
+ {
+ mpic_unmask_irq(data);
+- unmask_msi_irq(data);
++ pci_msi_unmask_irq(data);
+ }
+
+ static struct irq_chip mpic_u3msi_chip = {
+@@ -172,7 +172,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
+ virq, hwirq, (unsigned long)addr);
+ msg.data = hwirq;
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+
+ hwirq++;
+ }
+diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+index a6a4dbd..908105f 100644
+--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
++++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+@@ -85,7 +85,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1);
+ return -EINVAL;
+ }
+- write_msi_msg(hwirq, &msg);
++ pci_write_msi_msg(hwirq, &msg);
+ }
+
+ return 0;
+diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
+index 85d9c18..c6df3e2 100644
+--- a/arch/powerpc/sysdev/ppc4xx_msi.c
++++ b/arch/powerpc/sysdev/ppc4xx_msi.c
+@@ -116,7 +116,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+
+ irq_set_msi_desc(virq, entry);
+ msg.data = int_no;
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+ }
+ return 0;
+ }
+diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
+index b2b87c3..a433b3d 100644
+--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
++++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
+@@ -245,7 +245,8 @@ static struct irq_chip qe_ic_irq_chip = {
+ .irq_mask_ack = qe_ic_mask_irq,
+ };
+
+-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
++static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ /* Exact match, unless qe_ic node is NULL */
+ return h->of_node == NULL || h->of_node == node;
+diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
+index 3c6ee1b..4ba554e 100644
+--- a/arch/powerpc/sysdev/xics/ics-opal.c
++++ b/arch/powerpc/sysdev/xics/ics-opal.c
+@@ -73,7 +73,7 @@ static unsigned int ics_opal_startup(struct irq_data *d)
+ * at that level, so we do it here by hand.
+ */
+ if (d->msi_desc)
+- unmask_msi_irq(d);
++ pci_msi_unmask_irq(d);
+ #endif
+
+ /* unmask it */
+diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
+index 936575d..bc81335 100644
+--- a/arch/powerpc/sysdev/xics/ics-rtas.c
++++ b/arch/powerpc/sysdev/xics/ics-rtas.c
+@@ -76,7 +76,7 @@ static unsigned int ics_rtas_startup(struct irq_data *d)
+ * at that level, so we do it here by hand.
+ */
+ if (d->msi_desc)
+- unmask_msi_irq(d);
++ pci_msi_unmask_irq(d);
+ #endif
+ /* unmask it */
+ ics_rtas_unmask_irq(d);
+diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
+index fe0cca4..13ab716 100644
+--- a/arch/powerpc/sysdev/xics/xics-common.c
++++ b/arch/powerpc/sysdev/xics/xics-common.c
+@@ -300,7 +300,8 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+ }
+ #endif /* CONFIG_SMP */
+
+-static int xics_host_match(struct irq_domain *h, struct device_node *node)
++static int xics_host_match(struct irq_domain *h, struct device_node *node,
++ enum irq_domain_bus_token bus_token)
+ {
+ struct ics *ics;
+
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 2fa7b14..d59c825 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock);
+
+ static struct irq_chip zpci_irq_chip = {
+ .name = "zPCI",
+- .irq_unmask = unmask_msi_irq,
+- .irq_mask = mask_msi_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++ .irq_mask = pci_msi_mask_irq,
+ };
+
+ static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
+@@ -403,7 +403,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ msg.data = hwirq;
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ msg.address_hi = zdev->msi_addr >> 32;
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ airq_iv_set_data(zdev->aibv, hwirq, irq);
+ hwirq++;
+ }
+@@ -448,9 +448,9 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
+ /* Release MSI interrupts */
+ list_for_each_entry(msi, &pdev->msi_list, list) {
+ if (msi->msi_attrib.is_msix)
+- default_msix_mask_irq(msi, 1);
++ __pci_msix_desc_mask_irq(msi, 1);
+ else
+- default_msi_mask_irq(msi, 1, 1);
++ __pci_msi_desc_mask_irq(msi, 1, 1);
+ irq_set_msi_desc(msi->irq, NULL);
+ irq_free_desc(msi->irq);
+ msi->msg.address_lo = 0;
+diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
+index 580651a..84e16d8 100644
+--- a/arch/sparc/kernel/pci_msi.c
++++ b/arch/sparc/kernel/pci_msi.c
+@@ -111,10 +111,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
+
+ static struct irq_chip msi_irq = {
+ .name = "PCI-MSI",
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
+- .irq_enable = unmask_msi_irq,
+- .irq_disable = mask_msi_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++ .irq_enable = pci_msi_unmask_irq,
++ .irq_disable = pci_msi_mask_irq,
+ /* XXX affinity XXX */
+ };
+
+@@ -161,7 +161,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p,
+ msg.data = msi;
+
+ irq_set_msi_desc(*irq_p, entry);
+- write_msi_msg(*irq_p, &msg);
++ pci_write_msi_msg(*irq_p, &msg);
+
+ return 0;
+
+diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
+index e39f9c5..e717af2 100644
+--- a/arch/tile/kernel/pci_gx.c
++++ b/arch/tile/kernel/pci_gx.c
+@@ -1453,7 +1453,7 @@ static struct pci_ops tile_cfg_ops = {
+ static unsigned int tilegx_msi_startup(struct irq_data *d)
+ {
+ if (d->msi_desc)
+- unmask_msi_irq(d);
++ pci_msi_unmask_irq(d);
+
+ return 0;
+ }
+@@ -1465,14 +1465,14 @@ static void tilegx_msi_ack(struct irq_data *d)
+
+ static void tilegx_msi_mask(struct irq_data *d)
+ {
+- mask_msi_irq(d);
++ pci_msi_mask_irq(d);
+ __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
+ }
+
+ static void tilegx_msi_unmask(struct irq_data *d)
+ {
+ __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
+- unmask_msi_irq(d);
++ pci_msi_unmask_irq(d);
+ }
+
+ static struct irq_chip tilegx_msi_chip = {
+@@ -1590,7 +1590,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+ msg.address_hi = msi_addr >> 32;
+ msg.address_lo = msi_addr & 0xffffffff;
+
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+ irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
+ irq_set_handler_data(irq, controller);
+
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index e45e4da..f58a9c7 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -172,7 +172,6 @@ struct x86_platform_ops {
+
+ struct pci_dev;
+ struct msi_msg;
+-struct msi_desc;
+
+ struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+@@ -183,8 +182,6 @@ struct x86_msi_ops {
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+ void (*restore_msi_irqs)(struct pci_dev *dev);
+ int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
+- u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
+- u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
+ };
+
+ struct IO_APIC_route_entry;
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 1183d54..7ffe0a2 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -3158,7 +3158,7 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+- __write_msi_msg(data->msi_desc, &msg);
++ __pci_write_msi_msg(data->msi_desc, &msg);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+ }
+@@ -3169,8 +3169,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+ */
+ static struct irq_chip msi_chip = {
+ .name = "PCI-MSI",
+- .irq_unmask = unmask_msi_irq,
+- .irq_mask = mask_msi_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++ .irq_mask = pci_msi_mask_irq,
+ .irq_ack = ack_apic_edge,
+ .irq_set_affinity = msi_set_affinity,
+ .irq_retrigger = ioapic_retrigger_irq,
+@@ -3196,7 +3196,7 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
+ */
+ if (!irq_offset)
+- write_msi_msg(irq, &msg);
++ pci_write_msi_msg(irq, &msg);
+
+ setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index e48b674..234b072 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -116,8 +116,6 @@ struct x86_msi_ops x86_msi = {
+ .teardown_msi_irqs = default_teardown_msi_irqs,
+ .restore_msi_irqs = default_restore_msi_irqs,
+ .setup_hpet_msi = default_setup_hpet_msi,
+- .msi_mask_irq = default_msi_mask_irq,
+- .msix_mask_irq = default_msix_mask_irq,
+ };
+
+ /* MSI arch specific hooks */
+@@ -140,14 +138,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
+ {
+ x86_msi.restore_msi_irqs(dev);
+ }
+-u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+-{
+- return x86_msi.msi_mask_irq(desc, mask, flag);
+-}
+-u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
+-{
+- return x86_msi.msix_mask_irq(desc, flag);
+-}
+ #endif
+
+ struct x86_io_apic_ops x86_io_apic_ops = {
+diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
+index f3a2cfc..7bcf06a 100644
+--- a/arch/x86/pci/bus_numa.c
++++ b/arch/x86/pci/bus_numa.c
+@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
+ {
+ struct pci_root_info *info = x86_find_pci_root_info(bus);
+ struct pci_root_res *root_res;
+- struct pci_host_bridge_window *window;
++ struct resource_entry *window;
+ bool found = false;
+
+ if (!info)
+@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
+ bus);
+
+ /* already added by acpi ? */
+- list_for_each_entry(window, resources, list)
++ resource_list_for_each_entry(window, resources)
+ if (window->res->flags & IORESOURCE_BUS) {
+ found = true;
+ break;
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 6b3cf7c..878fb8e 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -229,7 +229,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ return 1;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+- __read_msi_msg(msidesc, &msg);
++ __pci_read_msi_msg(msidesc, &msg);
+ pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+ ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
+ if (msg.data != XEN_PIRQ_MSI_DATA ||
+@@ -240,7 +240,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ goto error;
+ }
+ xen_msi_compose_msg(dev, pirq, &msg);
+- __write_msi_msg(msidesc, &msg);
++ __pci_write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
+ } else {
+ dev_dbg(&dev->dev,
+@@ -296,12 +296,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ map_irq.entry_nr = nvec;
+ } else if (type == PCI_CAP_ID_MSIX) {
+ int pos;
++ unsigned long flags;
+ u32 table_offset, bir;
+
+ pos = dev->msix_cap;
+ pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
++ flags = pci_resource_flags(dev, bir);
++ if (!flags || (flags & IORESOURCE_UNSET))
++ return -EINVAL;
+
+ map_irq.table_base = pci_resource_start(dev, bir);
+ map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
+@@ -394,14 +398,7 @@ static void xen_teardown_msi_irq(unsigned int irq)
+ {
+ xen_destroy_irq(irq);
+ }
+-static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+-{
+- return 0;
+-}
+-static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag)
+-{
+- return 0;
+-}
++
+ #endif
+
+ int __init pci_xen_init(void)
+@@ -425,8 +422,7 @@ int __init pci_xen_init(void)
+ x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+ x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
+- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
+- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
++ pci_msi_ignore_mask = 1;
+ #endif
+ return 0;
+ }
+@@ -460,8 +456,7 @@ int __init pci_xen_initial_domain(void)
+ x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+ x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
+- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
+- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
++ pci_msi_ignore_mask = 1;
+ #endif
+ __acpi_register_gsi = acpi_register_gsi_xen;
+ /* Pre-allocate legacy irqs */
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index fdb5701..0ad0ce6 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -325,7 +325,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
+ {
+ struct lpss_device_desc *dev_desc;
+ struct lpss_private_data *pdata;
+- struct resource_list_entry *rentry;
++ struct resource_entry *rentry;
+ struct list_head resource_list;
+ struct platform_device *pdev;
+ int ret;
+@@ -345,12 +345,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
+ goto err_out;
+
+ list_for_each_entry(rentry, &resource_list, node)
+- if (resource_type(&rentry->res) == IORESOURCE_MEM) {
++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ if (dev_desc->prv_size_override)
+ pdata->mmio_size = dev_desc->prv_size_override;
+ else
+- pdata->mmio_size = resource_size(&rentry->res);
+- pdata->mmio_base = ioremap(rentry->res.start,
++ pdata->mmio_size = resource_size(rentry->res);
++ pdata->mmio_base = ioremap(rentry->res->start,
+ pdata->mmio_size);
+ break;
+ }
+diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
+index 6ba8beb..1284138 100644
+--- a/drivers/acpi/acpi_platform.c
++++ b/drivers/acpi/acpi_platform.c
+@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+ struct platform_device *pdev = NULL;
+ struct acpi_device *acpi_parent;
+ struct platform_device_info pdevinfo;
+- struct resource_list_entry *rentry;
++ struct resource_entry *rentry;
+ struct list_head resource_list;
+ struct resource *resources = NULL;
+ int count;
+@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node)
+- resources[count++] = rentry->res;
++ resources[count++] = *rentry->res;
+
+ acpi_dev_free_resource_list(&resource_list);
+ }
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 2ba8f02..e7f4aa0 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -415,12 +415,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
+ */
+ void acpi_dev_free_resource_list(struct list_head *list)
+ {
+- struct resource_list_entry *rentry, *re;
+-
+- list_for_each_entry_safe(rentry, re, list, node) {
+- list_del(&rentry->node);
+- kfree(rentry);
+- }
++ resource_list_free(list);
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
+
+@@ -435,15 +430,15 @@ struct res_proc_context {
+ static acpi_status acpi_dev_new_resource_entry(struct resource *r,
+ struct res_proc_context *c)
+ {
+- struct resource_list_entry *rentry;
++ struct resource_entry *rentry;
+
+- rentry = kmalloc(sizeof(*rentry), GFP_KERNEL);
++ rentry = resource_list_create_entry(NULL, 0);
+ if (!rentry) {
+ c->error = -ENOMEM;
+ return AE_NO_MEMORY;
+ }
+- rentry->res = *r;
+- list_add_tail(&rentry->node, c->list);
++ *rentry->res = *r;
++ resource_list_add_tail(rentry, c->list);
+ c->count++;
+ return AE_OK;
+ }
+@@ -503,7 +498,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
+ * returned as the final error code.
+ *
+ * The resultant struct resource objects are put on the list pointed to by
+- * @list, that must be empty initially, as members of struct resource_list_entry
++ * @list, that must be empty initially, as members of struct resource_entry
+ * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
+ * free that list.
+ *
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 842d047..4c7a18f 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -661,6 +661,9 @@ void device_initialize(struct device *dev)
+ INIT_LIST_HEAD(&dev->devres_head);
+ device_pm_init(dev);
+ set_dev_node(dev, -1);
++#ifdef CONFIG_GENERIC_MSI_IRQ
++ INIT_LIST_HEAD(&dev->msi_list);
++#endif
+ }
+ EXPORT_SYMBOL_GPL(device_initialize);
+
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 317e0e4..b387fb9 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -1011,6 +1011,7 @@ int __init platform_bus_init(void)
+ error = bus_register(&platform_bus_type);
+ if (error)
+ device_unregister(&platform_bus);
++ of_platform_register_reconfig_notifier();
+ return error;
+ }
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 6cb1beb..12678be 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -692,6 +692,24 @@ static inline int is_loop_device(struct file *file)
+ return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+ }
+
++/*
++ * for AUFS
++ * no get/put for file.
++ */
++struct file *loop_backing_file(struct super_block *sb)
++{
++ struct file *ret;
++ struct loop_device *l;
++
++ ret = NULL;
++ if (MAJOR(sb->s_dev) == LOOP_MAJOR) {
++ l = sb->s_bdev->bd_disk->private_data;
++ ret = l->lo_backing_file;
++ }
++ return ret;
++}
++EXPORT_SYMBOL_GPL(loop_backing_file);
++
+ /* loop sysfs attributes */
+
+ static ssize_t loop_attr_show(struct device *dev, char *page,
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index 455fd17..38c8814 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -101,12 +101,12 @@ config COMMON_CLK_AXI_CLKGEN
+ Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
+ FPGAs. It is commonly used in Analog Devices' reference designs.
+
+-config CLK_PPC_CORENET
+- bool "Clock driver for PowerPC corenet platforms"
+- depends on PPC_E500MC && OF
++config CLK_QORIQ
++ bool "Clock driver for Freescale QorIQ platforms"
++ depends on (PPC_E500MC || ARM || ARM64) && OF
+ ---help---
+- This adds the clock driver support for Freescale PowerPC corenet
+- platforms using common clock framework.
++ This adds the clock driver support for Freescale QorIQ platforms
++ using common clock framework.
+
+ config COMMON_CLK_XGENE
+ bool "Clock driver for APM XGene SoC"
+diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
+index d5fba5b..4ff94cd 100644
+--- a/drivers/clk/Makefile
++++ b/drivers/clk/Makefile
+@@ -30,7 +30,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
+ obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+ obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
+ obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
+-obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
++obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
+ obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
+ obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
+ obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+new file mode 100644
+index 0000000..74051c9
+--- /dev/null
++++ b/drivers/clk/clk-qoriq.c
+@@ -0,0 +1,1256 @@
++/*
++ * Copyright 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * clock driver for Freescale QorIQ SoCs.
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#define PLL_DIV1 0
++#define PLL_DIV2 1
++#define PLL_DIV3 2
++#define PLL_DIV4 3
++
++#define PLATFORM_PLL 0
++#define CGA_PLL1 1
++#define CGA_PLL2 2
++#define CGA_PLL3 3
++#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
++#define CGB_PLL1 4
++#define CGB_PLL2 5
++
++struct clockgen_pll_div {
++ struct clk *clk;
++ char name[32];
++};
++
++struct clockgen_pll {
++ struct clockgen_pll_div div[4];
++};
++
++#define CLKSEL_VALID 1
++#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */
++
++struct clockgen_sourceinfo {
++ u32 flags; /* CLKSEL_xxx */
++ int pll; /* CGx_PLLn */
++ int div; /* PLL_DIVn */
++};
++
++#define NUM_MUX_PARENTS 16
++
++struct clockgen_muxinfo {
++ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
++};
++
++#define NUM_HWACCEL 5
++#define NUM_CMUX 8
++
++struct clockgen;
++
++/*
++ * cmux freq must be >= platform pll.
++ * If not set, cmux freq must be >= platform pll/2
++ */
++#define CG_CMUX_GE_PLAT 1
++
++#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */
++#define CG_VER3 4 /* version 3 cg: reg layout different */
++#define CG_LITTLE_ENDIAN 8
++
++struct clockgen_chipinfo {
++ const char *compat, *guts_compat;
++ const struct clockgen_muxinfo *cmux_groups[2];
++ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
++ void (*init_periph)(struct clockgen *cg);
++ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
++ u32 pll_mask; /* 1 << n bit set if PLL n is valid */
++ u32 flags; /* CG_xxx */
++};
++
++struct clockgen {
++ struct device_node *node;
++ void __iomem *regs;
++ struct clockgen_chipinfo info; /* mutable copy */
++ struct clk *sysclk;
++ struct clockgen_pll pll[6];
++ struct clk *cmux[NUM_CMUX];
++ struct clk *hwaccel[NUM_HWACCEL];
++ struct clk *fman[2];
++ struct ccsr_guts __iomem *guts;
++};
++
++static struct clockgen clockgen;
++
++static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
++{
++ if (cg->info.flags & CG_LITTLE_ENDIAN)
++ iowrite32(val, reg);
++ else
++ iowrite32be(val, reg);
++}
++
++static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
++{
++ u32 val;
++
++ if (cg->info.flags & CG_LITTLE_ENDIAN)
++ val = ioread32(reg);
++ else
++ val = ioread32be(reg);
++
++ return val;
++}
++
++static const struct clockgen_muxinfo p2041_cmux_grp1 = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ }
++};
++
++static const struct clockgen_muxinfo p2041_cmux_grp2 = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [4] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ }
++};
++
++static const struct clockgen_muxinfo p5020_cmux_grp1 = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
++ }
++};
++
++static const struct clockgen_muxinfo p5020_cmux_grp2 = {
++ {
++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ }
++};
++
++static const struct clockgen_muxinfo p5040_cmux_grp1 = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
++ [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
++ }
++};
++
++static const struct clockgen_muxinfo p5040_cmux_grp2 = {
++ {
++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ }
++};
++
++static const struct clockgen_muxinfo p4080_cmux_grp1 = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
++ }
++};
++
++static const struct clockgen_muxinfo p4080_cmux_grp2 = {
++ {
++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
++ [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
++ [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
++ [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
++ [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
++ }
++};
++
++static const struct clockgen_muxinfo t1023_cmux = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ }
++};
++
++static const struct clockgen_muxinfo t1040_cmux = {
++ {
++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ }
++};
++
++
++static const struct clockgen_muxinfo clockgen2_cmux_cga = {
++ {
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
++ {},
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
++ {},
++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
++ },
++};
++
++static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
++ {
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
++ {},
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
++ },
++};
++
++static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
++ {
++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
++ {},
++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
++ },
++};
++
++static const struct clockgen_muxinfo t1023_hwa1 = {
++ {
++ {},
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
++ },
++};
++
++static const struct clockgen_muxinfo t1023_hwa2 = {
++ {
++ [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ },
++};
++
++static const struct clockgen_muxinfo t2080_hwa1 = {
++ {
++ {},
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
++ },
++};
++
++static const struct clockgen_muxinfo t2080_hwa2 = {
++ {
++ {},
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
++ },
++};
++
++static const struct clockgen_muxinfo t4240_hwa1 = {
++ {
++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
++ {},
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
++ },
++};
++
++static const struct clockgen_muxinfo t4240_hwa4 = {
++ {
++ [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
++ [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
++ [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
++ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
++ [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
++ },
++};
++
++static const struct clockgen_muxinfo t4240_hwa5 = {
++ {
++ [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
++ [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
++ [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
++ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
++ [6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
++ [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
++ },
++};
++
++#define RCWSR7_FM1_CLK_SEL 0x40000000
++#define RCWSR7_FM2_CLK_SEL 0x20000000
++#define RCWSR7_HWA_ASYNC_DIV 0x04000000
++
++static void __init p2041_init_periph(struct clockgen *cg)
++{
++ u32 reg;
++
++ reg = ioread32be(&cg->guts->rcwsr[7]);
++
++ if (reg & RCWSR7_FM1_CLK_SEL)
++ cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
++ else
++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
++}
++
++static void __init p4080_init_periph(struct clockgen *cg)
++{
++ u32 reg;
++
++ reg = ioread32be(&cg->guts->rcwsr[7]);
++
++ if (reg & RCWSR7_FM1_CLK_SEL)
++ cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
++ else
++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
++
++ if (reg & RCWSR7_FM2_CLK_SEL)
++ cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
++ else
++ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
++}
++
++static void __init p5020_init_periph(struct clockgen *cg)
++{
++ u32 reg;
++ int div = PLL_DIV2;
++
++ reg = ioread32be(&cg->guts->rcwsr[7]);
++ if (reg & RCWSR7_HWA_ASYNC_DIV)
++ div = PLL_DIV4;
++
++ if (reg & RCWSR7_FM1_CLK_SEL)
++ cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
++ else
++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
++}
++
++static void __init p5040_init_periph(struct clockgen *cg)
++{
++ u32 reg;
++ int div = PLL_DIV2;
++
++ reg = ioread32be(&cg->guts->rcwsr[7]);
++ if (reg & RCWSR7_HWA_ASYNC_DIV)
++ div = PLL_DIV4;
++
++ if (reg & RCWSR7_FM1_CLK_SEL)
++ cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
++ else
++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
++
++ if (reg & RCWSR7_FM2_CLK_SEL)
++ cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
++ else
++ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
++}
++
++static void __init t1023_init_periph(struct clockgen *cg)
++{
++ cg->fman[0] = cg->hwaccel[1];
++}
++
++static void __init t1040_init_periph(struct clockgen *cg)
++{
++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
++}
++
++static void __init t2080_init_periph(struct clockgen *cg)
++{
++ cg->fman[0] = cg->hwaccel[0];
++}
++
++static void __init t4240_init_periph(struct clockgen *cg)
++{
++ cg->fman[0] = cg->hwaccel[3];
++ cg->fman[1] = cg->hwaccel[4];
++}
++
++static const struct clockgen_chipinfo chipinfo[] = {
++ {
++ .compat = "fsl,b4420-clockgen",
++ .guts_compat = "fsl,b4860-device-config",
++ .init_periph = t2080_init_periph,
++ .cmux_groups = {
++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
++ },
++ .hwaccel = {
++ &t2080_hwa1
++ },
++ .cmux_to_group = {
++ 0, 1, 1, 1, -1
++ },
++ .pll_mask = 0x3f,
++ .flags = CG_PLL_8BIT,
++ },
++ {
++ .compat = "fsl,b4860-clockgen",
++ .guts_compat = "fsl,b4860-device-config",
++ .init_periph = t2080_init_periph,
++ .cmux_groups = {
++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
++ },
++ .hwaccel = {
++ &t2080_hwa1
++ },
++ .cmux_to_group = {
++ 0, 1, 1, 1, -1
++ },
++ .pll_mask = 0x3f,
++ .flags = CG_PLL_8BIT,
++ },
++ {
++ .compat = "fsl,ls1021a-clockgen",
++ .cmux_groups = {
++ &t1023_cmux
++ },
++ .cmux_to_group = {
++ 0, -1
++ },
++ .pll_mask = 0x03,
++ },
++ {
++ .compat = "fsl,ls2080a-clockgen",
++ .cmux_groups = {
++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
++ },
++ .cmux_to_group = {
++ 0, 0, 1, 1, -1
++ },
++ .pll_mask = 0x37,
++ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
++ },
++ {
++ .compat = "fsl,ls2088a-clockgen",
++ .cmux_groups = {
++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
++ },
++ .cmux_to_group = {
++ 0, 0, 1, 1, -1
++ },
++ .pll_mask = 0x37,
++ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
++ },
++ {
++ .compat = "fsl,p2041-clockgen",
++ .guts_compat = "fsl,qoriq-device-config-1.0",
++ .init_periph = p2041_init_periph,
++ .cmux_groups = {
++ &p2041_cmux_grp1, &p2041_cmux_grp2
++ },
++ .cmux_to_group = {
++ 0, 0, 1, 1, -1
++ },
++ .pll_mask = 0x07,
++ },
++ {
++ .compat = "fsl,p3041-clockgen",
++ .guts_compat = "fsl,qoriq-device-config-1.0",
++ .init_periph = p2041_init_periph,
++ .cmux_groups = {
++ &p2041_cmux_grp1, &p2041_cmux_grp2
++ },
++ .cmux_to_group = {
++ 0, 0, 1, 1, -1
++ },
++ .pll_mask = 0x07,
++ },
++ {
++ .compat = "fsl,p4080-clockgen",
++ .guts_compat = "fsl,qoriq-device-config-1.0",
++ .init_periph = p4080_init_periph,
++ .cmux_groups = {
++ &p4080_cmux_grp1, &p4080_cmux_grp2
++ },
++ .cmux_to_group = {
++ 0, 0, 0, 0, 1, 1, 1, 1
++ },
++ .pll_mask = 0x1f,
++ },
++ {
++ .compat = "fsl,p5020-clockgen",
++ .guts_compat = "fsl,qoriq-device-config-1.0",
++ .init_periph = p5020_init_periph,
++ .cmux_groups = {
++ &p2041_cmux_grp1, &p2041_cmux_grp2
++ },
++ .cmux_to_group = {
++ 0, 1, -1
++ },
++ .pll_mask = 0x07,
++ },
++ {
++ .compat = "fsl,p5040-clockgen",
++ .guts_compat = "fsl,p5040-device-config",
++ .init_periph = p5040_init_periph,
++ .cmux_groups = {
++ &p5040_cmux_grp1, &p5040_cmux_grp2
++ },
++ .cmux_to_group = {
++ 0, 0, 1, 1, -1
++ },
++ .pll_mask = 0x0f,
++ },
++ {
++ .compat = "fsl,t1023-clockgen",
++ .guts_compat = "fsl,t1023-device-config",
++ .init_periph = t1023_init_periph,
++ .cmux_groups = {
++ &t1023_cmux
++ },
++ .hwaccel = {
++ &t1023_hwa1, &t1023_hwa2
++ },
++ .cmux_to_group = {
++ 0, 0, -1
++ },
++ .pll_mask = 0x03,
++ .flags = CG_PLL_8BIT,
++ },
++ {
++ .compat = "fsl,t1040-clockgen",
++ .guts_compat = "fsl,t1040-device-config",
++ .init_periph = t1040_init_periph,
++ .cmux_groups = {
++ &t1040_cmux
++ },
++ .cmux_to_group = {
++ 0, 0, 0, 0, -1
++ },
++ .pll_mask = 0x07,
++ .flags = CG_PLL_8BIT,
++ },
++ {
++ .compat = "fsl,t2080-clockgen",
++ .guts_compat = "fsl,t2080-device-config",
++ .init_periph = t2080_init_periph,
++ .cmux_groups = {
++ &clockgen2_cmux_cga12
++ },
++ .hwaccel = {
++ &t2080_hwa1, &t2080_hwa2
++ },
++ .cmux_to_group = {
++ 0, -1
++ },
++ .pll_mask = 0x07,
++ .flags = CG_PLL_8BIT,
++ },
++ {
++ .compat = "fsl,t4240-clockgen",
++ .guts_compat = "fsl,t4240-device-config",
++ .init_periph = t4240_init_periph,
++ .cmux_groups = {
++ &clockgen2_cmux_cga, &clockgen2_cmux_cgb
++ },
++ .hwaccel = {
++ &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
++ },
++ .cmux_to_group = {
++ 0, 0, 1, -1
++ },
++ .pll_mask = 0x3f,
++ .flags = CG_PLL_8BIT,
++ },
++ {},
++};
++
++struct mux_hwclock {
++ struct clk_hw hw;
++ struct clockgen *cg;
++ const struct clockgen_muxinfo *info;
++ u32 __iomem *reg;
++ u8 parent_to_clksel[NUM_MUX_PARENTS];
++ s8 clksel_to_parent[NUM_MUX_PARENTS];
++ int num_parents;
++};
++
++#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw)
++#define CLKSEL_MASK 0x78000000
++#define CLKSEL_SHIFT 27
++
++static int mux_set_parent(struct clk_hw *hw, u8 idx)
++{
++ struct mux_hwclock *hwc = to_mux_hwclock(hw);
++ u32 clksel;
++
++ if (idx >= hwc->num_parents)
++ return -EINVAL;
++
++ clksel = hwc->parent_to_clksel[idx];
++ cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);
++
++ return 0;
++}
++
++static u8 mux_get_parent(struct clk_hw *hw)
++{
++ struct mux_hwclock *hwc = to_mux_hwclock(hw);
++ u32 clksel;
++ s8 ret;
++
++ clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
++
++ ret = hwc->clksel_to_parent[clksel];
++ if (ret < 0) {
++ pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
++ return 0;
++ }
++
++ return ret;
++}
++
++static const struct clk_ops cmux_ops = {
++ .get_parent = mux_get_parent,
++ .set_parent = mux_set_parent,
++};
++
++/*
++ * Don't allow setting for now, as the clock options haven't been
++ * sanitized for additional restrictions.
++ */
++static const struct clk_ops hwaccel_ops = {
++ .get_parent = mux_get_parent,
++};
++
++static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
++ struct mux_hwclock *hwc,
++ int idx)
++{
++ int pll, div;
++
++ if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
++ return NULL;
++
++ pll = hwc->info->clksel[idx].pll;
++ div = hwc->info->clksel[idx].div;
++
++ return &cg->pll[pll].div[div];
++}
++
++static struct clk * __init create_mux_common(struct clockgen *cg,
++ struct mux_hwclock *hwc,
++ const struct clk_ops *ops,
++ unsigned long min_rate,
++ unsigned long pct80_rate,
++ const char *fmt, int idx)
++{
++ struct clk_init_data init = {};
++ struct clk *clk;
++ const struct clockgen_pll_div *div;
++ const char *parent_names[NUM_MUX_PARENTS];
++ char name[32];
++ int i, j;
++
++ snprintf(name, sizeof(name), fmt, idx);
++
++ for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
++ unsigned long rate;
++
++ hwc->clksel_to_parent[i] = -1;
++
++ div = get_pll_div(cg, hwc, i);
++ if (!div)
++ continue;
++
++ rate = clk_get_rate(div->clk);
++
++ if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
++ rate > pct80_rate)
++ continue;
++ if (rate < min_rate)
++ continue;
++
++ parent_names[j] = div->name;
++ hwc->parent_to_clksel[j] = i;
++ hwc->clksel_to_parent[i] = j;
++ j++;
++ }
++
++ init.name = name;
++ init.ops = ops;
++ init.parent_names = parent_names;
++ init.num_parents = hwc->num_parents = j;
++ init.flags = 0;
++ hwc->hw.init = &init;
++ hwc->cg = cg;
++
++ clk = clk_register(NULL, &hwc->hw);
++ if (IS_ERR(clk)) {
++ pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
++ PTR_ERR(clk));
++ kfree(hwc);
++ return NULL;
++ }
++
++ return clk;
++}
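To make the bookkeeping in create_mux_common() above concrete, here is an illustrative sketch (not part of the patch; the example_ names are hypothetical, and NUM_MUX_PARENTS is the macro defined earlier in this file). It shows the two lookup tables that would result for a mux whose clksel table has entries 0, 1 and 4 valid, as in p2041_cmux_grp1, assuming all three candidate rates pass the min-rate and 80% filters:

/* Three parents are registered (j = 0..2); every other clksel maps to -1. */
static const u8 example_parent_to_clksel[] = { 0, 1, 4 };
static const s8 example_clksel_to_parent[NUM_MUX_PARENTS] = {
	[0] = 0, [1] = 1, [4] = 2,
	[2] = -1, [3] = -1, [5 ... NUM_MUX_PARENTS - 1] = -1,
};

mux_get_parent() then reads the 4-bit CLKSEL field from the register and translates it back through clksel_to_parent.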
++
++static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
++{
++ struct mux_hwclock *hwc;
++ const struct clockgen_pll_div *div;
++ unsigned long plat_rate, min_rate;
++ u64 pct80_rate;
++ u32 clksel;
++
++ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
++ if (!hwc)
++ return NULL;
++
++ if (cg->info.flags & CG_VER3)
++ hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
++ else
++ hwc->reg = cg->regs + 0x20 * idx;
++
++ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
++
++ /*
++ * Find the rate for the default clksel, and treat it as the
++ * maximum rated core frequency. If this is an incorrect
++ * assumption, certain clock options (possibly including the
++ * default clksel) may be inappropriately excluded on certain
++ * chips.
++ */
++ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
++ div = get_pll_div(cg, hwc, clksel);
++ if (!div)
++ return NULL;
++
++ pct80_rate = clk_get_rate(div->clk);
++ pct80_rate *= 8;
++ do_div(pct80_rate, 10);
++
++ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
++
++ if (cg->info.flags & CG_CMUX_GE_PLAT)
++ min_rate = plat_rate;
++ else
++ min_rate = plat_rate / 2;
++
++ return create_mux_common(cg, hwc, &cmux_ops, min_rate,
++ pct80_rate, "cg-cmux%d", idx);
++}
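As a worked illustration of the 80% rule above (the numbers are assumed, not taken from the patch): if the default clksel resolves to a PLL divider running at 1.5 GHz, create_one_cmux() computes the ceiling as below, and create_mux_common() then drops any clksel entry flagged CLKSEL_80PCT whose rate exceeds it.

#include <linux/types.h>
#include <asm/div64.h>

static unsigned long example_pct80(unsigned long default_clksel_rate)
{
	u64 pct80_rate = default_clksel_rate;	/* e.g. 1500000000 (1.5 GHz) */

	pct80_rate *= 8;
	do_div(pct80_rate, 10);			/* 1200000000 Hz: the 80% ceiling */

	return pct80_rate;
}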
++
++static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
++{
++ struct mux_hwclock *hwc;
++
++ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
++ if (!hwc)
++ return NULL;
++
++ hwc->reg = cg->regs + 0x20 * idx + 0x10;
++ hwc->info = cg->info.hwaccel[idx];
++
++ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
++ "cg-hwaccel%d", idx);
++}
++
++static void __init create_muxes(struct clockgen *cg)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
++ if (cg->info.cmux_to_group[i] < 0)
++ break;
++ if (cg->info.cmux_to_group[i] >=
++ ARRAY_SIZE(cg->info.cmux_groups)) {
++ WARN_ON_ONCE(1);
++ continue;
++ }
++
++ cg->cmux[i] = create_one_cmux(cg, i);
++ }
++
++ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
++ if (!cg->info.hwaccel[i])
++ continue;
++
++ cg->hwaccel[i] = create_one_hwaccel(cg, i);
++ }
++}
++
++static void __init clockgen_init(struct device_node *np);
++
++/* Legacy nodes may get probed before the parent clockgen node */
++static void __init legacy_init_clockgen(struct device_node *np)
++{
++ if (!clockgen.node)
++ clockgen_init(of_get_parent(np));
++}
++
++/* Legacy node */
++static void __init core_mux_init(struct device_node *np)
++{
++ struct clk *clk;
++ struct resource res;
++ int idx, rc;
++
++ legacy_init_clockgen(np);
++
++ if (of_address_to_resource(np, 0, &res))
++ return;
++
++ idx = (res.start & 0xf0) >> 5;
++ clk = clockgen.cmux[idx];
++
++ rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
++ if (rc) {
++ pr_err("%s: Couldn't register clk provider for node %s: %d\n",
++ __func__, np->name, rc);
++ return;
++ }
++}
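For example (illustrative only), a legacy core-mux node whose registers sit 0x40 bytes into the clockgen block yields idx = (0x40 & 0xf0) >> 5 = 2, i.e. clockgen.cmux[2]; the shift by 5 matches the 0x20-byte register spacing used for each cmux in create_one_cmux().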
++
++static struct clk *sysclk_from_fixed(struct device_node *node, const char *name)
++{
++ u32 rate;
++
++ if (of_property_read_u32(node, "clock-frequency", &rate))
++ return ERR_PTR(-ENODEV);
++
++ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
++}
++
++static struct clk *sysclk_from_parent(const char *name)
++{
++ struct clk *clk;
++ const char *parent_name;
++
++ clk = of_clk_get(clockgen.node, 0);
++ if (IS_ERR(clk))
++ return clk;
++
++ /* Register the input clock under the desired name. */
++ parent_name = __clk_get_name(clk);
++ clk = clk_register_fixed_factor(NULL, name, parent_name,
++ 0, 1, 1);
++ if (IS_ERR(clk))
++ pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
++ PTR_ERR(clk));
++
++ return clk;
++}
++
++static struct clk * __init create_sysclk(const char *name)
++{
++ struct device_node *sysclk;
++ struct clk *clk;
++
++ clk = sysclk_from_fixed(clockgen.node, name);
++ if (!IS_ERR(clk))
++ return clk;
++
++ clk = sysclk_from_parent(name);
++ if (!IS_ERR(clk))
++ return clk;
++
++ sysclk = of_get_child_by_name(clockgen.node, "sysclk");
++ if (sysclk) {
++ clk = sysclk_from_fixed(sysclk, name);
++ if (!IS_ERR(clk))
++ return clk;
++ }
++
++ pr_err("%s: No input clock\n", __func__);
++ return NULL;
++}
++
++/* Legacy node */
++static void __init sysclk_init(struct device_node *node)
++{
++ struct clk *clk;
++
++ legacy_init_clockgen(node);
++
++ clk = clockgen.sysclk;
++ if (clk)
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
++}
++
++#define PLL_KILL BIT(31)
++
++static void __init create_one_pll(struct clockgen *cg, int idx)
++{
++ u32 __iomem *reg;
++ u32 mult;
++ struct clockgen_pll *pll = &cg->pll[idx];
++ int i;
++
++ if (!(cg->info.pll_mask & (1 << idx)))
++ return;
++
++ if (cg->info.flags & CG_VER3) {
++ switch (idx) {
++ case PLATFORM_PLL:
++ reg = cg->regs + 0x60080;
++ break;
++ case CGA_PLL1:
++ reg = cg->regs + 0x80;
++ break;
++ case CGA_PLL2:
++ reg = cg->regs + 0xa0;
++ break;
++ case CGB_PLL1:
++ reg = cg->regs + 0x10080;
++ break;
++ case CGB_PLL2:
++ reg = cg->regs + 0x100a0;
++ break;
++ default:
++ WARN_ONCE(1, "index %d\n", idx);
++ return;
++ }
++ } else {
++ if (idx == PLATFORM_PLL)
++ reg = cg->regs + 0xc00;
++ else
++ reg = cg->regs + 0x800 + 0x20 * (idx - 1);
++ }
++
++ /* Get the PLL multiplier */
++ mult = cg_in(cg, reg);
++
++ /* Check if this PLL is disabled */
++ if (mult & PLL_KILL) {
++ pr_debug("%s(): pll %p disabled\n", __func__, reg);
++ return;
++ }
++
++ if ((cg->info.flags & CG_VER3) ||
++ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
++ mult = (mult & GENMASK(8, 1)) >> 1;
++ else
++ mult = (mult & GENMASK(6, 1)) >> 1;
++
++ for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
++ struct clk *clk;
++
++ snprintf(pll->div[i].name, sizeof(pll->div[i].name),
++ "cg-pll%d-div%d", idx, i + 1);
++
++ clk = clk_register_fixed_factor(NULL,
++ pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
++ if (IS_ERR(clk)) {
++ pr_err("%s: %s: register failed %ld\n",
++ __func__, pll->div[i].name, PTR_ERR(clk));
++ continue;
++ }
++
++ pll->div[i].clk = clk;
++ }
++}
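As an illustration with assumed values: on a chip without CG_VER3 and without CG_PLL_8BIT, a register value of 0x10 decodes as mult = (0x10 & GENMASK(6, 1)) >> 1 = 8, so with a 100 MHz cg-sysclk the four fixed-factor clocks come out as cg-pllN-div1 = 800 MHz, -div2 = 400 MHz, -div3 ≈ 266.7 MHz and -div4 = 200 MHz.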
++
++static void __init create_plls(struct clockgen *cg)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
++ create_one_pll(cg, i);
++}
++
++static void __init legacy_pll_init(struct device_node *np, int idx)
++{
++ struct clockgen_pll *pll;
++ struct clk_onecell_data *onecell_data;
++ struct clk **subclks;
++ int count, rc;
++
++ legacy_init_clockgen(np);
++
++ pll = &clockgen.pll[idx];
++ count = of_property_count_strings(np, "clock-output-names");
++
++ BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
++ subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
++ if (!subclks)
++ return;
++
++ onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
++ if (!onecell_data)
++ goto err_clks;
++
++ if (count <= 3) {
++ subclks[0] = pll->div[0].clk;
++ subclks[1] = pll->div[1].clk;
++ subclks[2] = pll->div[3].clk;
++ } else {
++ subclks[0] = pll->div[0].clk;
++ subclks[1] = pll->div[1].clk;
++ subclks[2] = pll->div[2].clk;
++ subclks[3] = pll->div[3].clk;
++ }
++
++ onecell_data->clks = subclks;
++ onecell_data->clk_num = count;
++
++ rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
++ if (rc) {
++ pr_err("%s: Couldn't register clk provider for node %s: %d\n",
++ __func__, np->name, rc);
++ goto err_cell;
++ }
++
++ return;
++err_cell:
++ kfree(onecell_data);
++err_clks:
++ kfree(subclks);
++}
++
++/* Legacy node */
++static void __init pltfrm_pll_init(struct device_node *np)
++{
++ legacy_pll_init(np, PLATFORM_PLL);
++}
++
++/* Legacy node */
++static void __init core_pll_init(struct device_node *np)
++{
++ struct resource res;
++ int idx;
++
++ if (of_address_to_resource(np, 0, &res))
++ return;
++
++ if ((res.start & 0xfff) == 0xc00) {
++ /*
++ * ls1021a devtree labels the platform PLL
++ * with the core PLL compatible
++ */
++ pltfrm_pll_init(np);
++ } else {
++ idx = (res.start & 0xf0) >> 5;
++ legacy_pll_init(np, CGA_PLL1 + idx);
++ }
++}
++
++static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
++{
++ struct clockgen *cg = data;
++ struct clk *clk;
++ struct clockgen_pll *pll;
++ u32 type, idx;
++
++ if (clkspec->args_count < 2) {
++ pr_err("%s: insufficient phandle args\n", __func__);
++ return ERR_PTR(-EINVAL);
++ }
++
++ type = clkspec->args[0];
++ idx = clkspec->args[1];
++
++ switch (type) {
++ case 0:
++ if (idx != 0)
++ goto bad_args;
++ clk = cg->sysclk;
++ break;
++ case 1:
++ if (idx >= ARRAY_SIZE(cg->cmux))
++ goto bad_args;
++ clk = cg->cmux[idx];
++ break;
++ case 2:
++ if (idx >= ARRAY_SIZE(cg->hwaccel))
++ goto bad_args;
++ clk = cg->hwaccel[idx];
++ break;
++ case 3:
++ if (idx >= ARRAY_SIZE(cg->fman))
++ goto bad_args;
++ clk = cg->fman[idx];
++ break;
++ case 4:
++ pll = &cg->pll[PLATFORM_PLL];
++ if (idx >= ARRAY_SIZE(pll->div))
++ goto bad_args;
++ clk = pll->div[idx].clk;
++ break;
++ default:
++ goto bad_args;
++ }
++
++ if (!clk)
++ return ERR_PTR(-ENOENT);
++ return clk;
++
++bad_args:
++ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
++ return ERR_PTR(-EINVAL);
++}
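The provider above decodes a two-cell clock specifier: the first cell selects the clock type (0 = sysclk, 1 = core mux, 2 = hwaccel mux, 3 = FMan, 4 = platform-PLL divider) and the second cell is the index. A minimal, hypothetical consumer sketch (driver and property names assumed, not part of the patch) that picks up core mux 0 through a clocks = <&clockgen 1 0> phandle could look like:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Resolved through clockgen_clk_get() with args {1, 0}. */
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}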
++
++#ifdef CONFIG_PPC
++
++static const u32 a4510_svrs[] __initconst = {
++ (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */
++ (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */
++ (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */
++ (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */
++ (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */
++ (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */
++ (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */
++ (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */
++ (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */
++ (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */
++ (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */
++ (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */
++ (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */
++};
++
++#define SVR_SECURITY 0x80000 /* The Security (E) bit */
++
++static bool __init has_erratum_a4510(void)
++{
++ u32 svr = mfspr(SPRN_SVR);
++ int i;
++
++ svr &= ~SVR_SECURITY;
++
++ for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
++ if (svr == a4510_svrs[i])
++ return true;
++ }
++
++ return false;
++}
++#else
++static bool __init has_erratum_a4510(void)
++{
++ return false;
++}
++#endif
++
++static void __init clockgen_init(struct device_node *np)
++{
++ int i, ret;
++ bool is_old_ls1021a = false;
++
++ /* May have already been called by a legacy probe */
++ if (clockgen.node)
++ return;
++
++ clockgen.node = np;
++ clockgen.regs = of_iomap(np, 0);
++ if (!clockgen.regs &&
++ of_device_is_compatible(of_root, "fsl,ls1021a")) {
++ /* Compatibility hack for old, broken device trees */
++ clockgen.regs = ioremap(0x1ee1000, 0x1000);
++ is_old_ls1021a = true;
++ }
++ if (!clockgen.regs) {
++ pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
++ return;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
++ if (of_device_is_compatible(np, chipinfo[i].compat))
++ break;
++ if (is_old_ls1021a &&
++ !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(chipinfo)) {
++ pr_err("%s: unknown clockgen node %s\n", __func__,
++ np->full_name);
++ goto err;
++ }
++ clockgen.info = chipinfo[i];
++
++ if (clockgen.info.guts_compat) {
++ struct device_node *guts;
++
++ guts = of_find_compatible_node(NULL, NULL,
++ clockgen.info.guts_compat);
++ if (guts) {
++ clockgen.guts = of_iomap(guts, 0);
++ if (!clockgen.guts) {
++ pr_err("%s: Couldn't map %s regs\n", __func__,
++ guts->full_name);
++ }
++ }
++
++ }
++
++ if (has_erratum_a4510())
++ clockgen.info.flags |= CG_CMUX_GE_PLAT;
++
++ clockgen.sysclk = create_sysclk("cg-sysclk");
++ create_plls(&clockgen);
++ create_muxes(&clockgen);
++
++ if (clockgen.info.init_periph)
++ clockgen.info.init_periph(&clockgen);
++
++ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
++ if (ret) {
++ pr_err("%s: Couldn't register clk provider for node %s: %d\n",
++ __func__, np->name, ret);
++ }
++
++ return;
++err:
++ iounmap(clockgen.regs);
++ clockgen.regs = NULL;
++}
++
++CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_ls2088a, "fsl,ls2088a-clockgen", clockgen_init);
++
++/* Legacy nodes */
++CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
++CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
++CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
++CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
++CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
++CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
++CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
++CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
+diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
+index 72564b7..7ea2441 100644
+--- a/drivers/cpufreq/Kconfig.powerpc
++++ b/drivers/cpufreq/Kconfig.powerpc
+@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE
+ config PPC_CORENET_CPUFREQ
+ tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
+ depends on PPC_E500MC && OF && COMMON_CLK
+- select CLK_PPC_CORENET
++ select CLK_QORIQ
+ help
+ This adds the CPUFreq driver support for Freescale e500mc,
+ e5500 and e6500 series SoCs which are capable of changing
+diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
+index de361a1..5a63564 100644
+--- a/drivers/dma/acpi-dma.c
++++ b/drivers/dma/acpi-dma.c
+@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
+ {
+ const struct acpi_csrt_shared_info *si;
+ struct list_head resource_list;
+- struct resource_list_entry *rentry;
++ struct resource_entry *rentry;
+ resource_size_t mem = 0, irq = 0;
+ int ret;
+
+@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
+ return 0;
+
+ list_for_each_entry(rentry, &resource_list, node) {
+- if (resource_type(&rentry->res) == IORESOURCE_MEM)
+- mem = rentry->res.start;
+- else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
+- irq = rentry->res.start;
++ if (resource_type(rentry->res) == IORESOURCE_MEM)
++ mem = rentry->res->start;
++ else if (resource_type(rentry->res) == IORESOURCE_IRQ)
++ irq = rentry->res->start;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 06e99eb..bbf8ae4 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -526,10 +526,10 @@ config I2C_IBM_IIC
+
+ config I2C_IMX
+ tristate "IMX I2C interface"
+- depends on ARCH_MXC
++ depends on ARCH_MXC || ARCH_LAYERSCAPE
+ help
+ Say Y here if you want to use the IIC bus controller on
+- the Freescale i.MX/MXC processors.
++ the Freescale i.MX/MXC and layerscape processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-imx.
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index e9fb7cf..13f88f9 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -33,6 +33,10 @@
+ *******************************************************************************/
+
+ #include
++#include
++#include
++#include
++#include
+ #include
+ #include
+ #include
+@@ -47,6 +51,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+
+ /** Defines ********************************************************************
+@@ -58,6 +63,15 @@
+ /* Default value */
+ #define IMX_I2C_BIT_RATE 100000 /* 100kHz */
+
++/*
++ * Enable DMA if the transfer size is bigger than this threshold.
++ * As the hardware requires, it must be bigger than 4 bytes.
++ * The value 16 is used here; it may not be optimal, but it is a
++ * reasonable default.
++ */
++#define DMA_THRESHOLD 16
++#define DMA_TIMEOUT 1000
++
+ /* IMX I2C registers:
+ * the I2C register offset is different between SoCs,
+ * to provid support for all these chips, split the
+@@ -83,6 +97,7 @@
+ #define I2SR_IBB 0x20
+ #define I2SR_IAAS 0x40
+ #define I2SR_ICF 0x80
++#define I2CR_DMAEN 0x02
+ #define I2CR_RSTA 0x04
+ #define I2CR_TXAK 0x08
+ #define I2CR_MTX 0x10
+@@ -169,6 +184,17 @@ struct imx_i2c_hwdata {
+ unsigned i2cr_ien_opcode;
+ };
+
++struct imx_i2c_dma {
++ struct dma_chan *chan_tx;
++ struct dma_chan *chan_rx;
++ struct dma_chan *chan_using;
++ struct completion cmd_complete;
++ dma_addr_t dma_buf;
++ unsigned int dma_len;
++ enum dma_transfer_direction dma_transfer_dir;
++ enum dma_data_direction dma_data_dir;
++};
++
+ struct imx_i2c_struct {
+ struct i2c_adapter adapter;
+ struct clk *clk;
+@@ -181,6 +207,8 @@ struct imx_i2c_struct {
+ unsigned int cur_clk;
+ unsigned int bitrate;
+ const struct imx_i2c_hwdata *hwdata;
++
++ struct imx_i2c_dma *dma;
+ };
+
+ static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
+@@ -251,6 +279,162 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
+ return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
+ }
+
++/* Functions for DMA support */
++static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
++ dma_addr_t phy_addr)
++{
++ struct imx_i2c_dma *dma;
++ struct dma_slave_config dma_sconfig;
++ struct device *dev = &i2c_imx->adapter.dev;
++ int ret;
++
++ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
++ if (!dma)
++ return;
++
++ dma->chan_tx = dma_request_slave_channel(dev, "tx");
++ if (!dma->chan_tx) {
++ dev_dbg(dev, "can't request DMA tx channel\n");
++ goto fail_al;
++ }
++
++ dma_sconfig.dst_addr = phy_addr +
++ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift);
++ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ dma_sconfig.dst_maxburst = 1;
++ dma_sconfig.direction = DMA_MEM_TO_DEV;
++ ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig);
++ if (ret < 0) {
++ dev_dbg(dev, "can't configure tx channel\n");
++ goto fail_tx;
++ }
++
++ dma->chan_rx = dma_request_slave_channel(dev, "rx");
++ if (!dma->chan_rx) {
++ dev_dbg(dev, "can't request DMA rx channel\n");
++ goto fail_tx;
++ }
++
++ dma_sconfig.src_addr = phy_addr +
++ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift);
++ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ dma_sconfig.src_maxburst = 1;
++ dma_sconfig.direction = DMA_DEV_TO_MEM;
++ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig);
++ if (ret < 0) {
++ dev_dbg(dev, "can't configure rx channel\n");
++ goto fail_rx;
++ }
++
++ i2c_imx->dma = dma;
++ init_completion(&dma->cmd_complete);
++ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
++ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
++
++ return;
++
++fail_rx:
++ dma_release_channel(dma->chan_rx);
++fail_tx:
++ dma_release_channel(dma->chan_tx);
++fail_al:
++ devm_kfree(dev, dma);
++ dev_info(dev, "can't use DMA\n");
++}
++
++static void i2c_imx_dma_callback(void *arg)
++{
++ struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg;
++ struct imx_i2c_dma *dma = i2c_imx->dma;
++
++ dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf,
++ dma->dma_len, dma->dma_data_dir);
++ complete(&dma->cmd_complete);
++}
++
++static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
++ struct i2c_msg *msgs)
++{
++ struct imx_i2c_dma *dma = i2c_imx->dma;
++ struct dma_async_tx_descriptor *txdesc;
++ struct device *dev = &i2c_imx->adapter.dev;
++ struct device *chan_dev = dma->chan_using->device->dev;
++
++ dma->dma_buf = dma_map_single(chan_dev, msgs->buf,
++ dma->dma_len, dma->dma_data_dir);
++ if (dma_mapping_error(chan_dev, dma->dma_buf)) {
++ dev_err(dev, "DMA mapping failed\n");
++ goto err_map;
++ }
++
++ txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf,
++ dma->dma_len, dma->dma_transfer_dir,
++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ if (!txdesc) {
++ dev_err(dev, "Not able to get desc for DMA xfer\n");
++ goto err_desc;
++ }
++
++ txdesc->callback = i2c_imx_dma_callback;
++ txdesc->callback_param = i2c_imx;
++ if (dma_submit_error(dmaengine_submit(txdesc))) {
++ dev_err(dev, "DMA submit failed\n");
++ goto err_submit;
++ }
++
++ dma_async_issue_pending(dma->chan_using);
++ return 0;
++
++err_submit:
++err_desc:
++ dma_unmap_single(chan_dev, dma->dma_buf,
++ dma->dma_len, dma->dma_data_dir);
++err_map:
++ return -EINVAL;
++}
++
++static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
++{
++ struct imx_i2c_dma *dma = i2c_imx->dma;
++
++ dma->dma_buf = 0;
++ dma->dma_len = 0;
++
++ dma_release_channel(dma->chan_tx);
++ dma->chan_tx = NULL;
++
++ dma_release_channel(dma->chan_rx);
++ dma->chan_rx = NULL;
++
++ dma->chan_using = NULL;
++}
++
++/*
++ * When a system reset does not cause all I2C devices to be reset, it is
++ * sometimes necessary to force the I2C module to become the bus master out
++ * of reset and drive SCL. A slave can hold the bus low and cause a bus
++ * hang: SDA can be driven low by another I2C device while this I2C module
++ * is coming out of reset, and it will then stay low indefinitely.
++ * The I2C master has to generate 9 clock pulses to get the bus back to the
++ * free or idle state.
++ */
++static void imx_i2c_fixup(struct imx_i2c_struct *i2c_imx)
++{
++ int k;
++ u32 delay_val = 1000000 / i2c_imx->cur_clk + 1;
++
++ if (delay_val < 2)
++ delay_val = 2;
++
++ for (k = 9; k; k--) {
++ imx_i2c_write_reg(I2CR_IEN, i2c_imx, IMX_I2C_I2CR);
++ imx_i2c_write_reg((I2CR_MSTA | I2CR_MTX) & (~I2CR_IEN),
++ i2c_imx, IMX_I2C_I2CR);
++ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
++ udelay(delay_val << 1);
++ }
++}
++
+ /** Functions for IMX I2C adapter driver ***************************************
+ *******************************************************************************/
+
+@@ -276,8 +460,15 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
+ if (!for_busy && !(temp & I2SR_IBB))
+ break;
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
++ u8 status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
++
+ dev_dbg(&i2c_imx->adapter.dev,
+ "<%s> I2C bus is busy\n", __func__);
++ if ((status & (I2SR_ICF | I2SR_IBB | I2CR_TXAK)) != 0) {
++ imx_i2c_write_reg(status & ~I2SR_IAL, i2c_imx,
++ IMX_I2C_I2CR);
++ imx_i2c_fixup(i2c_imx);
++ }
+ return -ETIMEDOUT;
+ }
+ schedule();
+@@ -382,6 +573,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
+ i2c_imx->stopped = 0;
+
+ temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
++ temp &= ~I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ return result;
+ }
+@@ -395,6 +587,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
+ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp &= ~(I2CR_MSTA | I2CR_MTX);
++ if (i2c_imx->dma)
++ temp &= ~I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ }
+ if (is_imx1_i2c(i2c_imx)) {
+@@ -435,6 +629,157 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
+ return IRQ_NONE;
+ }
+
++static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
++ struct i2c_msg *msgs)
++{
++ int result;
++ unsigned long time_left;
++ unsigned int temp = 0;
++ unsigned long orig_jiffies = jiffies;
++ struct imx_i2c_dma *dma = i2c_imx->dma;
++ struct device *dev = &i2c_imx->adapter.dev;
++
++ dma->chan_using = dma->chan_tx;
++ dma->dma_transfer_dir = DMA_MEM_TO_DEV;
++ dma->dma_data_dir = DMA_TO_DEVICE;
++ dma->dma_len = msgs->len - 1;
++ result = i2c_imx_dma_xfer(i2c_imx, msgs);
++ if (result)
++ return result;
++
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ temp |= I2CR_DMAEN;
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
++
++ /*
++ * Write slave address.
++ * The first byte must be transmitted by the CPU.
++ */
++ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
++ reinit_completion(&i2c_imx->dma->cmd_complete);
++ time_left = wait_for_completion_timeout(
++ &i2c_imx->dma->cmd_complete,
++ msecs_to_jiffies(DMA_TIMEOUT));
++ if (time_left == 0) {
++ dmaengine_terminate_all(dma->chan_using);
++ return -ETIMEDOUT;
++ }
++
++ /* Wait for the transfer to complete. */
++ while (1) {
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
++ if (temp & I2SR_ICF)
++ break;
++ if (time_after(jiffies, orig_jiffies +
++ msecs_to_jiffies(DMA_TIMEOUT))) {
++ dev_dbg(dev, "<%s> Timeout\n", __func__);
++ return -ETIMEDOUT;
++ }
++ schedule();
++ }
++
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ temp &= ~I2CR_DMAEN;
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
++
++ /* The last data byte must be transferred by the CPU. */
++ imx_i2c_write_reg(msgs->buf[msgs->len-1],
++ i2c_imx, IMX_I2C_I2DR);
++ result = i2c_imx_trx_complete(i2c_imx);
++ if (result)
++ return result;
++
++ return i2c_imx_acked(i2c_imx);
++}
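In short, for an n-byte write the CPU transmits the slave address, the DMA engine moves the first n - 1 data bytes (dma_len = msgs->len - 1), and the final data byte goes out through the normal PIO path so that transfer completion and the ACK bit can be checked as usual.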
++
++static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
++ struct i2c_msg *msgs, bool is_lastmsg)
++{
++ int result;
++ unsigned long time_left;
++ unsigned int temp;
++ unsigned long orig_jiffies = jiffies;
++ struct imx_i2c_dma *dma = i2c_imx->dma;
++ struct device *dev = &i2c_imx->adapter.dev;
++
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ temp |= I2CR_DMAEN;
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
++
++ dma->chan_using = dma->chan_rx;
++ dma->dma_transfer_dir = DMA_DEV_TO_MEM;
++ dma->dma_data_dir = DMA_FROM_DEVICE;
++ /* The last two data bytes must be transferred by the CPU. */
++ dma->dma_len = msgs->len - 2;
++ result = i2c_imx_dma_xfer(i2c_imx, msgs);
++ if (result)
++ return result;
++
++ reinit_completion(&i2c_imx->dma->cmd_complete);
++ time_left = wait_for_completion_timeout(
++ &i2c_imx->dma->cmd_complete,
++ msecs_to_jiffies(DMA_TIMEOUT));
++ if (time_left == 0) {
++ dmaengine_terminate_all(dma->chan_using);
++ return -ETIMEDOUT;
++ }
++
++ /* Wait for the transfer to complete. */
++ while (1) {
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
++ if (temp & I2SR_ICF)
++ break;
++ if (time_after(jiffies, orig_jiffies +
++ msecs_to_jiffies(DMA_TIMEOUT))) {
++ dev_dbg(dev, "<%s> Timeout\n", __func__);
++ return -ETIMEDOUT;
++ }
++ schedule();
++ }
++
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ temp &= ~I2CR_DMAEN;
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
++
++ /* read the second-to-last data byte */
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ temp |= I2CR_TXAK;
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
++
++ msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
++ /* read the last data byte */
++ result = i2c_imx_trx_complete(i2c_imx);
++ if (result)
++ return result;
++
++ if (is_lastmsg) {
++ /*
++ * A STOP must be generated before reading I2DR, to prevent the
++ * controller from generating another clock cycle.
++ */
++ dev_dbg(dev, "<%s> clear MSTA\n", __func__);
++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ temp &= ~(I2CR_MSTA | I2CR_MTX);
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
++ i2c_imx_bus_busy(i2c_imx, 0);
++ i2c_imx->stopped = 1;
++ } else {
++ /*
++ * For an I2C master-receiver repeated-start sequence such as:
++ * read -> repeated START (MSTA) -> read/write,
++ * the controller must set MTX before reading the last byte of
++ * the first read operation; otherwise the first read costs one
++ * extra clock cycle.
++ */
++ temp = readb(i2c_imx->base + IMX_I2C_I2CR);
++ temp |= I2CR_MTX;
++ writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
++ }
++ msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
++
++ return 0;
++}
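In short, for an n-byte read the DMA engine fills the first n - 2 bytes (dma_len = msgs->len - 2); the CPU then reads the second-to-last byte after setting TXAK so that the final byte is NAKed, and reads the last byte only after the STOP (or the MTX setup for a repeated start) has been programmed, as the comments above describe.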
++
+ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
+ {
+ int i, result;
+@@ -504,6 +849,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
+
++ if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
++ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
++
+ /* read data */
+ for (i = 0; i < msgs->len; i++) {
+ u8 len = 0;
+@@ -577,6 +925,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+
++ /*
++ * Workaround for ERR010027: ensure that the I2C bus is idle
++ * before switching to master mode and attempting a START cycle.
++ */
++ result = i2c_imx_bus_busy(i2c_imx, 0);
++ if (result)
++ goto fail0;
++
+ /* Start I2C transfer */
+ result = i2c_imx_start(i2c_imx);
+ if (result)
+@@ -618,8 +973,12 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
+ #endif
+ if (msgs[i].flags & I2C_M_RD)
+ result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg);
+- else
+- result = i2c_imx_write(i2c_imx, &msgs[i]);
++ else {
++ if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
++ result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
++ else
++ result = i2c_imx_write(i2c_imx, &msgs[i]);
++ }
+ if (result)
+ goto fail0;
+ }
+@@ -654,6 +1013,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ int irq, ret;
++ dma_addr_t phy_addr;
+
+ dev_dbg(&pdev->dev, "<%s>\n", __func__);
+
+@@ -668,6 +1028,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
++ phy_addr = (dma_addr_t)res->start;
+ i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct),
+ GFP_KERNEL);
+ if (!i2c_imx)
+@@ -701,7 +1062,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ return ret;
+ }
+ /* Request IRQ */
+- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
++ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
+ pdev->name, i2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+@@ -743,6 +1104,9 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ i2c_imx->adapter.name);
+ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+
++ /* Initialize DMA configuration, if supported */
++ i2c_imx_dma_request(i2c_imx, phy_addr);
++
+ return 0; /* Return OK */
+
+ clk_disable:
+@@ -758,6 +1122,9 @@ static int i2c_imx_remove(struct platform_device *pdev)
+ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
+ i2c_del_adapter(&i2c_imx->adapter);
+
++ if (i2c_imx->dma)
++ i2c_imx_dma_free(i2c_imx);
++
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
+index cb77277..0c8d4d2 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
++++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
+@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
+ buf[0] = command;
+ buf[1] = val;
+ msg.buf = buf;
+- ret = adap->algo->master_xfer(adap, &msg, 1);
++ ret = __i2c_transfer(adap, &msg, 1);
+ } else {
+ union i2c_smbus_data data;
+
+@@ -144,7 +144,7 @@ static int pca9541_reg_read(struct i2c_client *client, u8 command)
+ .buf = &val
+ }
+ };
+- ret = adap->algo->master_xfer(adap, msg, 2);
++ ret = __i2c_transfer(adap, msg, 2);
+ if (ret == 2)
+ ret = val;
+ else if (ret >= 0)
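The switch from adap->algo->master_xfer() to __i2c_transfer() here (and in the pca954x change below) keeps these register accesses inside the I2C core, so adapter quirk checks and tracing still apply, while remaining unlocked; these calls run from the mux select/deselect path, where the I2C mux core already manages locking and the locked i2c_transfer() must not be used. A hedged sketch of the pattern (the example_ name is hypothetical):

#include <linux/i2c.h>

/* Write one register byte on the parent adapter from a mux callback.
 * The mux core handles locking around select/deselect, so the unlocked
 * __i2c_transfer() is used instead of calling master_xfer() directly.
 */
static int example_mux_reg_write(struct i2c_adapter *adap,
				 struct i2c_client *client,
				 u8 command, u8 val)
{
	u8 buf[2] = { command, val };
	struct i2c_msg msg = {
		.addr = client->addr,
		.flags = 0,
		.len = 2,
		.buf = buf,
	};

	return __i2c_transfer(adap, &msg, 1);
}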
+diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
+index ec11b40..28540a4 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
+@@ -41,6 +41,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+
+@@ -62,6 +63,7 @@ struct pca954x {
+ struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
+
+ u8 last_chan; /* last register value */
++ u8 disable_mux; /* do not disable the mux if non-zero */
+ };
+
+ struct chip_desc {
+@@ -133,7 +135,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
+ msg.len = 1;
+ buf[0] = val;
+ msg.buf = buf;
+- ret = adap->algo->master_xfer(adap, &msg, 1);
++ ret = __i2c_transfer(adap, &msg, 1);
+ } else {
+ union i2c_smbus_data data;
+ ret = adap->algo->smbus_xfer(adap, client->addr,
+@@ -173,6 +175,13 @@ static int pca954x_deselect_mux(struct i2c_adapter *adap,
+ {
+ struct pca954x *data = i2c_get_clientdata(client);
+
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ if (data->disable_mux != 0)
++ data->last_chan = chips[data->type].nchans;
++ else
++ data->last_chan = 0;
++ return pca954x_reg_write(adap, client, data->disable_mux);
++#endif
+ /* Deselect active channel */
+ data->last_chan = 0;
+ return pca954x_reg_write(adap, client, data->last_chan);
+@@ -186,6 +195,8 @@ static int pca954x_probe(struct i2c_client *client,
+ {
+ struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
+ struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
++ struct device_node *of_node = client->dev.of_node;
++ bool idle_disconnect_dt;
+ struct gpio_desc *gpio;
+ int num, force, class;
+ struct pca954x *data;
+@@ -198,27 +209,55 @@ static int pca954x_probe(struct i2c_client *client,
+ if (!data)
+ return -ENOMEM;
+
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ /* A mux must not be disabled if there are no pull-ups on its input,
++ * or the I2C bus is left in a bad state. This really needs to be
++ * described in the DTS, as the kernel cannot know it otherwise.
++ */
++ data->type = id->driver_data;
++ data->disable_mux = of_node &&
++ of_property_read_bool(of_node, "i2c-mux-never-disable") &&
++ chips[data->type].muxtype == pca954x_ismux ?
++ chips[data->type].enable : 0;
++ /* force the first selection */
++ if (data->disable_mux != 0)
++ data->last_chan = chips[data->type].nchans;
++ else
++ data->last_chan = 0;
++#endif
+ i2c_set_clientdata(client, data);
+
+ /* Get the mux out of reset if a reset GPIO is specified. */
+- gpio = devm_gpiod_get(&client->dev, "reset");
+- if (!IS_ERR(gpio))
+- gpiod_direction_output(gpio, 0);
++ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW);
++ if (IS_ERR(gpio))
++ return PTR_ERR(gpio);
+
+ /* Write the mux register at addr to verify
+ * that the mux is in fact present. This also
+ * initializes the mux to disconnected state.
+ */
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) {
++#else
+ if (i2c_smbus_write_byte(client, 0) < 0) {
++#endif
+ dev_warn(&client->dev, "probe failed\n");
+ return -ENODEV;
+ }
+
++#ifndef CONFIG_ARCH_LAYERSCAPE
+ data->type = id->driver_data;
+ data->last_chan = 0; /* force the first selection */
++#endif
++
++ idle_disconnect_dt = of_node &&
++ of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+
+ /* Now create an adapter for each channel */
+ for (num = 0; num < chips[data->type].nchans; num++) {
++ bool idle_disconnect_pd = false;
++
+ force = 0; /* dynamic adap number */
+ class = 0; /* no class by default */
+ if (pdata) {
+@@ -229,12 +268,13 @@ static int pca954x_probe(struct i2c_client *client,
+ } else
+ /* discard unconfigured channels */
+ break;
++ idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
+ }
+
+ data->virt_adaps[num] =
+ i2c_add_mux_adapter(adap, &client->dev, client,
+ force, num, class, pca954x_select_chan,
+- (pdata && pdata->modes[num].deselect_on_exit)
++ (idle_disconnect_pd || idle_disconnect_dt)
+ ? pca954x_deselect_mux : NULL);
+
+ if (data->virt_adaps[num] == NULL) {
+@@ -280,6 +320,13 @@ static int pca954x_resume(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pca954x *data = i2c_get_clientdata(client);
+
++#ifdef CONFIG_ARCH_LAYERSCAPE
++ if (data->disable_mux != 0)
++ data->last_chan = chips[data->type].nchans;
++ else
++ data->last_chan = 0;
++ return i2c_smbus_write_byte(client, data->disable_mux);
++#endif
+ data->last_chan = 0;
+ return i2c_smbus_write_byte(client, 0);
+ }
+diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
+index dd51122..2cdcc76 100644
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -13,9 +13,35 @@ menuconfig IOMMU_SUPPORT
+
+ if IOMMU_SUPPORT
+
++menu "Generic IOMMU Pagetable Support"
++
++# Selected by the actual pagetable implementations
++config IOMMU_IO_PGTABLE
++ bool
++
++config IOMMU_IO_PGTABLE_LPAE
++ bool "ARMv7/v8 Long Descriptor Format"
++ select IOMMU_IO_PGTABLE
++ help
++ Enable support for the ARM long descriptor pagetable format.
++ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
++ sizes at both stage-1 and stage-2, as well as address spaces
++ up to 48-bits in size.
++
++config IOMMU_IO_PGTABLE_LPAE_SELFTEST
++ bool "LPAE selftests"
++ depends on IOMMU_IO_PGTABLE_LPAE
++ help
++ Enable self-tests for LPAE page table allocator. This performs
++ a series of page-table consistency checks during boot.
++
++ If unsure, say N here.
++
++endmenu
++
+ config OF_IOMMU
+ def_bool y
+- depends on OF
++ depends on OF && IOMMU_API
+
+ config FSL_PAMU
+ bool "Freescale IOMMU support"
+@@ -291,13 +317,13 @@ config SPAPR_TCE_IOMMU
+
+ config ARM_SMMU
+ bool "ARM Ltd. System MMU (SMMU) Support"
+- depends on ARM64 || (ARM_LPAE && OF)
++ depends on ARM64 || ARM
+ select IOMMU_API
++ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+- versions 1 and 2. The driver supports both v7l and v8l table
+- formats with 4k and 64k page sizes.
++ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
+index 16edef7..269cdd8 100644
+--- a/drivers/iommu/Makefile
++++ b/drivers/iommu/Makefile
+@@ -1,6 +1,8 @@
+ obj-$(CONFIG_IOMMU_API) += iommu.o
+ obj-$(CONFIG_IOMMU_API) += iommu-traces.o
+ obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
++obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
++obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+ obj-$(CONFIG_OF_IOMMU) += of_iommu.o
+ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index af3daf8..f7131fa 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -343,8 +343,9 @@ static u16 get_alias(struct device *dev)
+ */
+ if (pci_alias == devid &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+- pdev->dma_alias_devfn = ivrs_alias & 0xff;
++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID;
++ pdev->dma_alias_devid = PCI_DEVID(pdev->bus->number,
++ ivrs_alias & 0xff);
+ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+ dev_name(dev));
+@@ -3432,6 +3433,7 @@ static const struct iommu_ops amd_iommu_ops = {
+ .detach_dev = amd_iommu_detach_device,
+ .map = amd_iommu_map,
+ .unmap = amd_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = amd_iommu_iova_to_phys,
+ .pgsize_bitmap = AMD_IOMMU_PGSIZES,
+ };
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 60558f7..10e584b 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -23,8 +23,6 @@
+ * - Stream-matching and stream-indexing
+ * - v7/v8 long-descriptor format
+ * - Non-secure access to the SMMU
+- * - 4k and 64k pages, with contiguous pte hints.
+- * - Up to 48-bit addressing (dependent on VA_BITS)
+ * - Context fault reporting
+ */
+
+@@ -36,7 +34,7 @@
+ #include
+ #include
+ #include
+-#include
++#include
+ #include
+ #include
+ #include
+@@ -46,6 +44,16 @@
+
+ #include
+
++#include "io-pgtable.h"
++
++#ifdef CONFIG_FSL_MC_BUS
++#include <../drivers/staging/fsl-mc/include/mc.h>
++#endif
++
++#ifdef CONFIG_PCI_LAYERSCAPE
++#include <../drivers/pci/host/pci-layerscape.h>
++#endif
++
+ #include
+
+ /* Maximum number of stream IDs assigned to a single device */
+@@ -71,40 +79,6 @@
+ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
+ ? 0x400 : 0))
+
+-/* Page table bits */
+-#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
+-#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
+-#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
+-#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
+-#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
+-#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
+-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
+-
+-#if PAGE_SIZE == SZ_4K
+-#define ARM_SMMU_PTE_CONT_ENTRIES 16
+-#elif PAGE_SIZE == SZ_64K
+-#define ARM_SMMU_PTE_CONT_ENTRIES 32
+-#else
+-#define ARM_SMMU_PTE_CONT_ENTRIES 1
+-#endif
+-
+-#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
+-#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
+-
+-/* Stage-1 PTE */
+-#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
+-#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
+-#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
+-#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
+-
+-/* Stage-2 PTE */
+-#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
+-#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6)
+-#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6)
+-#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2)
+-#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2)
+-#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2)
+-
+ /* Configuration registers */
+ #define ARM_SMMU_GR0_sCR0 0x0
+ #define sCR0_CLIENTPD (1 << 0)
+@@ -132,17 +106,12 @@
+ #define ARM_SMMU_GR0_sGFSYNR0 0x50
+ #define ARM_SMMU_GR0_sGFSYNR1 0x54
+ #define ARM_SMMU_GR0_sGFSYNR2 0x58
+-#define ARM_SMMU_GR0_PIDR0 0xfe0
+-#define ARM_SMMU_GR0_PIDR1 0xfe4
+-#define ARM_SMMU_GR0_PIDR2 0xfe8
+
+ #define ID0_S1TS (1 << 30)
+ #define ID0_S2TS (1 << 29)
+ #define ID0_NTS (1 << 28)
+ #define ID0_SMS (1 << 27)
+-#define ID0_PTFS_SHIFT 24
+-#define ID0_PTFS_MASK 0x2
+-#define ID0_PTFS_V8_ONLY 0x2
++#define ID0_ATOSNS (1 << 26)
+ #define ID0_CTTW (1 << 14)
+ #define ID0_NUMIRPT_SHIFT 16
+ #define ID0_NUMIRPT_MASK 0xff
+@@ -169,11 +138,7 @@
+ #define ID2_PTFS_16K (1 << 13)
+ #define ID2_PTFS_64K (1 << 14)
+
+-#define PIDR2_ARCH_SHIFT 4
+-#define PIDR2_ARCH_MASK 0xf
+-
+ /* Global TLB invalidation */
+-#define ARM_SMMU_GR0_STLBIALL 0x60
+ #define ARM_SMMU_GR0_TLBIVMID 0x64
+ #define ARM_SMMU_GR0_TLBIALLNSNH 0x68
+ #define ARM_SMMU_GR0_TLBIALLH 0x6c
+@@ -231,13 +196,25 @@
+ #define ARM_SMMU_CB_TTBCR2 0x10
+ #define ARM_SMMU_CB_TTBR0_LO 0x20
+ #define ARM_SMMU_CB_TTBR0_HI 0x24
++#define ARM_SMMU_CB_TTBR1_LO 0x28
++#define ARM_SMMU_CB_TTBR1_HI 0x2c
+ #define ARM_SMMU_CB_TTBCR 0x30
+ #define ARM_SMMU_CB_S1_MAIR0 0x38
++#define ARM_SMMU_CB_S1_MAIR1 0x3c
++#define ARM_SMMU_CB_PAR_LO 0x50
++#define ARM_SMMU_CB_PAR_HI 0x54
+ #define ARM_SMMU_CB_FSR 0x58
+ #define ARM_SMMU_CB_FAR_LO 0x60
+ #define ARM_SMMU_CB_FAR_HI 0x64
+ #define ARM_SMMU_CB_FSYNR0 0x68
++#define ARM_SMMU_CB_S1_TLBIVA 0x600
+ #define ARM_SMMU_CB_S1_TLBIASID 0x610
++#define ARM_SMMU_CB_S1_TLBIVAL 0x620
++#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
++#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
++#define ARM_SMMU_CB_ATS1PR_LO 0x800
++#define ARM_SMMU_CB_ATS1PR_HI 0x804
++#define ARM_SMMU_CB_ATSR 0x8f0
+
+ #define SCTLR_S1_ASIDPNE (1 << 12)
+ #define SCTLR_CFCFG (1 << 7)
+@@ -249,64 +226,17 @@
+ #define SCTLR_M (1 << 0)
+ #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
+
+-#define RESUME_RETRY (0 << 0)
+-#define RESUME_TERMINATE (1 << 0)
+-
+-#define TTBCR_EAE (1 << 31)
++#define CB_PAR_F (1 << 0)
+
+-#define TTBCR_PASIZE_SHIFT 16
+-#define TTBCR_PASIZE_MASK 0x7
++#define ATSR_ACTIVE (1 << 0)
+
+-#define TTBCR_TG0_4K (0 << 14)
+-#define TTBCR_TG0_64K (1 << 14)
+-
+-#define TTBCR_SH0_SHIFT 12
+-#define TTBCR_SH0_MASK 0x3
+-#define TTBCR_SH_NS 0
+-#define TTBCR_SH_OS 2
+-#define TTBCR_SH_IS 3
+-
+-#define TTBCR_ORGN0_SHIFT 10
+-#define TTBCR_IRGN0_SHIFT 8
+-#define TTBCR_RGN_MASK 0x3
+-#define TTBCR_RGN_NC 0
+-#define TTBCR_RGN_WBWA 1
+-#define TTBCR_RGN_WT 2
+-#define TTBCR_RGN_WB 3
+-
+-#define TTBCR_SL0_SHIFT 6
+-#define TTBCR_SL0_MASK 0x3
+-#define TTBCR_SL0_LVL_2 0
+-#define TTBCR_SL0_LVL_1 1
+-
+-#define TTBCR_T1SZ_SHIFT 16
+-#define TTBCR_T0SZ_SHIFT 0
+-#define TTBCR_SZ_MASK 0xf
++#define RESUME_RETRY (0 << 0)
++#define RESUME_TERMINATE (1 << 0)
+
+ #define TTBCR2_SEP_SHIFT 15
+-#define TTBCR2_SEP_MASK 0x7
+-
+-#define TTBCR2_PASIZE_SHIFT 0
+-#define TTBCR2_PASIZE_MASK 0x7
+-
+-/* Common definitions for PASize and SEP fields */
+-#define TTBCR2_ADDR_32 0
+-#define TTBCR2_ADDR_36 1
+-#define TTBCR2_ADDR_40 2
+-#define TTBCR2_ADDR_42 3
+-#define TTBCR2_ADDR_44 4
+-#define TTBCR2_ADDR_48 5
+-
+-#define TTBRn_HI_ASID_SHIFT 16
+-
+-#define MAIR_ATTR_SHIFT(n) ((n) << 3)
+-#define MAIR_ATTR_MASK 0xff
+-#define MAIR_ATTR_DEVICE 0x04
+-#define MAIR_ATTR_NC 0x44
+-#define MAIR_ATTR_WBRWA 0xff
+-#define MAIR_ATTR_IDX_NC 0
+-#define MAIR_ATTR_IDX_CACHE 1
+-#define MAIR_ATTR_IDX_DEV 2
++#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
++
++#define TTBRn_HI_ASID_SHIFT 16
+
+ #define FSR_MULTI (1 << 31)
+ #define FSR_SS (1 << 30)
+@@ -345,6 +275,7 @@ struct arm_smmu_smr {
+ struct arm_smmu_master_cfg {
+ int num_streamids;
+ u16 streamids[MAX_MASTER_STREAMIDS];
++ u16 mask;
+ struct arm_smmu_smr *smrs;
+ };
+
+@@ -366,6 +297,7 @@ struct arm_smmu_device {
+ #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
+ #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
+ #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
++#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
+ u32 features;
+
+ #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+@@ -380,10 +312,9 @@ struct arm_smmu_device {
+ u32 num_mapping_groups;
+ DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+
+- unsigned long s1_input_size;
+- unsigned long s1_output_size;
+- unsigned long s2_input_size;
+- unsigned long s2_output_size;
++ unsigned long va_size;
++ unsigned long ipa_size;
++ unsigned long pa_size;
+
+ u32 num_global_irqs;
+ u32 num_context_irqs;
+@@ -397,19 +328,33 @@ struct arm_smmu_cfg {
+ u8 cbndx;
+ u8 irptndx;
+ u32 cbar;
+- pgd_t *pgd;
+ };
+ #define INVALID_IRPTNDX 0xff
+
+ #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
+ #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+
++enum arm_smmu_domain_stage {
++ ARM_SMMU_DOMAIN_S1 = 0,
++ ARM_SMMU_DOMAIN_S2,
++ ARM_SMMU_DOMAIN_NESTED,
++};
++
+ struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
++ struct io_pgtable_ops *pgtbl_ops;
++ spinlock_t pgtbl_lock;
+ struct arm_smmu_cfg cfg;
+- spinlock_t lock;
++ enum arm_smmu_domain_stage stage;
++ struct mutex init_mutex; /* Protects smmu pointer */
++ struct iommu_domain domain;
+ };
+
++static struct iommu_ops arm_smmu_ops;
++#ifdef CONFIG_FSL_MC_BUS
++static struct iommu_ops arm_fsl_mc_smmu_ops;
++#endif
++
+ static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+ static LIST_HEAD(arm_smmu_devices);
+
+@@ -422,6 +367,43 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+ { 0, NULL},
+ };
++#define CONFIG_AIOP_ERRATA
++#ifdef CONFIG_AIOP_ERRATA
++/*
++ * PL = 1, BMT = 1, VA = 1
++ */
++#define AIOP_SMR_VALUE 0x380
++/*
++ * Following should be set:
++ * SHCFG: 0x3
++ * MTCFG: 0x1
++ * MemAttr: 0xf
++ * Type: 0x1
++ * RACFG: 0x2
++ * WACFG: 0x2
++ */
++#define AIOP_S2CR_VALUE 0xA1FB00
++
++static void arm_smmu_aiop_attr_trans(struct arm_smmu_device *smmu)
++{
++ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
++ u16 mask = 0x7c7f;
++ int index;
++ u32 reg;
++ /* reserve one smr group for AIOP */
++ index = --smmu->num_mapping_groups;
++
++ reg = SMR_VALID | AIOP_SMR_VALUE << SMR_ID_SHIFT |
++ mask << SMR_MASK_SHIFT;
++ writel(reg, gr0_base + ARM_SMMU_GR0_SMR(index));
++ writel(AIOP_S2CR_VALUE, gr0_base + ARM_SMMU_GR0_S2CR(index));
++}
++#endif
++
++static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
++{
++ return container_of(dom, struct arm_smmu_domain, domain);
++}
+
+ static void parse_driver_options(struct arm_smmu_device *smmu)
+ {
+@@ -447,6 +429,16 @@ static struct device_node *dev_get_dev_node(struct device *dev)
+ return bus->bridge->parent->of_node;
+ }
+
++#ifdef CONFIG_FSL_MC_BUS
++ if (dev->bus == &fsl_mc_bus_type) {
++ /*
++ * Get to the MC device tree node.
++ */
++ while (dev->bus == &fsl_mc_bus_type)
++ dev = dev->parent;
++ }
++#endif
++
+ return dev->of_node;
+ }
+
+@@ -590,7 +582,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+ }
+
+ /* Wait for any pending TLB invalidations to complete */
+-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
++static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+ {
+ int count = 0;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+@@ -608,12 +600,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+ }
+ }
+
+-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
++static void arm_smmu_tlb_sync(void *cookie)
+ {
++ struct arm_smmu_domain *smmu_domain = cookie;
++ __arm_smmu_tlb_sync(smmu_domain->smmu);
++}
++
++static void arm_smmu_tlb_inv_context(void *cookie)
++{
++ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+- void __iomem *base = ARM_SMMU_GR0(smmu);
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
++ void __iomem *base;
+
+ if (stage1) {
+ base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+@@ -625,16 +624,83 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
+ base + ARM_SMMU_GR0_TLBIVMID);
+ }
+
+- arm_smmu_tlb_sync(smmu);
++ __arm_smmu_tlb_sync(smmu);
++}
++
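++/*
++ * Invalidate TLB entries covering an IOVA range without waiting for
++ * completion; the matching arm_smmu_tlb_sync callback issues the sync.
++ */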
++static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
++ bool leaf, void *cookie)
++{
++ struct arm_smmu_domain *smmu_domain = cookie;
++ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
++ struct arm_smmu_device *smmu = smmu_domain->smmu;
++ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
++ void __iomem *reg;
++
++ if (stage1) {
++ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
++ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
++
++ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
++ iova &= ~12UL;
++ iova |= ARM_SMMU_CB_ASID(cfg);
++ writel_relaxed(iova, reg);
++#ifdef CONFIG_64BIT
++ } else {
++ iova >>= 12;
++ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
++ writeq_relaxed(iova, reg);
++#endif
++ }
++#ifdef CONFIG_64BIT
++ } else if (smmu->version == ARM_SMMU_V2) {
++ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
++ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
++ ARM_SMMU_CB_S2_TLBIIPAS2;
++ writeq_relaxed(iova >> 12, reg);
++#endif
++ } else {
++ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
++ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
++ }
++}
++
++static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
++{
++ struct arm_smmu_domain *smmu_domain = cookie;
++ struct arm_smmu_device *smmu = smmu_domain->smmu;
++ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
++
++
++ /* Ensure new page tables are visible to the hardware walker */
++ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
++ dsb(ishst);
++ } else {
++ /*
++ * If the SMMU can't walk tables in the CPU caches, treat them
++ * like non-coherent DMA since we need to flush the new entries
++ * all the way out to memory. There's no possibility of
++ * recursion here as the SMMU table walker will not be wired
++ * through another SMMU.
++ */
++ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
++ DMA_TO_DEVICE);
++ }
+ }
+
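++/*
++ * These callbacks are handed to the generic io-pgtable code through
++ * io_pgtable_cfg.tlb (see arm_smmu_init_domain_context), which invokes
++ * them during map/unmap rather than the driver flushing directly.
++ */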
++static struct iommu_gather_ops arm_smmu_gather_ops = {
++ .tlb_flush_all = arm_smmu_tlb_inv_context,
++ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
++ .tlb_sync = arm_smmu_tlb_sync,
++ .flush_pgtable = arm_smmu_flush_pgtable,
++};
++
+ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+ {
+ int flags, ret;
+ u32 fsr, far, fsynr, resume;
+ unsigned long iova;
+ struct iommu_domain *domain = dev;
+- struct arm_smmu_domain *smmu_domain = domain->priv;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *cb_base;
+@@ -705,29 +771,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+ return IRQ_HANDLED;
+ }
+
+-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+- size_t size)
+-{
+- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+-
+-
+- /* Ensure new page tables are visible to the hardware walker */
+- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+- dsb(ishst);
+- } else {
+- /*
+- * If the SMMU can't walk tables in the CPU caches, treat them
+- * like non-coherent DMA since we need to flush the new entries
+- * all the way out to memory. There's no possibility of
+- * recursion here as the SMMU table walker will not be wired
+- * through another SMMU.
+- */
+- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+- DMA_TO_DEVICE);
+- }
+-}
+-
+-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
++static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
++ struct io_pgtable_cfg *pgtbl_cfg)
+ {
+ u32 reg;
+ bool stage1;
+@@ -740,6 +785,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+ stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
++ if (smmu->version > ARM_SMMU_V1) {
++ /*
++ * CBA2R.
++ * *Must* be initialised before CBAR thanks to VMID16
++ * architectural oversight affected some implementations.
++ */
++#ifdef CONFIG_64BIT
++ reg = CBA2R_RW64_64BIT;
++#else
++ reg = CBA2R_RW64_32BIT;
++#endif
++ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
++ }
++
+ /* CBAR */
+ reg = cfg->cbar;
+ if (smmu->version == ARM_SMMU_V1)
+@@ -757,135 +816,51 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+ }
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
+
+- if (smmu->version > ARM_SMMU_V1) {
+- /* CBA2R */
+-#ifdef CONFIG_64BIT
+- reg = CBA2R_RW64_64BIT;
+-#else
+- reg = CBA2R_RW64_32BIT;
+-#endif
+- writel_relaxed(reg,
+- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+-
+- /* TTBCR2 */
+- switch (smmu->s1_input_size) {
+- case 32:
+- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+- break;
+- case 36:
+- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+- break;
+- case 39:
+- case 40:
+- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+- break;
+- case 42:
+- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+- break;
+- case 44:
+- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+- break;
+- case 48:
+- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+- break;
+- }
+-
+- switch (smmu->s1_output_size) {
+- case 32:
+- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
+- break;
+- case 36:
+- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
+- break;
+- case 39:
+- case 40:
+- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
+- break;
+- case 42:
+- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
+- break;
+- case 44:
+- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
+- break;
+- case 48:
+- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
+- break;
+- }
+-
+- if (stage1)
+- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+- }
++ /* TTBRs */
++ if (stage1) {
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
++ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+
+- /* TTBR0 */
+- arm_smmu_flush_pgtable(smmu, cfg->pgd,
+- PTRS_PER_PGD * sizeof(pgd_t));
+- reg = __pa(cfg->pgd);
+- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+- reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
+- if (stage1)
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
++ } else {
++ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
++ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
++ }
+
+- /*
+- * TTBCR
+- * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
+- */
+- if (smmu->version > ARM_SMMU_V1) {
+- if (PAGE_SIZE == SZ_4K)
+- reg = TTBCR_TG0_4K;
+- else
+- reg = TTBCR_TG0_64K;
+-
+- if (!stage1) {
+- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
+-
+- switch (smmu->s2_output_size) {
+- case 32:
+- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+- break;
+- case 36:
+- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+- break;
+- case 40:
+- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+- break;
+- case 42:
+- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+- break;
+- case 44:
+- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+- break;
+- case 48:
+- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+- break;
+- }
+- } else {
+- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
++ /* TTBCR */
++ if (stage1) {
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
++ if (smmu->version > ARM_SMMU_V1) {
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
++ reg |= TTBCR2_SEP_UPSTREAM;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ }
+ } else {
+- reg = 0;
++ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ }
+
+- reg |= TTBCR_EAE |
+- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
+- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
+- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+-
+- if (!stage1)
+- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+-
+- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+-
+- /* MAIR0 (stage-1 only) */
++ /* MAIRs (stage-1 only) */
+ if (stage1) {
+- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
+- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
+- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
++ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+ }
+
+ /* SCTLR */
+- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
++ /* Disable stall mode */
++ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ if (stage1)
+ reg |= SCTLR_S1_ASIDPNE;
+ #ifdef __BIG_ENDIAN
+@@ -898,27 +873,69 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu)
+ {
+ int irq, start, ret = 0;
+- unsigned long flags;
+- struct arm_smmu_domain *smmu_domain = domain->priv;
++ unsigned long ias, oas;
++ struct io_pgtable_ops *pgtbl_ops;
++ struct io_pgtable_cfg pgtbl_cfg;
++ enum io_pgtable_fmt fmt;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+
+- spin_lock_irqsave(&smmu_domain->lock, flags);
++ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ goto out_unlock;
+
+- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
++ /*
++ * Mapping the requested stage onto what we support is surprisingly
++ * complicated, mainly because the spec allows S1+S2 SMMUs without
++ * support for nested translation. That means we end up with the
++ * following table:
++ *
++ * Requested Supported Actual
++ * S1 N S1
++ * S1 S1+S2 S1
++ * S1 S2 S2
++ * S1 S1 S1
++ * N N N
++ * N S1+S2 S2
++ * N S2 S2
++ * N S1 S1
++ *
++ * Note that you can't actually request stage-2 mappings.
++ */
++ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
++ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
++ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
++ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
++
++ switch (smmu_domain->stage) {
++ case ARM_SMMU_DOMAIN_S1:
++ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
++ start = smmu->num_s2_context_banks;
++ ias = smmu->va_size;
++ oas = smmu->ipa_size;
++ if (IS_ENABLED(CONFIG_64BIT))
++ fmt = ARM_64_LPAE_S1;
++ else
++ fmt = ARM_32_LPAE_S1;
++ break;
++ case ARM_SMMU_DOMAIN_NESTED:
+ /*
+ * We will likely want to change this if/when KVM gets
+ * involved.
+ */
+- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+- start = smmu->num_s2_context_banks;
+- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
+- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+- start = smmu->num_s2_context_banks;
+- } else {
++ case ARM_SMMU_DOMAIN_S2:
+ cfg->cbar = CBAR_TYPE_S2_TRANS;
+ start = 0;
++ ias = smmu->ipa_size;
++ oas = smmu->pa_size;
++ if (IS_ENABLED(CONFIG_64BIT))
++ fmt = ARM_64_LPAE_S2;
++ else
++ fmt = ARM_32_LPAE_S2;
++ break;
++ default:
++ ret = -EINVAL;
++ goto out_unlock;
+ }
+
+ ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+@@ -934,10 +951,33 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ cfg->irptndx = cfg->cbndx;
+ }
+
+- ACCESS_ONCE(smmu_domain->smmu) = smmu;
+- arm_smmu_init_context_bank(smmu_domain);
+- spin_unlock_irqrestore(&smmu_domain->lock, flags);
++ pgtbl_cfg = (struct io_pgtable_cfg) {
++ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
++ .ias = ias,
++ .oas = oas,
++ .tlb = &arm_smmu_gather_ops,
++ };
++
++ smmu_domain->smmu = smmu;
++ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
++ if (!pgtbl_ops) {
++ ret = -ENOMEM;
++ goto out_clear_smmu;
++ }
++
++ /* Update our support page sizes to reflect the page table format */
++ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
++#ifdef CONFIG_FSL_MC_BUS
++ arm_fsl_mc_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
++#endif
++
++ /* Initialise the context bank with our page table cfg */
++ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+
++ /*
++ * Request context fault interrupt. Do this last to avoid the
++ * handler seeing a half-initialised domain state.
++ */
+ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", domain);
+@@ -947,16 +987,22 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ cfg->irptndx = INVALID_IRPTNDX;
+ }
+
++ mutex_unlock(&smmu_domain->init_mutex);
++
++ /* Publish page table ops for map/unmap */
++ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
+
++out_clear_smmu:
++ smmu_domain->smmu = NULL;
+ out_unlock:
+- spin_unlock_irqrestore(&smmu_domain->lock, flags);
++ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
+ }
+
+ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+ {
+- struct arm_smmu_domain *smmu_domain = domain->priv;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ void __iomem *cb_base;
+@@ -965,24 +1011,30 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+ if (!smmu)
+ return;
+
+- /* Disable the context bank and nuke the TLB before freeing it. */
++ /*
++ * Disable the context bank and free the page tables before freeing
++ * it.
++ */
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+- arm_smmu_tlb_inv_context(smmu_domain);
+
+ if (cfg->irptndx != INVALID_IRPTNDX) {
+ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ free_irq(irq, domain);
+ }
+
++ if (smmu_domain->pgtbl_ops)
++ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
++
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+ }
+
+-static int arm_smmu_domain_init(struct iommu_domain *domain)
++static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+ {
+ struct arm_smmu_domain *smmu_domain;
+- pgd_t *pgd;
+
++ if (type != IOMMU_DOMAIN_UNMANAGED)
++ return NULL;
+ /*
+ * Allocate the domain and initialise some of its data structures.
+ * We can't really do anything meaningful until we've added a
+@@ -990,95 +1042,23 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
+ */
+ smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+ if (!smmu_domain)
+- return -ENOMEM;
++ return NULL;
+
+- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
+- if (!pgd)
+- goto out_free_domain;
+- smmu_domain->cfg.pgd = pgd;
++ mutex_init(&smmu_domain->init_mutex);
++ spin_lock_init(&smmu_domain->pgtbl_lock);
+
+- spin_lock_init(&smmu_domain->lock);
+- domain->priv = smmu_domain;
+- return 0;
+-
+-out_free_domain:
+- kfree(smmu_domain);
+- return -ENOMEM;
++ return &smmu_domain->domain;
+ }
+
+-static void arm_smmu_free_ptes(pmd_t *pmd)
++static void arm_smmu_domain_free(struct iommu_domain *domain)
+ {
+- pgtable_t table = pmd_pgtable(*pmd);
+-
+- __free_page(table);
+-}
+-
+-static void arm_smmu_free_pmds(pud_t *pud)
+-{
+- int i;
+- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
+-
+- pmd = pmd_base;
+- for (i = 0; i < PTRS_PER_PMD; ++i) {
+- if (pmd_none(*pmd))
+- continue;
+-
+- arm_smmu_free_ptes(pmd);
+- pmd++;
+- }
+-
+- pmd_free(NULL, pmd_base);
+-}
+-
+-static void arm_smmu_free_puds(pgd_t *pgd)
+-{
+- int i;
+- pud_t *pud, *pud_base = pud_offset(pgd, 0);
+-
+- pud = pud_base;
+- for (i = 0; i < PTRS_PER_PUD; ++i) {
+- if (pud_none(*pud))
+- continue;
+-
+- arm_smmu_free_pmds(pud);
+- pud++;
+- }
+-
+- pud_free(NULL, pud_base);
+-}
+-
+-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
+-{
+- int i;
+- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+- pgd_t *pgd, *pgd_base = cfg->pgd;
+-
+- /*
+- * Recursively free the page tables for this domain. We don't
+- * care about speculative TLB filling because the tables should
+- * not be active in any context bank at this point (SCTLR.M is 0).
+- */
+- pgd = pgd_base;
+- for (i = 0; i < PTRS_PER_PGD; ++i) {
+- if (pgd_none(*pgd))
+- continue;
+- arm_smmu_free_puds(pgd);
+- pgd++;
+- }
+-
+- kfree(pgd_base);
+-}
+-
+-static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+-{
+- struct arm_smmu_domain *smmu_domain = domain->priv;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ /*
+ * Free the domain resources. We assume that all devices have
+ * already been detached.
+ */
+ arm_smmu_destroy_domain_context(domain);
+- arm_smmu_free_pgtables(smmu_domain);
+ kfree(smmu_domain);
+ }
+
+@@ -1113,7 +1093,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+
+ smrs[i] = (struct arm_smmu_smr) {
+ .idx = idx,
+- .mask = 0, /* We don't currently share SMRs */
++ .mask = cfg->mask,
+ .id = cfg->streamids[i],
+ };
+ }
+@@ -1209,8 +1189,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ {
+ int ret;
+- struct arm_smmu_domain *smmu_domain = domain->priv;
+- struct arm_smmu_device *smmu, *dom_smmu;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
++ struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg;
+
+ smmu = find_smmu_for_device(dev);
+@@ -1224,21 +1204,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ return -EEXIST;
+ }
+
++ /* Ensure that the domain is finalised */
++ ret = arm_smmu_init_domain_context(domain, smmu);
++ if (IS_ERR_VALUE(ret))
++ return ret;
++
+ /*
+ * Sanity check the domain. We don't support domains across
+ * different SMMUs.
+ */
+- dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
+- if (!dom_smmu) {
+- /* Now that we have a master, we can finalise the domain */
+- ret = arm_smmu_init_domain_context(domain, smmu);
+- if (IS_ERR_VALUE(ret))
+- return ret;
+-
+- dom_smmu = smmu_domain->smmu;
+- }
+-
+- if (dom_smmu != smmu) {
++ if (smmu_domain->smmu != smmu) {
+ dev_err(dev,
+ "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
+ dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+@@ -1258,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+
+ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+ {
+- struct arm_smmu_domain *smmu_domain = domain->priv;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_master_cfg *cfg;
+
+ cfg = find_smmu_master_cfg(dev);
+@@ -1269,292 +1244,106 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+ arm_smmu_domain_remove_master(smmu_domain, cfg);
+ }
+
+-static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
+- unsigned long end)
+-{
+- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
+- (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
+-}
+-
+-static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
+- unsigned long addr, unsigned long end,
+- unsigned long pfn, int prot, int stage)
+-{
+- pte_t *pte, *start;
+- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+-
+- if (pmd_none(*pmd)) {
+- /* Allocate a new set of tables */
+- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
+-
+- if (!table)
+- return -ENOMEM;
+-
+- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
+- pmd_populate(NULL, pmd, table);
+- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
+- }
+-
+- if (stage == 1) {
+- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
+- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+- pteval |= ARM_SMMU_PTE_AP_RDONLY;
+-
+- if (prot & IOMMU_CACHE)
+- pteval |= (MAIR_ATTR_IDX_CACHE <<
+- ARM_SMMU_PTE_ATTRINDX_SHIFT);
+- } else {
+- pteval |= ARM_SMMU_PTE_HAP_FAULT;
+- if (prot & IOMMU_READ)
+- pteval |= ARM_SMMU_PTE_HAP_READ;
+- if (prot & IOMMU_WRITE)
+- pteval |= ARM_SMMU_PTE_HAP_WRITE;
+- if (prot & IOMMU_CACHE)
+- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
+- else
+- pteval |= ARM_SMMU_PTE_MEMATTR_NC;
+- }
+-
+- /* If no access, create a faulting entry to avoid TLB fills */
+- if (prot & IOMMU_EXEC)
+- pteval &= ~ARM_SMMU_PTE_XN;
+- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+- pteval &= ~ARM_SMMU_PTE_PAGE;
+-
+- pteval |= ARM_SMMU_PTE_SH_IS;
+- start = pmd_page_vaddr(*pmd) + pte_index(addr);
+- pte = start;
+-
+- /*
+- * Install the page table entries. This is fairly complicated
+- * since we attempt to make use of the contiguous hint in the
+- * ptes where possible. The contiguous hint indicates a series
+- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
+- * contiguous region with the following constraints:
+- *
+- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
+- * - Each pte in the region has the contiguous hint bit set
+- *
+- * This complicates unmapping (also handled by this code, when
+- * neither IOMMU_READ or IOMMU_WRITE are set) because it is
+- * possible, yet highly unlikely, that a client may unmap only
+- * part of a contiguous range. This requires clearing of the
+- * contiguous hint bits in the range before installing the new
+- * faulting entries.
+- *
+- * Note that re-mapping an address range without first unmapping
+- * it is not supported, so TLB invalidation is not required here
+- * and is instead performed at unmap and domain-init time.
+- */
+- do {
+- int i = 1;
+-
+- pteval &= ~ARM_SMMU_PTE_CONT;
+-
+- if (arm_smmu_pte_is_contiguous_range(addr, end)) {
+- i = ARM_SMMU_PTE_CONT_ENTRIES;
+- pteval |= ARM_SMMU_PTE_CONT;
+- } else if (pte_val(*pte) &
+- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
+- int j;
+- pte_t *cont_start;
+- unsigned long idx = pte_index(addr);
+-
+- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
+- cont_start = pmd_page_vaddr(*pmd) + idx;
+- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
+- pte_val(*(cont_start + j)) &=
+- ~ARM_SMMU_PTE_CONT;
+-
+- arm_smmu_flush_pgtable(smmu, cont_start,
+- sizeof(*pte) *
+- ARM_SMMU_PTE_CONT_ENTRIES);
+- }
+-
+- do {
+- *pte = pfn_pte(pfn, __pgprot(pteval));
+- } while (pte++, pfn++, addr += PAGE_SIZE, --i);
+- } while (addr != end);
+-
+- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
+- return 0;
+-}
+-
+-static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+- unsigned long addr, unsigned long end,
+- phys_addr_t phys, int prot, int stage)
++static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
++ phys_addr_t paddr, size_t size, int prot)
+ {
+ int ret;
+- pmd_t *pmd;
+- unsigned long next, pfn = __phys_to_pfn(phys);
+-
+-#ifndef __PAGETABLE_PMD_FOLDED
+- if (pud_none(*pud)) {
+- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+- if (!pmd)
+- return -ENOMEM;
+-
+- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
+- pud_populate(NULL, pud, pmd);
+- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+-
+- pmd += pmd_index(addr);
+- } else
+-#endif
+- pmd = pmd_offset(pud, addr);
++ unsigned long flags;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
++ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+- do {
+- next = pmd_addr_end(addr, end);
+- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
+- prot, stage);
+- phys += next - addr;
+- pfn = __phys_to_pfn(phys);
+- } while (pmd++, addr = next, addr < end);
++ if (!ops)
++ return -ENODEV;
+
++ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
++ ret = ops->map(ops, iova, paddr, size, prot);
++ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+ }
+
+-static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+- unsigned long addr, unsigned long end,
+- phys_addr_t phys, int prot, int stage)
++static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
++ size_t size)
+ {
+- int ret = 0;
+- pud_t *pud;
+- unsigned long next;
+-
+-#ifndef __PAGETABLE_PUD_FOLDED
+- if (pgd_none(*pgd)) {
+- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
+- if (!pud)
+- return -ENOMEM;
+-
+- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
+- pgd_populate(NULL, pgd, pud);
+- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+-
+- pud += pud_index(addr);
+- } else
+-#endif
+- pud = pud_offset(pgd, addr);
++ size_t ret;
++ unsigned long flags;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
++ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+- do {
+- next = pud_addr_end(addr, end);
+- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
+- prot, stage);
+- phys += next - addr;
+- } while (pud++, addr = next, addr < end);
++ if (!ops)
++ return 0;
+
++ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
++ ret = ops->unmap(ops, iova, size);
++ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+ }
+
+-static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+- unsigned long iova, phys_addr_t paddr,
+- size_t size, int prot)
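++/*
++ * Resolve an IOVA with the hardware ATS1PR address translation operation,
++ * falling back to a software page table walk if the ATSR poll times out.
++ */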
++static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
++ dma_addr_t iova)
+ {
+- int ret, stage;
+- unsigned long end;
+- phys_addr_t input_mask, output_mask;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+- pgd_t *pgd = cfg->pgd;
+- unsigned long flags;
++ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
++ struct device *dev = smmu->dev;
++ void __iomem *cb_base;
++ u32 tmp;
++ u64 phys;
++
++ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+- if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
+- stage = 2;
+- input_mask = (1ULL << smmu->s2_input_size) - 1;
+- output_mask = (1ULL << smmu->s2_output_size) - 1;
++ if (smmu->version == 1) {
++ u32 reg = iova & ~0xfff;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
+ } else {
+- stage = 1;
+- input_mask = (1ULL << smmu->s1_input_size) - 1;
+- output_mask = (1ULL << smmu->s1_output_size) - 1;
++ u32 reg = iova & ~0xfff;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
++ reg = ((u64)iova & ~0xfff) >> 32;
++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
+ }
+
+- if (!pgd)
+- return -EINVAL;
+-
+- if (size & ~PAGE_MASK)
+- return -EINVAL;
+-
+- if ((phys_addr_t)iova & ~input_mask)
+- return -ERANGE;
+-
+- if (paddr & ~output_mask)
+- return -ERANGE;
+-
+- spin_lock_irqsave(&smmu_domain->lock, flags);
+- pgd += pgd_index(iova);
+- end = iova + size;
+- do {
+- unsigned long next = pgd_addr_end(iova, end);
+-
+- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
+- prot, stage);
+- if (ret)
+- goto out_unlock;
+-
+- paddr += next - iova;
+- iova = next;
+- } while (pgd++, iova != end);
+-
+-out_unlock:
+- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+-
+- return ret;
+-}
+-
+-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+- phys_addr_t paddr, size_t size, int prot)
+-{
+- struct arm_smmu_domain *smmu_domain = domain->priv;
+-
+- if (!smmu_domain)
+- return -ENODEV;
++ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
++ !(tmp & ATSR_ACTIVE), 5, 50)) {
++ dev_err(dev,
++ "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
++ &iova);
++ return ops->iova_to_phys(ops, iova);
++ }
+
+- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
+-}
++ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
++ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
+
+-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+- size_t size)
+-{
+- int ret;
+- struct arm_smmu_domain *smmu_domain = domain->priv;
++ if (phys & CB_PAR_F) {
++ dev_err(dev, "translation fault!\n");
++ dev_err(dev, "PAR = 0x%llx\n", phys);
++ return 0;
++ }
+
+- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
+- arm_smmu_tlb_inv_context(smmu_domain);
+- return ret ? 0 : size;
++ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+ }
+
+ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+- dma_addr_t iova)
++ dma_addr_t iova)
+ {
+- pgd_t *pgdp, pgd;
+- pud_t pud;
+- pmd_t pmd;
+- pte_t pte;
+- struct arm_smmu_domain *smmu_domain = domain->priv;
+- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+-
+- pgdp = cfg->pgd;
+- if (!pgdp)
+- return 0;
+-
+- pgd = *(pgdp + pgd_index(iova));
+- if (pgd_none(pgd))
+- return 0;
++ phys_addr_t ret;
++ unsigned long flags;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
++ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+- pud = *pud_offset(&pgd, iova);
+- if (pud_none(pud))
++ if (!ops)
+ return 0;
+
+- pmd = *pmd_offset(&pud, iova);
+- if (pmd_none(pmd))
+- return 0;
++ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
++ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
++ smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
++ ret = arm_smmu_iova_to_phys_hard(domain, iova);
++ } else {
++ ret = ops->iova_to_phys(ops, iova);
++ }
+
+- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
+- if (pte_none(pte))
+- return 0;
++ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+
+- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
++ return ret;
+ }
+
+ static bool arm_smmu_capable(enum iommu_cap cap)
+@@ -1568,6 +1357,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
+ return true;
+ case IOMMU_CAP_INTR_REMAP:
+ return true; /* MSIs are just memory writes */
++ case IOMMU_CAP_NOEXEC:
++ return true;
+ default:
+ return false;
+ }
+@@ -1584,81 +1375,248 @@ static void __arm_smmu_release_pci_iommudata(void *data)
+ kfree(data);
+ }
+
+-static int arm_smmu_add_device(struct device *dev)
++static int arm_smmu_add_pci_device(struct pci_dev *pdev)
+ {
+- struct arm_smmu_device *smmu;
++ int i, ret;
++ u16 sid;
++ struct iommu_group *group;
+ struct arm_smmu_master_cfg *cfg;
++#ifdef CONFIG_PCI_LAYERSCAPE
++ u32 streamid;
++#endif
++
++ group = iommu_group_get_for_dev(&pdev->dev);
++ if (IS_ERR(group))
++ return PTR_ERR(group);
++
++ cfg = iommu_group_get_iommudata(group);
++ if (!cfg) {
++ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
++ if (!cfg) {
++ ret = -ENOMEM;
++ goto out_put_group;
++ }
++
++ iommu_group_set_iommudata(group, cfg,
++ __arm_smmu_release_pci_iommudata);
++ }
++
++ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
++ ret = -ENOSPC;
++ goto out_put_group;
++ }
++
++ /*
++ * Assume Stream ID == Requester ID for now.
++ * We need a way to describe the ID mappings in FDT.
++ */
++ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
++ for (i = 0; i < cfg->num_streamids; ++i)
++ if (cfg->streamids[i] == sid)
++ break;
++
++ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
++ if (i == cfg->num_streamids)
++ cfg->streamids[cfg->num_streamids++] = sid;
++
++#ifdef CONFIG_PCI_LAYERSCAPE
++ streamid = set_pcie_streamid_translation(pdev, sid);
++ if (~streamid == 0) {
++ ret = -ENODEV;
++ goto out_put_group;
++ }
++ cfg->streamids[0] = streamid;
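++ /*
++ * An SMR mask of 0x7c00 makes stream ID bits [14:10] don't-cares when
++ * matching (set SMR.MASK bits are ignored); assumed to be the intended
++ * grouping for the Layerscape PCIe stream IDs.
++ */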
++ cfg->mask = 0x7c00;
++
++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID;
++ pdev->dma_alias_devid = streamid;
++#endif
++
++ return 0;
++out_put_group:
++ iommu_group_put(group);
++ return ret;
++}
++
++static int arm_smmu_add_platform_device(struct device *dev)
++{
+ struct iommu_group *group;
+- void (*releasefn)(void *) = NULL;
+- int ret;
++ struct arm_smmu_master *master;
++ struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+- smmu = find_smmu_for_device(dev);
+ if (!smmu)
+ return -ENODEV;
+
++ master = find_smmu_master(smmu, dev->of_node);
++ if (!master)
++ return -ENODEV;
++
++ /* No automatic group creation for platform devices */
+ group = iommu_group_alloc();
+- if (IS_ERR(group)) {
+- dev_err(dev, "Failed to allocate IOMMU group\n");
++ if (IS_ERR(group))
+ return PTR_ERR(group);
++
++ iommu_group_set_iommudata(group, &master->cfg, NULL);
++ return iommu_group_add_device(group, dev);
++}
++
++static int arm_smmu_add_device(struct device *dev)
++{
++ if (dev_is_pci(dev))
++ return arm_smmu_add_pci_device(to_pci_dev(dev));
++
++ return arm_smmu_add_platform_device(dev);
++}
++
++static void arm_smmu_remove_device(struct device *dev)
++{
++ iommu_group_remove_device(dev);
++}
++
++static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
++ enum iommu_attr attr, void *data)
++{
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
++
++ switch (attr) {
++ case DOMAIN_ATTR_NESTING:
++ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
++ return 0;
++ default:
++ return -ENODEV;
+ }
++}
+
+- if (dev_is_pci(dev)) {
+- struct pci_dev *pdev = to_pci_dev(dev);
++static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
++ enum iommu_attr attr, void *data)
++{
++ int ret = 0;
++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+- if (!cfg) {
+- ret = -ENOMEM;
+- goto out_put_group;
++ mutex_lock(&smmu_domain->init_mutex);
++
++ switch (attr) {
++ case DOMAIN_ATTR_NESTING:
++ if (smmu_domain->smmu) {
++ ret = -EPERM;
++ goto out_unlock;
+ }
+
+- cfg->num_streamids = 1;
++ if (*(int *)data)
++ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
++ else
++ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
++
++ break;
++ default:
++ ret = -ENODEV;
++ }
++
++out_unlock:
++ mutex_unlock(&smmu_domain->init_mutex);
++ return ret;
++}
++
++static struct iommu_ops arm_smmu_ops = {
++ .capable = arm_smmu_capable,
++ .domain_alloc = arm_smmu_domain_alloc,
++ .domain_free = arm_smmu_domain_free,
++ .attach_dev = arm_smmu_attach_dev,
++ .detach_dev = arm_smmu_detach_dev,
++ .map = arm_smmu_map,
++ .unmap = arm_smmu_unmap,
++ .iova_to_phys = arm_smmu_iova_to_phys,
++ .add_device = arm_smmu_add_device,
++ .remove_device = arm_smmu_remove_device,
++ .domain_get_attr = arm_smmu_domain_get_attr,
++ .domain_set_attr = arm_smmu_domain_set_attr,
++ .pgsize_bitmap = -1UL, /* Restricted during device attach */
++};
++
++#ifdef CONFIG_FSL_MC_BUS
++
++static void arm_smmu_release_fsl_mc_iommudata(void *data)
++{
++ kfree(data);
++}
++
++/*
++ * IOMMU group creation and stream ID programming for
++ * the LS devices.
++ */
++static int arm_fsl_mc_smmu_add_device(struct device *dev)
++{
++ struct device *cont_dev;
++ struct fsl_mc_device *mc_dev;
++ struct iommu_group *group;
++ struct arm_smmu_master_cfg *cfg;
++ int ret = 0;
++
++ mc_dev = to_fsl_mc_device(dev);
++ if (mc_dev->flags & FSL_MC_IS_DPRC)
++ cont_dev = dev;
++ else
++ cont_dev = mc_dev->dev.parent;
++
++ get_device(cont_dev);
++ group = iommu_group_get(cont_dev);
++ put_device(cont_dev);
++ if (!group) {
++ void (*releasefn)(void *) = NULL;
++
++ group = iommu_group_alloc();
++ if (IS_ERR(group))
++ return PTR_ERR(group);
+ /*
+- * Assume Stream ID == Requester ID for now.
+- * We need a way to describe the ID mappings in FDT.
++ * Allocate the cfg for the container device and associate it with
++ * the iommu group; the cfg lookup code later retrieves it from the
++ * iommu group.
+ */
+- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
+- &cfg->streamids[0]);
+- releasefn = __arm_smmu_release_pci_iommudata;
+- } else {
+- struct arm_smmu_master *master;
+-
+- master = find_smmu_master(smmu, dev->of_node);
+- if (!master) {
+- ret = -ENODEV;
+- goto out_put_group;
+- }
++ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
++ if (!cfg)
++ return -ENOMEM;
+
+- cfg = &master->cfg;
++ mc_dev = to_fsl_mc_device(cont_dev);
++ cfg->num_streamids = 1;
++ cfg->streamids[0] = mc_dev->icid;
++ cfg->mask = 0x7c00;
++ releasefn = arm_smmu_release_fsl_mc_iommudata;
++ iommu_group_set_iommudata(group, cfg, releasefn);
++ ret = iommu_group_add_device(group, cont_dev);
+ }
+
+- iommu_group_set_iommudata(group, cfg, releasefn);
+- ret = iommu_group_add_device(group, dev);
++ if (!ret && cont_dev != dev)
++ ret = iommu_group_add_device(group, dev);
+
+-out_put_group:
+ iommu_group_put(group);
++
+ return ret;
+ }
+
+-static void arm_smmu_remove_device(struct device *dev)
++static void arm_fsl_mc_smmu_remove_device(struct device *dev)
+ {
+ iommu_group_remove_device(dev);
++
+ }
+
+-static const struct iommu_ops arm_smmu_ops = {
+- .capable = arm_smmu_capable,
+- .domain_init = arm_smmu_domain_init,
+- .domain_destroy = arm_smmu_domain_destroy,
+- .attach_dev = arm_smmu_attach_dev,
+- .detach_dev = arm_smmu_detach_dev,
+- .map = arm_smmu_map,
+- .unmap = arm_smmu_unmap,
+- .iova_to_phys = arm_smmu_iova_to_phys,
+- .add_device = arm_smmu_add_device,
+- .remove_device = arm_smmu_remove_device,
+- .pgsize_bitmap = (SECTION_SIZE |
+- ARM_SMMU_PTE_CONT_SIZE |
+- PAGE_SIZE),
++static struct iommu_ops arm_fsl_mc_smmu_ops = {
++ .capable = arm_smmu_capable,
++ .domain_alloc = arm_smmu_domain_alloc,
++ .domain_free = arm_smmu_domain_free,
++ .attach_dev = arm_smmu_attach_dev,
++ .detach_dev = arm_smmu_detach_dev,
++ .map = arm_smmu_map,
++ .unmap = arm_smmu_unmap,
++ .map_sg = default_iommu_map_sg,
++ .iova_to_phys = arm_smmu_iova_to_phys,
++ .add_device = arm_fsl_mc_smmu_add_device,
++ .remove_device = arm_fsl_mc_smmu_remove_device,
++ .domain_get_attr = arm_smmu_domain_get_attr,
++ .domain_set_attr = arm_smmu_domain_set_attr,
++ .pgsize_bitmap = -1UL, /* Restricted during device attach */
+ };
++#endif
+
+ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+ {
+@@ -1686,7 +1644,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+ }
+
+ /* Invalidate the TLB, just in case */
+- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+
+@@ -1708,7 +1665,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+ reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+
+ /* Push the button */
+- arm_smmu_tlb_sync(smmu);
++ __arm_smmu_tlb_sync(smmu);
+ writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+ }
+
+@@ -1742,12 +1699,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+
+ /* ID0 */
+ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+-#ifndef CONFIG_64BIT
+- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
+- dev_err(smmu->dev, "\tno v7 descriptor support!\n");
+- return -ENODEV;
+- }
+-#endif
+
+ /* Restrict available stages based on module parameter */
+ if (force_stage == 1)
+@@ -1776,6 +1727,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+ return -ENODEV;
+ }
+
++ if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
++ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
++ dev_notice(smmu->dev, "\taddress translation ops\n");
++ }
++
+ if (id & ID0_CTTW) {
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+ dev_notice(smmu->dev, "\tcoherent table walk\n");
+@@ -1820,16 +1776,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+ smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
+
+ /* Check for size mismatch of SMMU address space from mapped region */
+- size = 1 <<
+- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
++ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+ size *= 2 << smmu->pgshift;
+ if (smmu->size != size)
+ dev_warn(smmu->dev,
+ "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
+ size, smmu->size);
+
+- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
+- ID1_NUMS2CB_MASK;
++ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
+ smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+ if (smmu->num_s2_context_banks > smmu->num_context_banks) {
+ dev_err(smmu->dev, "impossible number of S2 context banks!\n");
+@@ -1841,46 +1795,49 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+ /* ID2 */
+ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
+ size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
+-
+- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
+-#ifdef CONFIG_64BIT
+- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
+-#else
+- smmu->s2_input_size = min(32UL, size);
+-#endif
++ smmu->ipa_size = size;
+
+- /* The stage-2 output mask is also applied for bypass */
++ /* The output mask is also applied for bypass */
+ size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
++ smmu->pa_size = size;
++
++ /*
++ * What the page table walker can address actually depends on which
++ * descriptor format is in use, but since a) we don't know that yet,
++ * and b) it can vary per context bank, this will have to do...
++ */
++ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
++ dev_warn(smmu->dev,
++ "failed to set DMA mask for table walker\n");
+
+ if (smmu->version == ARM_SMMU_V1) {
+- smmu->s1_input_size = 32;
++ smmu->va_size = smmu->ipa_size;
++ size = SZ_4K | SZ_2M | SZ_1G;
+ } else {
+-#ifdef CONFIG_64BIT
+ size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+- size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
+-#else
+- size = 32;
++ smmu->va_size = arm_smmu_id_size_to_bits(size);
++#ifndef CONFIG_64BIT
++ smmu->va_size = min(32UL, smmu->va_size);
+ #endif
+- smmu->s1_input_size = size;
+-
+- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
+- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
+- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
+- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
+- PAGE_SIZE);
+- return -ENODEV;
+- }
++ size = 0;
++ if (id & ID2_PTFS_4K)
++ size |= SZ_4K | SZ_2M | SZ_1G;
++ if (id & ID2_PTFS_16K)
++ size |= SZ_16K | SZ_32M;
++ if (id & ID2_PTFS_64K)
++ size |= SZ_64K | SZ_512M;
+ }
+
++ arm_smmu_ops.pgsize_bitmap &= size;
++ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
++
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+ dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+- smmu->s1_input_size, smmu->s1_output_size);
++ smmu->va_size, smmu->ipa_size);
+
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
+ dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+- smmu->s2_input_size, smmu->s2_output_size);
++ smmu->ipa_size, smmu->pa_size);
+
+ return 0;
+ }
+@@ -2007,6 +1964,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+ spin_unlock(&arm_smmu_devices_lock);
+
+ arm_smmu_device_reset(smmu);
++ /* AIOP Rev1 errata work around */
++#ifdef CONFIG_AIOP_ERRATA
++ arm_smmu_aiop_attr_trans(smmu);
++#endif
+ return 0;
+
+ out_free_irqs:
+@@ -2062,7 +2023,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
+
+ static struct platform_driver arm_smmu_driver = {
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "arm-smmu",
+ .of_match_table = of_match_ptr(arm_smmu_of_match),
+ },
+@@ -2072,8 +2032,20 @@ static struct platform_driver arm_smmu_driver = {
+
+ static int __init arm_smmu_init(void)
+ {
++ struct device_node *np;
+ int ret;
+
++ /*
++ * Play nice with systems that don't have an ARM SMMU by checking that
++ * an ARM SMMU exists in the system before proceeding with the driver
++ * and IOMMU bus operation registration.
++ */
++ np = of_find_matching_node(NULL, arm_smmu_of_match);
++ if (!np)
++ return 0;
++
++ of_node_put(np);
++
+ ret = platform_driver_register(&arm_smmu_driver);
+ if (ret)
+ return ret;
+@@ -2092,6 +2064,10 @@ static int __init arm_smmu_init(void)
+ bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ #endif
+
++#ifdef CONFIG_FSL_MC_BUS
++ if (!iommu_present(&fsl_mc_bus_type))
++ bus_set_iommu(&fsl_mc_bus_type, &arm_fsl_mc_smmu_ops);
++#endif
+ return 0;
+ }
+
+diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
+index 7423318..7ce5273 100644
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -684,7 +684,6 @@ static const struct of_device_id sysmmu_of_match[] __initconst = {
+ static struct platform_driver exynos_sysmmu_driver __refdata = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "exynos-sysmmu",
+ .of_match_table = sysmmu_of_match,
+ }
+@@ -1178,6 +1177,7 @@ static const struct iommu_ops exynos_iommu_ops = {
+ .detach_dev = exynos_iommu_detach_device,
+ .map = exynos_iommu_map,
+ .unmap = exynos_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = exynos_iommu_iova_to_phys,
+ .add_device = exynos_iommu_add_device,
+ .remove_device = exynos_iommu_remove_device,
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index 2b6ce93..9396c85 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -31,7 +31,7 @@
+ #include
+ #include
+ #include
+-#include
++#include
+
+ #include "fsl_pamu.h"
+
+@@ -1227,7 +1227,6 @@ static const struct of_device_id fsl_of_pamu_ids[] = {
+ static struct platform_driver fsl_of_pamu_driver = {
+ .driver = {
+ .name = "fsl-of-pamu",
+- .owner = THIS_MODULE,
+ },
+ .probe = fsl_pamu_probe,
+ };
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 3d1fc73..9e97328 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4474,6 +4474,7 @@ static const struct iommu_ops intel_iommu_ops = {
+ .detach_dev = intel_iommu_detach_device,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .add_device = intel_iommu_add_device,
+ .remove_device = intel_iommu_remove_device,
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+new file mode 100644
+index 0000000..fd6dd22
+--- /dev/null
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -0,0 +1,997 @@
++/*
++ * CPU-agnostic ARM page table allocator.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * Copyright (C) 2014 ARM Limited
++ *
++ * Author: Will Deacon <will.deacon@arm.com>
++ */
++
++#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
++
++#include <linux/iommu.h>
++#include <linux/kernel.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include "io-pgtable.h"
++
++#define ARM_LPAE_MAX_ADDR_BITS 48
++#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
++#define ARM_LPAE_MAX_LEVELS 4
++
++/* Struct accessors */
++#define io_pgtable_to_data(x) \
++ container_of((x), struct arm_lpae_io_pgtable, iop)
++
++#define io_pgtable_ops_to_pgtable(x) \
++ container_of((x), struct io_pgtable, ops)
++
++#define io_pgtable_ops_to_data(x) \
++ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
++
++/*
++ * For consistency with the architecture, we always consider
++ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
++ */
++#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
++
++/*
++ * Calculate the right shift amount to get to the portion describing level l
++ * in a virtual address mapped by the pagetable in d.
++ */
++#define ARM_LPAE_LVL_SHIFT(l,d) \
++ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
++ * (d)->bits_per_level) + (d)->pg_shift)
++
++#define ARM_LPAE_PAGES_PER_PGD(d) \
++ DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
++
++/*
++ * Calculate the index at level l used to map virtual address a using the
++ * pagetable in d.
++ */
++#define ARM_LPAE_PGD_IDX(l,d) \
++ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
++
++#define ARM_LPAE_LVL_IDX(a,l,d) \
++ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
++ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
++
++/* Calculate the block/page mapping size at level l for pagetable in d. */
++#define ARM_LPAE_BLOCK_SIZE(l,d) \
++ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
++ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
++
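++/*
++ * For example, assuming a 4K granule and a 48-bit input address space:
++ * pg_shift = 12, bits_per_level = 9 and levels = 4, giving per-level
++ * shifts of 39/30/21/12 and ARM_LPAE_BLOCK_SIZE values of 1GB, 2MB and
++ * 4KB at levels 1, 2 and 3 respectively.
++ */
++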
++/* Page table bits */
++#define ARM_LPAE_PTE_TYPE_SHIFT 0
++#define ARM_LPAE_PTE_TYPE_MASK 0x3
++
++#define ARM_LPAE_PTE_TYPE_BLOCK 1
++#define ARM_LPAE_PTE_TYPE_TABLE 3
++#define ARM_LPAE_PTE_TYPE_PAGE 3
++
++#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
++#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
++#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
++#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
++#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
++#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
++#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
++#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
++
++#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
++/* Ignore the contiguous bit for block splitting */
++#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
++#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
++ ARM_LPAE_PTE_ATTR_HI_MASK)
++
++/* Stage-1 PTE */
++#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
++#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
++#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
++#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
++
++/* Stage-2 PTE */
++#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
++#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
++#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
++#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
++#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
++#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
++
++/* Register bits */
++#define ARM_32_LPAE_TCR_EAE (1 << 31)
++#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
++
++#define ARM_LPAE_TCR_EPD1 (1 << 23)
++
++#define ARM_LPAE_TCR_TG0_4K (0 << 14)
++#define ARM_LPAE_TCR_TG0_64K (1 << 14)
++#define ARM_LPAE_TCR_TG0_16K (2 << 14)
++
++#define ARM_LPAE_TCR_SH0_SHIFT 12
++#define ARM_LPAE_TCR_SH0_MASK 0x3
++#define ARM_LPAE_TCR_SH_NS 0
++#define ARM_LPAE_TCR_SH_OS 2
++#define ARM_LPAE_TCR_SH_IS 3
++
++#define ARM_LPAE_TCR_ORGN0_SHIFT 10
++#define ARM_LPAE_TCR_IRGN0_SHIFT 8
++#define ARM_LPAE_TCR_RGN_MASK 0x3
++#define ARM_LPAE_TCR_RGN_NC 0
++#define ARM_LPAE_TCR_RGN_WBWA 1
++#define ARM_LPAE_TCR_RGN_WT 2
++#define ARM_LPAE_TCR_RGN_WB 3
++
++#define ARM_LPAE_TCR_SL0_SHIFT 6
++#define ARM_LPAE_TCR_SL0_MASK 0x3
++
++#define ARM_LPAE_TCR_T0SZ_SHIFT 0
++#define ARM_LPAE_TCR_SZ_MASK 0xf
++
++#define ARM_LPAE_TCR_PS_SHIFT 16
++#define ARM_LPAE_TCR_PS_MASK 0x7
++
++#define ARM_LPAE_TCR_IPS_SHIFT 32
++#define ARM_LPAE_TCR_IPS_MASK 0x7
++
++#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
++#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
++#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
++#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
++#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
++#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
++
++#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
++#define ARM_LPAE_MAIR_ATTR_MASK 0xff
++#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
++#define ARM_LPAE_MAIR_ATTR_NC 0x44
++#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
++#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
++#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
++#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
++
++/* IOPTE accessors */
++#define iopte_deref(pte,d) \
++ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
++ & ~((1ULL << (d)->pg_shift) - 1)))
++
++#define iopte_type(pte,l) \
++ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
++
++#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
++
++#define iopte_leaf(pte,l) \
++ (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
++ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
++ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
++
++#define iopte_to_pfn(pte,d) \
++ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
++
++#define pfn_to_iopte(pfn,d) \
++ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
++
++struct arm_lpae_io_pgtable {
++ struct io_pgtable iop;
++
++ int levels;
++ size_t pgd_size;
++ unsigned long pg_shift;
++ unsigned long bits_per_level;
++
++ void *pgd;
++};
++
++typedef u64 arm_lpae_iopte;
++
++static bool selftest_running = false;
++
++static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
++ unsigned long iova, phys_addr_t paddr,
++ arm_lpae_iopte prot, int lvl,
++ arm_lpae_iopte *ptep)
++{
++ arm_lpae_iopte pte = prot;
++
++ /* We require an unmap first */
++ if (iopte_leaf(*ptep, lvl)) {
++ WARN_ON(!selftest_running);
++ return -EEXIST;
++ }
++
++ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
++ pte |= ARM_LPAE_PTE_NS;
++
++ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
++ pte |= ARM_LPAE_PTE_TYPE_PAGE;
++ else
++ pte |= ARM_LPAE_PTE_TYPE_BLOCK;
++
++ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
++ pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
++
++ *ptep = pte;
++ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
++ return 0;
++}
++
++static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
++ phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
++ int lvl, arm_lpae_iopte *ptep)
++{
++ arm_lpae_iopte *cptep, pte;
++ void *cookie = data->iop.cookie;
++ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
++
++ /* Find our entry at the current level */
++ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
++
++ /* If we can install a leaf entry at this level, then do so */
++ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
++ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
++
++ /* We can't allocate tables at the final level */
++ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
++ return -EINVAL;
++
++ /* Grab a pointer to the next level */
++ pte = *ptep;
++ if (!pte) {
++ cptep = alloc_pages_exact(1UL << data->pg_shift,
++ GFP_ATOMIC | __GFP_ZERO);
++ if (!cptep)
++ return -ENOMEM;
++
++ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
++ cookie);
++ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
++ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
++ pte |= ARM_LPAE_PTE_NSTABLE;
++ *ptep = pte;
++ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
++ } else {
++ cptep = iopte_deref(pte, data);
++ }
++
++ /* Rinse, repeat */
++ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
++}
++
++static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
++ int prot)
++{
++ arm_lpae_iopte pte;
++
++ if (data->iop.fmt == ARM_64_LPAE_S1 ||
++ data->iop.fmt == ARM_32_LPAE_S1) {
++ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
++
++ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
++ pte |= ARM_LPAE_PTE_AP_RDONLY;
++
++ if (prot & IOMMU_CACHE)
++ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
++ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
++ else if (prot & IOMMU_MMIO)
++ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
++ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
++ } else {
++ pte = ARM_LPAE_PTE_HAP_FAULT;
++ if (prot & IOMMU_READ)
++ pte |= ARM_LPAE_PTE_HAP_READ;
++ if (prot & IOMMU_WRITE)
++ pte |= ARM_LPAE_PTE_HAP_WRITE;
++ if (prot & IOMMU_CACHE)
++ pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
++ else if (prot & IOMMU_MMIO)
++ pte |= ARM_LPAE_PTE_MEMATTR_DEV;
++ else
++ pte |= ARM_LPAE_PTE_MEMATTR_NC;
++ }
++
++ if (prot & IOMMU_NOEXEC)
++ pte |= ARM_LPAE_PTE_XN;
++
++ return pte;
++}
++
++static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
++ phys_addr_t paddr, size_t size, int iommu_prot)
++{
++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ arm_lpae_iopte *ptep = data->pgd;
++ int lvl = ARM_LPAE_START_LVL(data);
++ arm_lpae_iopte prot;
++
++ /* If no access, then nothing to do */
++ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
++ return 0;
++
++ prot = arm_lpae_prot_to_pte(data, iommu_prot);
++ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
++}
++
++static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
++ arm_lpae_iopte *ptep)
++{
++ arm_lpae_iopte *start, *end;
++ unsigned long table_size;
++
++ /* Only leaf entries at the last level */
++ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
++ return;
++
++ if (lvl == ARM_LPAE_START_LVL(data))
++ table_size = data->pgd_size;
++ else
++ table_size = 1UL << data->pg_shift;
++
++ start = ptep;
++ end = (void *)ptep + table_size;
++
++ while (ptep != end) {
++ arm_lpae_iopte pte = *ptep++;
++
++ if (!pte || iopte_leaf(pte, lvl))
++ continue;
++
++ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
++ }
++
++ free_pages_exact(start, table_size);
++}
++
++static void arm_lpae_free_pgtable(struct io_pgtable *iop)
++{
++ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
++
++ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
++ kfree(data);
++}
++
++static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
++ unsigned long iova, size_t size,
++ arm_lpae_iopte prot, int lvl,
++ arm_lpae_iopte *ptep, size_t blk_size)
++{
++ unsigned long blk_start, blk_end;
++ phys_addr_t blk_paddr;
++ arm_lpae_iopte table = 0;
++ void *cookie = data->iop.cookie;
++ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
++
++ blk_start = iova & ~(blk_size - 1);
++ blk_end = blk_start + blk_size;
++ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
++
++ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
++ arm_lpae_iopte *tablep;
++
++ /* Unmap! */
++ if (blk_start == iova)
++ continue;
++
++ /* __arm_lpae_map expects a pointer to the start of the table */
++ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
++ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
++ tablep) < 0) {
++ if (table) {
++ /* Free the table we allocated */
++ tablep = iopte_deref(table, data);
++ __arm_lpae_free_pgtable(data, lvl + 1, tablep);
++ }
++ return 0; /* Bytes unmapped */
++ }
++ }
++
++ *ptep = table;
++ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
++ iova &= ~(blk_size - 1);
++ tlb->tlb_add_flush(iova, blk_size, true, cookie);
++ return size;
++}
++
++static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
++ unsigned long iova, size_t size, int lvl,
++ arm_lpae_iopte *ptep)
++{
++ arm_lpae_iopte pte;
++ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
++ void *cookie = data->iop.cookie;
++ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
++
++ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
++ pte = *ptep;
++
++ /* Something went horribly wrong and we ran out of page table */
++ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
++ return 0;
++
++ /* If the size matches this level, we're in the right place */
++ if (size == blk_size) {
++ *ptep = 0;
++ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
++
++ if (!iopte_leaf(pte, lvl)) {
++ /* Also flush any partial walks */
++ tlb->tlb_add_flush(iova, size, false, cookie);
++ tlb->tlb_sync(data->iop.cookie);
++ ptep = iopte_deref(pte, data);
++ __arm_lpae_free_pgtable(data, lvl + 1, ptep);
++ } else {
++ tlb->tlb_add_flush(iova, size, true, cookie);
++ }
++
++ return size;
++ } else if (iopte_leaf(pte, lvl)) {
++ /*
++ * Insert a table at the next level to map the old region,
++ * minus the part we want to unmap
++ */
++ return arm_lpae_split_blk_unmap(data, iova, size,
++ iopte_prot(pte), lvl, ptep,
++ blk_size);
++ }
++
++ /* Keep on walkin' */
++ ptep = iopte_deref(pte, data);
++ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
++}
++
++static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
++ size_t size)
++{
++ size_t unmapped;
++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ struct io_pgtable *iop = &data->iop;
++ arm_lpae_iopte *ptep = data->pgd;
++ int lvl = ARM_LPAE_START_LVL(data);
++
++ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
++ if (unmapped)
++ iop->cfg.tlb->tlb_sync(iop->cookie);
++
++ return unmapped;
++}
++
++static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
++ unsigned long iova)
++{
++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ arm_lpae_iopte pte, *ptep = data->pgd;
++ int lvl = ARM_LPAE_START_LVL(data);
++
++ do {
++ /* Valid IOPTE pointer? */
++ if (!ptep)
++ return 0;
++
++ /* Grab the IOPTE we're interested in */
++ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
++
++ /* Valid entry? */
++ if (!pte)
++ return 0;
++
++ /* Leaf entry? */
++ if (iopte_leaf(pte,lvl))
++ goto found_translation;
++
++ /* Take it to the next level */
++ ptep = iopte_deref(pte, data);
++ } while (++lvl < ARM_LPAE_MAX_LEVELS);
++
++ /* Ran out of page tables to walk */
++ return 0;
++
++found_translation:
++ iova &= ((1 << data->pg_shift) - 1);
++ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
++}
++
++static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
++{
++ unsigned long granule;
++
++ /*
++ * We need to restrict the supported page sizes to match the
++ * translation regime for a particular granule. Aim to match
++ * the CPU page size if possible, otherwise prefer smaller sizes.
++ * While we're at it, restrict the block sizes to match the
++ * chosen granule.
++ */
++ if (cfg->pgsize_bitmap & PAGE_SIZE)
++ granule = PAGE_SIZE;
++ else if (cfg->pgsize_bitmap & ~PAGE_MASK)
++ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
++ else if (cfg->pgsize_bitmap & PAGE_MASK)
++ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
++ else
++ granule = 0;
++
++ switch (granule) {
++ case SZ_4K:
++ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
++ break;
++ case SZ_16K:
++ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
++ break;
++ case SZ_64K:
++ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
++ break;
++ default:
++ cfg->pgsize_bitmap = 0;
++ }
++}
++
++static struct arm_lpae_io_pgtable *
++arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
++{
++ unsigned long va_bits, pgd_bits;
++ struct arm_lpae_io_pgtable *data;
++
++ arm_lpae_restrict_pgsizes(cfg);
++
++ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
++ return NULL;
++
++ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
++ return NULL;
++
++ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
++ return NULL;
++
++ data = kmalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return NULL;
++
++ data->pg_shift = __ffs(cfg->pgsize_bitmap);
++ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
++
++ va_bits = cfg->ias - data->pg_shift;
++ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
++
++ /* Calculate the actual size of our pgd (without concatenation) */
++ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
++ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
++
++ data->iop.ops = (struct io_pgtable_ops) {
++ .map = arm_lpae_map,
++ .unmap = arm_lpae_unmap,
++ .iova_to_phys = arm_lpae_iova_to_phys,
++ };
++
++ return data;
++}
++
++static struct io_pgtable *
++arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
++{
++ u64 reg;
++ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
++
++ if (!data)
++ return NULL;
++
++ /* TCR */
++ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
++
++ switch (1 << data->pg_shift) {
++ case SZ_4K:
++ reg |= ARM_LPAE_TCR_TG0_4K;
++ break;
++ case SZ_16K:
++ reg |= ARM_LPAE_TCR_TG0_16K;
++ break;
++ case SZ_64K:
++ reg |= ARM_LPAE_TCR_TG0_64K;
++ break;
++ }
++
++ switch (cfg->oas) {
++ case 32:
++ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
++ break;
++ case 36:
++ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
++ break;
++ case 40:
++ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
++ break;
++ case 42:
++ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
++ break;
++ case 44:
++ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
++ break;
++ case 48:
++ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
++ break;
++ default:
++ goto out_free_data;
++ }
++
++ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
++
++ /* Disable speculative walks through TTBR1 */
++ reg |= ARM_LPAE_TCR_EPD1;
++ cfg->arm_lpae_s1_cfg.tcr = reg;
++
++ /* MAIRs */
++ reg = (ARM_LPAE_MAIR_ATTR_NC
++ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
++ (ARM_LPAE_MAIR_ATTR_WBRWA
++ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
++ (ARM_LPAE_MAIR_ATTR_DEVICE
++ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
++
++ cfg->arm_lpae_s1_cfg.mair[0] = reg;
++ cfg->arm_lpae_s1_cfg.mair[1] = 0;
++
++ /* Looking good; allocate a pgd */
++ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
++ if (!data->pgd)
++ goto out_free_data;
++
++ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
++
++ /* TTBRs */
++ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
++ cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
++ return &data->iop;
++
++out_free_data:
++ kfree(data);
++ return NULL;
++}
++
++static struct io_pgtable *
++arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
++{
++ u64 reg, sl;
++ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
++
++ if (!data)
++ return NULL;
++
++ /*
++ * Concatenate PGDs at level 1 if possible in order to reduce
++ * the depth of the stage-2 walk.
++ */
++ if (data->levels == ARM_LPAE_MAX_LEVELS) {
++ unsigned long pgd_pages;
++
++ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
++ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
++ data->pgd_size = pgd_pages << data->pg_shift;
++ data->levels--;
++ }
++ }
++
++ /* VTCR */
++ reg = ARM_64_LPAE_S2_TCR_RES1 |
++ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
++
++ sl = ARM_LPAE_START_LVL(data);
++
++ switch (1 << data->pg_shift) {
++ case SZ_4K:
++ reg |= ARM_LPAE_TCR_TG0_4K;
++ sl++; /* SL0 format is different for 4K granule size */
++ break;
++ case SZ_16K:
++ reg |= ARM_LPAE_TCR_TG0_16K;
++ break;
++ case SZ_64K:
++ reg |= ARM_LPAE_TCR_TG0_64K;
++ break;
++ }
++
++ switch (cfg->oas) {
++ case 32:
++ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
++ break;
++ case 36:
++ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
++ break;
++ case 40:
++ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
++ break;
++ case 42:
++ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
++ break;
++ case 44:
++ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
++ break;
++ case 48:
++ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
++ break;
++ default:
++ goto out_free_data;
++ }
++
++ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
++ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
++ cfg->arm_lpae_s2_cfg.vtcr = reg;
++
++ /* Allocate pgd pages */
++ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
++ if (!data->pgd)
++ goto out_free_data;
++
++ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
++
++ /* VTTBR */
++ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
++ return &data->iop;
++
++out_free_data:
++ kfree(data);
++ return NULL;
++}
++
++static struct io_pgtable *
++arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
++{
++ struct io_pgtable *iop;
++
++ if (cfg->ias > 32 || cfg->oas > 40)
++ return NULL;
++
++ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
++ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
++ if (iop) {
++ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
++ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
++ }
++
++ return iop;
++}
++
++static struct io_pgtable *
++arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
++{
++ struct io_pgtable *iop;
++
++ if (cfg->ias > 40 || cfg->oas > 40)
++ return NULL;
++
++ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
++ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
++ if (iop)
++ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
++
++ return iop;
++}
++
++struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
++ .alloc = arm_64_lpae_alloc_pgtable_s1,
++ .free = arm_lpae_free_pgtable,
++};
++
++struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
++ .alloc = arm_64_lpae_alloc_pgtable_s2,
++ .free = arm_lpae_free_pgtable,
++};
++
++struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
++ .alloc = arm_32_lpae_alloc_pgtable_s1,
++ .free = arm_lpae_free_pgtable,
++};
++
++struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
++ .alloc = arm_32_lpae_alloc_pgtable_s2,
++ .free = arm_lpae_free_pgtable,
++};
++
++#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
++
++static struct io_pgtable_cfg *cfg_cookie;
++
++static void dummy_tlb_flush_all(void *cookie)
++{
++ WARN_ON(cookie != cfg_cookie);
++}
++
++static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
++ void *cookie)
++{
++ WARN_ON(cookie != cfg_cookie);
++ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
++}
++
++static void dummy_tlb_sync(void *cookie)
++{
++ WARN_ON(cookie != cfg_cookie);
++}
++
++static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
++{
++ WARN_ON(cookie != cfg_cookie);
++}
++
++static struct iommu_gather_ops dummy_tlb_ops __initdata = {
++ .tlb_flush_all = dummy_tlb_flush_all,
++ .tlb_add_flush = dummy_tlb_add_flush,
++ .tlb_sync = dummy_tlb_sync,
++ .flush_pgtable = dummy_flush_pgtable,
++};
++
++static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
++{
++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ struct io_pgtable_cfg *cfg = &data->iop.cfg;
++
++ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
++ cfg->pgsize_bitmap, cfg->ias);
++ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
++ data->levels, data->pgd_size, data->pg_shift,
++ data->bits_per_level, data->pgd);
++}
++
++#define __FAIL(ops, i) ({ \
++ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
++ arm_lpae_dump_ops(ops); \
++ selftest_running = false; \
++ -EFAULT; \
++})
++
++static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
++{
++ static const enum io_pgtable_fmt fmts[] = {
++ ARM_64_LPAE_S1,
++ ARM_64_LPAE_S2,
++ };
++
++ int i, j;
++ unsigned long iova;
++ size_t size;
++ struct io_pgtable_ops *ops;
++
++ selftest_running = true;
++
++ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
++ cfg_cookie = cfg;
++ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
++ if (!ops) {
++ pr_err("selftest: failed to allocate io pgtable ops\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Initial sanity checks.
++ * Empty page tables shouldn't provide any translations.
++ */
++ if (ops->iova_to_phys(ops, 42))
++ return __FAIL(ops, i);
++
++ if (ops->iova_to_phys(ops, SZ_1G + 42))
++ return __FAIL(ops, i);
++
++ if (ops->iova_to_phys(ops, SZ_2G + 42))
++ return __FAIL(ops, i);
++
++ /*
++ * Distinct mappings of different granule sizes.
++ */
++ iova = 0;
++ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
++ while (j != BITS_PER_LONG) {
++ size = 1UL << j;
++
++ if (ops->map(ops, iova, iova, size, IOMMU_READ |
++ IOMMU_WRITE |
++ IOMMU_NOEXEC |
++ IOMMU_CACHE))
++ return __FAIL(ops, i);
++
++ /* Overlapping mappings */
++ if (!ops->map(ops, iova, iova + size, size,
++ IOMMU_READ | IOMMU_NOEXEC))
++ return __FAIL(ops, i);
++
++ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
++ return __FAIL(ops, i);
++
++ iova += SZ_1G;
++ j++;
++ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
++ }
++
++ /* Partial unmap */
++ size = 1UL << __ffs(cfg->pgsize_bitmap);
++ if (ops->unmap(ops, SZ_1G + size, size) != size)
++ return __FAIL(ops, i);
++
++ /* Remap of partial unmap */
++ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
++ return __FAIL(ops, i);
++
++ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
++ return __FAIL(ops, i);
++
++ /* Full unmap */
++ iova = 0;
++ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
++ while (j != BITS_PER_LONG) {
++ size = 1UL << j;
++
++ if (ops->unmap(ops, iova, size) != size)
++ return __FAIL(ops, i);
++
++ if (ops->iova_to_phys(ops, iova + 42))
++ return __FAIL(ops, i);
++
++ /* Remap full block */
++ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
++ return __FAIL(ops, i);
++
++ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
++ return __FAIL(ops, i);
++
++ iova += SZ_1G;
++ j++;
++ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
++ }
++
++ free_io_pgtable_ops(ops);
++ }
++
++ selftest_running = false;
++ return 0;
++}
++
++static int __init arm_lpae_do_selftests(void)
++{
++ static const unsigned long pgsize[] = {
++ SZ_4K | SZ_2M | SZ_1G,
++ SZ_16K | SZ_32M,
++ SZ_64K | SZ_512M,
++ };
++
++ static const unsigned int ias[] = {
++ 32, 36, 40, 42, 44, 48,
++ };
++
++ int i, j, pass = 0, fail = 0;
++ struct io_pgtable_cfg cfg = {
++ .tlb = &dummy_tlb_ops,
++ .oas = 48,
++ };
++
++ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
++ for (j = 0; j < ARRAY_SIZE(ias); ++j) {
++ cfg.pgsize_bitmap = pgsize[i];
++ cfg.ias = ias[j];
++ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
++ pgsize[i], ias[j]);
++ if (arm_lpae_run_tests(&cfg))
++ fail++;
++ else
++ pass++;
++ }
++ }
++
++ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
++ return fail ? -EFAULT : 0;
++}
++subsys_initcall(arm_lpae_do_selftests);
++#endif
+diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
+new file mode 100644
+index 0000000..6436fe2
+--- /dev/null
++++ b/drivers/iommu/io-pgtable.c
+@@ -0,0 +1,82 @@
++/*
++ * Generic page table allocator for IOMMUs.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * Copyright (C) 2014 ARM Limited
++ *
++ * Author: Will Deacon <will.deacon@arm.com>
++ */
++
++#include <linux/bug.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++
++#include "io-pgtable.h"
++
++extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
++extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
++extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
++extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
++
++static const struct io_pgtable_init_fns *
++io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
++{
++#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
++ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
++ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
++ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
++ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
++#endif
++};
++
++struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
++ struct io_pgtable_cfg *cfg,
++ void *cookie)
++{
++ struct io_pgtable *iop;
++ const struct io_pgtable_init_fns *fns;
++
++ if (fmt >= IO_PGTABLE_NUM_FMTS)
++ return NULL;
++
++ fns = io_pgtable_init_table[fmt];
++ if (!fns)
++ return NULL;
++
++ iop = fns->alloc(cfg, cookie);
++ if (!iop)
++ return NULL;
++
++ iop->fmt = fmt;
++ iop->cookie = cookie;
++ iop->cfg = *cfg;
++
++ return &iop->ops;
++}
++
++/*
++ * It is the IOMMU driver's responsibility to ensure that the page table
++ * is no longer accessible to the walker by this point.
++ */
++void free_io_pgtable_ops(struct io_pgtable_ops *ops)
++{
++ struct io_pgtable *iop;
++
++ if (!ops)
++ return;
++
++ iop = container_of(ops, struct io_pgtable, ops);
++ iop->cfg.tlb->tlb_flush_all(iop->cookie);
++ io_pgtable_init_table[iop->fmt]->free(iop);
++}
+diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
+new file mode 100644
+index 0000000..10e32f6
+--- /dev/null
++++ b/drivers/iommu/io-pgtable.h
+@@ -0,0 +1,143 @@
++#ifndef __IO_PGTABLE_H
++#define __IO_PGTABLE_H
++
++/*
++ * Public API for use by IOMMU drivers
++ */
++enum io_pgtable_fmt {
++ ARM_32_LPAE_S1,
++ ARM_32_LPAE_S2,
++ ARM_64_LPAE_S1,
++ ARM_64_LPAE_S2,
++ IO_PGTABLE_NUM_FMTS,
++};
++
++/**
++ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
++ *
++ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
++ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
++ * @tlb_sync: Ensure any queued TLB invalidation has taken effect.
++ * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
++ *
++ * Note that these can all be called in atomic context and must therefore
++ * not block.
++ */
++struct iommu_gather_ops {
++ void (*tlb_flush_all)(void *cookie);
++ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
++ void *cookie);
++ void (*tlb_sync)(void *cookie);
++ void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
++};
++
++/**
++ * struct io_pgtable_cfg - Configuration data for a set of page tables.
++ *
++ * @quirks: A bitmap of hardware quirks that require some special
++ * action by the low-level page table allocator.
++ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
++ * tables.
++ * @ias: Input address (iova) size, in bits.
++ * @oas: Output address (paddr) size, in bits.
++ * @tlb: TLB management callbacks for this set of tables.
++ */
++struct io_pgtable_cfg {
++ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
++ int quirks;
++ unsigned long pgsize_bitmap;
++ unsigned int ias;
++ unsigned int oas;
++ const struct iommu_gather_ops *tlb;
++
++ /* Low-level data specific to the table format */
++ union {
++ struct {
++ u64 ttbr[2];
++ u64 tcr;
++ u64 mair[2];
++ } arm_lpae_s1_cfg;
++
++ struct {
++ u64 vttbr;
++ u64 vtcr;
++ } arm_lpae_s2_cfg;
++ };
++};
++
++/**
++ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
++ *
++ * @map: Map a physically contiguous memory region.
++ * @unmap: Unmap a physically contiguous memory region.
++ * @iova_to_phys: Translate iova to physical address.
++ *
++ * These functions map directly onto the iommu_ops member functions with
++ * the same names.
++ */
++struct io_pgtable_ops {
++ int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
++ phys_addr_t paddr, size_t size, int prot);
++ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
++ size_t size);
++ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
++ unsigned long iova);
++};
++
++/**
++ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
++ *
++ * @fmt: The page table format.
++ * @cfg: The page table configuration. This will be modified to represent
++ * the configuration actually provided by the allocator (e.g. the
++ * pgsize_bitmap may be restricted).
++ * @cookie: An opaque token provided by the IOMMU driver and passed back to
++ * the callback routines in cfg->tlb.
++ */
++struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
++ struct io_pgtable_cfg *cfg,
++ void *cookie);
++
++/**
++ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
++ * *must* ensure that the page table is no longer
++ * live, but the TLB can be dirty.
++ *
++ * @ops: The ops returned from alloc_io_pgtable_ops.
++ */
++void free_io_pgtable_ops(struct io_pgtable_ops *ops);
++
++
++/*
++ * Internal structures for page table allocator implementations.
++ */
++
++/**
++ * struct io_pgtable - Internal structure describing a set of page tables.
++ *
++ * @fmt: The page table format.
++ * @cookie: An opaque token provided by the IOMMU driver and passed back to
++ * any callback routines.
++ * @cfg: A copy of the page table configuration.
++ * @ops: The page table operations in use for this set of page tables.
++ */
++struct io_pgtable {
++ enum io_pgtable_fmt fmt;
++ void *cookie;
++ struct io_pgtable_cfg cfg;
++ struct io_pgtable_ops ops;
++};
++
++/**
++ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
++ * particular format.
++ *
++ * @alloc: Allocate a set of page tables described by cfg.
++ * @free: Free the page tables associated with iop.
++ */
++struct io_pgtable_init_fns {
++ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
++ void (*free)(struct io_pgtable *iop);
++};
++
++#endif /* __IO_PGTABLE_H */
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index ed8b048..8d8e5a7 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -591,10 +591,10 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
+ continue;
+
+ /* We alias them or they alias us */
+- if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
+- pdev->dma_alias_devfn == tmp->devfn) ||
+- ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
+- tmp->dma_alias_devfn == pdev->devfn)) {
++ if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) &&
++ (pdev->dma_alias_devid & 0xff) == tmp->devfn) ||
++ ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) &&
++ (tmp->dma_alias_devid & 0xff) == pdev->devfn)) {
+
+ group = get_pci_alias_group(tmp, devfns);
+ if (group) {
+@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data)
+ const struct iommu_ops *ops = cb->ops;
+
+ if (!ops->add_device)
+- return -ENODEV;
++ return 0;
+
+ WARN_ON(dev->iommu_group);
+
+@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
+ kfree(nb);
+ return err;
+ }
+- return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
++
++ err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
++ if (err) {
++ bus_unregister_notifier(bus, nb);
++ kfree(nb);
++ return err;
++ }
++
++ return 0;
+ }
+
+ /**
+@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
+ */
+ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
+ {
++ int err;
++
+ if (bus->iommu_ops != NULL)
+ return -EBUSY;
+
+ bus->iommu_ops = ops;
+
+ /* Do IOMMU specific setup for this bus-type */
+- return iommu_bus_init(bus, ops);
++ err = iommu_bus_init(bus, ops);
++ if (err)
++ bus->iommu_ops = NULL;
++
++ return err;
+ }
+ EXPORT_SYMBOL_GPL(bus_set_iommu);
+
+@@ -887,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
+ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
+ {
+ struct iommu_domain *domain;
+- int ret;
+
+ if (bus == NULL || bus->iommu_ops == NULL)
+ return NULL;
+
+- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
++ domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+ if (!domain)
+ return NULL;
+
+- domain->ops = bus->iommu_ops;
+-
+- ret = domain->ops->domain_init(domain);
+- if (ret)
+- goto out_free;
++ domain->ops = bus->iommu_ops;
++ domain->type = IOMMU_DOMAIN_UNMANAGED;
+
+ return domain;
+-
+-out_free:
+- kfree(domain);
+-
+- return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
+
+ void iommu_domain_free(struct iommu_domain *domain)
+ {
+- if (likely(domain->ops->domain_destroy != NULL))
+- domain->ops->domain_destroy(domain);
+-
+- kfree(domain);
++ domain->ops->domain_free(domain);
+ }
+ EXPORT_SYMBOL_GPL(iommu_domain_free);
+
+@@ -943,6 +945,16 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(iommu_detach_device);
+
++struct iommu_domain *iommu_get_dev_domain(struct device *dev)
++{
++ const struct iommu_ops *ops = dev->bus->iommu_ops;
++
++ if (unlikely(ops == NULL || ops->get_dev_iommu_domain == NULL))
++ return NULL;
++
++ return ops->get_dev_iommu_domain(dev);
++}
++EXPORT_SYMBOL_GPL(iommu_get_dev_domain);
+ /*
+ * IOMMU groups are really the natrual working unit of the IOMMU, but
+ * the IOMMU API works on domains and devices. Bridge that gap by
+@@ -1035,6 +1047,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ domain->ops->pgsize_bitmap == 0UL))
+ return -ENODEV;
+
++ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
++ return -EINVAL;
++
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+@@ -1070,7 +1085,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ if (ret)
+ iommu_unmap(domain, orig_iova, orig_size - size);
+ else
+- trace_map(iova, paddr, size);
++ trace_map(orig_iova, paddr, orig_size);
+
+ return ret;
+ }
+@@ -1080,11 +1095,15 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+ {
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
++ unsigned long orig_iova = iova;
+
+ if (unlikely(domain->ops->unmap == NULL ||
+ domain->ops->pgsize_bitmap == 0UL))
+ return -ENODEV;
+
++ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
++ return -EINVAL;
++
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+@@ -1119,11 +1138,53 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+ unmapped += unmapped_page;
+ }
+
+- trace_unmap(iova, 0, size);
++ trace_unmap(orig_iova, size, unmapped);
+ return unmapped;
+ }
+ EXPORT_SYMBOL_GPL(iommu_unmap);
+
++size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
++ struct scatterlist *sg, unsigned int nents, int prot)
++{
++ struct scatterlist *s;
++ size_t mapped = 0;
++ unsigned int i, min_pagesz;
++ int ret;
++
++ if (unlikely(domain->ops->pgsize_bitmap == 0UL))
++ return 0;
++
++ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
++
++ for_each_sg(sg, s, nents, i) {
++ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
++
++ /*
++ * We are mapping on IOMMU page boundaries, so offset within
++ * the page must be 0. However, the IOMMU may support pages
++ * smaller than PAGE_SIZE, so s->offset may still represent
++ * an offset of that boundary within the CPU page.
++ */
++ if (!IS_ALIGNED(s->offset, min_pagesz))
++ goto out_err;
++
++ ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
++ if (ret)
++ goto out_err;
++
++ mapped += s->length;
++ }
++
++ return mapped;
++
++out_err:
++ /* undo mappings already done */
++ iommu_unmap(domain, iova, mapped);
++
++ return 0;
++
++}
++EXPORT_SYMBOL_GPL(default_iommu_map_sg);
+
+ int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t paddr, u64 size, int prot)
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
+index 7dab5cb..f3c5ab6 100644
+--- a/drivers/iommu/ipmmu-vmsa.c
++++ b/drivers/iommu/ipmmu-vmsa.c
+@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = {
+ .detach_dev = ipmmu_detach_device,
+ .map = ipmmu_map,
+ .unmap = ipmmu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = ipmmu_iova_to_phys,
+ .add_device = ipmmu_add_device,
+ .remove_device = ipmmu_remove_device,
+@@ -1221,7 +1222,6 @@ static int ipmmu_remove(struct platform_device *pdev)
+
+ static struct platform_driver ipmmu_driver = {
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "ipmmu-vmsa",
+ },
+ .probe = ipmmu_probe,
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 74a1767..2c3f5ad 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
+ unsigned int irq;
+ struct msi_desc *msidesc;
+
+- WARN_ON(!list_is_singular(&dev->msi_list));
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+- WARN_ON(msidesc->irq);
+- WARN_ON(msidesc->msi_attrib.multiple);
+- WARN_ON(msidesc->nvec_used);
+
+ irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
+ if (irq == 0)
+ return -ENOSPC;
+
+ nvec_pow2 = __roundup_pow_of_two(nvec);
+- msidesc->nvec_used = nvec;
+- msidesc->msi_attrib.multiple = ilog2(nvec_pow2);
+ for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
+ if (!sub_handle) {
+ index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
+@@ -96,8 +90,6 @@ error:
+ * IRQs from tearing down again in default_teardown_msi_irqs()
+ */
+ msidesc->irq = 0;
+- msidesc->nvec_used = 0;
+- msidesc->msi_attrib.multiple = 0;
+
+ return ret;
+ }
+diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
+index 6e3dcc2..1c7b78e 100644
+--- a/drivers/iommu/msm_iommu.c
++++ b/drivers/iommu/msm_iommu.c
+@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = {
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ };
+diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
+index e550ccb..43429ab 100644
+--- a/drivers/iommu/of_iommu.c
++++ b/drivers/iommu/of_iommu.c
+@@ -18,9 +18,14 @@
+ */
+
+ #include <linux/export.h>
++#include <linux/iommu.h>
+ #include <linux/limits.h>
+ #include <linux/of.h>
+ #include <linux/of_iommu.h>
++#include <linux/slab.h>
++
++static const struct of_device_id __iommu_of_table_sentinel
++ __used __section(__iommu_of_table_end);
+
+ /**
+ * of_get_dma_window - Parse *dma-window property and returns 0 if found.
+@@ -89,3 +94,93 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(of_get_dma_window);
++
++struct of_iommu_node {
++ struct list_head list;
++ struct device_node *np;
++ struct iommu_ops *ops;
++};
++static LIST_HEAD(of_iommu_list);
++static DEFINE_SPINLOCK(of_iommu_lock);
++
++void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
++{
++ struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
++
++ if (WARN_ON(!iommu))
++ return;
++
++ INIT_LIST_HEAD(&iommu->list);
++ iommu->np = np;
++ iommu->ops = ops;
++ spin_lock(&of_iommu_lock);
++ list_add_tail(&iommu->list, &of_iommu_list);
++ spin_unlock(&of_iommu_lock);
++}
++
++struct iommu_ops *of_iommu_get_ops(struct device_node *np)
++{
++ struct of_iommu_node *node;
++ struct iommu_ops *ops = NULL;
++
++ spin_lock(&of_iommu_lock);
++ list_for_each_entry(node, &of_iommu_list, list)
++ if (node->np == np) {
++ ops = node->ops;
++ break;
++ }
++ spin_unlock(&of_iommu_lock);
++ return ops;
++}
++
++struct iommu_ops *of_iommu_configure(struct device *dev,
++ struct device_node *master_np)
++{
++ struct of_phandle_args iommu_spec;
++ struct device_node *np;
++ struct iommu_ops *ops = NULL;
++ int idx = 0;
++
++ if (dev_is_pci(dev)) {
++ dev_err(dev, "IOMMU is currently not supported for PCI\n");
++ return NULL;
++ }
++
++ /*
++ * We don't currently walk up the tree looking for a parent IOMMU.
++ * See the `Notes:' section of
++ * Documentation/devicetree/bindings/iommu/iommu.txt
++ */
++ while (!of_parse_phandle_with_args(master_np, "iommus",
++ "#iommu-cells", idx,
++ &iommu_spec)) {
++ np = iommu_spec.np;
++ ops = of_iommu_get_ops(np);
++
++ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
++ goto err_put_node;
++
++ of_node_put(np);
++ idx++;
++ }
++
++ return ops;
++
++err_put_node:
++ of_node_put(np);
++ return NULL;
++}
++
++void __init of_iommu_init(void)
++{
++ struct device_node *np;
++ const struct of_device_id *match, *matches = &__iommu_of_table;
++
++ for_each_matching_node_and_match(np, matches, &match) {
++ const of_iommu_init_fn init_fn = match->data;
++
++ if (init_fn(np))
++ pr_err("Failed to initialise IOMMU %s\n",
++ of_node_full_name(np));
++ }
++}
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index 3627887..18003c0 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = {
+ .detach_dev = omap_iommu_detach_dev,
+ .map = omap_iommu_map,
+ .unmap = omap_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = omap_iommu_iova_to_phys,
+ .add_device = omap_iommu_add_device,
+ .remove_device = omap_iommu_remove_device,
+diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
+index 1333e6f..f1b0077 100644
+--- a/drivers/iommu/shmobile-iommu.c
++++ b/drivers/iommu/shmobile-iommu.c
+@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = {
+ .detach_dev = shmobile_iommu_detach_device,
+ .map = shmobile_iommu_map,
+ .unmap = shmobile_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = shmobile_iommu_iova_to_phys,
+ .add_device = shmobile_iommu_add_device,
+ .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
+diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
+index bd97ade..951651a 100644
+--- a/drivers/iommu/shmobile-ipmmu.c
++++ b/drivers/iommu/shmobile-ipmmu.c
+@@ -118,7 +118,6 @@ static int ipmmu_probe(struct platform_device *pdev)
+ static struct platform_driver ipmmu_driver = {
+ .probe = ipmmu_probe,
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "ipmmu",
+ },
+ };
+diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
+index a6d76ab..f722a0c 100644
+--- a/drivers/iommu/tegra-gart.c
++++ b/drivers/iommu/tegra-gart.c
+@@ -425,7 +425,6 @@ static struct platform_driver tegra_gart_driver = {
+ .probe = tegra_gart_probe,
+ .remove = tegra_gart_remove,
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "tegra-gart",
+ .pm = &tegra_gart_pm_ops,
+ .of_match_table = tegra_gart_of_match,
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 3afdf43..cb0c9bf 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -955,6 +955,7 @@ static const struct iommu_ops smmu_iommu_ops = {
+ .detach_dev = smmu_iommu_detach_dev,
+ .map = smmu_iommu_map,
+ .unmap = smmu_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = smmu_iommu_iova_to_phys,
+ .pgsize_bitmap = SMMU_IOMMU_PGSIZES,
+ };
+@@ -1269,7 +1270,6 @@ static struct platform_driver tegra_smmu_driver = {
+ .probe = tegra_smmu_probe,
+ .remove = tegra_smmu_remove,
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "tegra-smmu",
+ .pm = &tegra_smmu_pm_ops,
+ .of_match_table = tegra_smmu_of_match,
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index b21f12f..e72e239 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -5,8 +5,15 @@ config IRQCHIP
+ config ARM_GIC
+ bool
+ select IRQ_DOMAIN
++ select IRQ_DOMAIN_HIERARCHY
+ select MULTI_IRQ_HANDLER
+
++config ARM_GIC_V2M
++ bool
++ depends on ARM_GIC
++ depends on PCI && PCI_MSI
++ select PCI_MSI_IRQ_DOMAIN
++
+ config GIC_NON_BANKED
+ bool
+
+@@ -14,6 +21,11 @@ config ARM_GIC_V3
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
++ select IRQ_DOMAIN_HIERARCHY
++
++config ARM_GIC_V3_ITS
++ bool
++ select PCI_MSI_IRQ_DOMAIN
+
+ config ARM_NVIC
+ bool
+diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
+index 173bb5f..1c4f9a4 100644
+--- a/drivers/irqchip/Makefile
++++ b/drivers/irqchip/Makefile
+@@ -19,7 +19,9 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
+ obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
+ obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
+ obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
++obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
+ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
++obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o
+ obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
+ obj-$(CONFIG_ARM_VIC) += irq-vic.o
+ obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 41ac85a..615075d 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -131,7 +131,7 @@ static void armada_370_xp_free_msi(int hwirq)
+ mutex_unlock(&msi_used_lock);
+ }
+
+-static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
++static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+ {
+@@ -158,11 +158,11 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
+ msg.address_hi = 0;
+ msg.data = 0xf00 | (hwirq + 16);
+
+- write_msi_msg(virq, &msg);
++ pci_write_msi_msg(virq, &msg);
+ return 0;
+ }
+
+-static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
++static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
+ unsigned int irq)
+ {
+ struct irq_data *d = irq_get_irq_data(irq);
+@@ -174,10 +174,10 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
+
+ static struct irq_chip armada_370_xp_msi_irq_chip = {
+ .name = "armada_370_xp_msi_irq",
+- .irq_enable = unmask_msi_irq,
+- .irq_disable = mask_msi_irq,
+- .irq_mask = mask_msi_irq,
+- .irq_unmask = unmask_msi_irq,
++ .irq_enable = pci_msi_unmask_irq,
++ .irq_disable = pci_msi_mask_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
+ };
+
+ static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
+@@ -197,7 +197,7 @@ static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
+ static int armada_370_xp_msi_init(struct device_node *node,
+ phys_addr_t main_int_phys_base)
+ {
+- struct msi_chip *msi_chip;
++ struct msi_controller *msi_chip;
+ u32 reg;
+ int ret;
+
+diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
+index 9a2cf3c..27fdd8c 100644
+--- a/drivers/irqchip/irq-atmel-aic.c
++++ b/drivers/irqchip/irq-atmel-aic.c
+@@ -65,11 +65,11 @@ aic_handle(struct pt_regs *regs)
+ u32 irqnr;
+ u32 irqstat;
+
+- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR);
+- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR);
++ irqnr = irq_reg_readl(gc, AT91_AIC_IVR);
++ irqstat = irq_reg_readl(gc, AT91_AIC_ISR);
+
+ if (!irqstat)
+- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR);
++ irq_reg_writel(gc, 0, AT91_AIC_EOICR);
+ else
+ handle_domain_irq(aic_domain, irqnr, regs);
+ }
+@@ -80,7 +80,7 @@ static int aic_retrigger(struct irq_data *d)
+
+ /* Enable interrupt on AIC5 */
+ irq_gc_lock(gc);
+- irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR);
++ irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
+ irq_gc_unlock(gc);
+
+ return 0;
+@@ -92,12 +92,12 @@ static int aic_set_type(struct irq_data *d, unsigned type)
+ unsigned int smr;
+ int ret;
+
+- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq));
++ smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq));
+ ret = aic_common_set_type(d, type, &smr);
+ if (ret)
+ return ret;
+
+- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq));
++ irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq));
+
+ return 0;
+ }
+@@ -108,8 +108,8 @@ static void aic_suspend(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ irq_gc_lock(gc);
+- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR);
+- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR);
++ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
++ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
+ irq_gc_unlock(gc);
+ }
+
+@@ -118,8 +118,8 @@ static void aic_resume(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ irq_gc_lock(gc);
+- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR);
+- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR);
++ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
++ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
+ irq_gc_unlock(gc);
+ }
+
+@@ -128,8 +128,8 @@ static void aic_pm_shutdown(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+
+ irq_gc_lock(gc);
+- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR);
+- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR);
++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
+ irq_gc_unlock(gc);
+ }
+ #else
+@@ -148,24 +148,24 @@ static void __init aic_hw_init(struct irq_domain *domain)
+ * will not Lock out nIRQ
+ */
+ for (i = 0; i < 8; i++)
+- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR);
++ irq_reg_writel(gc, 0, AT91_AIC_EOICR);
+
+ /*
+ * Spurious Interrupt ID in Spurious Vector Register.
+ * When there is no current interrupt, the IRQ Vector Register
+ * reads the value stored in AIC_SPU
+ */
+- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU);
++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU);
+
+ /* No debugging in AIC: Debug (Protect) Control Register */
+- irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR);
++ irq_reg_writel(gc, 0, AT91_AIC_DCR);
+
+ /* Disable and clear all interrupts initially */
+- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR);
+- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR);
++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
+
+ for (i = 0; i < 32; i++)
+- irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i));
++ irq_reg_writel(gc, i, AT91_AIC_SVR(i));
+ }
+
+ static int aic_irq_domain_xlate(struct irq_domain *d,
+@@ -195,10 +195,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
+ gc = dgc->gc[idx];
+
+ irq_gc_lock(gc);
+- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(*out_hwirq));
++ smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
+ ret = aic_common_set_priority(intspec[2], &smr);
+ if (!ret)
+- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq));
++ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+ irq_gc_unlock(gc);
+
+ return ret;
+diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
+index a11aae8..a2e8c3f 100644
+--- a/drivers/irqchip/irq-atmel-aic5.c
++++ b/drivers/irqchip/irq-atmel-aic5.c
+@@ -75,11 +75,11 @@ aic5_handle(struct pt_regs *regs)
+ u32 irqnr;
+ u32 irqstat;
+
+- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR);
+- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR);
++ irqnr = irq_reg_readl(gc, AT91_AIC5_IVR);
++ irqstat = irq_reg_readl(gc, AT91_AIC5_ISR);
+
+ if (!irqstat)
+- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
++ irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
+ else
+ handle_domain_irq(aic5_domain, irqnr, regs);
+ }
+@@ -92,8 +92,8 @@ static void aic5_mask(struct irq_data *d)
+
+ /* Disable interrupt on AIC5 */
+ irq_gc_lock(gc);
+- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
+- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
++ irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
+ gc->mask_cache &= ~d->mask;
+ irq_gc_unlock(gc);
+ }
+@@ -106,8 +106,8 @@ static void aic5_unmask(struct irq_data *d)
+
+ /* Enable interrupt on AIC5 */
+ irq_gc_lock(gc);
+- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
+- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR);
++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
++ irq_reg_writel(gc, 1, AT91_AIC5_IECR);
+ gc->mask_cache |= d->mask;
+ irq_gc_unlock(gc);
+ }
+@@ -120,8 +120,8 @@ static int aic5_retrigger(struct irq_data *d)
+
+ /* Enable interrupt on AIC5 */
+ irq_gc_lock(gc);
+- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
+- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR);
++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
++ irq_reg_writel(gc, 1, AT91_AIC5_ISCR);
+ irq_gc_unlock(gc);
+
+ return 0;
+@@ -136,11 +136,11 @@ static int aic5_set_type(struct irq_data *d, unsigned type)
+ int ret;
+
+ irq_gc_lock(gc);
+- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
+- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
++ smr = irq_reg_readl(gc, AT91_AIC5_SMR);
+ ret = aic_common_set_type(d, type, &smr);
+ if (!ret)
+- irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR);
++ irq_reg_writel(gc, smr, AT91_AIC5_SMR);
+ irq_gc_unlock(gc);
+
+ return ret;
+@@ -162,12 +162,11 @@ static void aic5_suspend(struct irq_data *d)
+ if ((mask & gc->mask_cache) == (mask & gc->wake_active))
+ continue;
+
+- irq_reg_writel(i + gc->irq_base,
+- bgc->reg_base + AT91_AIC5_SSR);
++ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+ if (mask & gc->wake_active)
+- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
++ irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
+ else
+- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
++ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+ }
+ irq_gc_unlock(bgc);
+ }
+@@ -187,12 +186,11 @@ static void aic5_resume(struct irq_data *d)
+ if ((mask & gc->mask_cache) == (mask & gc->wake_active))
+ continue;
+
+- irq_reg_writel(i + gc->irq_base,
+- bgc->reg_base + AT91_AIC5_SSR);
++ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
+ if (mask & gc->mask_cache)
+- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
++ irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
+ else
+- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
++ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
+ }
+ irq_gc_unlock(bgc);
+ }
+@@ -207,10 +205,9 @@ static void aic5_pm_shutdown(struct irq_data *d)
+
+ irq_gc_lock(bgc);
+ for (i = 0; i < dgc->irqs_per_chip; i++) {
+- irq_reg_writel(i + gc->irq_base,
+- bgc->reg_base + AT91_AIC5_SSR);
+- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
+- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR);
++ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
++ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
++ irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
+ }
+ irq_gc_unlock(bgc);
+ }
+@@ -230,24 +227,24 @@ static void __init aic5_hw_init(struct irq_domain *domain)
+ * will not Lock out nIRQ
+ */
+ for (i = 0; i < 8; i++)
+- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
++ irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
+
+ /*
+ * Spurious Interrupt ID in Spurious Vector Register.
+ * When there is no current interrupt, the IRQ Vector Register
+ * reads the value stored in AIC_SPU
+ */
+- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU);
++ irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU);
+
+ /* No debugging in AIC: Debug (Protect) Control Register */
+- irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR);
++ irq_reg_writel(gc, 0, AT91_AIC5_DCR);
+
+ /* Disable and clear all interrupts initially */
+ for (i = 0; i < domain->revmap_size; i++) {
+- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR);
+- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR);
+- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
+- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR);
++ irq_reg_writel(gc, i, AT91_AIC5_SSR);
++ irq_reg_writel(gc, i, AT91_AIC5_SVR);
++ irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
++ irq_reg_writel(gc, 1, AT91_AIC5_ICCR);
+ }
+ }
+
+@@ -273,11 +270,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
+ gc = dgc->gc[0];
+
+ irq_gc_lock(gc);
+- irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR);
+- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
++ irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR);
++ smr = irq_reg_readl(gc, AT91_AIC5_SMR);
+ ret = aic_common_set_priority(intspec[2], &smr);
+ if (!ret)
+- irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR);
++ irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR);
+ irq_gc_unlock(gc);
+
+ return ret;
+diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
+index 61541ff..ad96ebb 100644
+--- a/drivers/irqchip/irq-gic-common.c
++++ b/drivers/irqchip/irq-gic-common.c
+@@ -21,7 +21,7 @@
+
+ #include "irq-gic-common.h"
+
+-void gic_configure_irq(unsigned int irq, unsigned int type,
++int gic_configure_irq(unsigned int irq, unsigned int type,
+ void __iomem *base, void (*sync_access)(void))
+ {
+ u32 enablemask = 1 << (irq % 32);
+@@ -29,16 +29,17 @@ void gic_configure_irq(unsigned int irq, unsigned int type,
+ u32 confmask = 0x2 << ((irq % 16) * 2);
+ u32 confoff = (irq / 16) * 4;
+ bool enabled = false;
+- u32 val;
++ u32 val, oldval;
++ int ret = 0;
+
+ /*
+ * Read current configuration register, and insert the config
+ * for "irq", depending on "type".
+ */
+- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+- if (type == IRQ_TYPE_LEVEL_HIGH)
++ val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
++ if (type & IRQ_TYPE_LEVEL_MASK)
+ val &= ~confmask;
+- else if (type == IRQ_TYPE_EDGE_RISING)
++ else if (type & IRQ_TYPE_EDGE_BOTH)
+ val |= confmask;
+
+ /*
+@@ -54,15 +55,20 @@ void gic_configure_irq(unsigned int irq, unsigned int type,
+
+ /*
+ * Write back the new configuration, and possibly re-enable
+- * the interrupt.
++ * the interrupt. If we tried to write a new configuration and failed,
++ * return an error.
+ */
+ writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
++ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
++ ret = -EINVAL;
+
+ if (enabled)
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+ if (sync_access)
+ sync_access();
++
++ return ret;
+ }
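
As an aside, the read-back check added above follows a common write-then-verify pattern for configuration registers with read-only bits. The sketch below is illustrative only and not part of the patch; the "register" is simulated in plain C, with bits [1:0] treated as read-only purely as an assumption for the demo.

/* Minimal sketch of read-modify-write with read-back verification. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x0;          /* pretend bits [1:0] are read-only */

static void reg_write(uint32_t v) { fake_reg = (fake_reg & 0x3u) | (v & ~0x3u); }
static uint32_t reg_read(void)    { return fake_reg; }

int main(void)
{
        uint32_t oldval = reg_read();
        uint32_t val    = oldval | 0x2;  /* try to flip a "config" bit */

        reg_write(val);
        /* Same condition shape as the patch: only complain if we actually
         * asked for a change and the hardware did not take it. */
        if (reg_read() != val && val != oldval)
                printf("configuration rejected (would return -EINVAL)\n");
        else
                printf("configuration accepted\n");
        return 0;
}
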
+
+ void __init gic_dist_config(void __iomem *base, int gic_irqs,
+diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
+index b41f024..35a9884 100644
+--- a/drivers/irqchip/irq-gic-common.h
++++ b/drivers/irqchip/irq-gic-common.h
+@@ -20,7 +20,7 @@
+ #include
+ #include
+
+-void gic_configure_irq(unsigned int irq, unsigned int type,
++int gic_configure_irq(unsigned int irq, unsigned int type,
+ void __iomem *base, void (*sync_access)(void));
+ void gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void));
+diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
+new file mode 100644
+index 0000000..fdf7065
+--- /dev/null
++++ b/drivers/irqchip/irq-gic-v2m.c
+@@ -0,0 +1,333 @@
++/*
++ * ARM GIC v2m MSI(-X) support
++ * Support for Message Signaled Interrupts for systems that
++ * implement ARM Generic Interrupt Controller: GICv2m.
++ *
++ * Copyright (C) 2014 Advanced Micro Devices, Inc.
++ * Authors: Suravee Suthikulpanit
++ * Harish Kasiviswanathan
++ * Brandon Anderson
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#define pr_fmt(fmt) "GICv2m: " fmt
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++/*
++* MSI_TYPER:
++* [31:26] Reserved
++* [25:16] lowest SPI assigned to MSI
++* [15:10] Reserved
+* [9:0] Number of SPIs assigned to MSI
++*/
++#define V2M_MSI_TYPER 0x008
++#define V2M_MSI_TYPER_BASE_SHIFT 16
++#define V2M_MSI_TYPER_BASE_MASK 0x3FF
++#define V2M_MSI_TYPER_NUM_MASK 0x3FF
++#define V2M_MSI_SETSPI_NS 0x040
++#define V2M_MIN_SPI 32
++#define V2M_MAX_SPI 1019
++
++#define V2M_MSI_TYPER_BASE_SPI(x) \
++ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
++
++#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
++
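
For context, the MSI_TYPER layout documented above packs the first MSI-capable SPI into bits [25:16] and the SPI count into bits [9:0]. A minimal standalone decode, using a made-up register value, might look like this (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t typer = (64u << 16) | 32u;     /* hypothetical: base SPI 64, 32 SPIs */
        uint32_t base  = (typer >> 16) & 0x3FF; /* V2M_MSI_TYPER_BASE_SPI()  */
        uint32_t num   = typer & 0x3FF;         /* V2M_MSI_TYPER_NUM_SPI()   */

        printf("MSI SPIs: %u..%u\n", base, base + num - 1);
        return 0;
}
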
++struct v2m_data {
++ spinlock_t msi_cnt_lock;
++ struct msi_controller mchip;
++ struct resource res; /* GICv2m resource */
++ void __iomem *base; /* GICv2m virt address */
++ u32 spi_start; /* The SPI number that MSIs start */
++ u32 nr_spis; /* The number of SPIs for MSIs */
++ unsigned long *bm; /* MSI vector bitmap */
++ struct irq_domain *domain;
++};
++
++static void gicv2m_mask_msi_irq(struct irq_data *d)
++{
++ pci_msi_mask_irq(d);
++ irq_chip_mask_parent(d);
++}
++
++static void gicv2m_unmask_msi_irq(struct irq_data *d)
++{
++ pci_msi_unmask_irq(d);
++ irq_chip_unmask_parent(d);
++}
++
++static struct irq_chip gicv2m_msi_irq_chip = {
++ .name = "MSI",
++ .irq_mask = gicv2m_mask_msi_irq,
++ .irq_unmask = gicv2m_unmask_msi_irq,
++ .irq_eoi = irq_chip_eoi_parent,
++ .irq_write_msi_msg = pci_msi_domain_write_msg,
++};
++
++static struct msi_domain_info gicv2m_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
++ MSI_FLAG_PCI_MSIX),
++ .chip = &gicv2m_msi_irq_chip,
++};
++
++static int gicv2m_set_affinity(struct irq_data *irq_data,
++ const struct cpumask *mask, bool force)
++{
++ int ret;
++
++ ret = irq_chip_set_affinity_parent(irq_data, mask, force);
++ if (ret == IRQ_SET_MASK_OK)
++ ret = IRQ_SET_MASK_OK_DONE;
++
++ return ret;
++}
++
++static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
++{
++ struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
++ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
++
++ msg->address_hi = (u32) (addr >> 32);
++ msg->address_lo = (u32) (addr);
++ msg->data = data->hwirq;
++}
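
To make the message composition above concrete: the doorbell address is the GICv2m frame's physical base plus the MSI_SETSPI_NS offset (0x40), and the payload is simply the SPI number. The sketch below is illustrative only; the frame base and SPI values are hypothetical.

#include <stdio.h>

int main(void)
{
        unsigned long long frame_base = 0x2c1f0000ULL;   /* hypothetical GICv2m frame */
        unsigned int spi              = 96;              /* hypothetical hwirq */
        unsigned long long addr       = frame_base + 0x040; /* V2M_MSI_SETSPI_NS */

        /* A device writing 'data' to 'addr' raises SPI 'data'. */
        printf("address_hi=0x%08llx address_lo=0x%08llx data=%u\n",
               addr >> 32, addr & 0xffffffffULL, spi);
        return 0;
}
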
++
++static struct irq_chip gicv2m_irq_chip = {
++ .name = "GICv2m",
++ .irq_mask = irq_chip_mask_parent,
++ .irq_unmask = irq_chip_unmask_parent,
++ .irq_eoi = irq_chip_eoi_parent,
++ .irq_set_affinity = gicv2m_set_affinity,
++ .irq_compose_msi_msg = gicv2m_compose_msi_msg,
++};
++
++static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
++ unsigned int virq,
++ irq_hw_number_t hwirq)
++{
++ struct of_phandle_args args;
++ struct irq_data *d;
++ int err;
++
++ args.np = domain->parent->of_node;
++ args.args_count = 3;
++ args.args[0] = 0;
++ args.args[1] = hwirq - 32;
++ args.args[2] = IRQ_TYPE_EDGE_RISING;
++
++ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
++ if (err)
++ return err;
++
++ /* Configure the interrupt line to be edge */
++ d = irq_domain_get_irq_data(domain->parent, virq);
++ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
++ return 0;
++}
++
++static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
++{
++ int pos;
++
++ pos = hwirq - v2m->spi_start;
++ if (pos < 0 || pos >= v2m->nr_spis) {
++ pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
++ return;
++ }
++
++ spin_lock(&v2m->msi_cnt_lock);
++ __clear_bit(pos, v2m->bm);
++ spin_unlock(&v2m->msi_cnt_lock);
++}
++
++static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
++ unsigned int nr_irqs, void *args)
++{
++ struct v2m_data *v2m = domain->host_data;
++ int hwirq, offset, err = 0;
++
++ spin_lock(&v2m->msi_cnt_lock);
++ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis);
++ if (offset < v2m->nr_spis)
++ __set_bit(offset, v2m->bm);
++ else
++ err = -ENOSPC;
++ spin_unlock(&v2m->msi_cnt_lock);
++
++ if (err)
++ return err;
++
++ hwirq = v2m->spi_start + offset;
++
++ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
++ if (err) {
++ gicv2m_unalloc_msi(v2m, hwirq);
++ return err;
++ }
++
++ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
++ &gicv2m_irq_chip, v2m);
++
++ return 0;
++}
++
++static void gicv2m_irq_domain_free(struct irq_domain *domain,
++ unsigned int virq, unsigned int nr_irqs)
++{
++ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
++ struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
++
++ BUG_ON(nr_irqs != 1);
++ gicv2m_unalloc_msi(v2m, d->hwirq);
++ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
++}
++
++static const struct irq_domain_ops gicv2m_domain_ops = {
++ .alloc = gicv2m_irq_domain_alloc,
++ .free = gicv2m_irq_domain_free,
++};
++
++static bool is_msi_spi_valid(u32 base, u32 num)
++{
++ if (base < V2M_MIN_SPI) {
++ pr_err("Invalid MSI base SPI (base:%u)\n", base);
++ return false;
++ }
++
++ if ((num == 0) || (base + num > V2M_MAX_SPI)) {
++ pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
++ num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
++ return false;
++ }
++
++ return true;
++}
++
++static int __init gicv2m_init_one(struct device_node *node,
++ struct irq_domain *parent)
++{
++ int ret;
++ struct v2m_data *v2m;
++
++ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
++ if (!v2m) {
++ pr_err("Failed to allocate struct v2m_data.\n");
++ return -ENOMEM;
++ }
++
++ ret = of_address_to_resource(node, 0, &v2m->res);
++ if (ret) {
++ pr_err("Failed to allocate v2m resource.\n");
++ goto err_free_v2m;
++ }
++
++ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
++ if (!v2m->base) {
++ pr_err("Failed to map GICv2m resource\n");
++ ret = -ENOMEM;
++ goto err_free_v2m;
++ }
++
++ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
++ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
++ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
++ v2m->spi_start, v2m->nr_spis);
++ } else {
++ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
++
++ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
++ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
++ }
++
++ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
++ ret = -EINVAL;
++ goto err_iounmap;
++ }
++
++ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
++ GFP_KERNEL);
++ if (!v2m->bm) {
++ ret = -ENOMEM;
++ goto err_iounmap;
++ }
++
++ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
++ if (!v2m->domain) {
++ pr_err("Failed to create GICv2m domain\n");
++ ret = -ENOMEM;
++ goto err_free_bm;
++ }
++
++ v2m->domain->parent = parent;
++ v2m->mchip.of_node = node;
++ v2m->mchip.domain = pci_msi_create_irq_domain(node,
++ &gicv2m_msi_domain_info,
++ v2m->domain);
++ if (!v2m->mchip.domain) {
++ pr_err("Failed to create MSI domain\n");
++ ret = -ENOMEM;
++ goto err_free_domains;
++ }
++
++ spin_lock_init(&v2m->msi_cnt_lock);
++
++ ret = of_pci_msi_chip_add(&v2m->mchip);
++ if (ret) {
++ pr_err("Failed to add msi_chip.\n");
++ goto err_free_domains;
++ }
++
++ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
++ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
++ v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
++
++ return 0;
++
++err_free_domains:
++ if (v2m->mchip.domain)
++ irq_domain_remove(v2m->mchip.domain);
++ if (v2m->domain)
++ irq_domain_remove(v2m->domain);
++err_free_bm:
++ kfree(v2m->bm);
++err_iounmap:
++ iounmap(v2m->base);
++err_free_v2m:
++ kfree(v2m);
++ return ret;
++}
++
++static struct of_device_id gicv2m_device_id[] = {
++ { .compatible = "arm,gic-v2m-frame", },
++ {},
++};
++
++int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
++{
++ int ret = 0;
++ struct device_node *child;
++
++ for (child = of_find_matching_node(node, gicv2m_device_id); child;
++ child = of_find_matching_node(child, gicv2m_device_id)) {
++ if (!of_find_property(child, "msi-controller", NULL))
++ continue;
++
++ ret = gicv2m_init_one(child, parent);
++ if (ret) {
++ of_node_put(node);
++ break;
++ }
++ }
++
++ return ret;
++}
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+new file mode 100644
+index 0000000..d689158
+--- /dev/null
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -0,0 +1,1630 @@
++/*
++ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
++ * Author: Marc Zyngier
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++
++#include
++#include
++#include
++
++#include "irqchip.h"
++
++#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0)
++
++#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
++
++/*
++ * Collection structure - just an ID, and a redistributor address to
++ * ping. We use one per CPU as a bag of interrupts assigned to this
++ * CPU.
++ */
++struct its_collection {
++ u64 target_address;
++ u16 col_id;
++};
++
++/*
++ * The ITS structure - contains most of the infrastructure, with the
++ * msi_controller, the command queue, the collections, and the list of
++ * devices writing to it.
++ */
++struct its_node {
++ raw_spinlock_t lock;
++ struct list_head entry;
++ struct msi_controller msi_chip;
++ struct irq_domain *domain;
++ void __iomem *base;
++ unsigned long phys_base;
++ struct its_cmd_block *cmd_base;
++ struct its_cmd_block *cmd_write;
++ void *tables[GITS_BASER_NR_REGS];
++ struct its_collection *collections;
++ struct list_head its_device_list;
++ u64 flags;
++ u32 ite_size;
++};
++
++#define ITS_ITT_ALIGN SZ_256
++
++struct event_lpi_map {
++ unsigned long *lpi_map;
++ u16 *col_map;
++ irq_hw_number_t lpi_base;
++ int nr_lpis;
++};
++
++/*
++ * The ITS view of a device - belongs to an ITS, a collection, owns an
++ * interrupt translation table, and a list of interrupts.
++ */
++struct its_device {
++ struct list_head entry;
++ struct its_node *its;
++ struct event_lpi_map event_map;
++ void *itt;
++ u32 nr_ites;
++ u32 device_id;
++};
++
++static LIST_HEAD(its_nodes);
++static DEFINE_SPINLOCK(its_lock);
++static struct device_node *gic_root_node;
++static struct rdists *gic_rdists;
++
++#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
++#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
++
++static struct its_collection *dev_event_to_col(struct its_device *its_dev,
++ u32 event)
++{
++ struct its_node *its = its_dev->its;
++
++ return its->collections + its_dev->event_map.col_map[event];
++}
++
++/*
++ * ITS command descriptors - parameters to be encoded in a command
++ * block.
++ */
++struct its_cmd_desc {
++ union {
++ struct {
++ struct its_device *dev;
++ u32 event_id;
++ } its_inv_cmd;
++
++ struct {
++ struct its_device *dev;
++ u32 event_id;
++ } its_int_cmd;
++
++ struct {
++ struct its_device *dev;
++ int valid;
++ } its_mapd_cmd;
++
++ struct {
++ struct its_collection *col;
++ int valid;
++ } its_mapc_cmd;
++
++ struct {
++ struct its_device *dev;
++ u32 phys_id;
++ u32 event_id;
++ } its_mapvi_cmd;
++
++ struct {
++ struct its_device *dev;
++ struct its_collection *col;
++ u32 event_id;
++ } its_movi_cmd;
++
++ struct {
++ struct its_device *dev;
++ u32 event_id;
++ } its_discard_cmd;
++
++ struct {
++ struct its_collection *col;
++ } its_invall_cmd;
++ };
++};
++
++/*
++ * The ITS command block, which is what the ITS actually parses.
++ */
++struct its_cmd_block {
++ u64 raw_cmd[4];
++};
++
++#define ITS_CMD_QUEUE_SZ SZ_64K
++#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
++
++typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
++ struct its_cmd_desc *);
++
++static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
++{
++ cmd->raw_cmd[0] &= ~0xffUL;
++ cmd->raw_cmd[0] |= cmd_nr;
++}
++
++static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
++{
++ cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
++ cmd->raw_cmd[0] |= ((u64)devid) << 32;
++}
++
++static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
++{
++ cmd->raw_cmd[1] &= ~0xffffffffUL;
++ cmd->raw_cmd[1] |= id;
++}
++
++static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
++{
++ cmd->raw_cmd[1] &= 0xffffffffUL;
++ cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
++}
++
++static void its_encode_size(struct its_cmd_block *cmd, u8 size)
++{
++ cmd->raw_cmd[1] &= ~0x1fUL;
++ cmd->raw_cmd[1] |= size & 0x1f;
++}
++
++static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
++{
++ cmd->raw_cmd[2] &= ~0xffffffffffffUL;
++ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
++}
++
++static void its_encode_valid(struct its_cmd_block *cmd, int valid)
++{
++ cmd->raw_cmd[2] &= ~(1UL << 63);
++ cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
++}
++
++static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
++{
++ cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
++ cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
++}
++
++static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
++{
++ cmd->raw_cmd[2] &= ~0xffffUL;
++ cmd->raw_cmd[2] |= col;
++}
++
++static inline void its_fixup_cmd(struct its_cmd_block *cmd)
++{
++ /* Let's fixup BE commands */
++ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
++ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
++ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
++ cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
++}
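
The encoders above are plain bitfield packing into the four command doublewords. As a rough illustration of how an opcode and a device ID land in raw_cmd[0] (opcode in bits [7:0], DeviceID in bits [63:32]), consider this standalone snippet; the opcode and device ID values are hypothetical and it is not part of the patch:

#include <stdio.h>

int main(void)
{
        unsigned long long raw = 0;
        unsigned int devid  = 0x1234;            /* hypothetical DeviceID */
        unsigned int cmd_nr = 0x08;              /* hypothetical opcode value */

        raw &= ~0xffULL;                         /* its_encode_cmd()   */
        raw |= cmd_nr;
        raw &= (1ULL << 32) - 1;                 /* its_encode_devid() */
        raw |= (unsigned long long)devid << 32;

        printf("raw_cmd[0] = 0x%016llx\n", raw); /* 0x0000123400000008 */
        return 0;
}
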
++
++static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ unsigned long itt_addr;
++ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
++
++ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
++ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
++
++ its_encode_cmd(cmd, GITS_CMD_MAPD);
++ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
++ its_encode_size(cmd, size - 1);
++ its_encode_itt(cmd, itt_addr);
++ its_encode_valid(cmd, desc->its_mapd_cmd.valid);
++
++ its_fixup_cmd(cmd);
++
++ return NULL;
++}
++
++static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ its_encode_cmd(cmd, GITS_CMD_MAPC);
++ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
++ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
++ its_encode_valid(cmd, desc->its_mapc_cmd.valid);
++
++ its_fixup_cmd(cmd);
++
++ return desc->its_mapc_cmd.col;
++}
++
++static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ struct its_collection *col;
++
++ col = dev_event_to_col(desc->its_mapvi_cmd.dev,
++ desc->its_mapvi_cmd.event_id);
++
++ its_encode_cmd(cmd, GITS_CMD_MAPVI);
++ its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
++ its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
++ its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
++ its_encode_collection(cmd, col->col_id);
++
++ its_fixup_cmd(cmd);
++
++ return col;
++}
++
++static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ struct its_collection *col;
++
++ col = dev_event_to_col(desc->its_movi_cmd.dev,
++ desc->its_movi_cmd.event_id);
++
++ its_encode_cmd(cmd, GITS_CMD_MOVI);
++ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
++ its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
++ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
++
++ its_fixup_cmd(cmd);
++
++ return col;
++}
++
++static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ struct its_collection *col;
++
++ col = dev_event_to_col(desc->its_discard_cmd.dev,
++ desc->its_discard_cmd.event_id);
++
++ its_encode_cmd(cmd, GITS_CMD_DISCARD);
++ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
++ its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
++
++ its_fixup_cmd(cmd);
++
++ return col;
++}
++
++static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ struct its_collection *col;
++
++ col = dev_event_to_col(desc->its_inv_cmd.dev,
++ desc->its_inv_cmd.event_id);
++
++ its_encode_cmd(cmd, GITS_CMD_INV);
++ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
++ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
++
++ its_fixup_cmd(cmd);
++
++ return col;
++}
++
++static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
++ struct its_cmd_desc *desc)
++{
++ its_encode_cmd(cmd, GITS_CMD_INVALL);
++ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
++
++ its_fixup_cmd(cmd);
++
++ return NULL;
++}
++
++static u64 its_cmd_ptr_to_offset(struct its_node *its,
++ struct its_cmd_block *ptr)
++{
++ return (ptr - its->cmd_base) * sizeof(*ptr);
++}
++
++static int its_queue_full(struct its_node *its)
++{
++ int widx;
++ int ridx;
++
++ widx = its->cmd_write - its->cmd_base;
++ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
++
++ /* This is incredibly unlikely to happen, unless the ITS locks up. */
++ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
++ return 1;
++
++ return 0;
++}
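
The queue-full test above is standard ring-buffer arithmetic: the queue is considered full when advancing the write index by one slot would land on the read index. A small sketch (illustrative only; the index snapshot is made up, and the slot count mirrors a 64K queue of 32-byte commands):

#include <stdio.h>

int main(void)
{
        unsigned int nr   = 64 * 1024 / 32;      /* 2048 command slots */
        unsigned int widx = 2047, ridx = 0;      /* hypothetical snapshot */

        printf("queue %s\n",
               ((widx + 1) % nr) == ridx ? "is full" : "has room");
        return 0;
}
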
++
++static struct its_cmd_block *its_allocate_entry(struct its_node *its)
++{
++ struct its_cmd_block *cmd;
++ u32 count = 1000000; /* 1s! */
++
++ while (its_queue_full(its)) {
++ count--;
++ if (!count) {
++ pr_err_ratelimited("ITS queue not draining\n");
++ return NULL;
++ }
++ cpu_relax();
++ udelay(1);
++ }
++
++ cmd = its->cmd_write++;
++
++ /* Handle queue wrapping */
++ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
++ its->cmd_write = its->cmd_base;
++
++ return cmd;
++}
++
++static struct its_cmd_block *its_post_commands(struct its_node *its)
++{
++ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
++
++ writel_relaxed(wr, its->base + GITS_CWRITER);
++
++ return its->cmd_write;
++}
++
++static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
++{
++ /*
++ * Make sure the commands written to memory are observable by
++ * the ITS.
++ */
++ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
++ __flush_dcache_area(cmd, sizeof(*cmd));
++ else
++ dsb(ishst);
++}
++
++static void its_wait_for_range_completion(struct its_node *its,
++ struct its_cmd_block *from,
++ struct its_cmd_block *to)
++{
++ u64 rd_idx, from_idx, to_idx;
++ u32 count = 1000000; /* 1s! */
++
++ from_idx = its_cmd_ptr_to_offset(its, from);
++ to_idx = its_cmd_ptr_to_offset(its, to);
++
++ while (1) {
++ rd_idx = readl_relaxed(its->base + GITS_CREADR);
++ if (rd_idx >= to_idx || rd_idx < from_idx)
++ break;
++
++ count--;
++ if (!count) {
++ pr_err_ratelimited("ITS queue timeout\n");
++ return;
++ }
++ cpu_relax();
++ udelay(1);
++ }
++}
++
++static void its_send_single_command(struct its_node *its,
++ its_cmd_builder_t builder,
++ struct its_cmd_desc *desc)
++{
++ struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
++ struct its_collection *sync_col;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&its->lock, flags);
++
++ cmd = its_allocate_entry(its);
++ if (!cmd) { /* We're soooooo screwed... */
++ pr_err_ratelimited("ITS can't allocate, dropping command\n");
++ raw_spin_unlock_irqrestore(&its->lock, flags);
++ return;
++ }
++ sync_col = builder(cmd, desc);
++ its_flush_cmd(its, cmd);
++
++ if (sync_col) {
++ sync_cmd = its_allocate_entry(its);
++ if (!sync_cmd) {
++ pr_err_ratelimited("ITS can't SYNC, skipping\n");
++ goto post;
++ }
++ its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
++ its_encode_target(sync_cmd, sync_col->target_address);
++ its_fixup_cmd(sync_cmd);
++ its_flush_cmd(its, sync_cmd);
++ }
++
++post:
++ next_cmd = its_post_commands(its);
++ raw_spin_unlock_irqrestore(&its->lock, flags);
++
++ its_wait_for_range_completion(its, cmd, next_cmd);
++}
++
++static void its_send_inv(struct its_device *dev, u32 event_id)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_inv_cmd.dev = dev;
++ desc.its_inv_cmd.event_id = event_id;
++
++ its_send_single_command(dev->its, its_build_inv_cmd, &desc);
++}
++
++static void its_send_mapd(struct its_device *dev, int valid)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_mapd_cmd.dev = dev;
++ desc.its_mapd_cmd.valid = !!valid;
++
++ its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
++}
++
++static void its_send_mapc(struct its_node *its, struct its_collection *col,
++ int valid)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_mapc_cmd.col = col;
++ desc.its_mapc_cmd.valid = !!valid;
++
++ its_send_single_command(its, its_build_mapc_cmd, &desc);
++}
++
++static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_mapvi_cmd.dev = dev;
++ desc.its_mapvi_cmd.phys_id = irq_id;
++ desc.its_mapvi_cmd.event_id = id;
++
++ its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
++}
++
++static void its_send_movi(struct its_device *dev,
++ struct its_collection *col, u32 id)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_movi_cmd.dev = dev;
++ desc.its_movi_cmd.col = col;
++ desc.its_movi_cmd.event_id = id;
++
++ its_send_single_command(dev->its, its_build_movi_cmd, &desc);
++}
++
++static void its_send_discard(struct its_device *dev, u32 id)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_discard_cmd.dev = dev;
++ desc.its_discard_cmd.event_id = id;
++
++ its_send_single_command(dev->its, its_build_discard_cmd, &desc);
++}
++
++static void its_send_invall(struct its_node *its, struct its_collection *col)
++{
++ struct its_cmd_desc desc;
++
++ desc.its_invall_cmd.col = col;
++
++ its_send_single_command(its, its_build_invall_cmd, &desc);
++}
++
++/*
++ * irqchip functions - assumes MSI, mostly.
++ */
++
++static inline u32 its_get_event_id(struct irq_data *d)
++{
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ return d->hwirq - its_dev->event_map.lpi_base;
++}
++
++static void lpi_set_config(struct irq_data *d, bool enable)
++{
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ irq_hw_number_t hwirq = d->hwirq;
++ u32 id = its_get_event_id(d);
++ u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
++
++ if (enable)
++ *cfg |= LPI_PROP_ENABLED;
++ else
++ *cfg &= ~LPI_PROP_ENABLED;
++
++ /*
++ * Make the above write visible to the redistributors.
++ * And yes, we're flushing exactly: One. Single. Byte.
++ * Humpf...
++ */
++ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
++ __flush_dcache_area(cfg, sizeof(*cfg));
++ else
++ dsb(ishst);
++ its_send_inv(its_dev, id);
++}
++
++static void its_mask_irq(struct irq_data *d)
++{
++ lpi_set_config(d, false);
++}
++
++static void its_unmask_irq(struct irq_data *d)
++{
++ lpi_set_config(d, true);
++}
++
++static void its_eoi_irq(struct irq_data *d)
++{
++ gic_write_eoir(d->hwirq);
++}
++
++static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
++ bool force)
++{
++ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ struct its_collection *target_col;
++ u32 id = its_get_event_id(d);
++
++ if (cpu >= nr_cpu_ids)
++ return -EINVAL;
++
++ target_col = &its_dev->its->collections[cpu];
++ its_send_movi(its_dev, target_col, id);
++ its_dev->event_map.col_map[id] = cpu;
++
++ return IRQ_SET_MASK_OK_DONE;
++}
++
++static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
++{
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ struct its_node *its;
++ u64 addr;
++
++ its = its_dev->its;
++ addr = its->phys_base + GITS_TRANSLATER;
++
++ msg->address_lo = addr & ((1UL << 32) - 1);
++ msg->address_hi = addr >> 32;
++ msg->data = its_get_event_id(d);
++}
++
++static struct irq_chip its_irq_chip = {
++ .name = "ITS",
++ .irq_mask = its_mask_irq,
++ .irq_unmask = its_unmask_irq,
++ .irq_eoi = its_eoi_irq,
++ .irq_set_affinity = its_set_affinity,
++ .irq_compose_msi_msg = its_irq_compose_msi_msg,
++};
++
++static void its_mask_msi_irq(struct irq_data *d)
++{
++ pci_msi_mask_irq(d);
++ irq_chip_mask_parent(d);
++}
++
++static void its_unmask_msi_irq(struct irq_data *d)
++{
++ pci_msi_unmask_irq(d);
++ irq_chip_unmask_parent(d);
++}
++
++static struct irq_chip its_msi_irq_chip = {
++ .name = "ITS-MSI",
++ .irq_unmask = its_unmask_msi_irq,
++ .irq_mask = its_mask_msi_irq,
++ .irq_eoi = irq_chip_eoi_parent,
++ .irq_write_msi_msg = pci_msi_domain_write_msg,
++};
++
++/*
++ * How we allocate LPIs:
++ *
++ * The GIC has id_bits bits for interrupt identifiers. From there, we
++ * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
++ * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
++ * bits to the right.
++ *
++ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
++ */
++#define IRQS_PER_CHUNK_SHIFT 5
++#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
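
To put numbers on the comment above: with a hypothetical id_bits of 16, the 8192 SGI/PPI/SPI IDs are subtracted and the remainder is divided into chunks of 32 LPIs. A standalone illustration (not part of the patch):

#include <stdio.h>

int main(void)
{
        unsigned int id_bits = 16;                      /* hypothetical */
        unsigned int chunks  = ((1u << id_bits) - 8192) >> 5;
        unsigned int lpi     = 8192 + 3 * 32;           /* first LPI of chunk 3 */

        printf("%u chunks of 32 LPIs; LPI %u -> chunk %u\n",
               chunks, lpi, (lpi - 8192) >> 5);
        return 0;
}
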
++
++static unsigned long *lpi_bitmap;
++static u32 lpi_chunks;
++static DEFINE_SPINLOCK(lpi_lock);
++
++static int its_lpi_to_chunk(int lpi)
++{
++ return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
++}
++
++static int its_chunk_to_lpi(int chunk)
++{
++ return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
++}
++
++static int its_lpi_init(u32 id_bits)
++{
++ lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
++
++ lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
++ GFP_KERNEL);
++ if (!lpi_bitmap) {
++ lpi_chunks = 0;
++ return -ENOMEM;
++ }
++
++ pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
++ return 0;
++}
++
++static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
++{
++ unsigned long *bitmap = NULL;
++ int chunk_id;
++ int nr_chunks;
++ int i;
++
++ nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
++
++ spin_lock(&lpi_lock);
++
++ do {
++ chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
++ 0, nr_chunks, 0);
++ if (chunk_id < lpi_chunks)
++ break;
++
++ nr_chunks--;
++ } while (nr_chunks > 0);
++
++ if (!nr_chunks)
++ goto out;
++
++ bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
++ GFP_ATOMIC);
++ if (!bitmap)
++ goto out;
++
++ for (i = 0; i < nr_chunks; i++)
++ set_bit(chunk_id + i, lpi_bitmap);
++
++ *base = its_chunk_to_lpi(chunk_id);
++ *nr_ids = nr_chunks * IRQS_PER_CHUNK;
++
++out:
++ spin_unlock(&lpi_lock);
++
++ if (!bitmap)
++ *base = *nr_ids = 0;
++
++ return bitmap;
++}
++
++static void its_lpi_free(struct event_lpi_map *map)
++{
++ int base = map->lpi_base;
++ int nr_ids = map->nr_lpis;
++ int lpi;
++
++ spin_lock(&lpi_lock);
++
++ for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
++ int chunk = its_lpi_to_chunk(lpi);
++ BUG_ON(chunk > lpi_chunks);
++ if (test_bit(chunk, lpi_bitmap)) {
++ clear_bit(chunk, lpi_bitmap);
++ } else {
++ pr_err("Bad LPI chunk %d\n", chunk);
++ }
++ }
++
++ spin_unlock(&lpi_lock);
++
++ kfree(map->lpi_map);
++ kfree(map->col_map);
++}
++
++/*
++ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
++ * deal with (one configuration byte per interrupt). PENDBASE has to
++ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
++ */
++#define LPI_PROPBASE_SZ SZ_64K
++#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K)
++
++/*
++ * This is how many bits of ID we need, including the useless ones.
++ */
++#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K)
++
++#define LPI_PROP_DEFAULT_PRIO 0xa0
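
The sizes defined above follow directly from the comment: one configuration byte per LPI in the property table, and one pending bit per interrupt ID, including the 8192 IDs below the LPI range. A quick arithmetic check (illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned int propbase_sz = 64 * 1024;                   /* bytes, 1 per LPI  */
        unsigned int lpis        = propbase_sz;
        unsigned int pendbase_sz = propbase_sz / 8 + 8192 / 8;  /* bits -> bytes     */

        printf("%u LPIs -> property table %u bytes, pending table %u bytes\n",
               lpis, propbase_sz, pendbase_sz);
        return 0;
}
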
++
++static int __init its_alloc_lpi_tables(void)
++{
++ phys_addr_t paddr;
++
++ gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
++ get_order(LPI_PROPBASE_SZ));
++ if (!gic_rdists->prop_page) {
++ pr_err("Failed to allocate PROPBASE\n");
++ return -ENOMEM;
++ }
++
++ paddr = page_to_phys(gic_rdists->prop_page);
++ pr_info("GIC: using LPI property table @%pa\n", &paddr);
++
++ /* Priority 0xa0, Group-1, disabled */
++ memset(page_address(gic_rdists->prop_page),
++ LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
++ LPI_PROPBASE_SZ);
++
++ /* Make sure the GIC will observe the written configuration */
++ __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
++
++ return 0;
++}
++
++static const char *its_base_type_string[] = {
++ [GITS_BASER_TYPE_DEVICE] = "Devices",
++ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
++ [GITS_BASER_TYPE_CPU] = "Physical CPUs",
++ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
++ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
++ [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
++ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
++};
++
++static void its_free_tables(struct its_node *its)
++{
++ int i;
++
++ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
++ if (its->tables[i]) {
++ free_page((unsigned long)its->tables[i]);
++ its->tables[i] = NULL;
++ }
++ }
++}
++
++static int its_alloc_tables(struct its_node *its)
++{
++ int err;
++ int i;
++ int psz = SZ_64K;
++ u64 shr = GITS_BASER_InnerShareable;
++ u64 cache = GITS_BASER_WaWb;
++
++ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
++ u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
++ u64 type = GITS_BASER_TYPE(val);
++ u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
++ int order = get_order(psz);
++ int alloc_size;
++ u64 tmp;
++ void *base;
++
++ if (type == GITS_BASER_TYPE_NONE)
++ continue;
++
++ /*
++ * Allocate as many entries as required to fit the
++ * range of device IDs that the ITS can grok... The ID
++ * space being incredibly sparse, this results in a
++ * massive waste of memory.
++ *
++ * For other tables, only allocate a single page.
++ */
++ if (type == GITS_BASER_TYPE_DEVICE) {
++ u64 typer = readq_relaxed(its->base + GITS_TYPER);
++ u32 ids = GITS_TYPER_DEVBITS(typer);
++
++ /*
++ * 'order' was initialized earlier to the default page
++ * granule of the ITS. We can't have an allocation
++ * smaller than that. If the requested allocation
++ * is smaller, round up to the default page granule.
++ */
++ order = max(get_order((1UL << ids) * entry_size),
++ order);
++ if (order >= MAX_ORDER) {
++ order = MAX_ORDER - 1;
++ pr_warn("%s: Device Table too large, reduce its page order to %u\n",
++ its->msi_chip.of_node->full_name, order);
++ }
++ }
++
++ alloc_size = (1 << order) * PAGE_SIZE;
++ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
++ if (!base) {
++ err = -ENOMEM;
++ goto out_free;
++ }
++
++ its->tables[i] = base;
++
++retry_baser:
++ val = (virt_to_phys(base) |
++ (type << GITS_BASER_TYPE_SHIFT) |
++ ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
++ cache |
++ shr |
++ GITS_BASER_VALID);
++
++ switch (psz) {
++ case SZ_4K:
++ val |= GITS_BASER_PAGE_SIZE_4K;
++ break;
++ case SZ_16K:
++ val |= GITS_BASER_PAGE_SIZE_16K;
++ break;
++ case SZ_64K:
++ val |= GITS_BASER_PAGE_SIZE_64K;
++ break;
++ }
++
++ val |= (alloc_size / psz) - 1;
++
++ writeq_relaxed(val, its->base + GITS_BASER + i * 8);
++ tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
++
++ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
++ /*
++ * Shareability didn't stick. Just use
++ * whatever the read reported, which is likely
++ * to be the only thing this redistributor
++ * supports. If that's zero, make it
++ * non-cacheable as well.
++ */
++ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
++ if (!shr) {
++ cache = GITS_BASER_nC;
++ __flush_dcache_area(base, alloc_size);
++ }
++ goto retry_baser;
++ }
++
++ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
++ /*
++ * Page size didn't stick. Let's try a smaller
++ * size and retry. If we reach 4K, then
++ * something is horribly wrong...
++ */
++ switch (psz) {
++ case SZ_16K:
++ psz = SZ_4K;
++ goto retry_baser;
++ case SZ_64K:
++ psz = SZ_16K;
++ goto retry_baser;
++ }
++ }
++
++ if (val != tmp) {
++ pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
++ its->msi_chip.of_node->full_name, i,
++ (unsigned long) val, (unsigned long) tmp);
++ err = -ENXIO;
++ goto out_free;
++ }
++
++ pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
++ (int)(alloc_size / entry_size),
++ its_base_type_string[type],
++ (unsigned long)virt_to_phys(base),
++ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
++ }
++
++ return 0;
++
++out_free:
++ its_free_tables(its);
++
++ return err;
++}
++
++static int its_alloc_collections(struct its_node *its)
++{
++ its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
++ GFP_KERNEL);
++ if (!its->collections)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static void its_cpu_init_lpis(void)
++{
++ void __iomem *rbase = gic_data_rdist_rd_base();
++ struct page *pend_page;
++ u64 val, tmp;
++
++ /* If we didn't allocate the pending table yet, do it now */
++ pend_page = gic_data_rdist()->pend_page;
++ if (!pend_page) {
++ phys_addr_t paddr;
++ /*
++ * The pending pages have to be at least 64kB aligned,
++ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
++ */
++ pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
++ get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
++ if (!pend_page) {
++ pr_err("Failed to allocate PENDBASE for CPU%d\n",
++ smp_processor_id());
++ return;
++ }
++
++ /* Make sure the GIC will observe the zero-ed page */
++ __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
++
++ paddr = page_to_phys(pend_page);
++ pr_info("CPU%d: using LPI pending table @%pa\n",
++ smp_processor_id(), &paddr);
++ gic_data_rdist()->pend_page = pend_page;
++ }
++
++ /* Disable LPIs */
++ val = readl_relaxed(rbase + GICR_CTLR);
++ val &= ~GICR_CTLR_ENABLE_LPIS;
++ writel_relaxed(val, rbase + GICR_CTLR);
++
++ /*
++ * Make sure any change to the table is observable by the GIC.
++ */
++ dsb(sy);
++
++ /* set PROPBASE */
++ val = (page_to_phys(gic_rdists->prop_page) |
++ GICR_PROPBASER_InnerShareable |
++ GICR_PROPBASER_WaWb |
++ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
++
++ writeq_relaxed(val, rbase + GICR_PROPBASER);
++ tmp = readq_relaxed(rbase + GICR_PROPBASER);
++
++ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
++ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
++ /*
++ * The HW reports non-shareable, we must
++ * remove the cacheability attributes as
++ * well.
++ */
++ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
++ GICR_PROPBASER_CACHEABILITY_MASK);
++ val |= GICR_PROPBASER_nC;
++ writeq_relaxed(val, rbase + GICR_PROPBASER);
++ }
++ pr_info_once("GIC: using cache flushing for LPI property table\n");
++ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
++ }
++
++ /* set PENDBASE */
++ val = (page_to_phys(pend_page) |
++ GICR_PENDBASER_InnerShareable |
++ GICR_PENDBASER_WaWb);
++
++ writeq_relaxed(val, rbase + GICR_PENDBASER);
++ tmp = readq_relaxed(rbase + GICR_PENDBASER);
++
++ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
++ /*
++ * The HW reports non-shareable, we must remove the
++ * cacheability attributes as well.
++ */
++ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
++ GICR_PENDBASER_CACHEABILITY_MASK);
++ val |= GICR_PENDBASER_nC;
++ writeq_relaxed(val, rbase + GICR_PENDBASER);
++ }
++
++ /* Enable LPIs */
++ val = readl_relaxed(rbase + GICR_CTLR);
++ val |= GICR_CTLR_ENABLE_LPIS;
++ writel_relaxed(val, rbase + GICR_CTLR);
++
++ /* Make sure the GIC has seen the above */
++ dsb(sy);
++}
++
++static void its_cpu_init_collection(void)
++{
++ struct its_node *its;
++ int cpu;
++
++ spin_lock(&its_lock);
++ cpu = smp_processor_id();
++
++ list_for_each_entry(its, &its_nodes, entry) {
++ u64 target;
++
++ /*
++ * We now have to bind each collection to its target
++ * redistributor.
++ */
++ if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
++ /*
++ * This ITS wants the physical address of the
++ * redistributor.
++ */
++ target = gic_data_rdist()->phys_base;
++ } else {
++ /*
++ * This ITS wants a linear CPU number.
++ */
++ target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
++ target = GICR_TYPER_CPU_NUMBER(target) << 16;
++ }
++
++ /* Perform collection mapping */
++ its->collections[cpu].target_address = target;
++ its->collections[cpu].col_id = cpu;
++
++ its_send_mapc(its, &its->collections[cpu], 1);
++ its_send_invall(its, &its->collections[cpu]);
++ }
++
++ spin_unlock(&its_lock);
++}
++
++static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
++{
++ struct its_device *its_dev = NULL, *tmp;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&its->lock, flags);
++
++ list_for_each_entry(tmp, &its->its_device_list, entry) {
++ if (tmp->device_id == dev_id) {
++ its_dev = tmp;
++ break;
++ }
++ }
++
++ raw_spin_unlock_irqrestore(&its->lock, flags);
++
++ return its_dev;
++}
++
++static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
++ int nvecs)
++{
++ struct its_device *dev;
++ unsigned long *lpi_map;
++ unsigned long flags;
++ u16 *col_map = NULL;
++ void *itt;
++ int lpi_base;
++ int nr_lpis;
++ int nr_ites;
++ int sz;
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ /*
++ * At least one bit of EventID is being used, hence a minimum
++ * of two entries. No, the architecture doesn't let you
++ * express an ITT with a single entry.
++ */
++ nr_ites = max(2UL, roundup_pow_of_two(nvecs));
++ sz = nr_ites * its->ite_size;
++ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
++ itt = kzalloc(sz, GFP_KERNEL);
++ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
++ if (lpi_map)
++ col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
++
++ if (!dev || !itt || !lpi_map || !col_map) {
++ kfree(dev);
++ kfree(itt);
++ kfree(lpi_map);
++ kfree(col_map);
++ return NULL;
++ }
++
++ __flush_dcache_area(itt, sz);
++
++ dev->its = its;
++ dev->itt = itt;
++ dev->nr_ites = nr_ites;
++ dev->event_map.lpi_map = lpi_map;
++ dev->event_map.col_map = col_map;
++ dev->event_map.lpi_base = lpi_base;
++ dev->event_map.nr_lpis = nr_lpis;
++ dev->device_id = dev_id;
++ INIT_LIST_HEAD(&dev->entry);
++
++ raw_spin_lock_irqsave(&its->lock, flags);
++ list_add(&dev->entry, &its->its_device_list);
++ raw_spin_unlock_irqrestore(&its->lock, flags);
++
++ /* Map device to its ITT */
++ its_send_mapd(dev, 1);
++
++ return dev;
++}
++
++static void its_free_device(struct its_device *its_dev)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&its_dev->its->lock, flags);
++ list_del(&its_dev->entry);
++ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
++ kfree(its_dev->itt);
++ kfree(its_dev);
++}
++
++static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
++{
++ int idx;
++
++ idx = find_first_zero_bit(dev->event_map.lpi_map,
++ dev->event_map.nr_lpis);
++ if (idx == dev->event_map.nr_lpis)
++ return -ENOSPC;
++
++ *hwirq = dev->event_map.lpi_base + idx;
++ set_bit(idx, dev->event_map.lpi_map);
++
++ return 0;
++}
++
++struct its_pci_alias {
++ struct pci_dev *pdev;
++ u32 dev_id;
++ u32 count;
++};
++
++static int its_pci_msi_vec_count(struct pci_dev *pdev)
++{
++ int msi, msix;
++
++ msi = max(pci_msi_vec_count(pdev), 0);
++ msix = max(pci_msix_vec_count(pdev), 0);
++
++ return max(msi, msix);
++}
++
++static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
++{
++ struct its_pci_alias *dev_alias = data;
++
++ dev_alias->dev_id = alias;
++ if (pdev != dev_alias->pdev)
++ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
++
++ return 0;
++}
++
++int __its_msi_prepare(struct irq_domain *domain, u32 dev_id,
++ struct device *dev, int nvec, msi_alloc_info_t *info)
++{
++ struct its_node *its;
++ struct its_device *its_dev;
++
++ its = domain->parent->host_data;
++
++ its_dev = its_find_device(its, dev_id);
++ if (its_dev) {
++ /*
++ * We already have seen this ID, probably through
++ * another alias (PCI bridge of some sort). No need to
++ * create the device.
++ */
++ dev_dbg(dev, "Reusing ITT for devID %x\n", dev_id);
++ goto out;
++ }
++
++ its_dev = its_create_device(its, dev_id, nvec);
++ if (!its_dev)
++ return -ENOMEM;
++
++ dev_dbg(dev, "ITT %d entries, %d bits\n",
++ nvec, ilog2(nvec));
++out:
++ info->scratchpad[0].ptr = its_dev;
++ info->scratchpad[1].ptr = dev;
++
++ return 0;
++}
++
++static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
++ int nvec, msi_alloc_info_t *info)
++{
++ struct pci_dev *pdev;
++ struct its_pci_alias dev_alias;
++ u32 dev_id;
++
++ if (!dev_is_pci(dev))
++ return -EINVAL;
++
++ pdev = to_pci_dev(dev);
++ dev_alias.pdev = pdev;
++ dev_alias.count = nvec;
++
++ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
++
++ dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
++ dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
++ return __its_msi_prepare(domain, dev_alias.dev_id,
++ dev, dev_alias.count, info);
++}
++
++static struct msi_domain_ops its_pci_msi_ops = {
++ .msi_prepare = its_msi_prepare,
++};
++
++static struct msi_domain_info its_pci_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
++ .ops = &its_pci_msi_ops,
++ .chip = &its_msi_irq_chip,
++};
++
++static int its_irq_gic_domain_alloc(struct irq_domain *domain,
++ unsigned int virq,
++ irq_hw_number_t hwirq)
++{
++ struct of_phandle_args args;
++
++ args.np = domain->parent->of_node;
++ args.args_count = 3;
++ args.args[0] = GIC_IRQ_TYPE_LPI;
++ args.args[1] = hwirq;
++ args.args[2] = IRQ_TYPE_EDGE_RISING;
++
++ return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
++}
++
++static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
++ unsigned int nr_irqs, void *args)
++{
++ msi_alloc_info_t *info = args;
++ struct its_device *its_dev = info->scratchpad[0].ptr;
++ irq_hw_number_t hwirq;
++ int err;
++ int i;
++
++ for (i = 0; i < nr_irqs; i++) {
++ err = its_alloc_device_irq(its_dev, &hwirq);
++ if (err)
++ return err;
++
++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
++ if (err)
++ return err;
++
++ irq_domain_set_hwirq_and_chip(domain, virq + i,
++ hwirq, &its_irq_chip, its_dev);
++ dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
++ (int)(hwirq - its_dev->event_map.lpi_base),
++ (int)hwirq, virq + i);
++ }
++
++ return 0;
++}
++
++static void its_irq_domain_activate(struct irq_domain *domain,
++ struct irq_data *d)
++{
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ u32 event = its_get_event_id(d);
++
++ /* Bind the LPI to the first possible CPU */
++ its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
++
++ /* Map the GIC IRQ and event to the device */
++ its_send_mapvi(its_dev, d->hwirq, event);
++}
++
++static void its_irq_domain_deactivate(struct irq_domain *domain,
++ struct irq_data *d)
++{
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ u32 event = its_get_event_id(d);
++
++ /* Stop the delivery of interrupts */
++ its_send_discard(its_dev, event);
++}
++
++static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
++ unsigned int nr_irqs)
++{
++ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
++ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ int i;
++
++ for (i = 0; i < nr_irqs; i++) {
++ struct irq_data *data = irq_domain_get_irq_data(domain,
++ virq + i);
++ u32 event = its_get_event_id(data);
++
++ /* Mark interrupt index as unused */
++ clear_bit(event, its_dev->event_map.lpi_map);
++
++ /* Nuke the entry in the domain */
++ irq_domain_reset_irq_data(data);
++ }
++
++ /* If all interrupts have been freed, start mopping the floor */
++ if (bitmap_empty(its_dev->event_map.lpi_map,
++ its_dev->event_map.nr_lpis)) {
++ its_lpi_free(&its_dev->event_map);
++
++ /* Unmap device/itt */
++ its_send_mapd(its_dev, 0);
++ its_free_device(its_dev);
++ }
++
++ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
++}
++
++static const struct irq_domain_ops its_domain_ops = {
++ .alloc = its_irq_domain_alloc,
++ .free = its_irq_domain_free,
++ .activate = its_irq_domain_activate,
++ .deactivate = its_irq_domain_deactivate,
++};
++
++static int its_force_quiescent(void __iomem *base)
++{
++ u32 count = 1000000; /* 1s */
++ u32 val;
++
++ val = readl_relaxed(base + GITS_CTLR);
++ if (val & GITS_CTLR_QUIESCENT)
++ return 0;
++
++ /* Disable the generation of all interrupts to this ITS */
++ val &= ~GITS_CTLR_ENABLE;
++ writel_relaxed(val, base + GITS_CTLR);
++
++ /* Poll GITS_CTLR and wait until ITS becomes quiescent */
++ while (1) {
++ val = readl_relaxed(base + GITS_CTLR);
++ if (val & GITS_CTLR_QUIESCENT)
++ return 0;
++
++ count--;
++ if (!count)
++ return -EBUSY;
++
++ cpu_relax();
++ udelay(1);
++ }
++}
++
++static int its_probe(struct device_node *node, struct irq_domain *parent)
++{
++ struct resource res;
++ struct its_node *its;
++ void __iomem *its_base;
++ u32 val;
++ u64 baser, tmp;
++ int err;
++
++ err = of_address_to_resource(node, 0, &res);
++ if (err) {
++ pr_warn("%s: no regs?\n", node->full_name);
++ return -ENXIO;
++ }
++
++ its_base = ioremap(res.start, resource_size(&res));
++ if (!its_base) {
++ pr_warn("%s: unable to map registers\n", node->full_name);
++ return -ENOMEM;
++ }
++
++ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
++ if (val != 0x30 && val != 0x40) {
++ pr_warn("%s: no ITS detected, giving up\n", node->full_name);
++ err = -ENODEV;
++ goto out_unmap;
++ }
++
++ err = its_force_quiescent(its_base);
++ if (err) {
++ pr_warn("%s: failed to quiesce, giving up\n",
++ node->full_name);
++ goto out_unmap;
++ }
++
++ pr_info("ITS: %s\n", node->full_name);
++
++ its = kzalloc(sizeof(*its), GFP_KERNEL);
++ if (!its) {
++ err = -ENOMEM;
++ goto out_unmap;
++ }
++
++ raw_spin_lock_init(&its->lock);
++ INIT_LIST_HEAD(&its->entry);
++ INIT_LIST_HEAD(&its->its_device_list);
++ its->base = its_base;
++ its->phys_base = res.start;
++ its->msi_chip.of_node = node;
++ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
++
++ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
++ if (!its->cmd_base) {
++ err = -ENOMEM;
++ goto out_free_its;
++ }
++ its->cmd_write = its->cmd_base;
++
++ err = its_alloc_tables(its);
++ if (err)
++ goto out_free_cmd;
++
++ err = its_alloc_collections(its);
++ if (err)
++ goto out_free_tables;
++
++ baser = (virt_to_phys(its->cmd_base) |
++ GITS_CBASER_WaWb |
++ GITS_CBASER_InnerShareable |
++ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
++ GITS_CBASER_VALID);
++
++ writeq_relaxed(baser, its->base + GITS_CBASER);
++ tmp = readq_relaxed(its->base + GITS_CBASER);
++
++ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
++ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
++ /*
++ * The HW reports non-shareable, we must
++ * remove the cacheability attributes as
++ * well.
++ */
++ baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
++ GITS_CBASER_CACHEABILITY_MASK);
++ baser |= GITS_CBASER_nC;
++ writeq_relaxed(baser, its->base + GITS_CBASER);
++ }
++ pr_info("ITS: using cache flushing for cmd queue\n");
++ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
++ }
++
++ writeq_relaxed(0, its->base + GITS_CWRITER);
++ writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
++
++ if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
++ its->domain = irq_domain_add_tree(node, &its_domain_ops, its);
++ if (!its->domain) {
++ err = -ENOMEM;
++ goto out_free_tables;
++ }
++
++ its->domain->parent = parent;
++ its->domain->bus_token = DOMAIN_BUS_NEXUS;
++
++ its->msi_chip.domain = pci_msi_create_irq_domain(node,
++ &its_pci_msi_domain_info,
++ its->domain);
++ if (!its->msi_chip.domain) {
++ err = -ENOMEM;
++ goto out_free_domains;
++ }
++
++ err = of_pci_msi_chip_add(&its->msi_chip);
++ if (err)
++ goto out_free_domains;
++ }
++
++ spin_lock(&its_lock);
++ list_add(&its->entry, &its_nodes);
++ spin_unlock(&its_lock);
++
++ return 0;
++
++out_free_domains:
++ if (its->msi_chip.domain)
++ irq_domain_remove(its->msi_chip.domain);
++ if (its->domain)
++ irq_domain_remove(its->domain);
++out_free_tables:
++ its_free_tables(its);
++out_free_cmd:
++ kfree(its->cmd_base);
++out_free_its:
++ kfree(its);
++out_unmap:
++ iounmap(its_base);
++ pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
++ return err;
++}
++
++static bool gic_rdists_supports_plpis(void)
++{
++ return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
++}
++
++int its_cpu_init(void)
++{
++ if (!list_empty(&its_nodes)) {
++ if (!gic_rdists_supports_plpis()) {
++ pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
++ return -ENXIO;
++ }
++ its_cpu_init_lpis();
++ its_cpu_init_collection();
++ }
++
++ return 0;
++}
++
++static struct of_device_id its_device_id[] = {
++ { .compatible = "arm,gic-v3-its", },
++ {},
++};
++
++int its_init(struct device_node *node, struct rdists *rdists,
++ struct irq_domain *parent_domain)
++{
++ struct device_node *np;
++
++ for (np = of_find_matching_node(node, its_device_id); np;
++ np = of_find_matching_node(np, its_device_id)) {
++ its_probe(np, parent_domain);
++ }
++
++ if (list_empty(&its_nodes)) {
++ pr_warn("ITS: No ITS available, not enabling LPIs\n");
++ return -ENXIO;
++ }
++
++ gic_rdists = rdists;
++ gic_root_node = node;
++
++ its_alloc_lpi_tables();
++ its_lpi_init(rdists->id_bits);
++
++ return 0;
++}
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index aa17ae8..fd8850d 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -34,20 +34,25 @@
+ #include "irq-gic-common.h"
+ #include "irqchip.h"
+
++struct redist_region {
++ void __iomem *redist_base;
++ phys_addr_t phys_base;
++};
++
+ struct gic_chip_data {
+ void __iomem *dist_base;
+- void __iomem **redist_base;
+- void __iomem * __percpu *rdist;
++ struct redist_region *redist_regions;
++ struct rdists rdists;
+ struct irq_domain *domain;
+ u64 redist_stride;
+- u32 redist_regions;
++ u32 nr_redist_regions;
+ unsigned int irq_nr;
+ };
+
+ static struct gic_chip_data gic_data __read_mostly;
+
+-#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
+-#define gic_data_rdist_rd_base() (*gic_data_rdist())
++#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
++#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+ #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+ /* Our default, arbitrary priority value. Linux only uses one anyway. */
+@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
+ if (d->hwirq <= 1023) /* SPI -> dist_base */
+ return gic_data.dist_base;
+
+- if (d->hwirq >= 8192)
+- BUG(); /* LPI Detected!!! */
+-
+ return NULL;
+ }
+
+@@ -236,7 +238,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ if (irq < 16)
+ return -EINVAL;
+
+- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
++ /* SPIs have restrictions on the supported types */
++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
++ type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ if (gic_irq_in_rdist(d)) {
+@@ -247,9 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ rwp_wait = gic_dist_wait_for_rwp;
+ }
+
+- gic_configure_irq(irq, type, base, rwp_wait);
+-
+- return 0;
++ return gic_configure_irq(irq, type, base, rwp_wait);
+ }
+
+ static u64 gic_mpidr_to_affinity(u64 mpidr)
+@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ do {
+ irqnr = gic_read_iar();
+
+- if (likely(irqnr > 15 && irqnr < 1020)) {
++ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+ int err;
+ err = handle_domain_irq(gic_data.domain, irqnr, regs);
+ if (err) {
+- WARN_ONCE(true, "Unexpected SPI received!\n");
++ WARN_ONCE(true, "Unexpected interrupt received!\n");
+ gic_write_eoir(irqnr);
+ }
+ continue;
+@@ -333,8 +335,8 @@ static int gic_populate_rdist(void)
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+- for (i = 0; i < gic_data.redist_regions; i++) {
+- void __iomem *ptr = gic_data.redist_base[i];
++ for (i = 0; i < gic_data.nr_redist_regions; i++) {
++ void __iomem *ptr = gic_data.redist_regions[i].redist_base;
+ u32 reg;
+
+ reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+@@ -347,10 +349,13 @@ static int gic_populate_rdist(void)
+ do {
+ typer = readq_relaxed(ptr + GICR_TYPER);
+ if ((typer >> 32) == aff) {
++ u64 offset = ptr - gic_data.redist_regions[i].redist_base;
+ gic_data_rdist_rd_base() = ptr;
+- pr_info("CPU%d: found redistributor %llx @%p\n",
++ gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
++ pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
+ smp_processor_id(),
+- (unsigned long long)mpidr, ptr);
++ (unsigned long long)mpidr,
++ i, &gic_data_rdist()->phys_base);
+ return 0;
+ }
+
+@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void)
+ gic_write_grpen1(1);
+ }
+
++static int gic_dist_supports_lpis(void)
++{
++ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
++}
++
+ static void gic_cpu_init(void)
+ {
+ void __iomem *rbase;
+@@ -399,6 +409,10 @@ static void gic_cpu_init(void)
+
+ gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+
++ /* Give LPIs a spin */
++ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
++ its_cpu_init();
++
+ /* initialise system registers */
+ gic_cpu_sys_reg_init();
+ }
+@@ -452,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+ tlist |= 1 << (mpidr & 0xf);
+
+ cpu = cpumask_next(cpu, mask);
+- if (cpu == nr_cpu_ids)
++ if (cpu >= nr_cpu_ids)
+ goto out;
+
+ mpidr = cpu_logical_map(cpu);
+@@ -467,15 +481,19 @@ out:
+ return tlist;
+ }
+
++#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
++ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
++ << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
++
+ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+ {
+ u64 val;
+
+- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
+- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
+- irq << 24 |
+- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
+- tlist);
++ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
++ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
++ irq << ICC_SGI1R_SGI_ID_SHIFT |
++ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
++ tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
+
+ pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+ gic_write_sgi1r(val);
+@@ -585,26 +603,43 @@ static struct irq_chip gic_chip = {
+ .irq_set_affinity = gic_set_affinity,
+ };
+
++#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
++
+ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+ {
+ /* SGIs are private to the core kernel */
+ if (hw < 16)
+ return -EPERM;
++ /* Nothing here */
++ if (hw >= gic_data.irq_nr && hw < 8192)
++ return -EPERM;
++ /* Off limits */
++ if (hw >= GIC_ID_NR)
++ return -EPERM;
++
+ /* PPIs */
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+- irq_set_chip_and_handler(irq, &gic_chip,
+- handle_percpu_devid_irq);
++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
++ handle_percpu_devid_irq, NULL, NULL);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ }
+ /* SPIs */
+ if (hw >= 32 && hw < gic_data.irq_nr) {
+- irq_set_chip_and_handler(irq, &gic_chip,
+- handle_fasteoi_irq);
++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
++ handle_fasteoi_irq, NULL, NULL);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+- irq_set_chip_data(irq, d->host_data);
++ /* LPIs */
++ if (hw >= 8192 && hw < GIC_ID_NR) {
++ if (!gic_dist_supports_lpis())
++ return -EPERM;
++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
++ handle_fasteoi_irq, NULL, NULL);
++ set_irq_flags(irq, IRQF_VALID);
++ }
++
+ return 0;
+ }
+
+@@ -625,6 +660,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
+ case 1: /* PPI */
+ *out_hwirq = intspec[1] + 16;
+ break;
++ case GIC_IRQ_TYPE_LPI: /* LPI */
++ *out_hwirq = intspec[1];
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -633,17 +671,50 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
+ return 0;
+ }
+
++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
++ unsigned int nr_irqs, void *arg)
++{
++ int i, ret;
++ irq_hw_number_t hwirq;
++ unsigned int type = IRQ_TYPE_NONE;
++ struct of_phandle_args *irq_data = arg;
++
++ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
++ irq_data->args_count, &hwirq, &type);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < nr_irqs; i++)
++ gic_irq_domain_map(domain, virq + i, hwirq + i);
++
++ return 0;
++}
++
++static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
++ unsigned int nr_irqs)
++{
++ int i;
++
++ for (i = 0; i < nr_irqs; i++) {
++ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
++ irq_set_handler(virq + i, NULL);
++ irq_domain_reset_irq_data(d);
++ }
++}
++
+ static const struct irq_domain_ops gic_irq_domain_ops = {
+- .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
++ .alloc = gic_irq_domain_alloc,
++ .free = gic_irq_domain_free,
+ };
+
+ static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+ {
+ void __iomem *dist_base;
+- void __iomem **redist_base;
++ struct redist_region *rdist_regs;
+ u64 redist_stride;
+- u32 redist_regions;
++ u32 nr_redist_regions;
++ u32 typer;
+ u32 reg;
+ int gic_irqs;
+ int err;
+@@ -664,54 +735,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
+ goto out_unmap_dist;
+ }
+
+- if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
+- redist_regions = 1;
++ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
++ nr_redist_regions = 1;
+
+- redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
+- if (!redist_base) {
++ rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
++ if (!rdist_regs) {
+ err = -ENOMEM;
+ goto out_unmap_dist;
+ }
+
+- for (i = 0; i < redist_regions; i++) {
+- redist_base[i] = of_iomap(node, 1 + i);
+- if (!redist_base[i]) {
++ for (i = 0; i < nr_redist_regions; i++) {
++ struct resource res;
++ int ret;
++
++ ret = of_address_to_resource(node, 1 + i, &res);
++ rdist_regs[i].redist_base = of_iomap(node, 1 + i);
++ if (ret || !rdist_regs[i].redist_base) {
+ pr_err("%s: couldn't map region %d\n",
+ node->full_name, i);
+ err = -ENODEV;
+ goto out_unmap_rdist;
+ }
++ rdist_regs[i].phys_base = res.start;
+ }
+
+ if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+ redist_stride = 0;
+
+ gic_data.dist_base = dist_base;
+- gic_data.redist_base = redist_base;
+- gic_data.redist_regions = redist_regions;
++ gic_data.redist_regions = rdist_regs;
++ gic_data.nr_redist_regions = nr_redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+ */
+- gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
+- gic_irqs = (gic_irqs + 1) * 32;
++ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
++ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
++ gic_irqs = GICD_TYPER_IRQS(typer);
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic_data.irq_nr = gic_irqs;
+
+ gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
+ &gic_data);
+- gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
++ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+
+- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
++ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ set_handle_irq(gic_handle_irq);
+
++ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
++ its_init(node, &gic_data.rdists, gic_data.domain);
++
+ gic_smp_init();
+ gic_dist_init();
+ gic_cpu_init();
+@@ -722,12 +802,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
+ out_free:
+ if (gic_data.domain)
+ irq_domain_remove(gic_data.domain);
+- free_percpu(gic_data.rdist);
++ free_percpu(gic_data.rdists.rdist);
+ out_unmap_rdist:
+- for (i = 0; i < redist_regions; i++)
+- if (redist_base[i])
+- iounmap(redist_base[i]);
+- kfree(redist_base);
++ for (i = 0; i < nr_redist_regions; i++)
++ if (rdist_regs[i].redist_base)
++ iounmap(rdist_regs[i].redist_base);
++ kfree(rdist_regs);
+ out_unmap_dist:
+ iounmap(dist_base);
+ return err;
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 38493ff..ab0b1cb 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -188,12 +188,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ {
+ void __iomem *base = gic_dist_base(d);
+ unsigned int gicirq = gic_irq(d);
++ int ret;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (gicirq < 16)
+ return -EINVAL;
+
+- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
++ /* SPIs have restrictions on the supported types */
++ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
++ type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+@@ -201,11 +204,11 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+
+- gic_configure_irq(gicirq, type, base, NULL);
++ ret = gic_configure_irq(gicirq, type, base, NULL);
+
+ raw_spin_unlock(&irq_controller_lock);
+
+- return 0;
++ return ret;
+ }
+
+ static int gic_retrigger(struct irq_data *d)
+@@ -788,17 +791,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ {
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+- irq_set_chip_and_handler(irq, &gic_chip,
+- handle_percpu_devid_irq);
++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
++ handle_percpu_devid_irq, NULL, NULL);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ } else {
+- irq_set_chip_and_handler(irq, &gic_chip,
+- handle_fasteoi_irq);
++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
++ handle_fasteoi_irq, NULL, NULL);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ gic_routable_irq_domain_ops->map(d, irq, hw);
+ }
+- irq_set_chip_data(irq, d->host_data);
+ return 0;
+ }
+
+@@ -858,6 +860,31 @@ static struct notifier_block gic_cpu_notifier = {
+ };
+ #endif
+
++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
++ unsigned int nr_irqs, void *arg)
++{
++ int i, ret;
++ irq_hw_number_t hwirq;
++ unsigned int type = IRQ_TYPE_NONE;
++ struct of_phandle_args *irq_data = arg;
++
++ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
++ irq_data->args_count, &hwirq, &type);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < nr_irqs; i++)
++ gic_irq_domain_map(domain, virq + i, hwirq + i);
++
++ return 0;
++}
++
++static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
++ .xlate = gic_irq_domain_xlate,
++ .alloc = gic_irq_domain_alloc,
++ .free = irq_domain_free_irqs_top,
++};
++
+ static const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .unmap = gic_irq_domain_unmap,
+@@ -948,18 +975,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ gic_cpu_map[i] = 0xff;
+
+ /*
+- * For primary GICs, skip over SGIs.
+- * For secondary GICs, skip over PPIs, too.
+- */
+- if (gic_nr == 0 && (irq_start & 31) > 0) {
+- hwirq_base = 16;
+- if (irq_start != -1)
+- irq_start = (irq_start & ~31) + 16;
+- } else {
+- hwirq_base = 32;
+- }
+-
+- /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources.
+ */
+@@ -969,10 +984,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
+- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
++ if (node) { /* DT case */
++ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;
++
++ if (!of_property_read_u32(node, "arm,routable-irqs",
++ &nr_routable_irqs)) {
++ ops = &gic_irq_domain_ops;
++ gic_irqs = nr_routable_irqs;
++ }
++
++ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic);
++ } else { /* Non-DT case */
++ /*
++ * For primary GICs, skip over SGIs.
++ * For secondary GICs, skip over PPIs, too.
++ */
++ if (gic_nr == 0 && (irq_start & 31) > 0) {
++ hwirq_base = 16;
++ if (irq_start != -1)
++ irq_start = (irq_start & ~31) + 16;
++ } else {
++ hwirq_base = 32;
++ }
++
++ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+
+- if (of_property_read_u32(node, "arm,routable-irqs",
+- &nr_routable_irqs)) {
+ irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+ numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+@@ -983,10 +1019,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+
+ gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+ hwirq_base, &gic_irq_domain_ops, gic);
+- } else {
+- gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
+- &gic_irq_domain_ops,
+- gic);
+ }
+
+ if (WARN_ON(!gic->domain))
+@@ -1037,6 +1069,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
+ irq = irq_of_parse_and_map(node, 0);
+ gic_cascade_irq(gic_cnt, irq);
+ }
++
++ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
++ gicv2m_of_init(node, gic_data[gic_cnt].domain);
++
+ gic_cnt++;
+ return 0;
+ }
+diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
+index 9c8f833..5507a0c 100644
+--- a/drivers/irqchip/irq-hip04.c
++++ b/drivers/irqchip/irq-hip04.c
+@@ -120,21 +120,24 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ void __iomem *base = hip04_dist_base(d);
+ unsigned int irq = hip04_irq(d);
++ int ret;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (irq < 16)
+ return -EINVAL;
+
+- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
++ /* SPIs have restrictions on the supported types */
++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
++ type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+
+- gic_configure_irq(irq, type, base, NULL);
++ ret = gic_configure_irq(irq, type, base, NULL);
+
+ raw_spin_unlock(&irq_controller_lock);
+
+- return 0;
++ return ret;
+ }
+
+ #ifdef CONFIG_SMP
+diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
+index eb9b59e..6b2b582 100644
+--- a/drivers/irqchip/irq-sunxi-nmi.c
++++ b/drivers/irqchip/irq-sunxi-nmi.c
+@@ -50,12 +50,12 @@ static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
+ static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
+ u32 val)
+ {
+- irq_reg_writel(val, gc->reg_base + off);
++ irq_reg_writel(gc, val, off);
+ }
+
+ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
+ {
+- return irq_reg_readl(gc->reg_base + off);
++ return irq_reg_readl(gc, off);
+ }
+
+ static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
+diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
+index 7c44c99..accc200 100644
+--- a/drivers/irqchip/irq-tb10x.c
++++ b/drivers/irqchip/irq-tb10x.c
+@@ -43,12 +43,12 @@
+ static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
+ u32 val)
+ {
+- irq_reg_writel(val, gc->reg_base + reg);
++ irq_reg_writel(gc, val, reg);
+ }
+
+ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
+ {
+- return irq_reg_readl(gc->reg_base + reg);
++ return irq_reg_readl(gc, reg);
+ }
+
+ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
+diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
+index 6d91c27..d6af99f 100644
+--- a/drivers/memory/Kconfig
++++ b/drivers/memory/Kconfig
+@@ -83,6 +83,6 @@ config FSL_CORENET_CF
+
+ config FSL_IFC
+ bool
+- depends on FSL_SOC
++ depends on FSL_SOC || ARCH_LAYERSCAPE
+
+ endif
+diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
+index 3d5d792..1b182b1 100644
+--- a/drivers/memory/fsl_ifc.c
++++ b/drivers/memory/fsl_ifc.c
+@@ -22,6 +22,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -30,7 +31,9 @@
+ #include
+ #include
+ #include
+-#include
++#include
++#include
++#include
+
+ struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+ EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
+@@ -58,11 +61,11 @@ int fsl_ifc_find(phys_addr_t addr_base)
+ {
+ int i = 0;
+
+- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
++ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs)
+ return -ENODEV;
+
+- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+- u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
++ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
++ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
+ if (cspr & CSPR_V && (cspr & CSPR_BA) ==
+ convert_ifc_address(addr_base))
+ return i;
+@@ -74,21 +77,21 @@ EXPORT_SYMBOL(fsl_ifc_find);
+
+ static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
+ {
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
+
+ /*
+ * Clear all the common status and event registers
+ */
+- if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
++ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
++ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
+
+ /* enable all error and events */
+- out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
++ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en);
+
+ /* enable all error and event interrupts */
+- out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
+- out_be32(&ifc->cm_erattr0, 0x0);
+- out_be32(&ifc->cm_erattr1, 0x0);
++ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en);
++ ifc_out32(0x0, &ifc->cm_erattr0);
++ ifc_out32(0x0, &ifc->cm_erattr1);
+
+ return 0;
+ }
+@@ -103,7 +106,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+ irq_dispose_mapping(ctrl->nand_irq);
+ irq_dispose_mapping(ctrl->irq);
+
+- iounmap(ctrl->regs);
++ iounmap(ctrl->gregs);
+
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ctrl);
+@@ -121,15 +124,15 @@ static DEFINE_SPINLOCK(nand_irq_lock);
+
+ static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
+ {
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ unsigned long flags;
+ u32 stat;
+
+ spin_lock_irqsave(&nand_irq_lock, flags);
+
+- stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
++ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
+ if (stat) {
+- out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
++ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat);
+ ctrl->nand_stat = stat;
+ wake_up(&ctrl->nand_wait);
+ }
+@@ -156,21 +159,21 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
+ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+ {
+ struct fsl_ifc_ctrl *ctrl = data;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
+ u32 err_axiid, err_srcid, status, cs_err, err_addr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* read for chip select error */
+- cs_err = in_be32(&ifc->cm_evter_stat);
++ cs_err = ifc_in32(&ifc->cm_evter_stat);
+ if (cs_err) {
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
+ "any memory bank 0x%08X\n", cs_err);
+ /* clear the chip select error */
+- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
++ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
+
+ /* read error attribute registers print the error information */
+- status = in_be32(&ifc->cm_erattr0);
+- err_addr = in_be32(&ifc->cm_erattr1);
++ status = ifc_in32(&ifc->cm_erattr0);
++ err_addr = ifc_in32(&ifc->cm_erattr1);
+
+ if (status & IFC_CM_ERATTR0_ERTYP_READ)
+ dev_err(ctrl->dev, "Read transaction error"
+@@ -213,7 +216,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+ {
+ int ret = 0;
+-
++ int version, banks;
++ void __iomem *addr;
+
+ dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+
+@@ -224,16 +228,41 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+ dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
+
+ /* IOMAP the entire IFC region */
+- fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+- if (!fsl_ifc_ctrl_dev->regs) {
++ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
++ if (!fsl_ifc_ctrl_dev->gregs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
++ if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
++ fsl_ifc_ctrl_dev->little_endian = true;
++ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
++ } else {
++ fsl_ifc_ctrl_dev->little_endian = false;
++ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
++ }
++
++ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) &
++ FSL_IFC_VERSION_MASK;
++
++ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
++ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
++ version >> 24, (version >> 16) & 0xf, banks);
++
++ fsl_ifc_ctrl_dev->version = version;
++ fsl_ifc_ctrl_dev->banks = banks;
++
++ addr = fsl_ifc_ctrl_dev->gregs;
++ if (version >= FSL_IFC_VERSION_2_0_0)
++ addr += PGOFFSET_64K;
++ else
++ addr += PGOFFSET_4K;
++ fsl_ifc_ctrl_dev->rregs = addr;
++
+ /* get the Controller level irq */
+ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
++ if (fsl_ifc_ctrl_dev->irq == 0) {
+ dev_err(&dev->dev, "failed to get irq resource "
+ "for IFC\n");
+ ret = -ENODEV;
+diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
+index 9e21e4f..8f43ab8 100644
+--- a/drivers/mfd/vexpress-sysreg.c
++++ b/drivers/mfd/vexpress-sysreg.c
+@@ -223,7 +223,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
+ vexpress_config_set_master(vexpress_sysreg_get_master());
+
+ /* Confirm board type against DT property, if available */
+- if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) {
++ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER);
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
+
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 10ecc0a..d356dbc 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -2402,6 +2402,10 @@ static const struct mmc_fixup blk_fixups[] =
+ *
+ * N.B. This doesn't affect SD cards.
+ */
++ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_BLK_NO_CMD23),
++ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 1386065..b8c9b73 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -66,7 +66,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ has the effect of scrambling the addresses and formats of data
+ accessed in sizes other than the datum size.
+
+- This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
++ This is the case for the Nintendo Wii SDHCI.
+
+ config MMC_SDHCI_PCI
+ tristate "SDHCI support on PCI bus"
+@@ -130,8 +130,10 @@ config MMC_SDHCI_OF_ARASAN
+ config MMC_SDHCI_OF_ESDHC
+ tristate "SDHCI OF support for the Freescale eSDHC controller"
+ depends on MMC_SDHCI_PLTFM
+- depends on PPC_OF
+- select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
++ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
++ select MMC_SDHCI_IO_ACCESSORS
++ select FSL_SOC_DRIVERS
++ select FSL_GUTS
+ help
+ This selects the Freescale eSDHC controller support.
+
+@@ -142,7 +144,7 @@ config MMC_SDHCI_OF_ESDHC
+ config MMC_SDHCI_OF_HLWD
+ tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
+- depends on PPC_OF
++ depends on PPC
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index a870c42..f2baede 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -21,16 +21,23 @@
+ #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+ SDHCI_QUIRK_NO_BUSY_IRQ | \
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
+- SDHCI_QUIRK_PIO_NEEDS_DELAY)
++ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
++ SDHCI_QUIRK_NO_HISPD_BIT)
++
++#define ESDHC_PROCTL 0x28
+
+ #define ESDHC_SYSTEM_CONTROL 0x2c
+ #define ESDHC_CLOCK_MASK 0x0000fff0
+ #define ESDHC_PREDIV_SHIFT 8
+ #define ESDHC_DIVIDER_SHIFT 4
++#define ESDHC_CLOCK_CRDEN 0x00000008
+ #define ESDHC_CLOCK_PEREN 0x00000004
+ #define ESDHC_CLOCK_HCKEN 0x00000002
+ #define ESDHC_CLOCK_IPGEN 0x00000001
+
++#define ESDHC_PRESENT_STATE 0x24
++#define ESDHC_CLOCK_STABLE 0x00000008
++
+ /* pltfm-specific */
+ #define ESDHC_HOST_CONTROL_LE 0x20
+
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 8872c85..4a4a693 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -18,128 +18,334 @@
+ #include
+ #include
+ #include
++#include
++#include
+ #include
+ #include "sdhci-pltfm.h"
+ #include "sdhci-esdhc.h"
+
+ #define VENDOR_V_22 0x12
+ #define VENDOR_V_23 0x13
+-static u32 esdhc_readl(struct sdhci_host *host, int reg)
++
++struct sdhci_esdhc {
++ u8 vendor_ver;
++ u8 spec_ver;
++ u32 soc_ver;
++ u8 soc_rev;
++};
++
++/**
++ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
++ * to make it compatible with SD spec.
++ *
++ * @host: pointer to sdhci_host
++ * @spec_reg: SD spec register address
++ * @value: 32bit eSDHC register value on spec_reg address
++ *
++ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
++ * registers are 32 bits. There are differences in register size, register
++ * address, register function, bit position and function between eSDHC spec
++ * and SD spec.
++ *
++ * Return a fixed up register value
++ */
++static u32 esdhc_readl_fixup(struct sdhci_host *host,
++ int spec_reg, u32 value)
+ {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ u32 ret;
+
+- ret = in_be32(host->ioaddr + reg);
+ /*
+ * The bit of ADMA flag in eSDHC is not compatible with standard
+ * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
+ * supported by eSDHC.
+ * And for many FSL eSDHC controller, the reset value of field
+- * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA,
++ * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
+ * only these vendor version is greater than 2.2/0x12 support ADMA.
+- * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the
+- * the verdor version number, oxFE is SDHCI_HOST_VERSION.
+ */
+- if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
+- u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+- if (tmp > VENDOR_V_22)
+- ret |= SDHCI_CAN_DO_ADMA2;
++ if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
++ if (esdhc->vendor_ver > VENDOR_V_22) {
++ ret = value | SDHCI_CAN_DO_ADMA2;
++ return ret;
++ }
+ }
+-
++ ret = value;
+ return ret;
+ }
+
+-static u16 esdhc_readw(struct sdhci_host *host, int reg)
++static u16 esdhc_readw_fixup(struct sdhci_host *host,
++ int spec_reg, u32 value)
+ {
+ u16 ret;
+- int base = reg & ~0x3;
+- int shift = (reg & 0x2) * 8;
++ int shift = (spec_reg & 0x2) * 8;
+
+- if (unlikely(reg == SDHCI_HOST_VERSION))
+- ret = in_be32(host->ioaddr + base) & 0xffff;
++ if (spec_reg == SDHCI_HOST_VERSION)
++ ret = value & 0xffff;
+ else
+- ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;
++ ret = (value >> shift) & 0xffff;
+ return ret;
+ }
+
+-static u8 esdhc_readb(struct sdhci_host *host, int reg)
++static u8 esdhc_readb_fixup(struct sdhci_host *host,
++ int spec_reg, u32 value)
+ {
+- int base = reg & ~0x3;
+- int shift = (reg & 0x3) * 8;
+- u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
++ u8 ret;
++ u8 dma_bits;
++ int shift = (spec_reg & 0x3) * 8;
++
++ ret = (value >> shift) & 0xff;
+
+ /*
+ * "DMA select" locates at offset 0x28 in SD specification, but on
+ * P5020 or P3041, it locates at 0x29.
+ */
+- if (reg == SDHCI_HOST_CONTROL) {
+- u32 dma_bits;
+-
+- dma_bits = in_be32(host->ioaddr + reg);
++ if (spec_reg == SDHCI_HOST_CONTROL) {
+ /* DMA select is 22,23 bits in Protocol Control Register */
+- dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
+-
++ dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
+ /* fixup the result */
+ ret &= ~SDHCI_CTRL_DMA_MASK;
+ ret |= dma_bits;
+ }
+-
+ return ret;
+ }
+
+-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
++/**
++ * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
++ * written into eSDHC register.
++ *
++ * @host: pointer to sdhci_host
++ * @spec_reg: SD spec register address
++ * @value: 8/16/32bit SD spec register value that would be written
++ * @old_value: 32bit eSDHC register value on spec_reg address
++ *
++ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
++ * registers are 32 bits. There are differences in register size, register
++ * address, register function, bit position and function between eSDHC spec
++ * and SD spec.
++ *
++ * Return a fixed up register value
++ */
++static u32 esdhc_writel_fixup(struct sdhci_host *host,
++ int spec_reg, u32 value, u32 old_value)
+ {
++ u32 ret;
++
+ /*
+- * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
+- * when SYSCTL[RSTD]) is set for some special operations.
+- * No any impact other operation.
++ * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
++ * when SYSCTL[RSTD] is set for some special operations.
++ * No any impact on other operation.
+ */
+- if (reg == SDHCI_INT_ENABLE)
+- val |= SDHCI_INT_BLK_GAP;
+- sdhci_be32bs_writel(host, val, reg);
++ if (spec_reg == SDHCI_INT_ENABLE)
++ ret = value | SDHCI_INT_BLK_GAP;
++ else
++ ret = value;
++
++ return ret;
+ }
+
+-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
++static u32 esdhc_writew_fixup(struct sdhci_host *host,
++ int spec_reg, u16 value, u32 old_value)
+ {
+- if (reg == SDHCI_BLOCK_SIZE) {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ int shift = (spec_reg & 0x2) * 8;
++ u32 ret;
++
++ switch (spec_reg) {
++ case SDHCI_TRANSFER_MODE:
++ /*
++ * Postpone this write, we must do it together with a
++ * command write that is down below. Return old value.
++ */
++ pltfm_host->xfer_mode_shadow = value;
++ return old_value;
++ case SDHCI_COMMAND:
++ ret = (value << 16) | pltfm_host->xfer_mode_shadow;
++ return ret;
++ }
++
++ ret = old_value & (~(0xffff << shift));
++ ret |= (value << shift);
++
++ if (spec_reg == SDHCI_BLOCK_SIZE) {
+ /*
+ * Two last DMA bits are reserved, and first one is used for
+ * non-standard blksz of 4096 bytes that we don't support
+ * yet. So clear the DMA boundary bits.
+ */
+- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
++ ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
+ }
+- sdhci_be32bs_writew(host, val, reg);
++ return ret;
+ }
+
+-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
++static u32 esdhc_writeb_fixup(struct sdhci_host *host,
++ int spec_reg, u8 value, u32 old_value)
+ {
++ u32 ret;
++ u32 dma_bits;
++ u8 tmp;
++ int shift = (spec_reg & 0x3) * 8;
++
++ /*
++ * eSDHC doesn't have a standard power control register, so we do
++ * nothing here to avoid incorrect operation.
++ */
++ if (spec_reg == SDHCI_POWER_CONTROL)
++ return old_value;
+ /*
+ * "DMA select" location is offset 0x28 in SD specification, but on
+ * P5020 or P3041, it's located at 0x29.
+ */
+- if (reg == SDHCI_HOST_CONTROL) {
+- u32 dma_bits;
+-
++ if (spec_reg == SDHCI_HOST_CONTROL) {
+ /*
+ * If host control register is not standard, exit
+ * this function
+ */
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
+- return;
++ return old_value;
+
+ /* DMA select is 22,23 bits in Protocol Control Register */
+- dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
+- clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
+- dma_bits);
+- val &= ~SDHCI_CTRL_DMA_MASK;
+- val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
++ dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
++ ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
++ tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
++ (old_value & SDHCI_CTRL_DMA_MASK);
++ ret = (ret & (~0xff)) | tmp;
++
++ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
++ ret &= ~ESDHC_HOST_CONTROL_RES;
++ return ret;
+ }
+
+- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
+- if (reg == SDHCI_HOST_CONTROL)
+- val &= ~ESDHC_HOST_CONTROL_RES;
+- sdhci_be32bs_writeb(host, val, reg);
++ ret = (old_value & (~(0xff << shift))) | (value << shift);
++ return ret;
++}
++
++static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
++{
++ u32 ret;
++ u32 value;
++
++ value = ioread32be(host->ioaddr + reg);
++ ret = esdhc_readl_fixup(host, reg, value);
++
++ return ret;
++}
++
++static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
++{
++ u32 ret;
++ u32 value;
++
++ value = ioread32(host->ioaddr + reg);
++ ret = esdhc_readl_fixup(host, reg, value);
++
++ return ret;
++}
++
++static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
++{
++ u16 ret;
++ u32 value;
++ int base = reg & ~0x3;
++
++ value = ioread32be(host->ioaddr + base);
++ ret = esdhc_readw_fixup(host, reg, value);
++ return ret;
++}
++
++static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
++{
++ u16 ret;
++ u32 value;
++ int base = reg & ~0x3;
++
++ value = ioread32(host->ioaddr + base);
++ ret = esdhc_readw_fixup(host, reg, value);
++ return ret;
++}
++
++static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
++{
++ u8 ret;
++ u32 value;
++ int base = reg & ~0x3;
++
++ value = ioread32be(host->ioaddr + base);
++ ret = esdhc_readb_fixup(host, reg, value);
++ return ret;
++}
++
++static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
++{
++ u8 ret;
++ u32 value;
++ int base = reg & ~0x3;
++
++ value = ioread32(host->ioaddr + base);
++ ret = esdhc_readb_fixup(host, reg, value);
++ return ret;
++}
++
++static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
++{
++ u32 value;
++
++ value = esdhc_writel_fixup(host, reg, val, 0);
++ iowrite32be(value, host->ioaddr + reg);
++}
++
++static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
++{
++ u32 value;
++
++ value = esdhc_writel_fixup(host, reg, val, 0);
++ iowrite32(value, host->ioaddr + reg);
++}
++
++static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
++{
++ int base = reg & ~0x3;
++ u32 value;
++ u32 ret;
++
++ value = ioread32be(host->ioaddr + base);
++ ret = esdhc_writew_fixup(host, reg, val, value);
++ if (reg != SDHCI_TRANSFER_MODE)
++ iowrite32be(ret, host->ioaddr + base);
++}
++
++static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
++{
++ int base = reg & ~0x3;
++ u32 value;
++ u32 ret;
++
++ value = ioread32(host->ioaddr + base);
++ ret = esdhc_writew_fixup(host, reg, val, value);
++ if (reg != SDHCI_TRANSFER_MODE)
++ iowrite32(ret, host->ioaddr + base);
++}
++
++static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
++{
++ int base = reg & ~0x3;
++ u32 value;
++ u32 ret;
++
++ value = ioread32be(host->ioaddr + base);
++ ret = esdhc_writeb_fixup(host, reg, val, value);
++ iowrite32be(ret, host->ioaddr + base);
++}
++
++static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
++{
++ int base = reg & ~0x3;
++ u32 value;
++ u32 ret;
++
++ value = ioread32(host->ioaddr + base);
++ ret = esdhc_writeb_fixup(host, reg, val, value);
++ iowrite32(ret, host->ioaddr + base);
+ }
+
+ /*
+@@ -149,37 +355,116 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+ * For Continue, apply soft reset for data(SYSCTL[RSTD]);
+ * and re-issue the entire read transaction from beginning.
+ */
+-static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
++static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
+ {
+- u32 tmp;
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ bool applicable;
+ dma_addr_t dmastart;
+ dma_addr_t dmanow;
+
+- tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+-
+ applicable = (intmask & SDHCI_INT_DATA_END) &&
+- (intmask & SDHCI_INT_BLK_GAP) &&
+- (tmp == VENDOR_V_23);
+- if (!applicable)
++ (intmask & SDHCI_INT_BLK_GAP) &&
++ (esdhc->vendor_ver == VENDOR_V_23);
++ if (applicable) {
++
++ sdhci_reset(host, SDHCI_RESET_DATA);
++ host->data->error = 0;
++ dmastart = sg_dma_address(host->data->sg);
++ dmanow = dmastart + host->data->bytes_xfered;
++ /*
++ * Force update to the next DMA block boundary.
++ */
++ dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
++ SDHCI_DEFAULT_BOUNDARY_SIZE;
++ host->data->bytes_xfered = dmanow - dmastart;
++ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
++
+ return;
++ }
+
+- host->data->error = 0;
+- dmastart = sg_dma_address(host->data->sg);
+- dmanow = dmastart + host->data->bytes_xfered;
+ /*
+- * Force update to the next DMA block boundary.
++ * Check for A-004388: eSDHC DMA might not stop if error
++ * occurs on system transaction
++ * Impact list:
++ * T4240-4160-R1.0 B4860-4420-R1.0-R2.0 P1010-1014-R1.0
++ * P3041-R1.0-R2.0-R1.1 P2041-2040-R1.0-R1.1-R2.0
++ * P5020-5010-R2.0-R1.0 P5040-5021-R2.0-R2.1
+ */
+- dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
+- SDHCI_DEFAULT_BOUNDARY_SIZE;
+- host->data->bytes_xfered = dmanow - dmastart;
+- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
++ if (!(((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) ||
++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) ||
++ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) ||
++ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21))))
++ return;
++
++ sdhci_reset(host, SDHCI_RESET_DATA);
++
++ if (host->flags & SDHCI_USE_ADMA) {
++ u32 mod, i, offset;
++ u8 *desc;
++ dma_addr_t addr;
++ struct scatterlist *sg;
++ __le32 *dataddr;
++ __le32 *cmdlen;
++
++ /*
++ * If block count was enabled, no data is corrupted in
++ * the case of a read transfer
++ */
++ mod = sdhci_readl(host, SDHCI_TRANSFER_MODE);
++ if ((mod & SDHCI_TRNS_BLK_CNT_EN) &&
++ (host->data->flags & MMC_DATA_READ))
++ host->data->error = 0;
++
++ BUG_ON(!host->data);
++ desc = host->adma_table;
++ for_each_sg(host->data->sg, sg, host->sg_count, i) {
++ addr = sg_dma_address(sg);
++ offset = (4 - (addr & 0x3)) & 0x3;
++ if (offset)
++ desc += 8;
++ desc += 8;
++ }
++
++ /*
++ * Add an extra zero descriptor next to the
++ * terminating descriptor.
++ */
++ desc += 8;
++ WARN_ON((desc - (u8 *)(host->adma_table)) > (128 * 2 + 1) * 4);
++
++ dataddr = (__le32 __force *)(desc + 4);
++ cmdlen = (__le32 __force *)desc;
++
++ cmdlen[0] = cpu_to_le32(0);
++ dataddr[0] = cpu_to_le32(0);
++ }
++
++ if ((host->flags & SDHCI_USE_SDMA) &&
++ (host->data->flags & MMC_DATA_READ))
++ host->data->error = 0;
++
++ return;
+ }
+
+ static int esdhc_of_enable_dma(struct sdhci_host *host)
+ {
+- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
++ u32 value;
++
++ value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
++ value |= ESDHC_DMA_SNOOP;
++ sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
+ return 0;
+ }
+
+@@ -199,15 +484,22 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
+
+ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+- int pre_div = 2;
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = pltfm_host->priv;
++ int pre_div = 1;
+ int div = 1;
+ u32 temp;
++ u32 timeout;
+
+ host->mmc->actual_clock = 0;
+
+ if (clock == 0)
+ return;
+
++ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
++ if (esdhc->vendor_ver < VENDOR_V_23)
++ pre_div = 2;
++
+ /* Workaround to reduce the clock frequency for p1010 esdhc */
+ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
+ if (clock > 20000000)
+@@ -218,7 +510,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+- | ESDHC_CLOCK_MASK);
++ | ESDHC_CLOCK_CRDEN | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+@@ -229,7 +521,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+-
++ host->mmc->actual_clock = host->max_clk / pre_div / div;
+ pre_div >>= 1;
+ div--;
+
+@@ -238,70 +530,117 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+- mdelay(1);
+-}
+
+-static void esdhc_of_platform_init(struct sdhci_host *host)
+-{
+- u32 vvn;
+-
+- vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+- vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+- if (vvn == VENDOR_V_22)
+- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
++ /* Wait max 20 ms */
++ timeout = 20;
++ while (!(sdhci_readl(host, ESDHC_PRESENT_STATE) & ESDHC_CLOCK_STABLE)) {
++ if (timeout == 0) {
++ pr_err("%s: Internal clock never stabilised.\n",
++ mmc_hostname(host->mmc));
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
+
+- if (vvn > VENDOR_V_22)
+- host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
++ temp |= ESDHC_CLOCK_CRDEN;
++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ }
+
+ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u32 ctrl;
+
++ ctrl = sdhci_readl(host, ESDHC_PROCTL);
++ ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+- ctrl = ESDHC_CTRL_8BITBUS;
++ ctrl |= ESDHC_CTRL_8BITBUS;
+ break;
+
+ case MMC_BUS_WIDTH_4:
+- ctrl = ESDHC_CTRL_4BITBUS;
++ ctrl |= ESDHC_CTRL_4BITBUS;
+ break;
+
+ default:
+- ctrl = 0;
+ break;
+ }
+
+- clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
+- ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
++ sdhci_writel(host, ctrl, ESDHC_PROCTL);
+ }
+
+-static const struct sdhci_ops sdhci_esdhc_ops = {
+- .read_l = esdhc_readl,
+- .read_w = esdhc_readw,
+- .read_b = esdhc_readb,
+- .write_l = esdhc_writel,
+- .write_w = esdhc_writew,
+- .write_b = esdhc_writeb,
+- .set_clock = esdhc_of_set_clock,
+- .enable_dma = esdhc_of_enable_dma,
+- .get_max_clock = esdhc_of_get_max_clock,
+- .get_min_clock = esdhc_of_get_min_clock,
+- .platform_init = esdhc_of_platform_init,
+- .adma_workaround = esdhci_of_adma_workaround,
+- .set_bus_width = esdhc_pltfm_set_bus_width,
+- .reset = sdhci_reset,
+- .set_uhs_signaling = sdhci_set_uhs_signaling,
+-};
++/*
++ * A-003980: SDHC: Glitch is generated on the card clock with software reset
++ * or clock divider change
++ * Workaround:
++ * A simple workaround is to disable the SD card clock before the software
++ * reset, and enable it when the module resumes normal operation. The Host
++ * and the SD card are in a master-slave relationship. The Host provides
++ * clock and control transfer across the interface. Therefore, any existing
++ * operation is discarded when the Host controller is reset.
++ */
++static int esdhc_of_reset_workaround(struct sdhci_host *host, u8 mask)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = pltfm_host->priv;
++ bool disable_clk_before_reset = false;
++ u32 temp;
+
+-#ifdef CONFIG_PM
++ /*
++ * Check for A-003980
++ * Impact list:
++ * T4240-4160-R1.0-R2.0 B4860-4420-R1.0-R2.0 P5040-5021-R1.0-R2.0-R2.1
++ * P5020-5010-R1.0-R2.0 P3041-R1.0-R1.1-R2.0 P2041-2040-R1.0-R1.1-R2.0
++ * P1010-1014-R1.0
++ */
++ if (((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x20)) ||
++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x20)) ||
++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) ||
++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) ||
++ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) ||
++ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)) ||
++ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) ||
++ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) ||
++ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10)))
++ disable_clk_before_reset = true;
++
++ if (disable_clk_before_reset && (mask & SDHCI_RESET_ALL)) {
++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
++ temp &= ~ESDHC_CLOCK_CRDEN;
++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
++ sdhci_reset(host, mask);
++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
++ temp |= ESDHC_CLOCK_CRDEN;
++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
++ return 1;
++ }
++ return 0;
++}
++
++static void esdhc_reset(struct sdhci_host *host, u8 mask)
++{
++ if (!esdhc_of_reset_workaround(host, mask))
++ sdhci_reset(host, mask);
+
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++}
++
++#ifdef CONFIG_PM
+ static u32 esdhc_proctl;
+ static int esdhc_of_suspend(struct device *dev)
+ {
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
++ esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
+
+ return sdhci_suspend_host(host);
+ }
+@@ -311,11 +650,8 @@ static int esdhc_of_resume(struct device *dev)
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret = sdhci_resume_host(host);
+
+- if (ret == 0) {
+- /* Isn't this already done by sdhci_resume_host() ? --rmk */
+- esdhc_of_enable_dma(host);
+- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+- }
++ if (ret == 0)
++ sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+
+ return ret;
+ }
+@@ -329,30 +665,120 @@ static const struct dev_pm_ops esdhc_pmops = {
+ #define ESDHC_PMOPS NULL
+ #endif
+
+-static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
+- /*
+- * card detection could be handled via GPIO
+- * eSDHC cannot support End Attribute in NOP ADMA descriptor
+- */
++static const struct sdhci_ops sdhci_esdhc_be_ops = {
++ .read_l = esdhc_be_readl,
++ .read_w = esdhc_be_readw,
++ .read_b = esdhc_be_readb,
++ .write_l = esdhc_be_writel,
++ .write_w = esdhc_be_writew,
++ .write_b = esdhc_be_writeb,
++ .set_clock = esdhc_of_set_clock,
++ .enable_dma = esdhc_of_enable_dma,
++ .get_max_clock = esdhc_of_get_max_clock,
++ .get_min_clock = esdhc_of_get_min_clock,
++ .adma_workaround = esdhc_of_adma_workaround,
++ .set_bus_width = esdhc_pltfm_set_bus_width,
++ .reset = esdhc_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
++};
++
++static const struct sdhci_ops sdhci_esdhc_le_ops = {
++ .read_l = esdhc_le_readl,
++ .read_w = esdhc_le_readw,
++ .read_b = esdhc_le_readb,
++ .write_l = esdhc_le_writel,
++ .write_w = esdhc_le_writew,
++ .write_b = esdhc_le_writeb,
++ .set_clock = esdhc_of_set_clock,
++ .enable_dma = esdhc_of_enable_dma,
++ .get_max_clock = esdhc_of_get_max_clock,
++ .get_min_clock = esdhc_of_get_min_clock,
++ .adma_workaround = esdhc_of_adma_workaround,
++ .set_bus_width = esdhc_pltfm_set_bus_width,
++ .reset = esdhc_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
++};
++
++static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+ | SDHCI_QUIRK_NO_CARD_NO_RESET
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+- .ops = &sdhci_esdhc_ops,
++ .ops = &sdhci_esdhc_be_ops,
+ };
+
++static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
++ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
++ | SDHCI_QUIRK_NO_CARD_NO_RESET
++ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++ .ops = &sdhci_esdhc_le_ops,
++};
++
++static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
++{
++ struct sdhci_pltfm_host *pltfm_host;
++ struct sdhci_esdhc *esdhc;
++ u16 host_ver;
++ u32 svr;
++
++ pltfm_host = sdhci_priv(host);
++ esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
++ GFP_KERNEL);
++ pltfm_host->priv = esdhc;
++
++ svr = guts_get_svr();
++ esdhc->soc_ver = SVR_SOC_VER(svr);
++ esdhc->soc_rev = SVR_REV(svr);
++
++ host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
++ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
++ SDHCI_VENDOR_VER_SHIFT;
++ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
++}
++
+ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ {
+ struct sdhci_host *host;
+ struct device_node *np;
++ struct sdhci_pltfm_host *pltfm_host;
++ struct sdhci_esdhc *esdhc;
+ int ret;
+
+- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0);
++ np = pdev->dev.of_node;
++
++ if (of_get_property(np, "little-endian", NULL))
++ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
++ else
++ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
++
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
++ esdhc_init(pdev, host);
++
+ sdhci_get_of_property(pdev);
+
+- np = pdev->dev.of_node;
++ pltfm_host = sdhci_priv(host);
++ esdhc = pltfm_host->priv;
++ if (esdhc->vendor_ver == VENDOR_V_22)
++ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
++
++ if (esdhc->vendor_ver > VENDOR_V_22)
++ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
++
++ if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
++ of_device_is_compatible(np, "fsl,p5020-esdhc") ||
++ of_device_is_compatible(np, "fsl,p4080-esdhc") ||
++ of_device_is_compatible(np, "fsl,p1020-esdhc") ||
++ of_device_is_compatible(np, "fsl,t1040-esdhc") ||
++ of_device_is_compatible(np, "fsl,ls1021a-esdhc") ||
++ of_device_is_compatible(np, "fsl,ls2080a-esdhc") ||
++ of_device_is_compatible(np, "fsl,ls2085a-esdhc") ||
++ of_device_is_compatible(np, "fsl,ls1043a-esdhc"))
++ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
++
++ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
++
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
+ /*
+ * Freescale messed up with P2020 as it has a non-standard
+@@ -362,13 +788,19 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ }
+
+ /* call to generic mmc_of_parse to support additional capabilities */
+- mmc_of_parse(host->mmc);
++ ret = mmc_of_parse(host->mmc);
++ if (ret)
++ goto err;
++
+ mmc_of_parse_voltage(np, &host->ocr_mask);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+- sdhci_pltfm_free(pdev);
++ goto err;
+
++ return 0;
++ err:
++ sdhci_pltfm_free(pdev);
+ return ret;
+ }
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 023c201..8af38a6 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -44,8 +44,6 @@
+
+ #define MAX_TUNING_LOOP 40
+
+-#define ADMA_SIZE ((128 * 2 + 1) * 4)
+-
+ static unsigned int debug_quirks = 0;
+ static unsigned int debug_quirks2;
+
+@@ -119,10 +117,17 @@ static void sdhci_dumpregs(struct sdhci_host *host)
+ pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ sdhci_readw(host, SDHCI_HOST_CONTROL2));
+
+- if (host->flags & SDHCI_USE_ADMA)
+- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+- readl(host->ioaddr + SDHCI_ADMA_ERROR),
+- readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
++ if (host->flags & SDHCI_USE_ADMA) {
++ if (host->flags & SDHCI_USE_64_BIT_DMA)
++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
++ readl(host->ioaddr + SDHCI_ADMA_ERROR),
++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
++ else
++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
++ readl(host->ioaddr + SDHCI_ADMA_ERROR),
++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
++ }
+
+ pr_debug(DRIVER_NAME ": ===========================================\n");
+ }
+@@ -231,6 +236,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
+ SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
+ SDHCI_INT_RESPONSE;
+
++ if (host->flags & SDHCI_AUTO_CMD12)
++ host->ier |= SDHCI_INT_ACMD12ERR;
++
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+@@ -448,18 +456,26 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+ local_irq_restore(*flags);
+ }
+
+-static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
++static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
++ dma_addr_t addr, int len, unsigned cmd)
+ {
+- __le32 *dataddr = (__le32 __force *)(desc + 4);
+- __le16 *cmdlen = (__le16 __force *)desc;
++ struct sdhci_adma2_64_desc *dma_desc = desc;
++
++ /* 32-bit and 64-bit descriptors have these members in same position */
++ dma_desc->cmd = cpu_to_le16(cmd);
++ dma_desc->len = cpu_to_le16(len);
++ dma_desc->addr_lo = cpu_to_le32((u32)addr);
+
+- /* SDHCI specification says ADMA descriptors should be 4 byte
+- * aligned, so using 16 or 32bit operations should be safe. */
++ if (host->flags & SDHCI_USE_64_BIT_DMA)
++ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
++}
+
+- cmdlen[0] = cpu_to_le16(cmd);
+- cmdlen[1] = cpu_to_le16(len);
++static void sdhci_adma_mark_end(void *desc)
++{
++ struct sdhci_adma2_64_desc *dma_desc = desc;
+
+- dataddr[0] = cpu_to_le32(addr);
++ /* 32-bit and 64-bit descriptors have 'cmd' in same position */
++ dma_desc->cmd |= cpu_to_le16(ADMA2_END);
+ }
+
+ static int sdhci_adma_table_pre(struct sdhci_host *host,
+@@ -467,8 +483,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ {
+ int direction;
+
+- u8 *desc;
+- u8 *align;
++ void *desc;
++ void *align;
+ dma_addr_t addr;
+ dma_addr_t align_addr;
+ int len, offset;
+@@ -489,17 +505,17 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ direction = DMA_TO_DEVICE;
+
+ host->align_addr = dma_map_single(mmc_dev(host->mmc),
+- host->align_buffer, 128 * 4, direction);
++ host->align_buffer, host->align_buffer_sz, direction);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+ goto fail;
+- BUG_ON(host->align_addr & 0x3);
++ BUG_ON(host->align_addr & host->align_mask);
+
+ host->sg_count = dma_map_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len, direction);
+ if (host->sg_count == 0)
+ goto unmap_align;
+
+- desc = host->adma_desc;
++ desc = host->adma_table;
+ align = host->align_buffer;
+
+ align_addr = host->align_addr;
+@@ -515,24 +531,27 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ * the (up to three) bytes that screw up the
+ * alignment.
+ */
+- offset = (4 - (addr & 0x3)) & 0x3;
++ offset = (host->align_sz - (addr & host->align_mask)) &
++ host->align_mask;
+ if (offset) {
+ if (data->flags & MMC_DATA_WRITE) {
+ buffer = sdhci_kmap_atomic(sg, &flags);
+- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
++ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
++ (PAGE_SIZE - offset));
+ memcpy(align, buffer, offset);
+ sdhci_kunmap_atomic(buffer, &flags);
+ }
+
+ /* tran, valid */
+- sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
++ sdhci_adma_write_desc(host, desc, align_addr, offset,
++ ADMA2_TRAN_VALID);
+
+ BUG_ON(offset > 65536);
+
+- align += 4;
+- align_addr += 4;
++ align += host->align_sz;
++ align_addr += host->align_sz;
+
+- desc += 8;
++ desc += host->desc_sz;
+
+ addr += offset;
+ len -= offset;
+@@ -541,23 +560,23 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ BUG_ON(len > 65536);
+
+ /* tran, valid */
+- sdhci_set_adma_desc(desc, addr, len, 0x21);
+- desc += 8;
++ sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
++ desc += host->desc_sz;
+
+ /*
+ * If this triggers then we have a calculation bug
+ * somewhere. :/
+ */
+- WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
++ WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
+ }
+
+ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+ /*
+ * Mark the last descriptor as the terminating descriptor
+ */
+- if (desc != host->adma_desc) {
+- desc -= 8;
+- desc[0] |= 0x2; /* end */
++ if (desc != host->adma_table) {
++ desc -= host->desc_sz;
++ sdhci_adma_mark_end(desc);
+ }
+ } else {
+ /*
+@@ -565,7 +584,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ */
+
+ /* nop, end, valid */
+- sdhci_set_adma_desc(desc, 0, 0, 0x3);
++ sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
+ }
+
+ /*
+@@ -573,14 +592,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ */
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_sync_single_for_device(mmc_dev(host->mmc),
+- host->align_addr, 128 * 4, direction);
++ host->align_addr, host->align_buffer_sz, direction);
+ }
+
+ return 0;
+
+ unmap_align:
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+- 128 * 4, direction);
++ host->align_buffer_sz, direction);
+ fail:
+ return -EINVAL;
+ }
+@@ -592,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+
+ struct scatterlist *sg;
+ int i, size;
+- u8 *align;
++ void *align;
+ char *buffer;
+ unsigned long flags;
+ bool has_unaligned;
+@@ -603,12 +622,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+ direction = DMA_TO_DEVICE;
+
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+- 128 * 4, direction);
++ host->align_buffer_sz, direction);
+
+ /* Do a quick scan of the SG list for any unaligned mappings */
+ has_unaligned = false;
+ for_each_sg(data->sg, sg, host->sg_count, i)
+- if (sg_dma_address(sg) & 3) {
++ if (sg_dma_address(sg) & host->align_mask) {
+ has_unaligned = true;
+ break;
+ }
+@@ -620,15 +639,17 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+ align = host->align_buffer;
+
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+- if (sg_dma_address(sg) & 0x3) {
+- size = 4 - (sg_dma_address(sg) & 0x3);
++ if (sg_dma_address(sg) & host->align_mask) {
++ size = host->align_sz -
++ (sg_dma_address(sg) & host->align_mask);
+
+ buffer = sdhci_kmap_atomic(sg, &flags);
+- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
++ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
++ (PAGE_SIZE - size));
+ memcpy(buffer, align, size);
+ sdhci_kunmap_atomic(buffer, &flags);
+
+- align += 4;
++ align += host->align_sz;
+ }
+ }
+ }
+@@ -822,6 +843,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ } else {
+ sdhci_writel(host, host->adma_addr,
+ SDHCI_ADMA_ADDRESS);
++ if (host->flags & SDHCI_USE_64_BIT_DMA)
++ sdhci_writel(host,
++ (u64)host->adma_addr >> 32,
++ SDHCI_ADMA_ADDRESS_HI);
+ }
+ } else {
+ int sg_cnt;
+@@ -855,10 +880,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if ((host->flags & SDHCI_REQ_USE_DMA) &&
+- (host->flags & SDHCI_USE_ADMA))
+- ctrl |= SDHCI_CTRL_ADMA32;
+- else
++ (host->flags & SDHCI_USE_ADMA)) {
++ if (host->flags & SDHCI_USE_64_BIT_DMA)
++ ctrl |= SDHCI_CTRL_ADMA64;
++ else
++ ctrl |= SDHCI_CTRL_ADMA32;
++ } else {
+ ctrl |= SDHCI_CTRL_SDMA;
++ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+
+@@ -1797,6 +1826,10 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+ ctrl |= SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
++ /* Some controllers need to do more when switching */
++ if (host->ops->voltage_switch)
++ host->ops->voltage_switch(host);
++
+ /* 1.8V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ctrl & SDHCI_CTRL_VDD_180)
+@@ -2250,7 +2283,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+- SDHCI_INT_INDEX))
++ SDHCI_INT_INDEX | SDHCI_INT_ACMD12ERR))
+ host->cmd->error = -EILSEQ;
+
+ if (host->cmd->error) {
+@@ -2292,32 +2325,36 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+ }
+
+ #ifdef CONFIG_MMC_DEBUG
+-static void sdhci_show_adma_error(struct sdhci_host *host)
++static void sdhci_adma_show_error(struct sdhci_host *host)
+ {
+ const char *name = mmc_hostname(host->mmc);
+- u8 *desc = host->adma_desc;
+- __le32 *dma;
+- __le16 *len;
+- u8 attr;
++ void *desc = host->adma_table;
+
+ sdhci_dumpregs(host);
+
+ while (true) {
+- dma = (__le32 *)(desc + 4);
+- len = (__le16 *)(desc + 2);
+- attr = *desc;
+-
+- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+- name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
++ struct sdhci_adma2_64_desc *dma_desc = desc;
++
++ if (host->flags & SDHCI_USE_64_BIT_DMA)
++ DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
++ name, desc, le32_to_cpu(dma_desc->addr_hi),
++ le32_to_cpu(dma_desc->addr_lo),
++ le16_to_cpu(dma_desc->len),
++ le16_to_cpu(dma_desc->cmd));
++ else
++ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
++ name, desc, le32_to_cpu(dma_desc->addr_lo),
++ le16_to_cpu(dma_desc->len),
++ le16_to_cpu(dma_desc->cmd));
+
+- desc += 8;
++ desc += host->desc_sz;
+
+- if (attr & 2)
++ if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
+ break;
+ }
+ }
+ #else
+-static void sdhci_show_adma_error(struct sdhci_host *host) { }
++static void sdhci_adma_show_error(struct sdhci_host *host) { }
+ #endif
+
+ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+@@ -2380,7 +2417,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ host->data->error = -EILSEQ;
+ else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+- sdhci_show_adma_error(host);
++ sdhci_adma_show_error(host);
+ host->data->error = -EIO;
+ if (host->ops->adma_workaround)
+ host->ops->adma_workaround(host, intmask);
+@@ -2859,6 +2896,16 @@ int sdhci_add_host(struct sdhci_host *host)
+ host->flags &= ~SDHCI_USE_ADMA;
+ }
+
++ /*
++ * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
++ * and *must* do 64-bit DMA. A driver has the opportunity to change
++ * that during the first call to ->enable_dma(). Similarly
++ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
++ * implement.
++ */
++ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
++ host->flags |= SDHCI_USE_64_BIT_DMA;
++
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma) {
+ if (host->ops->enable_dma(host)) {
+@@ -2870,33 +2917,59 @@ int sdhci_add_host(struct sdhci_host *host)
+ }
+ }
+
++ /* SDMA does not support 64-bit DMA */
++ if (host->flags & SDHCI_USE_64_BIT_DMA)
++ host->flags &= ~SDHCI_USE_SDMA;
++
+ if (host->flags & SDHCI_USE_ADMA) {
+ /*
+- * We need to allocate descriptors for all sg entries
+- * (128) and potentially one alignment transfer for
+- * each of those entries.
++ * The DMA descriptor table size is calculated as the maximum
++ * number of segments times 2, to allow for an alignment
++ * descriptor for each segment, plus 1 for a nop end descriptor,
++ * all multiplied by the descriptor size.
+ */
+- host->adma_desc = dma_alloc_coherent(mmc_dev(mmc),
+- ADMA_SIZE, &host->adma_addr,
+- GFP_KERNEL);
+- host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+- if (!host->adma_desc || !host->align_buffer) {
+- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
+- host->adma_desc, host->adma_addr);
++ if (host->flags & SDHCI_USE_64_BIT_DMA) {
++ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
++ SDHCI_ADMA2_64_DESC_SZ;
++ host->align_buffer_sz = SDHCI_MAX_SEGS *
++ SDHCI_ADMA2_64_ALIGN;
++ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
++ host->align_sz = SDHCI_ADMA2_64_ALIGN;
++ host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
++ } else {
++ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
++ SDHCI_ADMA2_32_DESC_SZ;
++ host->align_buffer_sz = SDHCI_MAX_SEGS *
++ SDHCI_ADMA2_32_ALIGN;
++ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
++ host->align_sz = SDHCI_ADMA2_32_ALIGN;
++ host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
++ }
++ host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
++ host->adma_table_sz,
++ &host->adma_addr,
++ GFP_KERNEL);
++ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
++ if (!host->adma_table || !host->align_buffer) {
++ if (host->adma_table)
++ dma_free_coherent(mmc_dev(mmc),
++ host->adma_table_sz,
++ host->adma_table,
++ host->adma_addr);
+ kfree(host->align_buffer);
+ pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+- host->adma_desc = NULL;
++ host->adma_table = NULL;
+ host->align_buffer = NULL;
+- } else if (host->adma_addr & 3) {
++ } else if (host->adma_addr & host->align_mask) {
+ pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
+- host->adma_desc, host->adma_addr);
++ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
++ host->adma_table, host->adma_addr);
+ kfree(host->align_buffer);
+- host->adma_desc = NULL;
++ host->adma_table = NULL;
+ host->align_buffer = NULL;
+ }
+ }
+@@ -2995,7 +3068,8 @@ int sdhci_add_host(struct sdhci_host *host)
+ /* Auto-CMD23 stuff only works in ADMA or PIO. */
+ if ((host->version >= SDHCI_SPEC_300) &&
+ ((host->flags & SDHCI_USE_ADMA) ||
+- !(host->flags & SDHCI_USE_SDMA))) {
++ !(host->flags & SDHCI_USE_SDMA)) &&
++ !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
+ host->flags |= SDHCI_AUTO_CMD23;
+ DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
+ } else {
+@@ -3152,13 +3226,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ SDHCI_MAX_CURRENT_MULTIPLIER;
+ }
+
+- /* If OCR set by external regulators, use it instead */
++ /* If OCR set by host, use it instead. */
++ if (host->ocr_mask)
++ ocr_avail = host->ocr_mask;
++
++ /* If OCR set by external regulators, give it highest prio. */
+ if (mmc->ocr_avail)
+ ocr_avail = mmc->ocr_avail;
+
+- if (host->ocr_mask)
+- ocr_avail &= host->ocr_mask;
+-
+ mmc->ocr_avail = ocr_avail;
+ mmc->ocr_avail_sdio = ocr_avail;
+ if (host->ocr_avail_sdio)
+@@ -3185,11 +3260,11 @@ int sdhci_add_host(struct sdhci_host *host)
+ * can do scatter/gather or not.
+ */
+ if (host->flags & SDHCI_USE_ADMA)
+- mmc->max_segs = 128;
++ mmc->max_segs = SDHCI_MAX_SEGS;
+ else if (host->flags & SDHCI_USE_SDMA)
+ mmc->max_segs = 1;
+ else /* PIO */
+- mmc->max_segs = 128;
++ mmc->max_segs = SDHCI_MAX_SEGS;
+
+ /*
+ * Maximum number of sectors in one transfer. Limited by DMA boundary
+@@ -3287,7 +3362,8 @@ int sdhci_add_host(struct sdhci_host *host)
+
+ pr_info("%s: SDHCI controller on %s [%s] using %s\n",
+ mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+- (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
++ (host->flags & SDHCI_USE_ADMA) ?
++ (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
+ (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
+
+ sdhci_enable_card_detection(host);
+@@ -3355,12 +3431,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+
+- if (host->adma_desc)
+- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
+- host->adma_desc, host->adma_addr);
++ if (host->adma_table)
++ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
++ host->adma_table, host->adma_addr);
+ kfree(host->align_buffer);
+
+- host->adma_desc = NULL;
++ host->adma_table = NULL;
+ host->align_buffer = NULL;
+ }
+
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 31896a7..5220f36 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -227,6 +227,7 @@
+ /* 55-57 reserved */
+
+ #define SDHCI_ADMA_ADDRESS 0x58
++#define SDHCI_ADMA_ADDRESS_HI 0x5C
+
+ /* 60-FB reserved */
+
+@@ -266,6 +267,46 @@
+ #define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
+ #define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
+
++/* ADMA2 32-bit DMA descriptor size */
++#define SDHCI_ADMA2_32_DESC_SZ 8
++
++/* ADMA2 32-bit DMA alignment */
++#define SDHCI_ADMA2_32_ALIGN 4
++
++/* ADMA2 32-bit descriptor */
++struct sdhci_adma2_32_desc {
++ __le16 cmd;
++ __le16 len;
++ __le32 addr;
++} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
++
++/* ADMA2 64-bit DMA descriptor size */
++#define SDHCI_ADMA2_64_DESC_SZ 12
++
++/* ADMA2 64-bit DMA alignment */
++#define SDHCI_ADMA2_64_ALIGN 8
++
++/*
++ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
++ * aligned.
++ */
++struct sdhci_adma2_64_desc {
++ __le16 cmd;
++ __le16 len;
++ __le32 addr_lo;
++ __le32 addr_hi;
++} __packed __aligned(4);
++
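++/*
++ * ADMA2 descriptor attribute bits: bit 0 = valid, bit 1 = end, bit 2 = int,
++ * bits 5:4 = action (10b transfers data, 00b is a nop). So 0x21 is a valid
++ * transfer descriptor and 0x3 a valid terminating nop.
++ */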
++#define ADMA2_TRAN_VALID 0x21
++#define ADMA2_NOP_END_VALID 0x3
++#define ADMA2_END 0x2
++
++/*
++ * Maximum segments assuming a 512KiB maximum request size and a minimum
++ * 4KiB page size.
++ */
++#define SDHCI_MAX_SEGS 128
++
+ struct sdhci_ops {
+ #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+ u32 (*read_l)(struct sdhci_host *host, int reg);
+@@ -296,6 +337,7 @@ struct sdhci_ops {
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*platform_init)(struct sdhci_host *host);
+ void (*card_event)(struct sdhci_host *host);
++ void (*voltage_switch)(struct sdhci_host *host);
+ };
+
+ #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
+index dd10646..34ce759 100644
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -429,7 +429,7 @@ config MTD_NAND_FSL_ELBC
+
+ config MTD_NAND_FSL_IFC
+ tristate "NAND support for Freescale IFC controller"
+- depends on MTD_NAND && FSL_SOC
++ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE)
+ select FSL_IFC
+ select MEMORY
+ help
+diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
+index 2338124..c8be272 100644
+--- a/drivers/mtd/nand/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/fsl_ifc_nand.c
+@@ -31,7 +31,6 @@
+ #include <linux/mtd/nand_ecc.h>
+ #include <linux/fsl_ifc.h>
+
+-#define FSL_IFC_V1_1_0 0x01010000
+ #define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+ #define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+@@ -234,13 +233,13 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+- iowrite32be(page_addr, &ifc->ifc_nand.row0);
+- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
++ ifc_out32(page_addr, &ifc->ifc_nand.row0);
++ ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
+@@ -297,28 +296,28 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 eccstat[4];
+ int i;
+
+ /* set the chip select for NAND Transaction */
+- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+- &ifc->ifc_nand.nand_csel);
++ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
++ &ifc->ifc_nand.nand_csel);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+- ioread32be(&ifc->ifc_nand.nand_fir0),
+- ioread32be(&ifc->ifc_nand.nand_fcr0));
++ ifc_in32(&ifc->ifc_nand.nand_fir0),
++ ifc_in32(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
++ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+- IFC_TIMEOUT_MSECS * HZ/1000);
++ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+@@ -337,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
+ int sector_end = sector + chip->ecc.steps - 1;
+
+ for (i = sector / 4; i <= sector_end / 4; i++)
+- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
++ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
+
+ for (i = sector; i <= sector_end; i++) {
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+@@ -373,37 +372,37 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
+ {
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+-
+- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+- &ifc->ifc_nand.nand_fcr0);
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
++ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
++
++ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
++ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
++ &ifc->ifc_nand.nand_fcr0);
+ } else {
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
++ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ if (oob)
+- iowrite32be(NAND_CMD_READOOB <<
+- IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(NAND_CMD_READOOB <<
++ IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc->ifc_nand.nand_fcr0);
+ else
+- iowrite32be(NAND_CMD_READ0 <<
+- IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(NAND_CMD_READ0 <<
++ IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc->ifc_nand.nand_fcr0);
+ }
+ }
+
+@@ -413,7 +412,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+@@ -423,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ switch (command) {
+ /* READ0 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+@@ -438,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+@@ -454,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ if (command == NAND_CMD_PARAM)
+ timing = IFC_FIR_OP_RBCD;
+
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+- (timing << IFC_NAND_FIR0_OP2_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
+- iowrite32be(column, &ifc->ifc_nand.row3);
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
++ (timing << IFC_NAND_FIR0_OP2_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(column, &ifc->ifc_nand.row3);
+
+ /*
+ * although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes(for PARAM)
+ */
+- iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 256;
+
+ set_addr(mtd, 0, 0, 0);
+@@ -481,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
+
+- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+- &ifc->ifc_nand.nand_fcr0);
++ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
++ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
++ &ifc->ifc_nand.nand_fcr0);
+
+- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+@@ -507,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+- iowrite32be(
+- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(
+- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+- (IFC_FIR_OP_RDSTAT <<
+- IFC_NAND_FIR1_OP6_SHIFT) |
+- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+- &ifc->ifc_nand.nand_fir1);
++ ifc_out32(
++ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
++ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
++ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(
++ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
++ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
++ &ifc->ifc_nand.nand_fir1);
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+@@ -528,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
+
+- iowrite32be(
++ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+- iowrite32be(
+- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+- (IFC_FIR_OP_RDSTAT <<
+- IFC_NAND_FIR1_OP7_SHIFT) |
+- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+- &ifc->ifc_nand.nand_fir1);
++ ifc_out32(
++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
++ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
++ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
++ &ifc->ifc_nand.nand_fir1);
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+@@ -556,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+@@ -564,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ if (ifc_nand_ctrl->oob) {
+- iowrite32be(ifc_nand_ctrl->index -
+- ifc_nand_ctrl->column,
+- &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(ifc_nand_ctrl->index -
++ ifc_nand_ctrl->column,
++ &ifc->ifc_nand.nand_fbcr);
+ } else {
+- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+- case NAND_CMD_STATUS:
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
+- iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
++ case NAND_CMD_STATUS: {
++ void __iomem *addr;
++
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+@@ -591,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
++ addr = ifc_nand_ctrl->addr;
+ if (chip->options & NAND_BUSWIDTH_16)
+- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
++ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
+ else
+- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
++ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
+ return;
++ }
+
+ case NAND_CMD_RESET:
+- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc->ifc_nand.nand_fcr0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+@@ -659,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ offset = ifc_nand_ctrl->index++;
+- return in_8(ifc_nand_ctrl->addr + offset);
++ return ifc_in8(ifc_nand_ctrl->addr + offset);
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+@@ -681,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
++ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+@@ -723,22 +724,22 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+ {
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ u32 nand_fsr;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
+- iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
++ &ifc->ifc_nand.nand_fir0);
++ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc->ifc_nand.nand_fcr0);
++ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
++ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
+
+ /*
+ * The chip always seems to report that it is
+@@ -825,67 +826,72 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+ {
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
++ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+- csor = ioread32be(&ifc->csor_cs[cs].csor);
+- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
++ csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
++ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
+
+ /* change PageSize 8K and SpareSize 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
++ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
++ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
+
+ /* READID */
+- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+- &ifc->ifc_nand.nand_fir0);
+- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+- &ifc->ifc_nand.nand_fcr0);
+- iowrite32be(0x0, &ifc->ifc_nand.row3);
++ &ifc_runtime->ifc_nand.nand_fir0);
++ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
++ &ifc_runtime->ifc_nand.nand_fcr0);
++ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
+
+- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
++ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
+
+ /* Program ROW0/COL0 */
+- iowrite32be(0x0, &ifc->ifc_nand.row0);
+- iowrite32be(0x0, &ifc->ifc_nand.col0);
++ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
++ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
+
+ /* set the chip select for NAND Transaction */
+- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
++ ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
++ &ifc_runtime->ifc_nand.nand_csel);
+
+ /* start read seq */
+- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
++ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
++ &ifc_runtime->ifc_nand.nandseq_strt);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+- IFC_TIMEOUT_MSECS * HZ/1000);
++ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+ /* Restore CSOR and CSOR_ext */
+- iowrite32be(csor, &ifc->csor_cs[cs].csor);
+- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
++ ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
++ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
+ }
+
+ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+ {
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
++ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
++ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+ struct nand_chip *chip = &priv->chip;
+ struct nand_ecclayout *layout;
+- u32 csor, ver;
++ u32 csor;
+
+ /* Fill in fsl_ifc_mtd structure */
+ priv->mtd.priv = chip;
+- priv->mtd.owner = THIS_MODULE;
++ priv->mtd.dev.parent = priv->dev;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
++ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
++ & CSPR_PORT_SIZE_16)
+ chip->read_byte = fsl_ifc_read_byte16;
+ else
+ chip->read_byte = fsl_ifc_read_byte;
+@@ -899,13 +905,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+- iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
++ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
+
+ /* set up nand options */
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->options = NAND_NO_SUBPAGE_WRITE;
+
+- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
++ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
++ & CSPR_PORT_SIZE_16) {
+ chip->read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+@@ -918,7 +925,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+- csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
++ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+@@ -984,8 +991,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+- ver = ioread32be(&ifc->ifc_rev);
+- if (ver == FSL_IFC_V1_1_0)
++ if (ctrl->version == FSL_IFC_VERSION_1_1_0)
+ fsl_ifc_sram_init(priv);
+
+ return 0;
+@@ -1005,10 +1011,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+ return 0;
+ }
+
+-static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
++static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
+ phys_addr_t addr)
+ {
+- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
++ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+@@ -1022,7 +1028,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+ static int fsl_ifc_nand_probe(struct platform_device *dev)
+ {
+- struct fsl_ifc_regs __iomem *ifc;
++ struct fsl_ifc_runtime __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+@@ -1033,9 +1039,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = dev->dev.of_node;
+- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
++ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
+ return -ENODEV;
+- ifc = fsl_ifc_ctrl_dev->regs;
++ ifc = fsl_ifc_ctrl_dev->rregs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+@@ -1045,12 +1051,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
+ }
+
+ /* find which chip select it is connected to */
+- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
+- if (match_bank(ifc, bank, res.start))
++ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
++ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
+ break;
+ }
+
+- if (bank >= FSL_IFC_BANK_COUNT) {
++ if (bank >= fsl_ifc_ctrl_dev->banks) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+@@ -1094,16 +1100,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
+
+ dev_set_drvdata(priv->dev, priv);
+
+- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+- IFC_NAND_EVTER_EN_FTOER_EN |
+- IFC_NAND_EVTER_EN_WPER_EN,
+- &ifc->ifc_nand.nand_evter_en);
++ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
++ IFC_NAND_EVTER_EN_FTOER_EN |
++ IFC_NAND_EVTER_EN_WPER_EN,
++ &ifc->ifc_nand.nand_evter_en);
+
+ /* enable NAND Machine Interrupts */
+- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+- IFC_NAND_EVTER_INTR_FTOERIR_EN |
+- IFC_NAND_EVTER_INTR_WPERIR_EN,
+- &ifc->ifc_nand.nand_evter_intr_en);
++ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
++ IFC_NAND_EVTER_INTR_FTOERIR_EN |
++ IFC_NAND_EVTER_INTR_WPERIR_EN,
++ &ifc->ifc_nand.nand_evter_intr_en);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+@@ -1163,6 +1169,7 @@ static const struct of_device_id fsl_ifc_nand_match[] = {
+ },
+ {}
+ };
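++/* export the match table so the module autoloads on matching devicetree nodes */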
++MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
+
+ static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
+index 2703083..0c1c97d 100644
+--- a/drivers/net/ethernet/freescale/Kconfig
++++ b/drivers/net/ethernet/freescale/Kconfig
+@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE
+ default y
+ depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
+ M523x || M527x || M5272 || M528x || M520x || M532x || \
+- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
++ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
++ ARCH_LAYERSCAPE
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+@@ -58,18 +59,17 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+
+ config FSL_PQ_MDIO
+ tristate "Freescale PQ MDIO"
+- depends on FSL_SOC
+ select PHYLIB
+ ---help---
+ This driver supports the MDIO bus used by the gianfar and UCC drivers.
+
+ config FSL_XGMAC_MDIO
+ tristate "Freescale XGMAC MDIO"
+- depends on FSL_SOC
+ select PHYLIB
+ select OF_MDIO
+ ---help---
+- This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
++ This driver supports the MDIO bus on the Fman 10G Ethernet MACs and
++ on mEMAC (which supports both Clauses 22 and 45).
+
+ config UCC_GETH
+ tristate "Freescale QE Gigabit Ethernet"
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+index ff55fbb..76ff046 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op)
+ }
+ #endif
+
+-static struct of_device_id mpc52xx_fec_match[] = {
++static const struct of_device_id mpc52xx_fec_match[] = {
+ { .compatible = "fsl,mpc5200b-fec", },
+ { .compatible = "fsl,mpc5200-fec", },
+ { .compatible = "mpc5200-fec", },
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+index e052890..1e647be 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+ return 0;
+ }
+
+-static struct of_device_id mpc52xx_fec_mdio_match[] = {
++static const struct of_device_id mpc52xx_fec_mdio_match[] = {
+ { .compatible = "fsl,mpc5200b-mdio", },
+ { .compatible = "fsl,mpc5200-mdio", },
+ { .compatible = "mpc5200b-fec-phy", },
+diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+index c92c3b7..dc0da6c 100644
+--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+@@ -886,7 +886,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
+ #endif
+ };
+
+-static struct of_device_id fs_enet_match[];
++static const struct of_device_id fs_enet_match[];
+ static int fs_enet_probe(struct platform_device *ofdev)
+ {
+ const struct of_device_id *match;
+@@ -1047,7 +1047,7 @@ static int fs_enet_remove(struct platform_device *ofdev)
+ return 0;
+ }
+
+-static struct of_device_id fs_enet_match[] = {
++static const struct of_device_id fs_enet_match[] = {
+ #ifdef CONFIG_FS_ENET_HAS_SCC
+ {
+ .compatible = "fsl,cpm1-scc-enet",
+diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+index 3d3fde6..9ec396b 100644
+--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
++++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
+ return 0;
+ }
+
+-static struct of_device_id fs_enet_mdio_bb_match[] = {
++static const struct of_device_id fs_enet_mdio_bb_match[] = {
+ {
+ .compatible = "fsl,cpm2-mdio-bitbang",
+ },
+diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+index ebf5d64..72205b0 100644
+--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
++++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
+
+ }
+
+-static struct of_device_id fs_enet_mdio_fec_match[];
++static const struct of_device_id fs_enet_mdio_fec_match[];
+ static int fs_enet_mdio_probe(struct platform_device *ofdev)
+ {
+ const struct of_device_id *match;
+@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
+ return 0;
+ }
+
+-static struct of_device_id fs_enet_mdio_fec_match[] = {
++static const struct of_device_id fs_enet_mdio_fec_match[] = {
+ {
+ .compatible = "fsl,pq1-fec-mdio",
+ },
+diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+index 964c6bf..f94fa63 100644
+--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
+
+ #endif
+
+-static struct of_device_id fsl_pq_mdio_match[] = {
++static const struct of_device_id fsl_pq_mdio_match[] = {
+ #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+ {
+ .compatible = "fsl,gianfar-tbi",
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 4fdf0aa..0359cfd 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -86,11 +86,11 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #ifdef CONFIG_PPC
+ #include
+-#include
+ #endif
+ #include
+ #include
+@@ -1720,8 +1720,10 @@ static void gfar_configure_serdes(struct net_device *dev)
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
++ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
++ put_device(&tbiphy->dev);
+ return;
++ }
+
+ /* Single clk mode, mii mode off(for serdes communication) */
+ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+@@ -3455,7 +3457,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
+ phy_print_status(phydev);
+ }
+
+-static struct of_device_id gfar_match[] =
++static const struct of_device_id gfar_match[] =
+ {
+ {
+ .type = "network",
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index bb56800..c7c75de 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -554,7 +554,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
+ return 0;
+ }
+
+-static struct of_device_id match_table[] = {
++static const struct of_device_id match_table[] = {
+ { .compatible = "fsl,etsec-ptp" },
+ {},
+ };
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
+index 3cf0478..741a7d4 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -3930,7 +3930,7 @@ static int ucc_geth_remove(struct platform_device* ofdev)
+ return 0;
+ }
+
+-static struct of_device_id ucc_geth_match[] = {
++static const struct of_device_id ucc_geth_match[] = {
+ {
+ .type = "network",
+ .compatible = "ucc_geth",
+diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
+index 6e7db66..7b8fe86 100644
+--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
++++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
+@@ -32,31 +32,62 @@ struct tgec_mdio_controller {
+ __be32 mdio_addr; /* MDIO address */
+ } __packed;
+
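++/* when set in MDIO_STAT, frames are encoded as clause 45; when clear, clause 22 */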
++#define MDIO_STAT_ENC BIT(6)
+ #define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+-#define MDIO_STAT_BSY (1 << 0)
+-#define MDIO_STAT_RD_ER (1 << 1)
++#define MDIO_STAT_BSY BIT(0)
++#define MDIO_STAT_RD_ER BIT(1)
+ #define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
+ #define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
+-#define MDIO_CTL_PRE_DIS (1 << 10)
+-#define MDIO_CTL_SCAN_EN (1 << 11)
+-#define MDIO_CTL_POST_INC (1 << 14)
+-#define MDIO_CTL_READ (1 << 15)
++#define MDIO_CTL_PRE_DIS BIT(10)
++#define MDIO_CTL_SCAN_EN BIT(11)
++#define MDIO_CTL_POST_INC BIT(14)
++#define MDIO_CTL_READ BIT(15)
+
+ #define MDIO_DATA(x) (x & 0xffff)
+-#define MDIO_DATA_BSY (1 << 31)
++#define MDIO_DATA_BSY BIT(31)
++
++struct mdio_fsl_priv {
++ struct tgec_mdio_controller __iomem *mdio_base;
++ bool is_little_endian;
++};
++
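++/*
++ * Accessors for the MDIO register block, which may be mapped little- or
++ * big-endian; the byte order is chosen at probe time from the
++ * "little-endian" devicetree property.
++ */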
++static u32 xgmac_read32(void __iomem *regs,
++ bool is_little_endian)
++{
++ if (is_little_endian)
++ return ioread32(regs);
++ else
++ return ioread32be(regs);
++}
++
++static void xgmac_write32(u32 value,
++ void __iomem *regs,
++ bool is_little_endian)
++{
++ if (is_little_endian)
++ iowrite32(value, regs);
++ else
++ iowrite32be(value, regs);
++}
+
+ /*
+ * Wait until the MDIO bus is free
+ */
+ static int xgmac_wait_until_free(struct device *dev,
+- struct tgec_mdio_controller __iomem *regs)
++ struct tgec_mdio_controller __iomem *regs,
++ bool is_little_endian)
+ {
+- uint32_t status;
++ unsigned int timeout;
+
+ /* Wait till the bus is free */
+- status = spin_event_timeout(
+- !((in_be32(®s->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+- if (!status) {
++ timeout = TIMEOUT;
++ while ((xgmac_read32(®s->mdio_stat, is_little_endian) &
++ MDIO_STAT_BSY) && timeout) {
++ cpu_relax();
++ timeout--;
++ }
++
++ if (!timeout) {
+ dev_err(dev, "timeout waiting for bus to be free\n");
+ return -ETIMEDOUT;
+ }
+@@ -68,14 +99,20 @@ static int xgmac_wait_until_free(struct device *dev,
+ * Wait till the MDIO read or write operation is complete
+ */
+ static int xgmac_wait_until_done(struct device *dev,
+- struct tgec_mdio_controller __iomem *regs)
++ struct tgec_mdio_controller __iomem *regs,
++ bool is_little_endian)
+ {
+- uint32_t status;
++ unsigned int timeout;
+
+ /* Wait till the MDIO write is complete */
+- status = spin_event_timeout(
+- !((in_be32(®s->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+- if (!status) {
++ timeout = TIMEOUT;
++ while ((xgmac_read32(®s->mdio_stat, is_little_endian) &
++ MDIO_STAT_BSY) && timeout) {
++ cpu_relax();
++ timeout--;
++ }
++
++ if (!timeout) {
+ dev_err(dev, "timeout waiting for operation to complete\n");
+ return -ETIMEDOUT;
+ }
+@@ -90,32 +127,47 @@ static int xgmac_wait_until_done(struct device *dev,
+ */
+ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+ {
+- struct tgec_mdio_controller __iomem *regs = bus->priv;
+- uint16_t dev_addr = regnum >> 16;
++ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
++ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
++ uint16_t dev_addr;
++ u32 mdio_ctl, mdio_stat;
+ int ret;
++ bool endian = priv->is_little_endian;
++
++ mdio_stat = xgmac_read32(®s->mdio_stat, endian);
++ if (regnum & MII_ADDR_C45) {
++ /* Clause 45 (ie 10G) */
++ dev_addr = (regnum >> 16) & 0x1f;
++ mdio_stat |= MDIO_STAT_ENC;
++ } else {
++ /* Clause 22 (ie 1G) */
++ dev_addr = regnum & 0x1f;
++ mdio_stat &= ~MDIO_STAT_ENC;
++ }
+
+- /* Setup the MII Mgmt clock speed */
+- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100));
++ xgmac_write32(mdio_stat, ®s->mdio_stat, endian);
+
+- ret = xgmac_wait_until_free(&bus->dev, regs);
++ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+- out_be32(®s->mdio_ctl,
+- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
++ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
++ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian);
+
+ /* Set the register address */
+- out_be32(®s->mdio_addr, regnum & 0xffff);
++ if (regnum & MII_ADDR_C45) {
++ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian);
+
+- ret = xgmac_wait_until_free(&bus->dev, regs);
+- if (ret)
+- return ret;
++ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
++ if (ret)
++ return ret;
++ }
+
+ /* Write the value to the register */
+- out_be32(®s->mdio_data, MDIO_DATA(value));
++ xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian);
+
+- ret = xgmac_wait_until_done(&bus->dev, regs);
++ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+@@ -129,74 +181,70 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
+ */
+ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+ {
+- struct tgec_mdio_controller __iomem *regs = bus->priv;
+- uint16_t dev_addr = regnum >> 16;
++ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
++ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
++ uint16_t dev_addr;
++ uint32_t mdio_stat;
+ uint32_t mdio_ctl;
+ uint16_t value;
+ int ret;
++ bool endian = priv->is_little_endian;
++
++ mdio_stat = xgmac_read32(®s->mdio_stat, endian);
++ if (regnum & MII_ADDR_C45) {
++ dev_addr = (regnum >> 16) & 0x1f;
++ mdio_stat |= MDIO_STAT_ENC;
++ } else {
++ dev_addr = regnum & 0x1f;
++ mdio_stat &= ~MDIO_STAT_ENC;
++ }
+
+- /* Setup the MII Mgmt clock speed */
+- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100));
++ xgmac_write32(mdio_stat, ®s->mdio_stat, endian);
+
+- ret = xgmac_wait_until_free(&bus->dev, regs);
++ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+- out_be32(®s->mdio_ctl, mdio_ctl);
++ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian);
+
+ /* Set the register address */
+- out_be32(®s->mdio_addr, regnum & 0xffff);
++ if (regnum & MII_ADDR_C45) {
++ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian);
+
+- ret = xgmac_wait_until_free(&bus->dev, regs);
+- if (ret)
+- return ret;
++ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
++ if (ret)
++ return ret;
++ }
+
+ /* Initiate the read */
+- out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
++ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian);
+
+- ret = xgmac_wait_until_done(&bus->dev, regs);
++ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Return all Fs if nothing was there */
+- if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) {
++ if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ dev_err(&bus->dev,
+ "Error while reading PHY%d reg at %d.%hhu\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
+ }
+
+- value = in_be32(®s->mdio_data) & 0xffff;
++ value = xgmac_read32(®s->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", value);
+
+ return value;
+ }
+
+-/* Reset the MIIM registers, and wait for the bus to free */
+-static int xgmac_mdio_reset(struct mii_bus *bus)
+-{
+- struct tgec_mdio_controller __iomem *regs = bus->priv;
+- int ret;
+-
+- mutex_lock(&bus->mdio_lock);
+-
+- /* Setup the MII Mgmt clock speed */
+- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100));
+-
+- ret = xgmac_wait_until_free(&bus->dev, regs);
+-
+- mutex_unlock(&bus->mdio_lock);
+-
+- return ret;
+-}
+-
+ static int xgmac_mdio_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct resource res;
++ struct mdio_fsl_priv *priv;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+@@ -205,25 +253,30 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
++ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale XGMAC MDIO Bus";
+ bus->read = xgmac_mdio_read;
+ bus->write = xgmac_mdio_write;
+- bus->reset = xgmac_mdio_reset;
+- bus->irq = bus->priv;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+ /* Set the PHY base address */
+- bus->priv = of_iomap(np, 0);
+- if (!bus->priv) {
++ priv = bus->priv;
++ priv->mdio_base = of_iomap(np, 0);
++ if (!priv->mdio_base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
++ if (of_get_property(pdev->dev.of_node,
++ "little-endian", NULL))
++ priv->is_little_endian = true;
++ else
++ priv->is_little_endian = false;
++
+ ret = of_mdiobus_register(bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register MDIO bus\n");
+@@ -235,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
+ return 0;
+
+ err_registration:
+- iounmap(bus->priv);
++ iounmap(priv->mdio_base);
+
+ err_ioremap:
+ mdiobus_free(bus);
+@@ -254,10 +307,13 @@ static int xgmac_mdio_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct of_device_id xgmac_mdio_match[] = {
++static const struct of_device_id xgmac_mdio_match[] = {
+ {
+ .compatible = "fsl,fman-xmdio",
+ },
++ {
++ .compatible = "fsl,fman-memac-mdio",
++ },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+index 051ea94..2a04baa 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -286,6 +286,9 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ break;
++ case BCM54616_E_PHY_ID:
++ phy->type = e1000_phy_bcm54616;
++ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+@@ -1550,6 +1553,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
++ case e1000_i354:
+ phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+@@ -1593,6 +1597,8 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
+ case e1000_phy_82580:
+ ret_val = igb_copper_link_setup_82580(hw);
+ break;
++ case e1000_phy_bcm54616:
++ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
+index 217f813..5322fbf 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
+@@ -860,6 +860,7 @@
+ #define M88_VENDOR 0x0141
+ #define I210_I_PHY_ID 0x01410C00
+ #define M88E1543_E_PHY_ID 0x01410EA0
++#define BCM54616_E_PHY_ID 0x3625D10
+
+ /* M88E1000 Specific Registers */
+ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
+index 2003b37..d82c96b 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
+@@ -128,6 +128,7 @@ enum e1000_phy_type {
+ e1000_phy_ife,
+ e1000_phy_82580,
+ e1000_phy_i210,
++ e1000_phy_bcm54616,
+ };
+
+ enum e1000_bus_type {
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index e0f3664..013c1f1 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -108,6 +108,7 @@ static const struct pci_device_id igb_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
+ /* required last entry */
+ {0, }
+ };
+diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
+index 75472cf..cdc9f8a 100644
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -14,6 +14,11 @@ if PHYLIB
+
+ comment "MII PHY device drivers"
+
++config AQUANTIA_PHY
++ tristate "Drivers for the Aquantia PHYs"
++ ---help---
++ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
++
+ config AT803X_PHY
+ tristate "Drivers for Atheros AT803X PHYs"
+ ---help---
+@@ -60,6 +65,11 @@ config VITESSE_PHY
+ ---help---
+ Currently supports the vsc8244
+
++config TERANETICS_PHY
++ tristate "Drivers for the Teranetics PHYs"
++ ---help---
++ Currently supports the Teranetics TN2020
++
+ config SMSC_PHY
+ tristate "Drivers for SMSC PHYs"
+ ---help---
+@@ -119,8 +129,8 @@ config MICREL_PHY
+ Supports the KSZ9021, VSC8201, KS8001 PHYs.
+
+ config FIXED_PHY
+- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+- depends on PHYLIB=y
++ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
++ depends on PHYLIB
+ ---help---
+ Adds the platform "fixed" MDIO Bus to cover the boards that use
+ PHYs that are not connected to the real MDIO bus.
+@@ -202,6 +212,11 @@ config MDIO_BUS_MUX_MMIOREG
+ the FPGA's registers.
+
+ Currently, only 8-bit registers are supported.
++config FSL_10GBASE_KR
++ tristate "Support for 10GBASE-KR on Freescale XFI interface"
++ depends on OF_MDIO
++ help
++ This module provides a driver for 10GBASE-KR on the Freescale XFI interface.
+
+ config MDIO_BCM_UNIMAC
+ tristate "Broadcom UniMAC MDIO bus controller"
+diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
+index eb3b18b..8ad4ac6 100644
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -3,12 +3,14 @@
+ libphy-objs := phy.o phy_device.o mdio_bus.o
+
+ obj-$(CONFIG_PHYLIB) += libphy.o
++obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
+ obj-$(CONFIG_MARVELL_PHY) += marvell.o
+ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
+ obj-$(CONFIG_CICADA_PHY) += cicada.o
+ obj-$(CONFIG_LXT_PHY) += lxt.o
+ obj-$(CONFIG_QSEMI_PHY) += qsemi.o
+ obj-$(CONFIG_SMSC_PHY) += smsc.o
++obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
+ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+@@ -17,7 +19,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+-obj-$(CONFIG_FIXED_PHY) += fixed.o
++obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
+ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
+ obj-$(CONFIG_NATIONAL_PHY) += national.o
+@@ -31,6 +33,7 @@ obj-$(CONFIG_AMD_PHY) += amd.o
+ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
++obj-$(CONFIG_FSL_10GBASE_KR) += fsl_10gkr.o
+ obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+ obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+ obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
+diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
+new file mode 100644
+index 0000000..d6111af
+--- /dev/null
++++ b/drivers/net/phy/aquantia.c
+@@ -0,0 +1,201 @@
++/*
++ * Driver for Aquantia PHY
++ *
++ * Author: Shaohui Xie
++ *
++ * Copyright 2015 Freescale Semiconductor, Inc.
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/mii.h>
++#include <linux/ethtool.h>
++#include <linux/phy.h>
++#include <linux/mdio.h>
++
++#define PHY_ID_AQ1202 0x03a1b445
++#define PHY_ID_AQ2104 0x03a1b460
++#define PHY_ID_AQR105 0x03a1b4a2
++#define PHY_ID_AQR405 0x03a1b4b0
++
++#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
++ SUPPORTED_1000baseT_Full | \
++ SUPPORTED_100baseT_Full | \
++ PHY_DEFAULT_FEATURES)
++
++static int aquantia_config_aneg(struct phy_device *phydev)
++{
++ phydev->supported = PHY_AQUANTIA_FEATURES;
++ phydev->advertising = phydev->supported;
++
++ return 0;
++}
++
++static int aquantia_aneg_done(struct phy_device *phydev)
++{
++ int reg;
++
++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
++}
++
++static int aquantia_config_intr(struct phy_device *phydev)
++{
++ int err;
++
++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
++ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
++ if (err < 0)
++ return err;
++
++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
++ if (err < 0)
++ return err;
++
++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
++ } else {
++ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
++ if (err < 0)
++ return err;
++
++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
++ if (err < 0)
++ return err;
++
++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
++ }
++
++ return err;
++}
++
++static int aquantia_ack_interrupt(struct phy_device *phydev)
++{
++ int reg;
++
++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
++ return (reg < 0) ? reg : 0;
++}
++
++static int aquantia_read_status(struct phy_device *phydev)
++{
++ int reg;
++
++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ if (reg & MDIO_STAT1_LSTATUS)
++ phydev->link = 1;
++ else
++ phydev->link = 0;
++
++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
++ mdelay(10);
++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
++
++ switch (reg) {
++ case 0x9:
++ phydev->speed = SPEED_2500;
++ break;
++ case 0x5:
++ phydev->speed = SPEED_1000;
++ break;
++ case 0x3:
++ phydev->speed = SPEED_100;
++ break;
++ case 0x7:
++ default:
++ phydev->speed = SPEED_10000;
++ break;
++ }
++ phydev->duplex = DUPLEX_FULL;
++
++ return 0;
++}
++
++static struct phy_driver aquantia_driver[] = {
++{
++ .phy_id = PHY_ID_AQ1202,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Aquantia AQ1202",
++ .features = PHY_AQUANTIA_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .aneg_done = aquantia_aneg_done,
++ .config_aneg = aquantia_config_aneg,
++ .config_intr = aquantia_config_intr,
++ .ack_interrupt = aquantia_ack_interrupt,
++ .read_status = aquantia_read_status,
++ .driver = { .owner = THIS_MODULE,},
++},
++{
++ .phy_id = PHY_ID_AQ2104,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Aquantia AQ2104",
++ .features = PHY_AQUANTIA_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .aneg_done = aquantia_aneg_done,
++ .config_aneg = aquantia_config_aneg,
++ .config_intr = aquantia_config_intr,
++ .ack_interrupt = aquantia_ack_interrupt,
++ .read_status = aquantia_read_status,
++ .driver = { .owner = THIS_MODULE,},
++},
++{
++ .phy_id = PHY_ID_AQR105,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Aquantia AQR105",
++ .features = PHY_AQUANTIA_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .aneg_done = aquantia_aneg_done,
++ .config_aneg = aquantia_config_aneg,
++ .config_intr = aquantia_config_intr,
++ .ack_interrupt = aquantia_ack_interrupt,
++ .read_status = aquantia_read_status,
++ .driver = { .owner = THIS_MODULE,},
++},
++{
++ .phy_id = PHY_ID_AQR405,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Aquantia AQR405",
++ .features = PHY_AQUANTIA_FEATURES,
++ .flags = PHY_HAS_INTERRUPT,
++ .aneg_done = aquantia_aneg_done,
++ .config_aneg = aquantia_config_aneg,
++ .config_intr = aquantia_config_intr,
++ .ack_interrupt = aquantia_ack_interrupt,
++ .read_status = aquantia_read_status,
++ .driver = { .owner = THIS_MODULE,},
++},
++};
++
++static int __init aquantia_init(void)
++{
++ return phy_drivers_register(aquantia_driver,
++ ARRAY_SIZE(aquantia_driver));
++}
++
++static void __exit aquantia_exit(void)
++{
++ return phy_drivers_unregister(aquantia_driver,
++ ARRAY_SIZE(aquantia_driver));
++}
++
++module_init(aquantia_init);
++module_exit(aquantia_exit);
++
++static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
++ { PHY_ID_AQ1202, 0xfffffff0 },
++ { PHY_ID_AQ2104, 0xfffffff0 },
++ { PHY_ID_AQR105, 0xfffffff0 },
++ { PHY_ID_AQR405, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
++
++MODULE_DESCRIPTION("Aquantia PHY driver");
++MODULE_AUTHOR("Shaohui Xie ");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index fdc1b41..a4f0886 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -307,6 +307,8 @@ static struct phy_driver at803x_driver[] = {
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
++ .ack_interrupt = at803x_ack_interrupt,
++ .config_intr = at803x_config_intr,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+@@ -326,6 +328,8 @@ static struct phy_driver at803x_driver[] = {
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
++ .ack_interrupt = at803x_ack_interrupt,
++ .config_intr = at803x_config_intr,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
+deleted file mode 100644
+index 47872ca..0000000
+--- a/drivers/net/phy/fixed.c
++++ /dev/null
+@@ -1,336 +0,0 @@
+-/*
+- * Fixed MDIO bus (MDIO bus emulation with fixed PHYs)
+- *
+- * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+- * Anton Vorontsov <avorontsov@ru.mvista.com>
+- *
+- * Copyright (c) 2006-2007 MontaVista Software, Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/platform_device.h>
+-#include <linux/list.h>
+-#include <linux/mii.h>
+-#include <linux/phy.h>
+-#include <linux/phy_fixed.h>
+-#include <linux/err.h>
+-#include <linux/slab.h>
+-#include <linux/of.h>
+-
+-#define MII_REGS_NUM 29
+-
+-struct fixed_mdio_bus {
+- int irqs[PHY_MAX_ADDR];
+- struct mii_bus *mii_bus;
+- struct list_head phys;
+-};
+-
+-struct fixed_phy {
+- int addr;
+- u16 regs[MII_REGS_NUM];
+- struct phy_device *phydev;
+- struct fixed_phy_status status;
+- int (*link_update)(struct net_device *, struct fixed_phy_status *);
+- struct list_head node;
+-};
+-
+-static struct platform_device *pdev;
+-static struct fixed_mdio_bus platform_fmb = {
+- .phys = LIST_HEAD_INIT(platform_fmb.phys),
+-};
+-
+-static int fixed_phy_update_regs(struct fixed_phy *fp)
+-{
+- u16 bmsr = BMSR_ANEGCAPABLE;
+- u16 bmcr = 0;
+- u16 lpagb = 0;
+- u16 lpa = 0;
+-
+- if (fp->status.duplex) {
+- bmcr |= BMCR_FULLDPLX;
+-
+- switch (fp->status.speed) {
+- case 1000:
+- bmsr |= BMSR_ESTATEN;
+- bmcr |= BMCR_SPEED1000;
+- lpagb |= LPA_1000FULL;
+- break;
+- case 100:
+- bmsr |= BMSR_100FULL;
+- bmcr |= BMCR_SPEED100;
+- lpa |= LPA_100FULL;
+- break;
+- case 10:
+- bmsr |= BMSR_10FULL;
+- lpa |= LPA_10FULL;
+- break;
+- default:
+- pr_warn("fixed phy: unknown speed\n");
+- return -EINVAL;
+- }
+- } else {
+- switch (fp->status.speed) {
+- case 1000:
+- bmsr |= BMSR_ESTATEN;
+- bmcr |= BMCR_SPEED1000;
+- lpagb |= LPA_1000HALF;
+- break;
+- case 100:
+- bmsr |= BMSR_100HALF;
+- bmcr |= BMCR_SPEED100;
+- lpa |= LPA_100HALF;
+- break;
+- case 10:
+- bmsr |= BMSR_10HALF;
+- lpa |= LPA_10HALF;
+- break;
+- default:
+- pr_warn("fixed phy: unknown speed\n");
+- return -EINVAL;
+- }
+- }
+-
+- if (fp->status.link)
+- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+-
+- if (fp->status.pause)
+- lpa |= LPA_PAUSE_CAP;
+-
+- if (fp->status.asym_pause)
+- lpa |= LPA_PAUSE_ASYM;
+-
+- fp->regs[MII_PHYSID1] = 0;
+- fp->regs[MII_PHYSID2] = 0;
+-
+- fp->regs[MII_BMSR] = bmsr;
+- fp->regs[MII_BMCR] = bmcr;
+- fp->regs[MII_LPA] = lpa;
+- fp->regs[MII_STAT1000] = lpagb;
+-
+- return 0;
+-}
+-
+-static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
+-{
+- struct fixed_mdio_bus *fmb = bus->priv;
+- struct fixed_phy *fp;
+-
+- if (reg_num >= MII_REGS_NUM)
+- return -1;
+-
+- /* We do not support emulating Clause 45 over Clause 22 register reads
+- * return an error instead of bogus data.
+- */
+- switch (reg_num) {
+- case MII_MMD_CTRL:
+- case MII_MMD_DATA:
+- return -1;
+- default:
+- break;
+- }
+-
+- list_for_each_entry(fp, &fmb->phys, node) {
+- if (fp->addr == phy_addr) {
+- /* Issue callback if user registered it. */
+- if (fp->link_update) {
+- fp->link_update(fp->phydev->attached_dev,
+- &fp->status);
+- fixed_phy_update_regs(fp);
+- }
+- return fp->regs[reg_num];
+- }
+- }
+-
+- return 0xFFFF;
+-}
+-
+-static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
+- u16 val)
+-{
+- return 0;
+-}
+-
+-/*
+- * If something weird is required to be done with link/speed,
+- * network driver is able to assign a function to implement this.
+- * May be useful for PHY's that need to be software-driven.
+- */
+-int fixed_phy_set_link_update(struct phy_device *phydev,
+- int (*link_update)(struct net_device *,
+- struct fixed_phy_status *))
+-{
+- struct fixed_mdio_bus *fmb = &platform_fmb;
+- struct fixed_phy *fp;
+-
+- if (!link_update || !phydev || !phydev->bus)
+- return -EINVAL;
+-
+- list_for_each_entry(fp, &fmb->phys, node) {
+- if (fp->addr == phydev->addr) {
+- fp->link_update = link_update;
+- fp->phydev = phydev;
+- return 0;
+- }
+- }
+-
+- return -ENOENT;
+-}
+-EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
+-
+-int fixed_phy_add(unsigned int irq, int phy_addr,
+- struct fixed_phy_status *status)
+-{
+- int ret;
+- struct fixed_mdio_bus *fmb = &platform_fmb;
+- struct fixed_phy *fp;
+-
+- fp = kzalloc(sizeof(*fp), GFP_KERNEL);
+- if (!fp)
+- return -ENOMEM;
+-
+- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
+-
+- fmb->irqs[phy_addr] = irq;
+-
+- fp->addr = phy_addr;
+- fp->status = *status;
+-
+- ret = fixed_phy_update_regs(fp);
+- if (ret)
+- goto err_regs;
+-
+- list_add_tail(&fp->node, &fmb->phys);
+-
+- return 0;
+-
+-err_regs:
+- kfree(fp);
+- return ret;
+-}
+-EXPORT_SYMBOL_GPL(fixed_phy_add);
+-
+-void fixed_phy_del(int phy_addr)
+-{
+- struct fixed_mdio_bus *fmb = &platform_fmb;
+- struct fixed_phy *fp, *tmp;
+-
+- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+- if (fp->addr == phy_addr) {
+- list_del(&fp->node);
+- kfree(fp);
+- return;
+- }
+- }
+-}
+-EXPORT_SYMBOL_GPL(fixed_phy_del);
+-
+-static int phy_fixed_addr;
+-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
+-
+-struct phy_device *fixed_phy_register(unsigned int irq,
+- struct fixed_phy_status *status,
+- struct device_node *np)
+-{
+- struct fixed_mdio_bus *fmb = &platform_fmb;
+- struct phy_device *phy;
+- int phy_addr;
+- int ret;
+-
+- /* Get the next available PHY address, up to PHY_MAX_ADDR */
+- spin_lock(&phy_fixed_addr_lock);
+- if (phy_fixed_addr == PHY_MAX_ADDR) {
+- spin_unlock(&phy_fixed_addr_lock);
+- return ERR_PTR(-ENOSPC);
+- }
+- phy_addr = phy_fixed_addr++;
+- spin_unlock(&phy_fixed_addr_lock);
+-
+- ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+- if (ret < 0)
+- return ERR_PTR(ret);
+-
+- phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+- if (!phy || IS_ERR(phy)) {
+- fixed_phy_del(phy_addr);
+- return ERR_PTR(-EINVAL);
+- }
+-
+- of_node_get(np);
+- phy->dev.of_node = np;
+-
+- ret = phy_device_register(phy);
+- if (ret) {
+- phy_device_free(phy);
+- of_node_put(np);
+- fixed_phy_del(phy_addr);
+- return ERR_PTR(ret);
+- }
+-
+- return phy;
+-}
+-
+-static int __init fixed_mdio_bus_init(void)
+-{
+- struct fixed_mdio_bus *fmb = &platform_fmb;
+- int ret;
+-
+- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
+- if (IS_ERR(pdev)) {
+- ret = PTR_ERR(pdev);
+- goto err_pdev;
+- }
+-
+- fmb->mii_bus = mdiobus_alloc();
+- if (fmb->mii_bus == NULL) {
+- ret = -ENOMEM;
+- goto err_mdiobus_reg;
+- }
+-
+- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
+- fmb->mii_bus->name = "Fixed MDIO Bus";
+- fmb->mii_bus->priv = fmb;
+- fmb->mii_bus->parent = &pdev->dev;
+- fmb->mii_bus->read = &fixed_mdio_read;
+- fmb->mii_bus->write = &fixed_mdio_write;
+- fmb->mii_bus->irq = fmb->irqs;
+-
+- ret = mdiobus_register(fmb->mii_bus);
+- if (ret)
+- goto err_mdiobus_alloc;
+-
+- return 0;
+-
+-err_mdiobus_alloc:
+- mdiobus_free(fmb->mii_bus);
+-err_mdiobus_reg:
+- platform_device_unregister(pdev);
+-err_pdev:
+- return ret;
+-}
+-module_init(fixed_mdio_bus_init);
+-
+-static void __exit fixed_mdio_bus_exit(void)
+-{
+- struct fixed_mdio_bus *fmb = &platform_fmb;
+- struct fixed_phy *fp, *tmp;
+-
+- mdiobus_unregister(fmb->mii_bus);
+- mdiobus_free(fmb->mii_bus);
+- platform_device_unregister(pdev);
+-
+- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+- list_del(&fp->node);
+- kfree(fp);
+- }
+-}
+-module_exit(fixed_mdio_bus_exit);
+-
+-MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)");
+-MODULE_AUTHOR("Vitaly Bordug");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
+new file mode 100644
+index 0000000..88b8194
+--- /dev/null
++++ b/drivers/net/phy/fixed_phy.c
+@@ -0,0 +1,370 @@
++/*
++ * Fixed MDIO bus (MDIO bus emulation with fixed PHYs)
++ *
++ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
++ * Anton Vorontsov <avorontsov@ru.mvista.com>
++ *
++ * Copyright (c) 2006-2007 MontaVista Software, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/list.h>